From 293913568e6a7a86fd1479e1cff8e2ecb58d6568 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sat, 13 Apr 2024 15:44:03 +0200 Subject: Adding upstream version 16.2. Signed-off-by: Daniel Baumann --- src/test/regress/expected/advisory_lock.out | 276 + src/test/regress/expected/aggregates.out | 3142 ++++++++ src/test/regress/expected/alter_generic.out | 755 ++ src/test/regress/expected/alter_operator.out | 139 + src/test/regress/expected/alter_table.out | 4665 ++++++++++++ src/test/regress/expected/amutils.out | 254 + src/test/regress/expected/arrays.out | 2545 +++++++ src/test/regress/expected/async.out | 42 + src/test/regress/expected/bit.out | 809 ++ src/test/regress/expected/bitmapops.out | 38 + src/test/regress/expected/boolean.out | 578 ++ src/test/regress/expected/box.out | 666 ++ src/test/regress/expected/brin.out | 574 ++ src/test/regress/expected/brin_bloom.out | 428 ++ src/test/regress/expected/brin_multi.out | 589 ++ src/test/regress/expected/btree_index.out | 389 + src/test/regress/expected/case.out | 419 ++ src/test/regress/expected/char.out | 199 + src/test/regress/expected/char_1.out | 199 + src/test/regress/expected/char_2.out | 199 + src/test/regress/expected/circle.out | 125 + src/test/regress/expected/cluster.out | 667 ++ src/test/regress/expected/collate.icu.utf8.out | 2059 +++++ src/test/regress/expected/collate.icu.utf8_1.out | 9 + src/test/regress/expected/collate.linux.utf8.out | 1174 +++ src/test/regress/expected/collate.linux.utf8_1.out | 11 + src/test/regress/expected/collate.out | 776 ++ .../regress/expected/collate.windows.win1252.out | 1000 +++ .../regress/expected/collate.windows.win1252_1.out | 13 + src/test/regress/expected/combocid.out | 169 + src/test/regress/expected/comments.out | 65 + src/test/regress/expected/compression.out | 362 + src/test/regress/expected/compression_1.out | 356 + src/test/regress/expected/constraints.out | 789 ++ src/test/regress/expected/conversion.out | 734 ++ src/test/regress/expected/copy.out | 296 + src/test/regress/expected/copy2.out | 780 ++ src/test/regress/expected/copydml.out | 112 + src/test/regress/expected/copyselect.out | 161 + src/test/regress/expected/create_aggregate.out | 324 + src/test/regress/expected/create_am.out | 390 + src/test/regress/expected/create_cast.out | 103 + src/test/regress/expected/create_function_c.out | 36 + src/test/regress/expected/create_function_sql.out | 743 ++ src/test/regress/expected/create_index.out | 2848 +++++++ src/test/regress/expected/create_index_spgist.out | 1371 ++++ src/test/regress/expected/create_misc.out | 487 ++ src/test/regress/expected/create_operator.out | 285 + src/test/regress/expected/create_procedure.out | 383 + src/test/regress/expected/create_role.out | 261 + src/test/regress/expected/create_schema.out | 98 + src/test/regress/expected/create_table.out | 1120 +++ src/test/regress/expected/create_table_like.out | 520 ++ src/test/regress/expected/create_type.out | 407 + src/test/regress/expected/create_view.out | 2313 ++++++ src/test/regress/expected/date.out | 1534 ++++ src/test/regress/expected/dbsize.out | 195 + src/test/regress/expected/delete.out | 33 + src/test/regress/expected/dependency.out | 154 + src/test/regress/expected/domain.out | 1209 +++ src/test/regress/expected/drop_if_exists.out | 342 + src/test/regress/expected/drop_operator.out | 61 + src/test/regress/expected/enum.out | 719 ++ src/test/regress/expected/equivclass.out | 453 ++ src/test/regress/expected/errors.out | 447 ++ src/test/regress/expected/event_trigger.out | 616 ++ 
src/test/regress/expected/explain.out | 561 ++ src/test/regress/expected/expressions.out | 423 ++ src/test/regress/expected/fast_default.out | 861 +++ .../regress/expected/float4-misrounded-input.out | 986 +++ src/test/regress/expected/float4.out | 986 +++ src/test/regress/expected/float8.out | 1444 ++++ src/test/regress/expected/foreign_data.out | 2105 ++++++ src/test/regress/expected/foreign_key.out | 2919 +++++++ src/test/regress/expected/functional_deps.out | 232 + src/test/regress/expected/generated.out | 1182 +++ src/test/regress/expected/geometry.out | 5322 +++++++++++++ src/test/regress/expected/gin.out | 299 + src/test/regress/expected/gist.out | 403 + src/test/regress/expected/groupingsets.out | 2153 ++++++ src/test/regress/expected/guc.out | 890 +++ src/test/regress/expected/hash_func.out | 374 + src/test/regress/expected/hash_index.out | 292 + src/test/regress/expected/hash_part.out | 114 + src/test/regress/expected/horology.out | 3528 +++++++++ src/test/regress/expected/identity.out | 616 ++ src/test/regress/expected/incremental_sort.out | 1662 ++++ src/test/regress/expected/index_including.out | 400 + src/test/regress/expected/index_including_gist.out | 166 + src/test/regress/expected/indexing.out | 1590 ++++ src/test/regress/expected/indirect_toast.out | 166 + src/test/regress/expected/inet.out | 1095 +++ src/test/regress/expected/infinite_recurse.out | 24 + src/test/regress/expected/infinite_recurse_1.out | 16 + src/test/regress/expected/inherit.out | 2794 +++++++ src/test/regress/expected/init_privs.out | 12 + src/test/regress/expected/insert.out | 982 +++ src/test/regress/expected/insert_conflict.out | 866 +++ src/test/regress/expected/int2.out | 486 ++ src/test/regress/expected/int4.out | 594 ++ src/test/regress/expected/int8.out | 1092 +++ src/test/regress/expected/interval.out | 1802 +++++ src/test/regress/expected/join.out | 7929 ++++++++++++++++++++ src/test/regress/expected/join_hash.out | 1166 +++ src/test/regress/expected/json.out | 2667 +++++++ src/test/regress/expected/json_encoding.out | 269 + src/test/regress/expected/json_encoding_1.out | 265 + src/test/regress/expected/json_encoding_2.out | 9 + src/test/regress/expected/jsonb.out | 5584 ++++++++++++++ src/test/regress/expected/jsonb_jsonpath.out | 2586 +++++++ src/test/regress/expected/jsonpath.out | 1218 +++ src/test/regress/expected/jsonpath_encoding.out | 180 + src/test/regress/expected/jsonpath_encoding_1.out | 168 + src/test/regress/expected/jsonpath_encoding_2.out | 9 + src/test/regress/expected/largeobject.out | 563 ++ src/test/regress/expected/largeobject_1.out | 563 ++ src/test/regress/expected/limit.out | 694 ++ src/test/regress/expected/line.out | 148 + src/test/regress/expected/lock.out | 252 + src/test/regress/expected/lseg.out | 57 + src/test/regress/expected/macaddr.out | 185 + src/test/regress/expected/macaddr8.out | 379 + src/test/regress/expected/matview.out | 694 ++ src/test/regress/expected/md5.out | 91 + src/test/regress/expected/memoize.out | 350 + src/test/regress/expected/merge.out | 2250 ++++++ src/test/regress/expected/misc.out | 398 + src/test/regress/expected/misc_functions.out | 644 ++ src/test/regress/expected/misc_sanity.out | 91 + src/test/regress/expected/money.out | 530 ++ src/test/regress/expected/multirangetypes.out | 3363 +++++++++ src/test/regress/expected/mvcc.out | 42 + src/test/regress/expected/name.out | 197 + src/test/regress/expected/namespace.out | 116 + src/test/regress/expected/numeric.out | 3600 +++++++++ src/test/regress/expected/numeric_big.out | 2082 +++++ 
src/test/regress/expected/numerology.out | 464 ++ src/test/regress/expected/object_address.out | 638 ++ src/test/regress/expected/oid.out | 184 + src/test/regress/expected/oidjoins.out | 268 + src/test/regress/expected/opr_sanity.out | 2301 ++++++ src/test/regress/expected/partition_aggregate.out | 1520 ++++ src/test/regress/expected/partition_info.out | 351 + src/test/regress/expected/partition_join.out | 5134 +++++++++++++ src/test/regress/expected/partition_prune.out | 4287 +++++++++++ src/test/regress/expected/password.out | 149 + src/test/regress/expected/path.out | 107 + src/test/regress/expected/pg_lsn.out | 270 + src/test/regress/expected/plancache.out | 400 + src/test/regress/expected/plpgsql.out | 5827 ++++++++++++++ src/test/regress/expected/point.out | 478 ++ src/test/regress/expected/polygon.out | 333 + src/test/regress/expected/polymorphism.out | 2098 ++++++ src/test/regress/expected/portals.out | 1563 ++++ src/test/regress/expected/portals_p2.out | 122 + src/test/regress/expected/prepare.out | 194 + src/test/regress/expected/prepared_xacts.out | 270 + src/test/regress/expected/prepared_xacts_1.out | 266 + src/test/regress/expected/privileges.out | 2915 +++++++ src/test/regress/expected/psql.out | 6660 ++++++++++++++++ src/test/regress/expected/psql_crosstab.out | 216 + src/test/regress/expected/publication.out | 1737 +++++ src/test/regress/expected/random.out | 178 + src/test/regress/expected/rangefuncs.out | 2487 ++++++ src/test/regress/expected/rangetypes.out | 1836 +++++ src/test/regress/expected/regex.out | 645 ++ src/test/regress/expected/regproc.out | 546 ++ src/test/regress/expected/reindex_catalog.out | 48 + src/test/regress/expected/reloptions.out | 226 + src/test/regress/expected/replica_identity.out | 270 + src/test/regress/expected/returning.out | 357 + src/test/regress/expected/roleattributes.out | 249 + src/test/regress/expected/rowsecurity.out | 4547 +++++++++++ src/test/regress/expected/rowtypes.out | 1342 ++++ src/test/regress/expected/rules.out | 3752 +++++++++ src/test/regress/expected/sanity_check.out | 56 + src/test/regress/expected/security_label.out | 44 + src/test/regress/expected/select.out | 970 +++ src/test/regress/expected/select_distinct.out | 446 ++ src/test/regress/expected/select_distinct_on.out | 125 + src/test/regress/expected/select_having.out | 93 + src/test/regress/expected/select_having_1.out | 93 + src/test/regress/expected/select_having_2.out | 93 + src/test/regress/expected/select_implicit.out | 338 + src/test/regress/expected/select_implicit_1.out | 338 + src/test/regress/expected/select_implicit_2.out | 338 + src/test/regress/expected/select_into.out | 222 + src/test/regress/expected/select_parallel.out | 1221 +++ src/test/regress/expected/select_views.out | 1552 ++++ src/test/regress/expected/sequence.out | 841 +++ src/test/regress/expected/spgist.out | 96 + src/test/regress/expected/sqljson.out | 948 +++ src/test/regress/expected/stats.out | 1631 ++++ src/test/regress/expected/stats_ext.out | 3292 ++++++++ src/test/regress/expected/strings.out | 2605 +++++++ src/test/regress/expected/subscription.out | 480 ++ src/test/regress/expected/subselect.out | 1928 +++++ src/test/regress/expected/sysviews.out | 169 + src/test/regress/expected/tablesample.out | 331 + src/test/regress/expected/tablespace.out | 968 +++ src/test/regress/expected/temp.out | 412 + src/test/regress/expected/test_setup.out | 245 + src/test/regress/expected/text.out | 438 ++ src/test/regress/expected/tid.out | 121 + src/test/regress/expected/tidrangescan.out | 300 + 
src/test/regress/expected/tidscan.out | 296 + src/test/regress/expected/time.out | 231 + src/test/regress/expected/timestamp.out | 2127 ++++++ src/test/regress/expected/timestamptz.out | 3156 ++++++++ src/test/regress/expected/timetz.out | 324 + src/test/regress/expected/transactions.out | 1198 +++ src/test/regress/expected/triggers.out | 3711 +++++++++ src/test/regress/expected/truncate.out | 594 ++ src/test/regress/expected/tsdicts.out | 723 ++ src/test/regress/expected/tsearch.out | 3007 ++++++++ src/test/regress/expected/tsrf.out | 712 ++ src/test/regress/expected/tstypes.out | 1444 ++++ src/test/regress/expected/tuplesort.out | 692 ++ src/test/regress/expected/txid.out | 327 + src/test/regress/expected/type_sanity.out | 742 ++ src/test/regress/expected/typed_table.out | 133 + src/test/regress/expected/unicode.out | 89 + src/test/regress/expected/unicode_1.out | 3 + src/test/regress/expected/union.out | 1434 ++++ src/test/regress/expected/updatable_views.out | 3366 +++++++++ src/test/regress/expected/update.out | 1028 +++ src/test/regress/expected/uuid.out | 172 + src/test/regress/expected/vacuum.out | 510 ++ src/test/regress/expected/vacuum_parallel.out | 49 + src/test/regress/expected/varchar.out | 132 + src/test/regress/expected/varchar_1.out | 132 + src/test/regress/expected/varchar_2.out | 132 + src/test/regress/expected/window.out | 4878 ++++++++++++ src/test/regress/expected/with.out | 3532 +++++++++ src/test/regress/expected/write_parallel.out | 80 + src/test/regress/expected/xid.out | 530 ++ src/test/regress/expected/xml.out | 1787 +++++ src/test/regress/expected/xml_1.out | 1404 ++++ src/test/regress/expected/xml_2.out | 1767 +++++ src/test/regress/expected/xmlmap.out | 1305 ++++ src/test/regress/expected/xmlmap_1.out | 107 + 241 files changed, 240614 insertions(+) create mode 100644 src/test/regress/expected/advisory_lock.out create mode 100644 src/test/regress/expected/aggregates.out create mode 100644 src/test/regress/expected/alter_generic.out create mode 100644 src/test/regress/expected/alter_operator.out create mode 100644 src/test/regress/expected/alter_table.out create mode 100644 src/test/regress/expected/amutils.out create mode 100644 src/test/regress/expected/arrays.out create mode 100644 src/test/regress/expected/async.out create mode 100644 src/test/regress/expected/bit.out create mode 100644 src/test/regress/expected/bitmapops.out create mode 100644 src/test/regress/expected/boolean.out create mode 100644 src/test/regress/expected/box.out create mode 100644 src/test/regress/expected/brin.out create mode 100644 src/test/regress/expected/brin_bloom.out create mode 100644 src/test/regress/expected/brin_multi.out create mode 100644 src/test/regress/expected/btree_index.out create mode 100644 src/test/regress/expected/case.out create mode 100644 src/test/regress/expected/char.out create mode 100644 src/test/regress/expected/char_1.out create mode 100644 src/test/regress/expected/char_2.out create mode 100644 src/test/regress/expected/circle.out create mode 100644 src/test/regress/expected/cluster.out create mode 100644 src/test/regress/expected/collate.icu.utf8.out create mode 100644 src/test/regress/expected/collate.icu.utf8_1.out create mode 100644 src/test/regress/expected/collate.linux.utf8.out create mode 100644 src/test/regress/expected/collate.linux.utf8_1.out create mode 100644 src/test/regress/expected/collate.out create mode 100644 src/test/regress/expected/collate.windows.win1252.out create mode 100644 
src/test/regress/expected/collate.windows.win1252_1.out create mode 100644 src/test/regress/expected/combocid.out create mode 100644 src/test/regress/expected/comments.out create mode 100644 src/test/regress/expected/compression.out create mode 100644 src/test/regress/expected/compression_1.out create mode 100644 src/test/regress/expected/constraints.out create mode 100644 src/test/regress/expected/conversion.out create mode 100644 src/test/regress/expected/copy.out create mode 100644 src/test/regress/expected/copy2.out create mode 100644 src/test/regress/expected/copydml.out create mode 100644 src/test/regress/expected/copyselect.out create mode 100644 src/test/regress/expected/create_aggregate.out create mode 100644 src/test/regress/expected/create_am.out create mode 100644 src/test/regress/expected/create_cast.out create mode 100644 src/test/regress/expected/create_function_c.out create mode 100644 src/test/regress/expected/create_function_sql.out create mode 100644 src/test/regress/expected/create_index.out create mode 100644 src/test/regress/expected/create_index_spgist.out create mode 100644 src/test/regress/expected/create_misc.out create mode 100644 src/test/regress/expected/create_operator.out create mode 100644 src/test/regress/expected/create_procedure.out create mode 100644 src/test/regress/expected/create_role.out create mode 100644 src/test/regress/expected/create_schema.out create mode 100644 src/test/regress/expected/create_table.out create mode 100644 src/test/regress/expected/create_table_like.out create mode 100644 src/test/regress/expected/create_type.out create mode 100644 src/test/regress/expected/create_view.out create mode 100644 src/test/regress/expected/date.out create mode 100644 src/test/regress/expected/dbsize.out create mode 100644 src/test/regress/expected/delete.out create mode 100644 src/test/regress/expected/dependency.out create mode 100644 src/test/regress/expected/domain.out create mode 100644 src/test/regress/expected/drop_if_exists.out create mode 100644 src/test/regress/expected/drop_operator.out create mode 100644 src/test/regress/expected/enum.out create mode 100644 src/test/regress/expected/equivclass.out create mode 100644 src/test/regress/expected/errors.out create mode 100644 src/test/regress/expected/event_trigger.out create mode 100644 src/test/regress/expected/explain.out create mode 100644 src/test/regress/expected/expressions.out create mode 100644 src/test/regress/expected/fast_default.out create mode 100644 src/test/regress/expected/float4-misrounded-input.out create mode 100644 src/test/regress/expected/float4.out create mode 100644 src/test/regress/expected/float8.out create mode 100644 src/test/regress/expected/foreign_data.out create mode 100644 src/test/regress/expected/foreign_key.out create mode 100644 src/test/regress/expected/functional_deps.out create mode 100644 src/test/regress/expected/generated.out create mode 100644 src/test/regress/expected/geometry.out create mode 100644 src/test/regress/expected/gin.out create mode 100644 src/test/regress/expected/gist.out create mode 100644 src/test/regress/expected/groupingsets.out create mode 100644 src/test/regress/expected/guc.out create mode 100644 src/test/regress/expected/hash_func.out create mode 100644 src/test/regress/expected/hash_index.out create mode 100644 src/test/regress/expected/hash_part.out create mode 100644 src/test/regress/expected/horology.out create mode 100644 src/test/regress/expected/identity.out create mode 100644 
src/test/regress/expected/incremental_sort.out create mode 100644 src/test/regress/expected/index_including.out create mode 100644 src/test/regress/expected/index_including_gist.out create mode 100644 src/test/regress/expected/indexing.out create mode 100644 src/test/regress/expected/indirect_toast.out create mode 100644 src/test/regress/expected/inet.out create mode 100644 src/test/regress/expected/infinite_recurse.out create mode 100644 src/test/regress/expected/infinite_recurse_1.out create mode 100644 src/test/regress/expected/inherit.out create mode 100644 src/test/regress/expected/init_privs.out create mode 100644 src/test/regress/expected/insert.out create mode 100644 src/test/regress/expected/insert_conflict.out create mode 100644 src/test/regress/expected/int2.out create mode 100644 src/test/regress/expected/int4.out create mode 100644 src/test/regress/expected/int8.out create mode 100644 src/test/regress/expected/interval.out create mode 100644 src/test/regress/expected/join.out create mode 100644 src/test/regress/expected/join_hash.out create mode 100644 src/test/regress/expected/json.out create mode 100644 src/test/regress/expected/json_encoding.out create mode 100644 src/test/regress/expected/json_encoding_1.out create mode 100644 src/test/regress/expected/json_encoding_2.out create mode 100644 src/test/regress/expected/jsonb.out create mode 100644 src/test/regress/expected/jsonb_jsonpath.out create mode 100644 src/test/regress/expected/jsonpath.out create mode 100644 src/test/regress/expected/jsonpath_encoding.out create mode 100644 src/test/regress/expected/jsonpath_encoding_1.out create mode 100644 src/test/regress/expected/jsonpath_encoding_2.out create mode 100644 src/test/regress/expected/largeobject.out create mode 100644 src/test/regress/expected/largeobject_1.out create mode 100644 src/test/regress/expected/limit.out create mode 100644 src/test/regress/expected/line.out create mode 100644 src/test/regress/expected/lock.out create mode 100644 src/test/regress/expected/lseg.out create mode 100644 src/test/regress/expected/macaddr.out create mode 100644 src/test/regress/expected/macaddr8.out create mode 100644 src/test/regress/expected/matview.out create mode 100644 src/test/regress/expected/md5.out create mode 100644 src/test/regress/expected/memoize.out create mode 100644 src/test/regress/expected/merge.out create mode 100644 src/test/regress/expected/misc.out create mode 100644 src/test/regress/expected/misc_functions.out create mode 100644 src/test/regress/expected/misc_sanity.out create mode 100644 src/test/regress/expected/money.out create mode 100644 src/test/regress/expected/multirangetypes.out create mode 100644 src/test/regress/expected/mvcc.out create mode 100644 src/test/regress/expected/name.out create mode 100644 src/test/regress/expected/namespace.out create mode 100644 src/test/regress/expected/numeric.out create mode 100644 src/test/regress/expected/numeric_big.out create mode 100644 src/test/regress/expected/numerology.out create mode 100644 src/test/regress/expected/object_address.out create mode 100644 src/test/regress/expected/oid.out create mode 100644 src/test/regress/expected/oidjoins.out create mode 100644 src/test/regress/expected/opr_sanity.out create mode 100644 src/test/regress/expected/partition_aggregate.out create mode 100644 src/test/regress/expected/partition_info.out create mode 100644 src/test/regress/expected/partition_join.out create mode 100644 src/test/regress/expected/partition_prune.out create mode 100644 
src/test/regress/expected/password.out create mode 100644 src/test/regress/expected/path.out create mode 100644 src/test/regress/expected/pg_lsn.out create mode 100644 src/test/regress/expected/plancache.out create mode 100644 src/test/regress/expected/plpgsql.out create mode 100644 src/test/regress/expected/point.out create mode 100644 src/test/regress/expected/polygon.out create mode 100644 src/test/regress/expected/polymorphism.out create mode 100644 src/test/regress/expected/portals.out create mode 100644 src/test/regress/expected/portals_p2.out create mode 100644 src/test/regress/expected/prepare.out create mode 100644 src/test/regress/expected/prepared_xacts.out create mode 100644 src/test/regress/expected/prepared_xacts_1.out create mode 100644 src/test/regress/expected/privileges.out create mode 100644 src/test/regress/expected/psql.out create mode 100644 src/test/regress/expected/psql_crosstab.out create mode 100644 src/test/regress/expected/publication.out create mode 100644 src/test/regress/expected/random.out create mode 100644 src/test/regress/expected/rangefuncs.out create mode 100644 src/test/regress/expected/rangetypes.out create mode 100644 src/test/regress/expected/regex.out create mode 100644 src/test/regress/expected/regproc.out create mode 100644 src/test/regress/expected/reindex_catalog.out create mode 100644 src/test/regress/expected/reloptions.out create mode 100644 src/test/regress/expected/replica_identity.out create mode 100644 src/test/regress/expected/returning.out create mode 100644 src/test/regress/expected/roleattributes.out create mode 100644 src/test/regress/expected/rowsecurity.out create mode 100644 src/test/regress/expected/rowtypes.out create mode 100644 src/test/regress/expected/rules.out create mode 100644 src/test/regress/expected/sanity_check.out create mode 100644 src/test/regress/expected/security_label.out create mode 100644 src/test/regress/expected/select.out create mode 100644 src/test/regress/expected/select_distinct.out create mode 100644 src/test/regress/expected/select_distinct_on.out create mode 100644 src/test/regress/expected/select_having.out create mode 100644 src/test/regress/expected/select_having_1.out create mode 100644 src/test/regress/expected/select_having_2.out create mode 100644 src/test/regress/expected/select_implicit.out create mode 100644 src/test/regress/expected/select_implicit_1.out create mode 100644 src/test/regress/expected/select_implicit_2.out create mode 100644 src/test/regress/expected/select_into.out create mode 100644 src/test/regress/expected/select_parallel.out create mode 100644 src/test/regress/expected/select_views.out create mode 100644 src/test/regress/expected/sequence.out create mode 100644 src/test/regress/expected/spgist.out create mode 100644 src/test/regress/expected/sqljson.out create mode 100644 src/test/regress/expected/stats.out create mode 100644 src/test/regress/expected/stats_ext.out create mode 100644 src/test/regress/expected/strings.out create mode 100644 src/test/regress/expected/subscription.out create mode 100644 src/test/regress/expected/subselect.out create mode 100644 src/test/regress/expected/sysviews.out create mode 100644 src/test/regress/expected/tablesample.out create mode 100644 src/test/regress/expected/tablespace.out create mode 100644 src/test/regress/expected/temp.out create mode 100644 src/test/regress/expected/test_setup.out create mode 100644 src/test/regress/expected/text.out create mode 100644 src/test/regress/expected/tid.out create mode 100644 
src/test/regress/expected/tidrangescan.out create mode 100644 src/test/regress/expected/tidscan.out create mode 100644 src/test/regress/expected/time.out create mode 100644 src/test/regress/expected/timestamp.out create mode 100644 src/test/regress/expected/timestamptz.out create mode 100644 src/test/regress/expected/timetz.out create mode 100644 src/test/regress/expected/transactions.out create mode 100644 src/test/regress/expected/triggers.out create mode 100644 src/test/regress/expected/truncate.out create mode 100644 src/test/regress/expected/tsdicts.out create mode 100644 src/test/regress/expected/tsearch.out create mode 100644 src/test/regress/expected/tsrf.out create mode 100644 src/test/regress/expected/tstypes.out create mode 100644 src/test/regress/expected/tuplesort.out create mode 100644 src/test/regress/expected/txid.out create mode 100644 src/test/regress/expected/type_sanity.out create mode 100644 src/test/regress/expected/typed_table.out create mode 100644 src/test/regress/expected/unicode.out create mode 100644 src/test/regress/expected/unicode_1.out create mode 100644 src/test/regress/expected/union.out create mode 100644 src/test/regress/expected/updatable_views.out create mode 100644 src/test/regress/expected/update.out create mode 100644 src/test/regress/expected/uuid.out create mode 100644 src/test/regress/expected/vacuum.out create mode 100644 src/test/regress/expected/vacuum_parallel.out create mode 100644 src/test/regress/expected/varchar.out create mode 100644 src/test/regress/expected/varchar_1.out create mode 100644 src/test/regress/expected/varchar_2.out create mode 100644 src/test/regress/expected/window.out create mode 100644 src/test/regress/expected/with.out create mode 100644 src/test/regress/expected/write_parallel.out create mode 100644 src/test/regress/expected/xid.out create mode 100644 src/test/regress/expected/xml.out create mode 100644 src/test/regress/expected/xml_1.out create mode 100644 src/test/regress/expected/xml_2.out create mode 100644 src/test/regress/expected/xmlmap.out create mode 100644 src/test/regress/expected/xmlmap_1.out (limited to 'src/test/regress/expected') diff --git a/src/test/regress/expected/advisory_lock.out b/src/test/regress/expected/advisory_lock.out new file mode 100644 index 0000000..02e0776 --- /dev/null +++ b/src/test/regress/expected/advisory_lock.out @@ -0,0 +1,276 @@ +-- +-- ADVISORY LOCKS +-- +SELECT oid AS datoid FROM pg_database WHERE datname = current_database() \gset +BEGIN; +SELECT + pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), + pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); + pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared +-----------------------+------------------------------+-----------------------+------------------------------ + | | | +(1 row) + +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +-- pg_advisory_unlock_all() shouldn't release xact locks +SELECT pg_advisory_unlock_all(); + pg_advisory_unlock_all +------------------------ + +(1 row) + +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' 
AND database = :datoid; + count +------- + 4 +(1 row) + +-- can't unlock xact locks +SELECT + pg_advisory_unlock(1), pg_advisory_unlock_shared(2), + pg_advisory_unlock(1, 1), pg_advisory_unlock_shared(2, 2); +WARNING: you don't own a lock of type ExclusiveLock +WARNING: you don't own a lock of type ShareLock +WARNING: you don't own a lock of type ExclusiveLock +WARNING: you don't own a lock of type ShareLock + pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock_shared +--------------------+---------------------------+--------------------+--------------------------- + f | f | f | f +(1 row) + +-- automatically release xact locks at commit +COMMIT; +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; + count +------- + 0 +(1 row) + +BEGIN; +-- holding both session and xact locks on the same objects, xact first +SELECT + pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), + pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); + pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared +-----------------------+------------------------------+-----------------------+------------------------------ + | | | +(1 row) + +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +SELECT + pg_advisory_lock(1), pg_advisory_lock_shared(2), + pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2); + pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared +------------------+-------------------------+------------------+------------------------- + | | | +(1 row) + +ROLLBACK; +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +-- unlocking session locks +SELECT + pg_advisory_unlock(1), pg_advisory_unlock(1), + pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2), + pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1), + pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2); +WARNING: you don't own a lock of type ExclusiveLock +WARNING: you don't own a lock of type ShareLock +WARNING: you don't own a lock of type ExclusiveLock +WARNING: you don't own a lock of type ShareLock + pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared +--------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+--------------------------- + t | f | t | f | t | f | t | f +(1 row) + +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; + count +------- + 0 +(1 row) + +BEGIN; +-- 
holding both session and xact locks on the same objects, session first +SELECT + pg_advisory_lock(1), pg_advisory_lock_shared(2), + pg_advisory_lock(1, 1), pg_advisory_lock_shared(2, 2); + pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock_shared +------------------+-------------------------+------------------+------------------------- + | | | +(1 row) + +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +SELECT + pg_advisory_xact_lock(1), pg_advisory_xact_lock_shared(2), + pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock_shared(2, 2); + pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock_shared +-----------------------+------------------------------+-----------------------+------------------------------ + | | | +(1 row) + +ROLLBACK; +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +-- releasing all session locks +SELECT pg_advisory_unlock_all(); + pg_advisory_unlock_all +------------------------ + +(1 row) + +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; + count +------- + 0 +(1 row) + +BEGIN; +-- grabbing txn locks multiple times +SELECT + pg_advisory_xact_lock(1), pg_advisory_xact_lock(1), + pg_advisory_xact_lock_shared(2), pg_advisory_xact_lock_shared(2), + pg_advisory_xact_lock(1, 1), pg_advisory_xact_lock(1, 1), + pg_advisory_xact_lock_shared(2, 2), pg_advisory_xact_lock_shared(2, 2); + pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared | pg_advisory_xact_lock | pg_advisory_xact_lock | pg_advisory_xact_lock_shared | pg_advisory_xact_lock_shared +-----------------------+-----------------------+------------------------------+------------------------------+-----------------------+-----------------------+------------------------------+------------------------------ + | | | | | | | +(1 row) + +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +COMMIT; +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; + count +------- + 0 +(1 row) + +-- grabbing session locks multiple times +SELECT + pg_advisory_lock(1), pg_advisory_lock(1), + pg_advisory_lock_shared(2), pg_advisory_lock_shared(2), + pg_advisory_lock(1, 1), pg_advisory_lock(1, 1), + pg_advisory_lock_shared(2, 2), 
pg_advisory_lock_shared(2, 2); + pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared +------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+------------------------- + | | | | | | | +(1 row) + +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +SELECT + pg_advisory_unlock(1), pg_advisory_unlock(1), + pg_advisory_unlock_shared(2), pg_advisory_unlock_shared(2), + pg_advisory_unlock(1, 1), pg_advisory_unlock(1, 1), + pg_advisory_unlock_shared(2, 2), pg_advisory_unlock_shared(2, 2); + pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared | pg_advisory_unlock | pg_advisory_unlock | pg_advisory_unlock_shared | pg_advisory_unlock_shared +--------------------+--------------------+---------------------------+---------------------------+--------------------+--------------------+---------------------------+--------------------------- + t | t | t | t | t | t | t | t +(1 row) + +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; + count +------- + 0 +(1 row) + +-- .. and releasing them all at once +SELECT + pg_advisory_lock(1), pg_advisory_lock(1), + pg_advisory_lock_shared(2), pg_advisory_lock_shared(2), + pg_advisory_lock(1, 1), pg_advisory_lock(1, 1), + pg_advisory_lock_shared(2, 2), pg_advisory_lock_shared(2, 2); + pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared | pg_advisory_lock | pg_advisory_lock | pg_advisory_lock_shared | pg_advisory_lock_shared +------------------+------------------+-------------------------+-------------------------+------------------+------------------+-------------------------+------------------------- + | | | | | | | +(1 row) + +SELECT locktype, classid, objid, objsubid, mode, granted + FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid + ORDER BY classid, objid, objsubid; + locktype | classid | objid | objsubid | mode | granted +----------+---------+-------+----------+---------------+--------- + advisory | 0 | 1 | 1 | ExclusiveLock | t + advisory | 0 | 2 | 1 | ShareLock | t + advisory | 1 | 1 | 2 | ExclusiveLock | t + advisory | 2 | 2 | 2 | ShareLock | t +(4 rows) + +SELECT pg_advisory_unlock_all(); + pg_advisory_unlock_all +------------------------ + +(1 row) + +SELECT count(*) FROM pg_locks WHERE locktype = 'advisory' AND database = :datoid; + count +------- + 0 +(1 row) + diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out new file mode 100644 index 0000000..f635c5a --- /dev/null +++ b/src/test/regress/expected/aggregates.out @@ -0,0 +1,3142 @@ +-- +-- AGGREGATES +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +-- avoid bit-exact output here because operations may not be bit-exact. 
+SET extra_float_digits = 0; +-- prepare some test data +CREATE TABLE aggtest ( + a int2, + b float4 +); +\set filename :abs_srcdir '/data/agg.data' +COPY aggtest FROM :'filename'; +ANALYZE aggtest; +SELECT avg(four) AS avg_1 FROM onek; + avg_1 +-------------------- + 1.5000000000000000 +(1 row) + +SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100; + avg_32 +--------------------- + 32.6666666666666667 +(1 row) + +SELECT any_value(v) FROM (VALUES (1), (2), (3)) AS v (v); + any_value +----------- + 1 +(1 row) + +SELECT any_value(v) FROM (VALUES (NULL)) AS v (v); + any_value +----------- + +(1 row) + +SELECT any_value(v) FROM (VALUES (NULL), (1), (2)) AS v (v); + any_value +----------- + 1 +(1 row) + +SELECT any_value(v) FROM (VALUES (array['hello', 'world'])) AS v (v); + any_value +--------------- + {hello,world} +(1 row) + +-- In 7.1, avg(float4) is computed using float8 arithmetic. +-- Round the result to 3 digits to avoid platform-specific results. +SELECT avg(b)::numeric(10,3) AS avg_107_943 FROM aggtest; + avg_107_943 +------------- + 107.943 +(1 row) + +SELECT avg(gpa) AS avg_3_4 FROM ONLY student; + avg_3_4 +--------- + 3.4 +(1 row) + +SELECT sum(four) AS sum_1500 FROM onek; + sum_1500 +---------- + 1500 +(1 row) + +SELECT sum(a) AS sum_198 FROM aggtest; + sum_198 +--------- + 198 +(1 row) + +SELECT sum(b) AS avg_431_773 FROM aggtest; + avg_431_773 +------------- + 431.773 +(1 row) + +SELECT sum(gpa) AS avg_6_8 FROM ONLY student; + avg_6_8 +--------- + 6.8 +(1 row) + +SELECT max(four) AS max_3 FROM onek; + max_3 +------- + 3 +(1 row) + +SELECT max(a) AS max_100 FROM aggtest; + max_100 +--------- + 100 +(1 row) + +SELECT max(aggtest.b) AS max_324_78 FROM aggtest; + max_324_78 +------------ + 324.78 +(1 row) + +SELECT max(student.gpa) AS max_3_7 FROM student; + max_3_7 +--------- + 3.7 +(1 row) + +SELECT stddev_pop(b) FROM aggtest; + stddev_pop +----------------- + 131.10703231895 +(1 row) + +SELECT stddev_samp(b) FROM aggtest; + stddev_samp +------------------ + 151.389360803998 +(1 row) + +SELECT var_pop(b) FROM aggtest; + var_pop +------------------ + 17189.0539234823 +(1 row) + +SELECT var_samp(b) FROM aggtest; + var_samp +------------------ + 22918.7385646431 +(1 row) + +SELECT stddev_pop(b::numeric) FROM aggtest; + stddev_pop +------------------ + 131.107032862199 +(1 row) + +SELECT stddev_samp(b::numeric) FROM aggtest; + stddev_samp +------------------ + 151.389361431288 +(1 row) + +SELECT var_pop(b::numeric) FROM aggtest; + var_pop +-------------------- + 17189.054065929769 +(1 row) + +SELECT var_samp(b::numeric) FROM aggtest; + var_samp +-------------------- + 22918.738754573025 +(1 row) + +-- population variance is defined for a single tuple, sample variance +-- is not +SELECT var_pop(1.0::float8), var_samp(2.0::float8); + var_pop | var_samp +---------+---------- + 0 | +(1 row) + +SELECT stddev_pop(3.0::float8), stddev_samp(4.0::float8); + stddev_pop | stddev_samp +------------+------------- + 0 | +(1 row) + +SELECT var_pop('inf'::float8), var_samp('inf'::float8); + var_pop | var_samp +---------+---------- + NaN | +(1 row) + +SELECT stddev_pop('inf'::float8), stddev_samp('inf'::float8); + stddev_pop | stddev_samp +------------+------------- + NaN | +(1 row) + +SELECT var_pop('nan'::float8), var_samp('nan'::float8); + var_pop | var_samp +---------+---------- + NaN | +(1 row) + +SELECT stddev_pop('nan'::float8), stddev_samp('nan'::float8); + stddev_pop | stddev_samp +------------+------------- + NaN | +(1 row) + +SELECT var_pop(1.0::float4), var_samp(2.0::float4); + var_pop | 
var_samp +---------+---------- + 0 | +(1 row) + +SELECT stddev_pop(3.0::float4), stddev_samp(4.0::float4); + stddev_pop | stddev_samp +------------+------------- + 0 | +(1 row) + +SELECT var_pop('inf'::float4), var_samp('inf'::float4); + var_pop | var_samp +---------+---------- + NaN | +(1 row) + +SELECT stddev_pop('inf'::float4), stddev_samp('inf'::float4); + stddev_pop | stddev_samp +------------+------------- + NaN | +(1 row) + +SELECT var_pop('nan'::float4), var_samp('nan'::float4); + var_pop | var_samp +---------+---------- + NaN | +(1 row) + +SELECT stddev_pop('nan'::float4), stddev_samp('nan'::float4); + stddev_pop | stddev_samp +------------+------------- + NaN | +(1 row) + +SELECT var_pop(1.0::numeric), var_samp(2.0::numeric); + var_pop | var_samp +---------+---------- + 0 | +(1 row) + +SELECT stddev_pop(3.0::numeric), stddev_samp(4.0::numeric); + stddev_pop | stddev_samp +------------+------------- + 0 | +(1 row) + +SELECT var_pop('inf'::numeric), var_samp('inf'::numeric); + var_pop | var_samp +---------+---------- + NaN | +(1 row) + +SELECT stddev_pop('inf'::numeric), stddev_samp('inf'::numeric); + stddev_pop | stddev_samp +------------+------------- + NaN | +(1 row) + +SELECT var_pop('nan'::numeric), var_samp('nan'::numeric); + var_pop | var_samp +---------+---------- + NaN | +(1 row) + +SELECT stddev_pop('nan'::numeric), stddev_samp('nan'::numeric); + stddev_pop | stddev_samp +------------+------------- + NaN | +(1 row) + +-- verify correct results for null and NaN inputs +select sum(null::int4) from generate_series(1,3); + sum +----- + +(1 row) + +select sum(null::int8) from generate_series(1,3); + sum +----- + +(1 row) + +select sum(null::numeric) from generate_series(1,3); + sum +----- + +(1 row) + +select sum(null::float8) from generate_series(1,3); + sum +----- + +(1 row) + +select avg(null::int4) from generate_series(1,3); + avg +----- + +(1 row) + +select avg(null::int8) from generate_series(1,3); + avg +----- + +(1 row) + +select avg(null::numeric) from generate_series(1,3); + avg +----- + +(1 row) + +select avg(null::float8) from generate_series(1,3); + avg +----- + +(1 row) + +select sum('NaN'::numeric) from generate_series(1,3); + sum +----- + NaN +(1 row) + +select avg('NaN'::numeric) from generate_series(1,3); + avg +----- + NaN +(1 row) + +-- verify correct results for infinite inputs +SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) +FROM (VALUES ('1'), ('infinity')) v(x); + sum | avg | var_pop +----------+----------+--------- + Infinity | Infinity | NaN +(1 row) + +SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) +FROM (VALUES ('infinity'), ('1')) v(x); + sum | avg | var_pop +----------+----------+--------- + Infinity | Infinity | NaN +(1 row) + +SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) +FROM (VALUES ('infinity'), ('infinity')) v(x); + sum | avg | var_pop +----------+----------+--------- + Infinity | Infinity | NaN +(1 row) + +SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) +FROM (VALUES ('-infinity'), ('infinity')) v(x); + sum | avg | var_pop +-----+-----+--------- + NaN | NaN | NaN +(1 row) + +SELECT sum(x::float8), avg(x::float8), var_pop(x::float8) +FROM (VALUES ('-infinity'), ('-infinity')) v(x); + sum | avg | var_pop +-----------+-----------+--------- + -Infinity | -Infinity | NaN +(1 row) + +SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) +FROM (VALUES ('1'), ('infinity')) v(x); + sum | avg | var_pop +----------+----------+--------- + Infinity | Infinity | NaN +(1 row) + +SELECT sum(x::numeric), 
avg(x::numeric), var_pop(x::numeric) +FROM (VALUES ('infinity'), ('1')) v(x); + sum | avg | var_pop +----------+----------+--------- + Infinity | Infinity | NaN +(1 row) + +SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) +FROM (VALUES ('infinity'), ('infinity')) v(x); + sum | avg | var_pop +----------+----------+--------- + Infinity | Infinity | NaN +(1 row) + +SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) +FROM (VALUES ('-infinity'), ('infinity')) v(x); + sum | avg | var_pop +-----+-----+--------- + NaN | NaN | NaN +(1 row) + +SELECT sum(x::numeric), avg(x::numeric), var_pop(x::numeric) +FROM (VALUES ('-infinity'), ('-infinity')) v(x); + sum | avg | var_pop +-----------+-----------+--------- + -Infinity | -Infinity | NaN +(1 row) + +-- test accuracy with a large input offset +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES (100000003), (100000004), (100000006), (100000007)) v(x); + avg | var_pop +-----------+--------- + 100000005 | 2.5 +(1 row) + +SELECT avg(x::float8), var_pop(x::float8) +FROM (VALUES (7000000000005), (7000000000007)) v(x); + avg | var_pop +---------------+--------- + 7000000000006 | 1 +(1 row) + +-- SQL2003 binary aggregates +SELECT regr_count(b, a) FROM aggtest; + regr_count +------------ + 4 +(1 row) + +SELECT regr_sxx(b, a) FROM aggtest; + regr_sxx +---------- + 5099 +(1 row) + +SELECT regr_syy(b, a) FROM aggtest; + regr_syy +------------------ + 68756.2156939293 +(1 row) + +SELECT regr_sxy(b, a) FROM aggtest; + regr_sxy +------------------ + 2614.51582155004 +(1 row) + +SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest; + regr_avgx | regr_avgy +-----------+------------------ + 49.5 | 107.943152273074 +(1 row) + +SELECT regr_r2(b, a) FROM aggtest; + regr_r2 +-------------------- + 0.0194977982031803 +(1 row) + +SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest; + regr_slope | regr_intercept +-------------------+------------------ + 0.512750700441271 | 82.5619926012309 +(1 row) + +SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest; + covar_pop | covar_samp +-----------------+------------------ + 653.62895538751 | 871.505273850014 +(1 row) + +SELECT corr(b, a) FROM aggtest; + corr +------------------- + 0.139634516517873 +(1 row) + +-- check single-tuple behavior +SELECT covar_pop(1::float8,2::float8), covar_samp(3::float8,4::float8); + covar_pop | covar_samp +-----------+------------ + 0 | +(1 row) + +SELECT covar_pop(1::float8,'inf'::float8), covar_samp(3::float8,'inf'::float8); + covar_pop | covar_samp +-----------+------------ + NaN | +(1 row) + +SELECT covar_pop(1::float8,'nan'::float8), covar_samp(3::float8,'nan'::float8); + covar_pop | covar_samp +-----------+------------ + NaN | +(1 row) + +-- test accum and combine functions directly +CREATE TABLE regr_test (x float8, y float8); +INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200); +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (10,20,30,80); + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+------+----------+---------- + 4 | 140 | 2900 | 1290 | 83075 | 15050 +(1 row) + +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test; + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+------+----------+---------- + 5 | 240 | 6280 | 1490 | 95080 | 8680 +(1 row) + +SELECT float8_accum('{4,140,2900}'::float8[], 100); + float8_accum +-------------- + {5,240,6280} +(1 row) + +SELECT 
float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100); + float8_regr_accum +------------------------------ + {5,240,6280,1490,95080,8680} +(1 row) + +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (10,20,30); + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+-----+----------+---------- + 3 | 60 | 200 | 750 | 20000 | 2000 +(1 row) + +SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x) +FROM regr_test WHERE x IN (80,100); + count | sum | regr_sxx | sum | regr_syy | regr_sxy +-------+-----+----------+-----+----------+---------- + 2 | 180 | 200 | 740 | 57800 | -3400 +(1 row) + +SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]); + float8_combine +---------------- + {3,60,200} +(1 row) + +SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]); + float8_combine +---------------- + {2,180,200} +(1 row) + +SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]); + float8_combine +---------------- + {5,240,6280} +(1 row) + +SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], + '{0,0,0,0,0,0}'::float8[]); + float8_regr_combine +--------------------------- + {3,60,200,750,20000,2000} +(1 row) + +SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[], + '{2,180,200,740,57800,-3400}'::float8[]); + float8_regr_combine +----------------------------- + {2,180,200,740,57800,-3400} +(1 row) + +SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[], + '{2,180,200,740,57800,-3400}'::float8[]); + float8_regr_combine +------------------------------ + {5,240,6280,1490,95080,8680} +(1 row) + +DROP TABLE regr_test; +-- test count, distinct +SELECT count(four) AS cnt_1000 FROM onek; + cnt_1000 +---------- + 1000 +(1 row) + +SELECT count(DISTINCT four) AS cnt_4 FROM onek; + cnt_4 +------- + 4 +(1 row) + +select ten, count(*), sum(four) from onek +group by ten order by ten; + ten | count | sum +-----+-------+----- + 0 | 100 | 100 + 1 | 100 | 200 + 2 | 100 | 100 + 3 | 100 | 200 + 4 | 100 | 100 + 5 | 100 | 200 + 6 | 100 | 100 + 7 | 100 | 200 + 8 | 100 | 100 + 9 | 100 | 200 +(10 rows) + +select ten, count(four), sum(DISTINCT four) from onek +group by ten order by ten; + ten | count | sum +-----+-------+----- + 0 | 100 | 2 + 1 | 100 | 4 + 2 | 100 | 2 + 3 | 100 | 4 + 4 | 100 | 2 + 5 | 100 | 4 + 6 | 100 | 2 + 7 | 100 | 4 + 8 | 100 | 2 + 9 | 100 | 4 +(10 rows) + +-- user-defined aggregates +SELECT newavg(four) AS avg_1 FROM onek; + avg_1 +-------------------- + 1.5000000000000000 +(1 row) + +SELECT newsum(four) AS sum_1500 FROM onek; + sum_1500 +---------- + 1500 +(1 row) + +SELECT newcnt(four) AS cnt_1000 FROM onek; + cnt_1000 +---------- + 1000 +(1 row) + +SELECT newcnt(*) AS cnt_1000 FROM onek; + cnt_1000 +---------- + 1000 +(1 row) + +SELECT oldcnt(*) AS cnt_1000 FROM onek; + cnt_1000 +---------- + 1000 +(1 row) + +SELECT sum2(q1,q2) FROM int8_tbl; + sum2 +------------------- + 18271560493827981 +(1 row) + +-- test for outer-level aggregates +-- this should work +select ten, sum(distinct four) from onek a +group by ten +having exists (select 1 from onek b where sum(distinct a.four) = b.four); + ten | sum +-----+----- + 0 | 2 + 2 | 2 + 4 | 2 + 6 | 2 + 8 | 2 +(5 rows) + +-- this should fail because subquery has an agg of its own in WHERE +select ten, sum(distinct four) from onek a +group by ten +having exists (select 1 from onek b + where sum(distinct a.four + b.four) = b.four); +ERROR: aggregate functions are not allowed in WHERE 
+LINE 4: where sum(distinct a.four + b.four) = b.four)... + ^ +-- Test handling of sublinks within outer-level aggregates. +-- Per bug report from Daniel Grace. +select + (select max((select i.unique2 from tenk1 i where i.unique1 = o.unique1))) +from tenk1 o; + max +------ + 9999 +(1 row) + +-- Test handling of Params within aggregate arguments in hashed aggregation. +-- Per bug report from Jeevan Chalke. +explain (verbose, costs off) +select s1, s2, sm +from generate_series(1, 3) s1, + lateral (select s2, sum(s1 + s2) sm + from generate_series(1, 3) s2 group by s2) ss +order by 1, 2; + QUERY PLAN +------------------------------------------------------------------ + Sort + Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2))) + Sort Key: s1.s1, s2.s2 + -> Nested Loop + Output: s1.s1, s2.s2, (sum((s1.s1 + s2.s2))) + -> Function Scan on pg_catalog.generate_series s1 + Output: s1.s1 + Function Call: generate_series(1, 3) + -> HashAggregate + Output: s2.s2, sum((s1.s1 + s2.s2)) + Group Key: s2.s2 + -> Function Scan on pg_catalog.generate_series s2 + Output: s2.s2 + Function Call: generate_series(1, 3) +(14 rows) + +select s1, s2, sm +from generate_series(1, 3) s1, + lateral (select s2, sum(s1 + s2) sm + from generate_series(1, 3) s2 group by s2) ss +order by 1, 2; + s1 | s2 | sm +----+----+---- + 1 | 1 | 2 + 1 | 2 | 3 + 1 | 3 | 4 + 2 | 1 | 3 + 2 | 2 | 4 + 2 | 3 | 5 + 3 | 1 | 4 + 3 | 2 | 5 + 3 | 3 | 6 +(9 rows) + +explain (verbose, costs off) +select array(select sum(x+y) s + from generate_series(1,3) y group by y order by s) + from generate_series(1,3) x; + QUERY PLAN +------------------------------------------------------------------- + Function Scan on pg_catalog.generate_series x + Output: (SubPlan 1) + Function Call: generate_series(1, 3) + SubPlan 1 + -> Sort + Output: (sum((x.x + y.y))), y.y + Sort Key: (sum((x.x + y.y))) + -> HashAggregate + Output: sum((x.x + y.y)), y.y + Group Key: y.y + -> Function Scan on pg_catalog.generate_series y + Output: y.y + Function Call: generate_series(1, 3) +(13 rows) + +select array(select sum(x+y) s + from generate_series(1,3) y group by y order by s) + from generate_series(1,3) x; + array +--------- + {2,3,4} + {3,4,5} + {4,5,6} +(3 rows) + +-- +-- test for bitwise integer aggregates +-- +CREATE TEMPORARY TABLE bitwise_test( + i2 INT2, + i4 INT4, + i8 INT8, + i INTEGER, + x INT2, + y BIT(4) +); +-- empty case +SELECT + BIT_AND(i2) AS "?", + BIT_OR(i4) AS "?", + BIT_XOR(i8) AS "?" +FROM bitwise_test; + ? | ? | ? +---+---+--- + | | +(1 row) + +COPY bitwise_test FROM STDIN NULL 'null'; +SELECT + BIT_AND(i2) AS "1", + BIT_AND(i4) AS "1", + BIT_AND(i8) AS "1", + BIT_AND(i) AS "?", + BIT_AND(x) AS "0", + BIT_AND(y) AS "0100", + BIT_OR(i2) AS "7", + BIT_OR(i4) AS "7", + BIT_OR(i8) AS "7", + BIT_OR(i) AS "?", + BIT_OR(x) AS "7", + BIT_OR(y) AS "1101", + BIT_XOR(i2) AS "5", + BIT_XOR(i4) AS "5", + BIT_XOR(i8) AS "5", + BIT_XOR(i) AS "?", + BIT_XOR(x) AS "7", + BIT_XOR(y) AS "1101" +FROM bitwise_test; + 1 | 1 | 1 | ? | 0 | 0100 | 7 | 7 | 7 | ? | 7 | 1101 | 5 | 5 | 5 | ? 
| 7 | 1101 +---+---+---+---+---+------+---+---+---+---+---+------+---+---+---+---+---+------ + 1 | 1 | 1 | 1 | 0 | 0100 | 7 | 7 | 7 | 3 | 7 | 1101 | 5 | 5 | 5 | 2 | 7 | 1101 +(1 row) + +-- +-- test boolean aggregates +-- +-- first test all possible transition and final states +SELECT + -- boolean and transitions + -- null because strict + booland_statefunc(NULL, NULL) IS NULL AS "t", + booland_statefunc(TRUE, NULL) IS NULL AS "t", + booland_statefunc(FALSE, NULL) IS NULL AS "t", + booland_statefunc(NULL, TRUE) IS NULL AS "t", + booland_statefunc(NULL, FALSE) IS NULL AS "t", + -- and actual computations + booland_statefunc(TRUE, TRUE) AS "t", + NOT booland_statefunc(TRUE, FALSE) AS "t", + NOT booland_statefunc(FALSE, TRUE) AS "t", + NOT booland_statefunc(FALSE, FALSE) AS "t"; + t | t | t | t | t | t | t | t | t +---+---+---+---+---+---+---+---+--- + t | t | t | t | t | t | t | t | t +(1 row) + +SELECT + -- boolean or transitions + -- null because strict + boolor_statefunc(NULL, NULL) IS NULL AS "t", + boolor_statefunc(TRUE, NULL) IS NULL AS "t", + boolor_statefunc(FALSE, NULL) IS NULL AS "t", + boolor_statefunc(NULL, TRUE) IS NULL AS "t", + boolor_statefunc(NULL, FALSE) IS NULL AS "t", + -- actual computations + boolor_statefunc(TRUE, TRUE) AS "t", + boolor_statefunc(TRUE, FALSE) AS "t", + boolor_statefunc(FALSE, TRUE) AS "t", + NOT boolor_statefunc(FALSE, FALSE) AS "t"; + t | t | t | t | t | t | t | t | t +---+---+---+---+---+---+---+---+--- + t | t | t | t | t | t | t | t | t +(1 row) + +CREATE TEMPORARY TABLE bool_test( + b1 BOOL, + b2 BOOL, + b3 BOOL, + b4 BOOL); +-- empty case +SELECT + BOOL_AND(b1) AS "n", + BOOL_OR(b3) AS "n" +FROM bool_test; + n | n +---+--- + | +(1 row) + +COPY bool_test FROM STDIN NULL 'null'; +SELECT + BOOL_AND(b1) AS "f", + BOOL_AND(b2) AS "t", + BOOL_AND(b3) AS "f", + BOOL_AND(b4) AS "n", + BOOL_AND(NOT b2) AS "f", + BOOL_AND(NOT b3) AS "t" +FROM bool_test; + f | t | f | n | f | t +---+---+---+---+---+--- + f | t | f | | f | t +(1 row) + +SELECT + EVERY(b1) AS "f", + EVERY(b2) AS "t", + EVERY(b3) AS "f", + EVERY(b4) AS "n", + EVERY(NOT b2) AS "f", + EVERY(NOT b3) AS "t" +FROM bool_test; + f | t | f | n | f | t +---+---+---+---+---+--- + f | t | f | | f | t +(1 row) + +SELECT + BOOL_OR(b1) AS "t", + BOOL_OR(b2) AS "t", + BOOL_OR(b3) AS "f", + BOOL_OR(b4) AS "n", + BOOL_OR(NOT b2) AS "f", + BOOL_OR(NOT b3) AS "t" +FROM bool_test; + t | t | f | n | f | t +---+---+---+---+---+--- + t | t | f | | f | t +(1 row) + +-- +-- Test cases that should be optimized into indexscans instead of +-- the generic aggregate implementation. 
+-- +-- Basic cases +explain (costs off) + select min(unique1) from tenk1; + QUERY PLAN +------------------------------------------------------------ + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 IS NOT NULL) +(5 rows) + +select min(unique1) from tenk1; + min +----- + 0 +(1 row) + +explain (costs off) + select max(unique1) from tenk1; + QUERY PLAN +--------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique1 on tenk1 + Index Cond: (unique1 IS NOT NULL) +(5 rows) + +select max(unique1) from tenk1; + max +------ + 9999 +(1 row) + +explain (costs off) + select max(unique1) from tenk1 where unique1 < 42; + QUERY PLAN +------------------------------------------------------------------------ + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique1 on tenk1 + Index Cond: ((unique1 IS NOT NULL) AND (unique1 < 42)) +(5 rows) + +select max(unique1) from tenk1 where unique1 < 42; + max +----- + 41 +(1 row) + +explain (costs off) + select max(unique1) from tenk1 where unique1 > 42; + QUERY PLAN +------------------------------------------------------------------------ + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique1 on tenk1 + Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42)) +(5 rows) + +select max(unique1) from tenk1 where unique1 > 42; + max +------ + 9999 +(1 row) + +-- the planner may choose a generic aggregate here if parallel query is +-- enabled, since that plan will be parallel safe and the "optimized" +-- plan, which has almost identical cost, will not be. we want to test +-- the optimized plan, so temporarily disable parallel query. 
+begin; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select max(unique1) from tenk1 where unique1 > 42000; + QUERY PLAN +--------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique1 on tenk1 + Index Cond: ((unique1 IS NOT NULL) AND (unique1 > 42000)) +(5 rows) + +select max(unique1) from tenk1 where unique1 > 42000; + max +----- + +(1 row) + +rollback; +-- multi-column index (uses tenk1_thous_tenthous) +explain (costs off) + select max(tenthous) from tenk1 where thousand = 33; + QUERY PLAN +---------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_thous_tenthous on tenk1 + Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL)) +(5 rows) + +select max(tenthous) from tenk1 where thousand = 33; + max +------ + 9033 +(1 row) + +explain (costs off) + select min(tenthous) from tenk1 where thousand = 33; + QUERY PLAN +-------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: ((thousand = 33) AND (tenthous IS NOT NULL)) +(5 rows) + +select min(tenthous) from tenk1 where thousand = 33; + min +----- + 33 +(1 row) + +-- check parameter propagation into an indexscan subquery +explain (costs off) + select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt + from int4_tbl; + QUERY PLAN +----------------------------------------------------------------------------------------- + Seq Scan on int4_tbl + SubPlan 2 + -> Result + InitPlan 1 (returns $1) + -> Limit + -> Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: ((unique1 IS NOT NULL) AND (unique1 > int4_tbl.f1)) +(7 rows) + +select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt + from int4_tbl; + f1 | gt +-------------+---- + 0 | 1 + 123456 | + -123456 | 0 + 2147483647 | + -2147483647 | 0 +(5 rows) + +-- check some cases that were handled incorrectly in 8.3.0 +explain (costs off) + select distinct max(unique2) from tenk1; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Group Key: $0 + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique2 on tenk1 + Index Cond: (unique2 IS NOT NULL) + -> Result +(7 rows) + +select distinct max(unique2) from tenk1; + max +------ + 9999 +(1 row) + +explain (costs off) + select max(unique2) from tenk1 order by 1; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: ($0) + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique2 on tenk1 + Index Cond: (unique2 IS NOT NULL) + -> Result +(7 rows) + +select max(unique2) from tenk1 order by 1; + max +------ + 9999 +(1 row) + +explain (costs off) + select max(unique2) from tenk1 order by max(unique2); + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: ($0) + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique2 on tenk1 + Index Cond: (unique2 IS NOT NULL) + -> Result +(7 rows) + +select max(unique2) from tenk1 order by max(unique2); + max +------ + 9999 +(1 row) + +explain (costs off) + select max(unique2) from tenk1 order by max(unique2)+1; + QUERY PLAN +--------------------------------------------------------------------- + Sort 
+ Sort Key: (($0 + 1)) + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique2 on tenk1 + Index Cond: (unique2 IS NOT NULL) + -> Result +(7 rows) + +select max(unique2) from tenk1 order by max(unique2)+1; + max +------ + 9999 +(1 row) + +explain (costs off) + select max(unique2), generate_series(1,3) as g from tenk1 order by g desc; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: (generate_series(1, 3)) DESC + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan Backward using tenk1_unique2 on tenk1 + Index Cond: (unique2 IS NOT NULL) + -> ProjectSet + -> Result +(8 rows) + +select max(unique2), generate_series(1,3) as g from tenk1 order by g desc; + max | g +------+--- + 9999 | 3 + 9999 | 2 + 9999 | 1 +(3 rows) + +-- interesting corner case: constant gets optimized into a seqscan +explain (costs off) + select max(100) from tenk1; + QUERY PLAN +---------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Result + One-Time Filter: (100 IS NOT NULL) + -> Seq Scan on tenk1 +(6 rows) + +select max(100) from tenk1; + max +----- + 100 +(1 row) + +-- try it on an inheritance tree +create table minmaxtest(f1 int); +create table minmaxtest1() inherits (minmaxtest); +create table minmaxtest2() inherits (minmaxtest); +create table minmaxtest3() inherits (minmaxtest); +create index minmaxtesti on minmaxtest(f1); +create index minmaxtest1i on minmaxtest1(f1); +create index minmaxtest2i on minmaxtest2(f1 desc); +create index minmaxtest3i on minmaxtest3(f1) where f1 is not null; +insert into minmaxtest values(11), (12); +insert into minmaxtest1 values(13), (14); +insert into minmaxtest2 values(15), (16); +insert into minmaxtest3 values(17), (18); +explain (costs off) + select min(f1), max(f1) from minmaxtest; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Merge Append + Sort Key: minmaxtest.f1 + -> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4 + InitPlan 2 (returns $1) + -> Limit + -> Merge Append + Sort Key: minmaxtest_5.f1 DESC + -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9 +(23 rows) + +select min(f1), max(f1) from minmaxtest; + min | max +-----+----- + 11 | 18 +(1 row) + +-- DISTINCT doesn't do anything useful here, but it shouldn't fail +explain (costs off) + select distinct min(f1), max(f1) from minmaxtest; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Unique + InitPlan 1 (returns $0) + -> Limit + -> Merge Append + Sort Key: minmaxtest.f1 + -> Index Only Scan using minmaxtesti on minmaxtest minmaxtest_1 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan using minmaxtest1i on minmaxtest1 minmaxtest_2 + Index Cond: (f1 IS NOT NULL) + -> Index 
Only Scan Backward using minmaxtest2i on minmaxtest2 minmaxtest_3 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan using minmaxtest3i on minmaxtest3 minmaxtest_4 + InitPlan 2 (returns $1) + -> Limit + -> Merge Append + Sort Key: minmaxtest_5.f1 DESC + -> Index Only Scan Backward using minmaxtesti on minmaxtest minmaxtest_6 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan Backward using minmaxtest1i on minmaxtest1 minmaxtest_7 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest_8 + Index Cond: (f1 IS NOT NULL) + -> Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest_9 + -> Sort + Sort Key: ($0), ($1) + -> Result +(26 rows) + +select distinct min(f1), max(f1) from minmaxtest; + min | max +-----+----- + 11 | 18 +(1 row) + +drop table minmaxtest cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table minmaxtest1 +drop cascades to table minmaxtest2 +drop cascades to table minmaxtest3 +-- check for correct detection of nested-aggregate errors +select max(min(unique1)) from tenk1; +ERROR: aggregate function calls cannot be nested +LINE 1: select max(min(unique1)) from tenk1; + ^ +select (select max(min(unique1)) from int8_tbl) from tenk1; +ERROR: aggregate function calls cannot be nested +LINE 1: select (select max(min(unique1)) from int8_tbl) from tenk1; + ^ +select avg((select avg(a1.col1 order by (select avg(a2.col2) from tenk1 a3)) + from tenk1 a1(col1))) +from tenk1 a2(col2); +ERROR: aggregate function calls cannot be nested +LINE 1: select avg((select avg(a1.col1 order by (select avg(a2.col2)... + ^ +-- +-- Test removal of redundant GROUP BY columns +-- +create temp table t1 (a int, b int, c int, d int, primary key (a, b)); +create temp table t2 (x int, y int, z int, primary key (x, y)); +create temp table t3 (a int, b int, c int, primary key(a, b) deferrable); +-- Non-primary-key columns can be removed from GROUP BY +explain (costs off) select * from t1 group by a,b,c,d; + QUERY PLAN +---------------------- + HashAggregate + Group Key: a, b + -> Seq Scan on t1 +(3 rows) + +-- No removal can happen if the complete PK is not present in GROUP BY +explain (costs off) select a,c from t1 group by a,c,d; + QUERY PLAN +---------------------- + HashAggregate + Group Key: a, c, d + -> Seq Scan on t1 +(3 rows) + +-- Test removal across multiple relations +explain (costs off) select * +from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y +group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.y,t2.z; + QUERY PLAN +------------------------------------------------------ + HashAggregate + Group Key: t1.a, t1.b + -> Hash Join + Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b)) + -> Seq Scan on t2 + -> Hash + -> Seq Scan on t1 +(7 rows) + +-- Test case where t1 can be optimized but not t2 +explain (costs off) select t1.*,t2.x,t2.z +from t1 inner join t2 on t1.a = t2.x and t1.b = t2.y +group by t1.a,t1.b,t1.c,t1.d,t2.x,t2.z; + QUERY PLAN +------------------------------------------------------ + HashAggregate + Group Key: t1.a, t1.b, t2.z + -> Hash Join + Hash Cond: ((t2.x = t1.a) AND (t2.y = t1.b)) + -> Seq Scan on t2 + -> Hash + -> Seq Scan on t1 +(7 rows) + +-- Cannot optimize when PK is deferrable +explain (costs off) select * from t3 group by a,b,c; + QUERY PLAN +---------------------- + HashAggregate + Group Key: a, b, c + -> Seq Scan on t3 +(3 rows) + +create temp table t1c () inherits (t1); +-- Ensure we don't remove any columns when t1 has a child table +explain (costs off) select * from t1 group by a,b,c,d; + QUERY PLAN 
+------------------------------------- + HashAggregate + Group Key: t1.a, t1.b, t1.c, t1.d + -> Append + -> Seq Scan on t1 t1_1 + -> Seq Scan on t1c t1_2 +(5 rows) + +-- Okay to remove columns if we're only querying the parent. +explain (costs off) select * from only t1 group by a,b,c,d; + QUERY PLAN +---------------------- + HashAggregate + Group Key: a, b + -> Seq Scan on t1 +(3 rows) + +create temp table p_t1 ( + a int, + b int, + c int, + d int, + primary key(a,b) +) partition by list(a); +create temp table p_t1_1 partition of p_t1 for values in(1); +create temp table p_t1_2 partition of p_t1 for values in(2); +-- Ensure we can remove non-PK columns for partitioned tables. +explain (costs off) select * from p_t1 group by a,b,c,d; + QUERY PLAN +-------------------------------- + HashAggregate + Group Key: p_t1.a, p_t1.b + -> Append + -> Seq Scan on p_t1_1 + -> Seq Scan on p_t1_2 +(5 rows) + +drop table t1 cascade; +NOTICE: drop cascades to table t1c +drop table t2; +drop table t3; +drop table p_t1; +-- +-- Test GROUP BY matching of join columns that are type-coerced due to USING +-- +create temp table t1(f1 int, f2 int); +create temp table t2(f1 bigint, f2 oid); +select f1 from t1 left join t2 using (f1) group by f1; + f1 +---- +(0 rows) + +select f1 from t1 left join t2 using (f1) group by t1.f1; + f1 +---- +(0 rows) + +select t1.f1 from t1 left join t2 using (f1) group by t1.f1; + f1 +---- +(0 rows) + +-- only this one should fail: +select t1.f1 from t1 left join t2 using (f1) group by f1; +ERROR: column "t1.f1" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: select t1.f1 from t1 left join t2 using (f1) group by f1; + ^ +-- check case where we have to inject nullingrels into coerced join alias +select f1, count(*) from +t1 x(x0,x1) left join (t1 left join t2 using(f1)) on (x0 = 0) +group by f1; + f1 | count +----+------- +(0 rows) + +-- same, for a RelabelType coercion +select f2, count(*) from +t1 x(x0,x1) left join (t1 left join t2 using(f2)) on (x0 = 0) +group by f2; + f2 | count +----+------- +(0 rows) + +drop table t1, t2; +-- +-- Test planner's selection of pathkeys for ORDER BY aggregates +-- +-- Ensure we order by four. This suits the most aggregate functions. +explain (costs off) +select sum(two order by two),max(four order by four), min(four order by four) +from tenk1; + QUERY PLAN +------------------------------- + Aggregate + -> Sort + Sort Key: four + -> Seq Scan on tenk1 +(4 rows) + +-- Ensure we order by two. It's a tie between ordering by two and four but +-- we tiebreak on the aggregate's position. +explain (costs off) +select + sum(two order by two), max(four order by four), + min(four order by four), max(two order by two) +from tenk1; + QUERY PLAN +------------------------------- + Aggregate + -> Sort + Sort Key: two + -> Seq Scan on tenk1 +(4 rows) + +-- Similar to above, but tiebreak on ordering by four +explain (costs off) +select + max(four order by four), sum(two order by two), + min(four order by four), max(two order by two) +from tenk1; + QUERY PLAN +------------------------------- + Aggregate + -> Sort + Sort Key: four + -> Seq Scan on tenk1 +(4 rows) + +-- Ensure this one orders by ten since there are 3 aggregates that require ten +-- vs two that suit two and four. 
+explain (costs off) +select + max(four order by four), sum(two order by two), + min(four order by four), max(two order by two), + sum(ten order by ten), min(ten order by ten), max(ten order by ten) +from tenk1; + QUERY PLAN +------------------------------- + Aggregate + -> Sort + Sort Key: ten + -> Seq Scan on tenk1 +(4 rows) + +-- Try a case involving a GROUP BY clause where the GROUP BY column is also +-- part of an aggregate's ORDER BY clause. We want a sort order that works +-- for the GROUP BY along with the first and the last aggregate. +explain (costs off) +select + sum(unique1 order by ten, two), sum(unique1 order by four), + sum(unique1 order by two, four) +from tenk1 +group by ten; + QUERY PLAN +---------------------------------- + GroupAggregate + Group Key: ten + -> Sort + Sort Key: ten, two, four + -> Seq Scan on tenk1 +(5 rows) + +-- Ensure that we never choose to provide presorted input to an Aggref with +-- a volatile function in the ORDER BY / DISTINCT clause. We want to ensure +-- these sorts are performed individually rather than at the query level. +explain (costs off) +select + sum(unique1 order by two), sum(unique1 order by four), + sum(unique1 order by four, two), sum(unique1 order by two, random()), + sum(unique1 order by two, random(), random() + 1) +from tenk1 +group by ten; + QUERY PLAN +---------------------------------- + GroupAggregate + Group Key: ten + -> Sort + Sort Key: ten, four, two + -> Seq Scan on tenk1 +(5 rows) + +-- Ensure consecutive NULLs are properly treated as distinct from each other +select array_agg(distinct val) +from (select null as val from generate_series(1, 2)); + array_agg +----------- + {NULL} +(1 row) + +-- Ensure no ordering is requested when enable_presorted_aggregate is off +set enable_presorted_aggregate to off; +explain (costs off) +select sum(two order by two) from tenk1; + QUERY PLAN +------------------------- + Aggregate + -> Seq Scan on tenk1 +(2 rows) + +reset enable_presorted_aggregate; +-- +-- Test combinations of DISTINCT and/or ORDER BY +-- +select array_agg(a order by b) + from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); + array_agg +----------- + {3,4,2,1} +(1 row) + +select array_agg(a order by a) + from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); + array_agg +----------- + {1,2,3,4} +(1 row) + +select array_agg(a order by a desc) + from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); + array_agg +----------- + {4,3,2,1} +(1 row) + +select array_agg(b order by a desc) + from (values (1,4),(2,3),(3,1),(4,2)) v(a,b); + array_agg +----------- + {2,1,3,4} +(1 row) + +select array_agg(distinct a) + from (values (1),(2),(1),(3),(null),(2)) v(a); + array_agg +-------------- + {1,2,3,NULL} +(1 row) + +select array_agg(distinct a order by a) + from (values (1),(2),(1),(3),(null),(2)) v(a); + array_agg +-------------- + {1,2,3,NULL} +(1 row) + +select array_agg(distinct a order by a desc) + from (values (1),(2),(1),(3),(null),(2)) v(a); + array_agg +-------------- + {NULL,3,2,1} +(1 row) + +select array_agg(distinct a order by a desc nulls last) + from (values (1),(2),(1),(3),(null),(2)) v(a); + array_agg +-------------- + {3,2,1,NULL} +(1 row) + +-- multi-arg aggs, strict/nonstrict, distinct/order by +select aggfstr(a,b,c) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); + aggfstr +--------------------------------------- + {"(1,3,foo)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select aggfns(a,b,c) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); + aggfns 
+----------------------------------------------- + {"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select aggfstr(distinct a,b,c) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,3) i; + aggfstr +--------------------------------------- + {"(1,3,foo)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select aggfns(distinct a,b,c) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,3) i; + aggfns +----------------------------------------------- + {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select aggfstr(distinct a,b,c order by b) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,3) i; + aggfstr +--------------------------------------- + {"(3,1,baz)","(2,2,bar)","(1,3,foo)"} +(1 row) + +select aggfns(distinct a,b,c order by b) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,3) i; + aggfns +----------------------------------------------- + {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"} +(1 row) + +-- test specific code paths +select aggfns(distinct a,a,c order by c using ~<~,a) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,2) i; + aggfns +------------------------------------------------ + {"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"} +(1 row) + +select aggfns(distinct a,a,c order by c using ~<~) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,2) i; + aggfns +------------------------------------------------ + {"(2,2,bar)","(3,3,baz)","(1,1,foo)","(0,0,)"} +(1 row) + +select aggfns(distinct a,a,c order by a) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,2) i; + aggfns +------------------------------------------------ + {"(0,0,)","(1,1,foo)","(2,2,bar)","(3,3,baz)"} +(1 row) + +select aggfns(distinct a,b,c order by a,c using ~<~,b) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,2) i; + aggfns +----------------------------------------------- + {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} +(1 row) + +-- test a more complex permutation that has previous caused issues +select + string_agg(distinct 'a', ','), + sum(( + select sum(1) + from (values(1)) b(id) + where a.id = b.id +)) from unnest(array[1]) a(id); + string_agg | sum +------------+----- + a | 1 +(1 row) + +-- check node I/O via view creation and usage, also deparsing logic +create view agg_view1 as + select aggfns(a,b,c) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); +select * from agg_view1; + aggfns +----------------------------------------------- + {"(1,3,foo)","(0,,)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(a, b, c) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); +(1 row) + +create or replace view agg_view1 as + select aggfns(distinct a,b,c) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,3) i; +select * from agg_view1; + aggfns +----------------------------------------------- + {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef 
+--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(DISTINCT v.a, v.b, v.c) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+ + generate_series(1, 3) i(i); +(1 row) + +create or replace view agg_view1 as + select aggfns(distinct a,b,c order by b) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,3) i; +select * from agg_view1; + aggfns +----------------------------------------------- + {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.b) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+ + generate_series(1, 3) i(i); +(1 row) + +create or replace view agg_view1 as + select aggfns(a,b,c order by b+1) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); +select * from agg_view1; + aggfns +----------------------------------------------- + {"(3,1,baz)","(2,2,bar)","(1,3,foo)","(0,,)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(a, b, c ORDER BY (b + 1)) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); +(1 row) + +create or replace view agg_view1 as + select aggfns(a,a,c order by b) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); +select * from agg_view1; + aggfns +------------------------------------------------ + {"(3,3,baz)","(2,2,bar)","(1,1,foo)","(0,0,)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(a, a, c ORDER BY b) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); +(1 row) + +create or replace view agg_view1 as + select aggfns(a,b,c order by c using ~<~) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c); +select * from agg_view1; + aggfns +----------------------------------------------- + {"(2,2,bar)","(3,1,baz)","(1,3,foo)","(0,,)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef +--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(a, b, c ORDER BY c USING ~<~ NULLS LAST) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c); +(1 row) + +create or replace view agg_view1 as + select aggfns(distinct a,b,c order by a,c using ~<~,b) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,2) i; +select * from agg_view1; + aggfns +----------------------------------------------- + {"(0,,)","(1,3,foo)","(2,2,bar)","(3,1,baz)"} +(1 row) + +select pg_get_viewdef('agg_view1'::regclass); + pg_get_viewdef 
+--------------------------------------------------------------------------------------------------------------------- + SELECT aggfns(DISTINCT v.a, v.b, v.c ORDER BY v.a, v.c USING ~<~ NULLS LAST, v.b) AS aggfns + + FROM ( VALUES (1,3,'foo'::text), (0,NULL::integer,NULL::text), (2,2,'bar'::text), (3,1,'baz'::text)) v(a, b, c),+ + generate_series(1, 2) i(i); +(1 row) + +drop view agg_view1; +-- incorrect DISTINCT usage errors +select aggfns(distinct a,b,c order by i) + from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; +ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list +LINE 1: select aggfns(distinct a,b,c order by i) + ^ +select aggfns(distinct a,b,c order by a,b+1) + from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; +ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list +LINE 1: select aggfns(distinct a,b,c order by a,b+1) + ^ +select aggfns(distinct a,b,c order by a,b,i,c) + from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; +ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list +LINE 1: select aggfns(distinct a,b,c order by a,b,i,c) + ^ +select aggfns(distinct a,a,c order by a,b) + from (values (1,1,'foo')) v(a,b,c), generate_series(1,2) i; +ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list +LINE 1: select aggfns(distinct a,a,c order by a,b) + ^ +-- string_agg tests +select string_agg(a,',') from (values('aaaa'),('bbbb'),('cccc')) g(a); + string_agg +---------------- + aaaa,bbbb,cccc +(1 row) + +select string_agg(a,',') from (values('aaaa'),(null),('bbbb'),('cccc')) g(a); + string_agg +---------------- + aaaa,bbbb,cccc +(1 row) + +select string_agg(a,'AB') from (values(null),(null),('bbbb'),('cccc')) g(a); + string_agg +------------ + bbbbABcccc +(1 row) + +select string_agg(a,',') from (values(null),(null)) g(a); + string_agg +------------ + +(1 row) + +-- check some implicit casting cases, as per bug #5564 +select string_agg(distinct f1, ',' order by f1) from varchar_tbl; -- ok + string_agg +------------ + a,ab,abcd +(1 row) + +select string_agg(distinct f1::text, ',' order by f1) from varchar_tbl; -- not ok +ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list +LINE 1: select string_agg(distinct f1::text, ',' order by f1) from v... + ^ +select string_agg(distinct f1, ',' order by f1::text) from varchar_tbl; -- not ok +ERROR: in an aggregate with DISTINCT, ORDER BY expressions must appear in argument list +LINE 1: select string_agg(distinct f1, ',' order by f1::text) from v... 
+ ^ +select string_agg(distinct f1::text, ',' order by f1::text) from varchar_tbl; -- ok + string_agg +------------ + a,ab,abcd +(1 row) + +-- string_agg bytea tests +create table bytea_test_table(v bytea); +select string_agg(v, '') from bytea_test_table; + string_agg +------------ + +(1 row) + +insert into bytea_test_table values(decode('ff','hex')); +select string_agg(v, '') from bytea_test_table; + string_agg +------------ + \xff +(1 row) + +insert into bytea_test_table values(decode('aa','hex')); +select string_agg(v, '') from bytea_test_table; + string_agg +------------ + \xffaa +(1 row) + +select string_agg(v, NULL) from bytea_test_table; + string_agg +------------ + \xffaa +(1 row) + +select string_agg(v, decode('ee', 'hex')) from bytea_test_table; + string_agg +------------ + \xffeeaa +(1 row) + +drop table bytea_test_table; +-- Test parallel string_agg and array_agg +create table pagg_test (x int, y int); +insert into pagg_test +select (case x % 4 when 1 then null else x end), x % 10 +from generate_series(1,5000) x; +set parallel_setup_cost TO 0; +set parallel_tuple_cost TO 0; +set parallel_leader_participation TO 0; +set min_parallel_table_scan_size = 0; +set bytea_output = 'escape'; +set max_parallel_workers_per_gather = 2; +-- create a view as we otherwise have to repeat this query a few times. +create view v_pagg_test AS +select + y, + min(t) AS tmin,max(t) AS tmax,count(distinct t) AS tndistinct, + min(b) AS bmin,max(b) AS bmax,count(distinct b) AS bndistinct, + min(a) AS amin,max(a) AS amax,count(distinct a) AS andistinct, + min(aa) AS aamin,max(aa) AS aamax,count(distinct aa) AS aandistinct +from ( + select + y, + unnest(regexp_split_to_array(a1.t, ','))::int AS t, + unnest(regexp_split_to_array(a1.b::text, ',')) AS b, + unnest(a1.a) AS a, + unnest(a1.aa) AS aa + from ( + select + y, + string_agg(x::text, ',') AS t, + string_agg(x::text::bytea, ',') AS b, + array_agg(x) AS a, + array_agg(ARRAY[x]) AS aa + from pagg_test + group by y + ) a1 +) a2 +group by y; +-- Ensure results are correct. +select * from v_pagg_test order by y; + y | tmin | tmax | tndistinct | bmin | bmax | bndistinct | amin | amax | andistinct | aamin | aamax | aandistinct +---+------+------+------------+------+------+------------+------+------+------------+-------+-------+------------- + 0 | 10 | 5000 | 500 | 10 | 990 | 500 | 10 | 5000 | 500 | 10 | 5000 | 500 + 1 | 11 | 4991 | 250 | 1011 | 991 | 250 | 11 | 4991 | 250 | 11 | 4991 | 250 + 2 | 2 | 4992 | 500 | 1002 | 992 | 500 | 2 | 4992 | 500 | 2 | 4992 | 500 + 3 | 3 | 4983 | 250 | 1003 | 983 | 250 | 3 | 4983 | 250 | 3 | 4983 | 250 + 4 | 4 | 4994 | 500 | 1004 | 994 | 500 | 4 | 4994 | 500 | 4 | 4994 | 500 + 5 | 15 | 4995 | 250 | 1015 | 995 | 250 | 15 | 4995 | 250 | 15 | 4995 | 250 + 6 | 6 | 4996 | 500 | 1006 | 996 | 500 | 6 | 4996 | 500 | 6 | 4996 | 500 + 7 | 7 | 4987 | 250 | 1007 | 987 | 250 | 7 | 4987 | 250 | 7 | 4987 | 250 + 8 | 8 | 4998 | 500 | 1008 | 998 | 500 | 8 | 4998 | 500 | 8 | 4998 | 500 + 9 | 19 | 4999 | 250 | 1019 | 999 | 250 | 19 | 4999 | 250 | 19 | 4999 | 250 +(10 rows) + +-- Ensure parallel aggregation is actually being used. 
+explain (costs off) select * from v_pagg_test order by y; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group Key: pagg_test.y + -> Sort + Sort Key: pagg_test.y, (((unnest(regexp_split_to_array((string_agg((pagg_test.x)::text, ','::text)), ','::text))))::integer) + -> Result + -> ProjectSet + -> Finalize HashAggregate + Group Key: pagg_test.y + -> Gather + Workers Planned: 2 + -> Partial HashAggregate + Group Key: pagg_test.y + -> Parallel Seq Scan on pagg_test +(13 rows) + +set max_parallel_workers_per_gather = 0; +-- Ensure results are the same without parallel aggregation. +select * from v_pagg_test order by y; + y | tmin | tmax | tndistinct | bmin | bmax | bndistinct | amin | amax | andistinct | aamin | aamax | aandistinct +---+------+------+------------+------+------+------------+------+------+------------+-------+-------+------------- + 0 | 10 | 5000 | 500 | 10 | 990 | 500 | 10 | 5000 | 500 | 10 | 5000 | 500 + 1 | 11 | 4991 | 250 | 1011 | 991 | 250 | 11 | 4991 | 250 | 11 | 4991 | 250 + 2 | 2 | 4992 | 500 | 1002 | 992 | 500 | 2 | 4992 | 500 | 2 | 4992 | 500 + 3 | 3 | 4983 | 250 | 1003 | 983 | 250 | 3 | 4983 | 250 | 3 | 4983 | 250 + 4 | 4 | 4994 | 500 | 1004 | 994 | 500 | 4 | 4994 | 500 | 4 | 4994 | 500 + 5 | 15 | 4995 | 250 | 1015 | 995 | 250 | 15 | 4995 | 250 | 15 | 4995 | 250 + 6 | 6 | 4996 | 500 | 1006 | 996 | 500 | 6 | 4996 | 500 | 6 | 4996 | 500 + 7 | 7 | 4987 | 250 | 1007 | 987 | 250 | 7 | 4987 | 250 | 7 | 4987 | 250 + 8 | 8 | 4998 | 500 | 1008 | 998 | 500 | 8 | 4998 | 500 | 8 | 4998 | 500 + 9 | 19 | 4999 | 250 | 1019 | 999 | 250 | 19 | 4999 | 250 | 19 | 4999 | 250 +(10 rows) + +-- Clean up +reset max_parallel_workers_per_gather; +reset bytea_output; +reset min_parallel_table_scan_size; +reset parallel_leader_participation; +reset parallel_tuple_cost; +reset parallel_setup_cost; +drop view v_pagg_test; +drop table pagg_test; +-- FILTER tests +select min(unique1) filter (where unique1 > 100) from tenk1; + min +----- + 101 +(1 row) + +select sum(1/ten) filter (where ten > 0) from tenk1; + sum +------ + 1000 +(1 row) + +select ten, sum(distinct four) filter (where four::text ~ '123') from onek a +group by ten; + ten | sum +-----+----- + 0 | + 1 | + 2 | + 3 | + 4 | + 5 | + 6 | + 7 | + 8 | + 9 | +(10 rows) + +select ten, sum(distinct four) filter (where four > 10) from onek a +group by ten +having exists (select 1 from onek b where sum(distinct a.four) = b.four); + ten | sum +-----+----- + 0 | + 2 | + 4 | + 6 | + 8 | +(5 rows) + +select max(foo COLLATE "C") filter (where (bar collate "POSIX") > '0') +from (values ('a', 'b')) AS v(foo,bar); + max +----- + a +(1 row) + +select any_value(v) filter (where v > 2) from (values (1), (2), (3)) as v (v); + any_value +----------- + 3 +(1 row) + +-- outer reference in FILTER (PostgreSQL extension) +select (select count(*) + from (values (1)) t0(inner_c)) +from (values (2),(3)) t1(outer_c); -- inner query is aggregation query + count +------- + 1 + 1 +(2 rows) + +select (select count(*) filter (where outer_c <> 0) + from (values (1)) t0(inner_c)) +from (values (2),(3)) t1(outer_c); -- outer query is aggregation query + count +------- + 2 +(1 row) + +select (select count(inner_c) filter (where outer_c <> 0) + from (values (1)) t0(inner_c)) +from (values (2),(3)) t1(outer_c); -- inner query is aggregation query + count +------- + 1 + 1 +(2 rows) + +select + (select max((select i.unique2 from tenk1 i where i.unique1 = 
o.unique1)) + filter (where o.unique1 < 10)) +from tenk1 o; -- outer query is aggregation query + max +------ + 9998 +(1 row) + +-- subquery in FILTER clause (PostgreSQL extension) +select sum(unique1) FILTER (WHERE + unique1 IN (SELECT unique1 FROM onek where unique1 < 100)) FROM tenk1; + sum +------ + 4950 +(1 row) + +-- exercise lots of aggregate parts with FILTER +select aggfns(distinct a,b,c order by a,c using ~<~,b) filter (where a > 1) + from (values (1,3,'foo'),(0,null,null),(2,2,'bar'),(3,1,'baz')) v(a,b,c), + generate_series(1,2) i; + aggfns +--------------------------- + {"(2,2,bar)","(3,1,baz)"} +(1 row) + +-- check handling of bare boolean Var in FILTER +select max(0) filter (where b1) from bool_test; + max +----- + 0 +(1 row) + +select (select max(0) filter (where b1)) from bool_test; + max +----- + 0 +(1 row) + +-- check for correct detection of nested-aggregate errors in FILTER +select max(unique1) filter (where sum(ten) > 0) from tenk1; +ERROR: aggregate functions are not allowed in FILTER +LINE 1: select max(unique1) filter (where sum(ten) > 0) from tenk1; + ^ +select (select max(unique1) filter (where sum(ten) > 0) from int8_tbl) from tenk1; +ERROR: aggregate function calls cannot be nested +LINE 1: select (select max(unique1) filter (where sum(ten) > 0) from... + ^ +select max(unique1) filter (where bool_or(ten > 0)) from tenk1; +ERROR: aggregate functions are not allowed in FILTER +LINE 1: select max(unique1) filter (where bool_or(ten > 0)) from ten... + ^ +select (select max(unique1) filter (where bool_or(ten > 0)) from int8_tbl) from tenk1; +ERROR: aggregate function calls cannot be nested +LINE 1: select (select max(unique1) filter (where bool_or(ten > 0)) ... + ^ +-- ordered-set aggregates +select p, percentile_cont(p) within group (order by x::float8) +from generate_series(1,5) x, + (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) +group by p order by p; + p | percentile_cont +------+----------------- + 0 | 1 + 0.1 | 1.4 + 0.25 | 2 + 0.4 | 2.6 + 0.5 | 3 + 0.6 | 3.4 + 0.75 | 4 + 0.9 | 4.6 + 1 | 5 +(9 rows) + +select p, percentile_cont(p order by p) within group (order by x) -- error +from generate_series(1,5) x, + (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) +group by p order by p; +ERROR: cannot use multiple ORDER BY clauses with WITHIN GROUP +LINE 1: select p, percentile_cont(p order by p) within group (order ... 
+ ^ +select p, sum() within group (order by x::float8) -- error +from generate_series(1,5) x, + (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) +group by p order by p; +ERROR: sum is not an ordered-set aggregate, so it cannot have WITHIN GROUP +LINE 1: select p, sum() within group (order by x::float8) -- error + ^ +select p, percentile_cont(p,p) -- error +from generate_series(1,5) x, + (values (0::float8),(0.1),(0.25),(0.4),(0.5),(0.6),(0.75),(0.9),(1)) v(p) +group by p order by p; +ERROR: WITHIN GROUP is required for ordered-set aggregate percentile_cont +LINE 1: select p, percentile_cont(p,p) -- error + ^ +select percentile_cont(0.5) within group (order by b) from aggtest; + percentile_cont +------------------ + 53.4485001564026 +(1 row) + +select percentile_cont(0.5) within group (order by b), sum(b) from aggtest; + percentile_cont | sum +------------------+--------- + 53.4485001564026 | 431.773 +(1 row) + +select percentile_cont(0.5) within group (order by thousand) from tenk1; + percentile_cont +----------------- + 499.5 +(1 row) + +select percentile_disc(0.5) within group (order by thousand) from tenk1; + percentile_disc +----------------- + 499 +(1 row) + +select rank(3) within group (order by x) +from (values (1),(1),(2),(2),(3),(3),(4)) v(x); + rank +------ + 5 +(1 row) + +select cume_dist(3) within group (order by x) +from (values (1),(1),(2),(2),(3),(3),(4)) v(x); + cume_dist +----------- + 0.875 +(1 row) + +select percent_rank(3) within group (order by x) +from (values (1),(1),(2),(2),(3),(3),(4),(5)) v(x); + percent_rank +-------------- + 0.5 +(1 row) + +select dense_rank(3) within group (order by x) +from (values (1),(1),(2),(2),(3),(3),(4)) v(x); + dense_rank +------------ + 3 +(1 row) + +select percentile_disc(array[0,0.1,0.25,0.5,0.75,0.9,1]) within group (order by thousand) +from tenk1; + percentile_disc +---------------------------- + {0,99,249,499,749,899,999} +(1 row) + +select percentile_cont(array[0,0.25,0.5,0.75,1]) within group (order by thousand) +from tenk1; + percentile_cont +----------------------------- + {0,249.75,499.5,749.25,999} +(1 row) + +select percentile_disc(array[[null,1,0.5],[0.75,0.25,null]]) within group (order by thousand) +from tenk1; + percentile_disc +--------------------------------- + {{NULL,999,499},{749,249,NULL}} +(1 row) + +select percentile_cont(array[0,1,0.25,0.75,0.5,1,0.3,0.32,0.35,0.38,0.4]) within group (order by x) +from generate_series(1,6) x; + percentile_cont +------------------------------------------ + {1,6,2.25,4.75,3.5,6,2.5,2.6,2.75,2.9,3} +(1 row) + +select ten, mode() within group (order by string4) from tenk1 group by ten; + ten | mode +-----+-------- + 0 | HHHHxx + 1 | OOOOxx + 2 | VVVVxx + 3 | OOOOxx + 4 | HHHHxx + 5 | HHHHxx + 6 | OOOOxx + 7 | AAAAxx + 8 | VVVVxx + 9 | VVVVxx +(10 rows) + +select percentile_disc(array[0.25,0.5,0.75]) within group (order by x) +from unnest('{fred,jim,fred,jack,jill,fred,jill,jim,jim,sheila,jim,sheila}'::text[]) u(x); + percentile_disc +----------------- + {fred,jill,jim} +(1 row) + +-- check collation propagates up in suitable cases: +select pg_collation_for(percentile_disc(1) within group (order by x collate "POSIX")) + from (values ('fred'),('jim')) v(x); + pg_collation_for +------------------ + "POSIX" +(1 row) + +-- ordered-set aggs created with CREATE AGGREGATE +select test_rank(3) within group (order by x) +from (values (1),(1),(2),(2),(3),(3),(4)) v(x); + test_rank +----------- + 5 +(1 row) + +select test_percentile_disc(0.5) within group (order by 
thousand) from tenk1; + test_percentile_disc +---------------------- + 499 +(1 row) + +-- ordered-set aggs can't use ungrouped vars in direct args: +select rank(x) within group (order by x) from generate_series(1,5) x; +ERROR: column "x.x" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: select rank(x) within group (order by x) from generate_serie... + ^ +DETAIL: Direct arguments of an ordered-set aggregate must use only grouped columns. +-- outer-level agg can't use a grouped arg of a lower level, either: +select array(select percentile_disc(a) within group (order by x) + from (values (0.3),(0.7)) v(a) group by a) + from generate_series(1,5) g(x); +ERROR: outer-level aggregate cannot contain a lower-level variable in its direct arguments +LINE 1: select array(select percentile_disc(a) within group (order b... + ^ +-- agg in the direct args is a grouping violation, too: +select rank(sum(x)) within group (order by x) from generate_series(1,5) x; +ERROR: aggregate function calls cannot be nested +LINE 1: select rank(sum(x)) within group (order by x) from generate_... + ^ +-- hypothetical-set type unification and argument-count failures: +select rank(3) within group (order by x) from (values ('fred'),('jim')) v(x); +ERROR: WITHIN GROUP types text and integer cannot be matched +LINE 1: select rank(3) within group (order by x) from (values ('fred... + ^ +select rank(3) within group (order by stringu1,stringu2) from tenk1; +ERROR: function rank(integer, name, name) does not exist +LINE 1: select rank(3) within group (order by stringu1,stringu2) fro... + ^ +HINT: To use the hypothetical-set aggregate rank, the number of hypothetical direct arguments (here 1) must match the number of ordering columns (here 2). +select rank('fred') within group (order by x) from generate_series(1,5) x; +ERROR: invalid input syntax for type integer: "fred" +LINE 1: select rank('fred') within group (order by x) from generate_... + ^ +select rank('adam'::text collate "C") within group (order by x collate "POSIX") + from (values ('fred'),('jim')) v(x); +ERROR: collation mismatch between explicit collations "C" and "POSIX" +LINE 1: ...adam'::text collate "C") within group (order by x collate "P... 
+ ^ +-- hypothetical-set type unification successes: +select rank('adam'::varchar) within group (order by x) from (values ('fred'),('jim')) v(x); + rank +------ + 1 +(1 row) + +select rank('3') within group (order by x) from generate_series(1,5) x; + rank +------ + 3 +(1 row) + +-- divide by zero check +select percent_rank(0) within group (order by x) from generate_series(1,0) x; + percent_rank +-------------- + 0 +(1 row) + +-- deparse and multiple features: +create view aggordview1 as +select ten, + percentile_disc(0.5) within group (order by thousand) as p50, + percentile_disc(0.5) within group (order by thousand) filter (where hundred=1) as px, + rank(5,'AZZZZ',50) within group (order by hundred, string4 desc, hundred) + from tenk1 + group by ten order by ten; +select pg_get_viewdef('aggordview1'); + pg_get_viewdef +------------------------------------------------------------------------------------------------------------------- + SELECT ten, + + percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) AS p50, + + percentile_disc((0.5)::double precision) WITHIN GROUP (ORDER BY thousand) FILTER (WHERE (hundred = 1)) AS px,+ + rank(5, 'AZZZZ'::name, 50) WITHIN GROUP (ORDER BY hundred, string4 DESC, hundred) AS rank + + FROM tenk1 + + GROUP BY ten + + ORDER BY ten; +(1 row) + +select * from aggordview1 order by ten; + ten | p50 | px | rank +-----+-----+-----+------ + 0 | 490 | | 101 + 1 | 491 | 401 | 101 + 2 | 492 | | 101 + 3 | 493 | | 101 + 4 | 494 | | 101 + 5 | 495 | | 67 + 6 | 496 | | 1 + 7 | 497 | | 1 + 8 | 498 | | 1 + 9 | 499 | | 1 +(10 rows) + +drop view aggordview1; +-- variadic aggregates +select least_agg(q1,q2) from int8_tbl; + least_agg +------------------- + -4567890123456789 +(1 row) + +select least_agg(variadic array[q1,q2]) from int8_tbl; + least_agg +------------------- + -4567890123456789 +(1 row) + +select cleast_agg(q1,q2) from int8_tbl; + cleast_agg +------------------- + -4567890123456789 +(1 row) + +select cleast_agg(4.5,f1) from int4_tbl; + cleast_agg +------------- + -2147483647 +(1 row) + +select cleast_agg(variadic array[4.5,f1]) from int4_tbl; + cleast_agg +------------- + -2147483647 +(1 row) + +select pg_typeof(cleast_agg(variadic array[4.5,f1])) from int4_tbl; + pg_typeof +----------- + numeric +(1 row) + +-- test aggregates with common transition functions share the same states +begin work; +create type avg_state as (total bigint, count bigint); +create or replace function avg_transfn(state avg_state, n int) returns avg_state as +$$ +declare new_state avg_state; +begin + raise notice 'avg_transfn called with %', n; + if state is null then + if n is not null then + new_state.total := n; + new_state.count := 1; + return new_state; + end if; + return null; + elsif n is not null then + state.total := state.total + n; + state.count := state.count + 1; + return state; + end if; + + return null; +end +$$ language plpgsql; +create function avg_finalfn(state avg_state) returns int4 as +$$ +begin + if state is null then + return NULL; + else + return state.total / state.count; + end if; +end +$$ language plpgsql; +create function sum_finalfn(state avg_state) returns int4 as +$$ +begin + if state is null then + return NULL; + else + return state.total; + end if; +end +$$ language plpgsql; +create aggregate my_avg(int4) +( + stype = avg_state, + sfunc = avg_transfn, + finalfunc = avg_finalfn +); +create aggregate my_sum(int4) +( + stype = avg_state, + sfunc = avg_transfn, + finalfunc = sum_finalfn +); +-- aggregate state should be shared as aggs are 
the same. +select my_avg(one),my_avg(one) from (values(1),(3)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 + my_avg | my_avg +--------+-------- + 2 | 2 +(1 row) + +-- aggregate state should be shared as transfn is the same for both aggs. +select my_avg(one),my_sum(one) from (values(1),(3)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 + my_avg | my_sum +--------+-------- + 2 | 4 +(1 row) + +-- same as previous one, but with DISTINCT, which requires sorting the input. +select my_avg(distinct one),my_sum(distinct one) from (values(1),(3),(1)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 + my_avg | my_sum +--------+-------- + 2 | 4 +(1 row) + +-- shouldn't share states due to the distinctness not matching. +select my_avg(distinct one),my_sum(one) from (values(1),(3)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 +NOTICE: avg_transfn called with 3 + my_avg | my_sum +--------+-------- + 2 | 4 +(1 row) + +-- shouldn't share states due to the filter clause not matching. +select my_avg(one) filter (where one > 1),my_sum(one) from (values(1),(3)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 +NOTICE: avg_transfn called with 3 + my_avg | my_sum +--------+-------- + 3 | 4 +(1 row) + +-- this should not share the state due to different input columns. +select my_avg(one),my_sum(two) from (values(1,2),(3,4)) t(one,two); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 2 +NOTICE: avg_transfn called with 3 +NOTICE: avg_transfn called with 4 + my_avg | my_sum +--------+-------- + 2 | 6 +(1 row) + +-- exercise cases where OSAs share state +select + percentile_cont(0.5) within group (order by a), + percentile_disc(0.5) within group (order by a) +from (values(1::float8),(3),(5),(7)) t(a); + percentile_cont | percentile_disc +-----------------+----------------- + 4 | 3 +(1 row) + +select + percentile_cont(0.25) within group (order by a), + percentile_disc(0.5) within group (order by a) +from (values(1::float8),(3),(5),(7)) t(a); + percentile_cont | percentile_disc +-----------------+----------------- + 2.5 | 3 +(1 row) + +-- these can't share state currently +select + rank(4) within group (order by a), + dense_rank(4) within group (order by a) +from (values(1),(3),(5),(7)) t(a); + rank | dense_rank +------+------------ + 3 | 3 +(1 row) + +-- test that aggs with the same sfunc and initcond share the same agg state +create aggregate my_sum_init(int4) +( + stype = avg_state, + sfunc = avg_transfn, + finalfunc = sum_finalfn, + initcond = '(10,0)' +); +create aggregate my_avg_init(int4) +( + stype = avg_state, + sfunc = avg_transfn, + finalfunc = avg_finalfn, + initcond = '(10,0)' +); +create aggregate my_avg_init2(int4) +( + stype = avg_state, + sfunc = avg_transfn, + finalfunc = avg_finalfn, + initcond = '(4,0)' +); +-- state should be shared if INITCONDs are matching +select my_sum_init(one),my_avg_init(one) from (values(1),(3)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 + my_sum_init | my_avg_init +-------------+------------- + 14 | 7 +(1 row) + +-- Varying INITCONDs should cause the states not to be shared. 
+select my_sum_init(one),my_avg_init2(one) from (values(1),(3)) t(one); +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 1 +NOTICE: avg_transfn called with 3 +NOTICE: avg_transfn called with 3 + my_sum_init | my_avg_init2 +-------------+-------------- + 14 | 4 +(1 row) + +rollback; +-- test aggregate state sharing to ensure it works if one aggregate has a +-- finalfn and the other one has none. +begin work; +create or replace function sum_transfn(state int4, n int4) returns int4 as +$$ +declare new_state int4; +begin + raise notice 'sum_transfn called with %', n; + if state is null then + if n is not null then + new_state := n; + return new_state; + end if; + return null; + elsif n is not null then + state := state + n; + return state; + end if; + + return null; +end +$$ language plpgsql; +create function halfsum_finalfn(state int4) returns int4 as +$$ +begin + if state is null then + return NULL; + else + return state / 2; + end if; +end +$$ language plpgsql; +create aggregate my_sum(int4) +( + stype = int4, + sfunc = sum_transfn +); +create aggregate my_half_sum(int4) +( + stype = int4, + sfunc = sum_transfn, + finalfunc = halfsum_finalfn +); +-- Agg state should be shared even though my_sum has no finalfn +select my_sum(one),my_half_sum(one) from (values(1),(2),(3),(4)) t(one); +NOTICE: sum_transfn called with 1 +NOTICE: sum_transfn called with 2 +NOTICE: sum_transfn called with 3 +NOTICE: sum_transfn called with 4 + my_sum | my_half_sum +--------+------------- + 10 | 5 +(1 row) + +rollback; +-- test that the aggregate transition logic correctly handles +-- transition / combine functions returning NULL +-- First test the case of a normal transition function returning NULL +BEGIN; +CREATE FUNCTION balkifnull(int8, int4) +RETURNS int8 +STRICT +LANGUAGE plpgsql AS $$ +BEGIN + IF $1 IS NULL THEN + RAISE 'erroneously called with NULL argument'; + END IF; + RETURN NULL; +END$$; +CREATE AGGREGATE balk(int4) +( + SFUNC = balkifnull(int8, int4), + STYPE = int8, + PARALLEL = SAFE, + INITCOND = '0' +); +SELECT balk(hundred) FROM tenk1; + balk +------ + +(1 row) + +ROLLBACK; +-- Secondly test the case of a parallel aggregate combiner function +-- returning NULL. For that use normal transition function, but a +-- combiner function returning NULL. 
+BEGIN; +CREATE FUNCTION balkifnull(int8, int8) +RETURNS int8 +PARALLEL SAFE +STRICT +LANGUAGE plpgsql AS $$ +BEGIN + IF $1 IS NULL THEN + RAISE 'erroneously called with NULL argument'; + END IF; + RETURN NULL; +END$$; +CREATE AGGREGATE balk(int4) +( + SFUNC = int4_sum(int8, int4), + STYPE = int8, + COMBINEFUNC = balkifnull(int8, int8), + PARALLEL = SAFE, + INITCOND = '0' +); +-- force use of parallelism +ALTER TABLE tenk1 set (parallel_workers = 4); +SET LOCAL parallel_setup_cost=0; +SET LOCAL max_parallel_workers_per_gather=4; +EXPLAIN (COSTS OFF) SELECT balk(hundred) FROM tenk1; + QUERY PLAN +------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_hundred on tenk1 +(5 rows) + +SELECT balk(hundred) FROM tenk1; + balk +------ + +(1 row) + +ROLLBACK; +-- test multiple usage of an aggregate whose finalfn returns a R/W datum +BEGIN; +CREATE FUNCTION rwagg_sfunc(x anyarray, y anyarray) RETURNS anyarray +LANGUAGE plpgsql IMMUTABLE AS $$ +BEGIN + RETURN array_fill(y[1], ARRAY[4]); +END; +$$; +CREATE FUNCTION rwagg_finalfunc(x anyarray) RETURNS anyarray +LANGUAGE plpgsql STRICT IMMUTABLE AS $$ +DECLARE + res x%TYPE; +BEGIN + -- assignment is essential for this test, it expands the array to R/W + res := array_fill(x[1], ARRAY[4]); + RETURN res; +END; +$$; +CREATE AGGREGATE rwagg(anyarray) ( + STYPE = anyarray, + SFUNC = rwagg_sfunc, + FINALFUNC = rwagg_finalfunc +); +CREATE FUNCTION eatarray(x real[]) RETURNS real[] +LANGUAGE plpgsql STRICT IMMUTABLE AS $$ +BEGIN + x[1] := x[1] + 1; + RETURN x; +END; +$$; +SELECT eatarray(rwagg(ARRAY[1.0::real])), eatarray(rwagg(ARRAY[1.0::real])); + eatarray | eatarray +-----------+----------- + {2,1,1,1} | {2,1,1,1} +(1 row) + +ROLLBACK; +-- test coverage for aggregate combine/serial/deserial functions +BEGIN; +SET parallel_setup_cost = 0; +SET parallel_tuple_cost = 0; +SET min_parallel_table_scan_size = 0; +SET max_parallel_workers_per_gather = 4; +SET parallel_leader_participation = off; +SET enable_indexonlyscan = off; +-- variance(int4) covers numeric_poly_combine +-- sum(int8) covers int8_avg_combine +-- regr_count(float8, float8) covers int8inc_float8_float8 and aggregates with > 1 arg +EXPLAIN (COSTS OFF, VERBOSE) +SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8) +FROM (SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1) u; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate + Output: variance(tenk1.unique1), sum((tenk1.unique1)::bigint), regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision) + -> Gather + Output: (PARTIAL variance(tenk1.unique1)), (PARTIAL sum((tenk1.unique1)::bigint)), (PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision)) + Workers Planned: 4 + -> Partial Aggregate + Output: PARTIAL variance(tenk1.unique1), PARTIAL sum((tenk1.unique1)::bigint), PARTIAL regr_count((tenk1.unique1)::double precision, (tenk1.unique1)::double precision) + -> Parallel Append + -> Parallel Seq Scan on public.tenk1 + Output: tenk1.unique1 + -> Parallel Seq Scan on public.tenk1 tenk1_1 + Output: tenk1_1.unique1 + -> Parallel Seq Scan on public.tenk1 tenk1_2 + Output: tenk1_2.unique1 + -> 
Parallel Seq Scan on public.tenk1 tenk1_3 + Output: tenk1_3.unique1 +(16 rows) + +SELECT variance(unique1::int4), sum(unique1::int8), regr_count(unique1::float8, unique1::float8) +FROM (SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1) u; + variance | sum | regr_count +----------------------+-----------+------------ + 8333541.588539713493 | 199980000 | 40000 +(1 row) + +-- variance(int8) covers numeric_combine +-- avg(numeric) covers numeric_avg_combine +EXPLAIN (COSTS OFF, VERBOSE) +SELECT variance(unique1::int8), avg(unique1::numeric) +FROM (SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1) u; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Finalize Aggregate + Output: variance((tenk1.unique1)::bigint), avg((tenk1.unique1)::numeric) + -> Gather + Output: (PARTIAL variance((tenk1.unique1)::bigint)), (PARTIAL avg((tenk1.unique1)::numeric)) + Workers Planned: 4 + -> Partial Aggregate + Output: PARTIAL variance((tenk1.unique1)::bigint), PARTIAL avg((tenk1.unique1)::numeric) + -> Parallel Append + -> Parallel Seq Scan on public.tenk1 + Output: tenk1.unique1 + -> Parallel Seq Scan on public.tenk1 tenk1_1 + Output: tenk1_1.unique1 + -> Parallel Seq Scan on public.tenk1 tenk1_2 + Output: tenk1_2.unique1 + -> Parallel Seq Scan on public.tenk1 tenk1_3 + Output: tenk1_3.unique1 +(16 rows) + +SELECT variance(unique1::int8), avg(unique1::numeric) +FROM (SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1 + UNION ALL SELECT * FROM tenk1) u; + variance | avg +----------------------+----------------------- + 8333541.588539713493 | 4999.5000000000000000 +(1 row) + +ROLLBACK; +-- test coverage for dense_rank +SELECT dense_rank(x) WITHIN GROUP (ORDER BY x) FROM (VALUES (1),(1),(2),(2),(3),(3)) v(x) GROUP BY (x) ORDER BY 1; + dense_rank +------------ + 1 + 1 + 1 +(3 rows) + +-- Ensure that the STRICT checks for aggregates does not take NULLness +-- of ORDER BY columns into account. See bug report around +-- 2a505161-2727-2473-7c46-591ed108ac52@email.cz +SELECT min(x ORDER BY y) FROM (VALUES(1, NULL)) AS d(x,y); + min +----- + 1 +(1 row) + +SELECT min(x ORDER BY y) FROM (VALUES(1, 2)) AS d(x,y); + min +----- + 1 +(1 row) + +-- check collation-sensitive matching between grouping expressions +select v||'a', case v||'a' when 'aa' then 1 else 0 end, count(*) + from unnest(array['a','b']) u(v) + group by v||'a' order by 1; + ?column? | case | count +----------+------+------- + aa | 1 | 1 + ba | 0 | 1 +(2 rows) + +select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*) + from unnest(array['a','b']) u(v) + group by v||'a' order by 1; + ?column? 
| case | count +----------+------+------- + aa | 1 | 1 + ba | 0 | 1 +(2 rows) + +-- Make sure that generation of HashAggregate for uniqification purposes +-- does not lead to array overflow due to unexpected duplicate hash keys +-- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com +set enable_memoize to off; +explain (costs off) + select 1 from tenk1 + where (hundred, thousand) in (select twothousand, twothousand from onek); + QUERY PLAN +------------------------------------------------------------- + Hash Join + Hash Cond: (tenk1.hundred = onek.twothousand) + -> Seq Scan on tenk1 + Filter: (hundred = thousand) + -> Hash + -> HashAggregate + Group Key: onek.twothousand, onek.twothousand + -> Seq Scan on onek +(8 rows) + +reset enable_memoize; +-- +-- Hash Aggregation Spill tests +-- +set enable_sort=false; +set work_mem='64kB'; +select unique1, count(*), sum(twothousand) from tenk1 +group by unique1 +having sum(fivethous) > 4975 +order by sum(twothousand); + unique1 | count | sum +---------+-------+------ + 4976 | 1 | 976 + 4977 | 1 | 977 + 4978 | 1 | 978 + 4979 | 1 | 979 + 4980 | 1 | 980 + 4981 | 1 | 981 + 4982 | 1 | 982 + 4983 | 1 | 983 + 4984 | 1 | 984 + 4985 | 1 | 985 + 4986 | 1 | 986 + 4987 | 1 | 987 + 4988 | 1 | 988 + 4989 | 1 | 989 + 4990 | 1 | 990 + 4991 | 1 | 991 + 4992 | 1 | 992 + 4993 | 1 | 993 + 4994 | 1 | 994 + 4995 | 1 | 995 + 4996 | 1 | 996 + 4997 | 1 | 997 + 4998 | 1 | 998 + 4999 | 1 | 999 + 9976 | 1 | 1976 + 9977 | 1 | 1977 + 9978 | 1 | 1978 + 9979 | 1 | 1979 + 9980 | 1 | 1980 + 9981 | 1 | 1981 + 9982 | 1 | 1982 + 9983 | 1 | 1983 + 9984 | 1 | 1984 + 9985 | 1 | 1985 + 9986 | 1 | 1986 + 9987 | 1 | 1987 + 9988 | 1 | 1988 + 9989 | 1 | 1989 + 9990 | 1 | 1990 + 9991 | 1 | 1991 + 9992 | 1 | 1992 + 9993 | 1 | 1993 + 9994 | 1 | 1994 + 9995 | 1 | 1995 + 9996 | 1 | 1996 + 9997 | 1 | 1997 + 9998 | 1 | 1998 + 9999 | 1 | 1999 +(48 rows) + +set work_mem to default; +set enable_sort to default; +-- +-- Compare results between plans using sorting and plans using hash +-- aggregation. Force spilling in both cases by setting work_mem low. +-- +set work_mem='64kB'; +create table agg_data_2k as +select g from generate_series(0, 1999) g; +analyze agg_data_2k; +create table agg_data_20k as +select g from generate_series(0, 19999) g; +analyze agg_data_20k; +-- Produce results with sorting. 
+set enable_hashagg = false; +set jit_above_cost = 0; +explain (costs off) +select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 + from agg_data_20k group by g%10000; + QUERY PLAN +-------------------------------------- + GroupAggregate + Group Key: ((g % 10000)) + -> Sort + Sort Key: ((g % 10000)) + -> Seq Scan on agg_data_20k +(5 rows) + +create table agg_group_1 as +select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 + from agg_data_20k group by g%10000; +create table agg_group_2 as +select * from + (values (100), (300), (500)) as r(a), + lateral ( + select (g/2)::numeric as c1, + array_agg(g::numeric) as c2, + count(*) as c3 + from agg_data_2k + where g < r.a + group by g/2) as s; +set jit_above_cost to default; +create table agg_group_3 as +select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3 + from agg_data_2k group by g/2; +create table agg_group_4 as +select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 + from agg_data_2k group by g/2; +-- Produce results with hash aggregation +set enable_hashagg = true; +set enable_sort = false; +set jit_above_cost = 0; +explain (costs off) +select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 + from agg_data_20k group by g%10000; + QUERY PLAN +-------------------------------- + HashAggregate + Group Key: (g % 10000) + -> Seq Scan on agg_data_20k +(3 rows) + +create table agg_hash_1 as +select g%10000 as c1, sum(g::numeric) as c2, count(*) as c3 + from agg_data_20k group by g%10000; +create table agg_hash_2 as +select * from + (values (100), (300), (500)) as r(a), + lateral ( + select (g/2)::numeric as c1, + array_agg(g::numeric) as c2, + count(*) as c3 + from agg_data_2k + where g < r.a + group by g/2) as s; +set jit_above_cost to default; +create table agg_hash_3 as +select (g/2)::numeric as c1, sum(7::int4) as c2, count(*) as c3 + from agg_data_2k group by g/2; +create table agg_hash_4 as +select (g/2)::numeric as c1, array_agg(g::numeric) as c2, count(*) as c3 + from agg_data_2k group by g/2; +set enable_sort = true; +set work_mem to default; +-- Compare group aggregation results to hash aggregation results +(select * from agg_hash_1 except select * from agg_group_1) + union all +(select * from agg_group_1 except select * from agg_hash_1); + c1 | c2 | c3 +----+----+---- +(0 rows) + +(select * from agg_hash_2 except select * from agg_group_2) + union all +(select * from agg_group_2 except select * from agg_hash_2); + a | c1 | c2 | c3 +---+----+----+---- +(0 rows) + +(select * from agg_hash_3 except select * from agg_group_3) + union all +(select * from agg_group_3 except select * from agg_hash_3); + c1 | c2 | c3 +----+----+---- +(0 rows) + +(select * from agg_hash_4 except select * from agg_group_4) + union all +(select * from agg_group_4 except select * from agg_hash_4); + c1 | c2 | c3 +----+----+---- +(0 rows) + +drop table agg_group_1; +drop table agg_group_2; +drop table agg_group_3; +drop table agg_group_4; +drop table agg_hash_1; +drop table agg_hash_2; +drop table agg_hash_3; +drop table agg_hash_4; diff --git a/src/test/regress/expected/alter_generic.out b/src/test/regress/expected/alter_generic.out new file mode 100644 index 0000000..ae54cb2 --- /dev/null +++ b/src/test/regress/expected/alter_generic.out @@ -0,0 +1,755 @@ +-- +-- Test for ALTER some_object {RENAME TO, OWNER TO, SET SCHEMA} +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION 
test_opclass_options_func(internal) + RETURNS void + AS :'regresslib', 'test_opclass_options_func' + LANGUAGE C; +-- Clean up in case a prior regression run failed +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_alter_generic_user1; +DROP ROLE IF EXISTS regress_alter_generic_user2; +DROP ROLE IF EXISTS regress_alter_generic_user3; +RESET client_min_messages; +CREATE USER regress_alter_generic_user3; +CREATE USER regress_alter_generic_user2; +CREATE USER regress_alter_generic_user1 IN ROLE regress_alter_generic_user3; +CREATE SCHEMA alt_nsp1; +CREATE SCHEMA alt_nsp2; +GRANT ALL ON SCHEMA alt_nsp1, alt_nsp2 TO public; +SET search_path = alt_nsp1, public; +-- +-- Function and Aggregate +-- +SET SESSION AUTHORIZATION regress_alter_generic_user1; +CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql + AS 'SELECT $1 + 1'; +CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql + AS 'SELECT $1 - 1'; +CREATE AGGREGATE alt_agg1 ( + sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 0 +); +CREATE AGGREGATE alt_agg2 ( + sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = 0 +); +ALTER AGGREGATE alt_func1(int) RENAME TO alt_func3; -- failed (not aggregate) +ERROR: function alt_func1(integer) is not an aggregate +ALTER AGGREGATE alt_func1(int) OWNER TO regress_alter_generic_user3; -- failed (not aggregate) +ERROR: function alt_func1(integer) is not an aggregate +ALTER AGGREGATE alt_func1(int) SET SCHEMA alt_nsp2; -- failed (not aggregate) +ERROR: function alt_func1(integer) is not an aggregate +ALTER FUNCTION alt_func1(int) RENAME TO alt_func2; -- failed (name conflict) +ERROR: function alt_func2(integer) already exists in schema "alt_nsp1" +ALTER FUNCTION alt_func1(int) RENAME TO alt_func3; -- OK +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- OK +ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp1; -- OK, already there +ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- OK +ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg2; -- failed (name conflict) +ERROR: function alt_agg2(integer) already exists in schema "alt_nsp1" +ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg3; -- OK +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- OK +ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user2; +CREATE FUNCTION alt_func1(int) RETURNS int LANGUAGE sql + AS 'SELECT $1 + 2'; +CREATE FUNCTION alt_func2(int) RETURNS int LANGUAGE sql + AS 'SELECT $1 - 2'; +CREATE AGGREGATE alt_agg1 ( + sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond = 100 +); +CREATE AGGREGATE alt_agg2 ( + sfunc1 = int4mi, basetype = int4, stype1 = int4, initcond = -100 +); +ALTER FUNCTION alt_func3(int) RENAME TO alt_func4; -- failed (not owner) +ERROR: must be owner of function alt_func3 +ALTER FUNCTION alt_func1(int) RENAME TO alt_func4; -- OK +ALTER FUNCTION alt_func3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of function alt_func3 +ALTER FUNCTION alt_func2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER FUNCTION alt_func3(int) SET SCHEMA 
alt_nsp2; -- failed (not owner) +ERROR: must be owner of function alt_func3 +ALTER FUNCTION alt_func2(int) SET SCHEMA alt_nsp2; -- failed (name conflicts) +ERROR: function alt_func2(integer) already exists in schema "alt_nsp2" +ALTER AGGREGATE alt_agg3(int) RENAME TO alt_agg4; -- failed (not owner) +ERROR: must be owner of function alt_agg3 +ALTER AGGREGATE alt_agg1(int) RENAME TO alt_agg4; -- OK +ALTER AGGREGATE alt_agg3(int) OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of function alt_agg3 +ALTER AGGREGATE alt_agg2(int) OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER AGGREGATE alt_agg3(int) SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of function alt_agg3 +ALTER AGGREGATE alt_agg2(int) SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: function alt_agg2(integer) already exists in schema "alt_nsp2" +RESET SESSION AUTHORIZATION; +SELECT n.nspname, proname, prorettype::regtype, prokind, a.rolname + FROM pg_proc p, pg_namespace n, pg_authid a + WHERE p.pronamespace = n.oid AND p.proowner = a.oid + AND n.nspname IN ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, proname; + nspname | proname | prorettype | prokind | rolname +----------+-----------+------------+---------+----------------------------- + alt_nsp1 | alt_agg2 | integer | a | regress_alter_generic_user2 + alt_nsp1 | alt_agg3 | integer | a | regress_alter_generic_user1 + alt_nsp1 | alt_agg4 | integer | a | regress_alter_generic_user2 + alt_nsp1 | alt_func2 | integer | f | regress_alter_generic_user2 + alt_nsp1 | alt_func3 | integer | f | regress_alter_generic_user1 + alt_nsp1 | alt_func4 | integer | f | regress_alter_generic_user2 + alt_nsp2 | alt_agg2 | integer | a | regress_alter_generic_user3 + alt_nsp2 | alt_func2 | integer | f | regress_alter_generic_user3 +(8 rows) + +-- +-- We would test collations here, but it's not possible because the error +-- messages tend to be nonportable. 
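+-- An illustrative aside, not captured from a regression run: the aggregates
+-- above use the legacy BASETYPE/SFUNC1/STYPE1 spelling of CREATE AGGREGATE.
+-- The modern argument-list form of the same definition (alt_agg_demo is a
+-- hypothetical throwaway name) would be:
+CREATE AGGREGATE alt_agg_demo (int4) (sfunc = int4pl, stype = int4, initcond = '0');
+DROP AGGREGATE alt_agg_demo (int4);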
+-- +-- +-- Conversion +-- +SET SESSION AUTHORIZATION regress_alter_generic_user1; +CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ALTER CONVERSION alt_conv1 RENAME TO alt_conv2; -- failed (name conflict) +ERROR: conversion "alt_conv2" already exists in schema "alt_nsp1" +ALTER CONVERSION alt_conv1 RENAME TO alt_conv3; -- OK +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- OK +ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user2; +CREATE CONVERSION alt_conv1 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +CREATE CONVERSION alt_conv2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ALTER CONVERSION alt_conv3 RENAME TO alt_conv4; -- failed (not owner) +ERROR: must be owner of conversion alt_conv3 +ALTER CONVERSION alt_conv1 RENAME TO alt_conv4; -- OK +ALTER CONVERSION alt_conv3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of conversion alt_conv3 +ALTER CONVERSION alt_conv2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER CONVERSION alt_conv3 SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of conversion alt_conv3 +ALTER CONVERSION alt_conv2 SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: conversion "alt_conv2" already exists in schema "alt_nsp2" +RESET SESSION AUTHORIZATION; +SELECT n.nspname, c.conname, a.rolname + FROM pg_conversion c, pg_namespace n, pg_authid a + WHERE c.connamespace = n.oid AND c.conowner = a.oid + AND n.nspname IN ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, conname; + nspname | conname | rolname +----------+-----------+----------------------------- + alt_nsp1 | alt_conv2 | regress_alter_generic_user2 + alt_nsp1 | alt_conv3 | regress_alter_generic_user1 + alt_nsp1 | alt_conv4 | regress_alter_generic_user2 + alt_nsp2 | alt_conv2 | regress_alter_generic_user3 +(4 rows) + +-- +-- Foreign Data Wrapper and Foreign Server +-- +CREATE FOREIGN DATA WRAPPER alt_fdw1; +CREATE FOREIGN DATA WRAPPER alt_fdw2; +CREATE SERVER alt_fserv1 FOREIGN DATA WRAPPER alt_fdw1; +CREATE SERVER alt_fserv2 FOREIGN DATA WRAPPER alt_fdw2; +ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw2; -- failed (name conflict) +ERROR: foreign-data wrapper "alt_fdw2" already exists +ALTER FOREIGN DATA WRAPPER alt_fdw1 RENAME TO alt_fdw3; -- OK +ALTER SERVER alt_fserv1 RENAME TO alt_fserv2; -- failed (name conflict) +ERROR: server "alt_fserv2" already exists +ALTER SERVER alt_fserv1 RENAME TO alt_fserv3; -- OK +SELECT fdwname FROM pg_foreign_data_wrapper WHERE fdwname like 'alt_fdw%'; + fdwname +---------- + alt_fdw2 + alt_fdw3 +(2 rows) + +SELECT srvname FROM pg_foreign_server WHERE srvname like 'alt_fserv%'; + srvname +------------ + alt_fserv2 + alt_fserv3 +(2 rows) + +-- +-- Procedural Language +-- +CREATE LANGUAGE alt_lang1 HANDLER plpgsql_call_handler; +CREATE LANGUAGE alt_lang2 HANDLER plpgsql_call_handler; +ALTER LANGUAGE alt_lang1 OWNER TO regress_alter_generic_user1; -- OK +ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user1; +ALTER LANGUAGE alt_lang1 RENAME TO alt_lang2; -- failed (name conflict) +ERROR: language "alt_lang2" already exists +ALTER 
LANGUAGE alt_lang2 RENAME TO alt_lang3; -- failed (not owner) +ERROR: must be owner of language alt_lang2 +ALTER LANGUAGE alt_lang1 RENAME TO alt_lang3; -- OK +ALTER LANGUAGE alt_lang2 OWNER TO regress_alter_generic_user3; -- failed (not owner) +ERROR: must be owner of language alt_lang2 +ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER LANGUAGE alt_lang3 OWNER TO regress_alter_generic_user3; -- OK +RESET SESSION AUTHORIZATION; +SELECT lanname, a.rolname + FROM pg_language l, pg_authid a + WHERE l.lanowner = a.oid AND l.lanname like 'alt_lang%' + ORDER BY lanname; + lanname | rolname +-----------+----------------------------- + alt_lang2 | regress_alter_generic_user2 + alt_lang3 | regress_alter_generic_user3 +(2 rows) + +-- +-- Operator +-- +SET SESSION AUTHORIZATION regress_alter_generic_user1; +CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); +CREATE OPERATOR @+@ ( leftarg = int4, rightarg = int4, procedure = int4pl ); +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user3; -- OK +ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user2; +CREATE OPERATOR @-@ ( leftarg = int4, rightarg = int4, procedure = int4mi ); +ALTER OPERATOR @+@(int4, int4) OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of operator @+@ +ALTER OPERATOR @-@(int4, int4) OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER OPERATOR @+@(int4, int4) SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of operator @+@ +-- can't test this: the error message includes the raw oid of namespace +-- ALTER OPERATOR @-@(int4, int4) SET SCHEMA alt_nsp2; -- failed (name conflict) +RESET SESSION AUTHORIZATION; +SELECT n.nspname, oprname, a.rolname, + oprleft::regtype, oprright::regtype, oprcode::regproc + FROM pg_operator o, pg_namespace n, pg_authid a + WHERE o.oprnamespace = n.oid AND o.oprowner = a.oid + AND n.nspname IN ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, oprname; + nspname | oprname | rolname | oprleft | oprright | oprcode +----------+---------+-----------------------------+---------+----------+--------- + alt_nsp1 | @+@ | regress_alter_generic_user3 | integer | integer | int4pl + alt_nsp1 | @-@ | regress_alter_generic_user2 | integer | integer | int4mi + alt_nsp2 | @-@ | regress_alter_generic_user1 | integer | integer | int4mi +(3 rows) + +-- +-- OpFamily and OpClass +-- +CREATE OPERATOR FAMILY alt_opf1 USING hash; +CREATE OPERATOR FAMILY alt_opf2 USING hash; +ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user1; +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user1; +CREATE OPERATOR CLASS alt_opc1 FOR TYPE uuid USING hash AS STORAGE uuid; +CREATE OPERATOR CLASS alt_opc2 FOR TYPE uuid USING hash AS STORAGE uuid; +ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user1; +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user1; +SET SESSION AUTHORIZATION regress_alter_generic_user1; +ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf2; -- failed (name conflict) +ERROR: operator family "alt_opf2" for access method 
"hash" already exists in schema "alt_nsp1" +ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf3; -- OK +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- OK +ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- OK +ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc2; -- failed (name conflict) +ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp1" +ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc3; -- OK +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- OK +ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- OK +RESET SESSION AUTHORIZATION; +CREATE OPERATOR FAMILY alt_opf1 USING hash; +CREATE OPERATOR FAMILY alt_opf2 USING hash; +ALTER OPERATOR FAMILY alt_opf1 USING hash OWNER TO regress_alter_generic_user2; +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user2; +CREATE OPERATOR CLASS alt_opc1 FOR TYPE macaddr USING hash AS STORAGE macaddr; +CREATE OPERATOR CLASS alt_opc2 FOR TYPE macaddr USING hash AS STORAGE macaddr; +ALTER OPERATOR CLASS alt_opc1 USING hash OWNER TO regress_alter_generic_user2; +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user2; +SET SESSION AUTHORIZATION regress_alter_generic_user2; +ALTER OPERATOR FAMILY alt_opf3 USING hash RENAME TO alt_opf4; -- failed (not owner) +ERROR: must be owner of operator family alt_opf3 +ALTER OPERATOR FAMILY alt_opf1 USING hash RENAME TO alt_opf4; -- OK +ALTER OPERATOR FAMILY alt_opf3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of operator family alt_opf3 +ALTER OPERATOR FAMILY alt_opf2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER OPERATOR FAMILY alt_opf3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of operator family alt_opf3 +ALTER OPERATOR FAMILY alt_opf2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: operator family "alt_opf2" for access method "hash" already exists in schema "alt_nsp2" +ALTER OPERATOR CLASS alt_opc3 USING hash RENAME TO alt_opc4; -- failed (not owner) +ERROR: must be owner of operator class alt_opc3 +ALTER OPERATOR CLASS alt_opc1 USING hash RENAME TO alt_opc4; -- OK +ALTER OPERATOR CLASS alt_opc3 USING hash OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of operator class alt_opc3 +ALTER OPERATOR CLASS alt_opc2 USING hash OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER OPERATOR CLASS alt_opc3 USING hash SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of operator class alt_opc3 +ALTER OPERATOR CLASS alt_opc2 USING hash SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: operator class "alt_opc2" for access method "hash" already exists in schema "alt_nsp2" +RESET SESSION AUTHORIZATION; +SELECT nspname, opfname, amname, rolname + FROM pg_opfamily o, pg_am m, pg_namespace n, pg_authid a + WHERE o.opfmethod = m.oid AND 
o.opfnamespace = n.oid AND o.opfowner = a.oid + AND n.nspname IN ('alt_nsp1', 'alt_nsp2') + AND NOT opfname LIKE 'alt_opc%' + ORDER BY nspname, opfname; + nspname | opfname | amname | rolname +----------+----------+--------+----------------------------- + alt_nsp1 | alt_opf2 | hash | regress_alter_generic_user2 + alt_nsp1 | alt_opf3 | hash | regress_alter_generic_user1 + alt_nsp1 | alt_opf4 | hash | regress_alter_generic_user2 + alt_nsp2 | alt_opf2 | hash | regress_alter_generic_user3 +(4 rows) + +SELECT nspname, opcname, amname, rolname + FROM pg_opclass o, pg_am m, pg_namespace n, pg_authid a + WHERE o.opcmethod = m.oid AND o.opcnamespace = n.oid AND o.opcowner = a.oid + AND n.nspname IN ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, opcname; + nspname | opcname | amname | rolname +----------+----------+--------+----------------------------- + alt_nsp1 | alt_opc2 | hash | regress_alter_generic_user2 + alt_nsp1 | alt_opc3 | hash | regress_alter_generic_user1 + alt_nsp1 | alt_opc4 | hash | regress_alter_generic_user2 + alt_nsp2 | alt_opc2 | hash | regress_alter_generic_user3 +(4 rows) + +-- ALTER OPERATOR FAMILY ... ADD/DROP +-- Should work. Textbook case of CREATE / ALTER ADD / ALTER DROP / DROP +BEGIN TRANSACTION; +CREATE OPERATOR FAMILY alt_opf4 USING btree; +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD + -- int4 vs int2 + OPERATOR 1 < (int4, int2) , + OPERATOR 2 <= (int4, int2) , + OPERATOR 3 = (int4, int2) , + OPERATOR 4 >= (int4, int2) , + OPERATOR 5 > (int4, int2) , + FUNCTION 1 btint42cmp(int4, int2); +ALTER OPERATOR FAMILY alt_opf4 USING btree DROP + -- int4 vs int2 + OPERATOR 1 (int4, int2) , + OPERATOR 2 (int4, int2) , + OPERATOR 3 (int4, int2) , + OPERATOR 4 (int4, int2) , + OPERATOR 5 (int4, int2) , + FUNCTION 1 (int4, int2) ; +DROP OPERATOR FAMILY alt_opf4 USING btree; +ROLLBACK; +-- Should fail. Invalid values for ALTER OPERATOR FAMILY .. ADD / DROP +CREATE OPERATOR FAMILY alt_opf4 USING btree; +ALTER OPERATOR FAMILY alt_opf4 USING invalid_index_method ADD OPERATOR 1 < (int4, int2); -- invalid indexing_method +ERROR: access method "invalid_index_method" does not exist +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 6 < (int4, int2); -- operator number should be between 1 and 5 +ERROR: invalid operator number 6, must be between 1 and 5 +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 0 < (int4, int2); -- operator number should be between 1 and 5 +ERROR: invalid operator number 0, must be between 1 and 5 +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD OPERATOR 1 < ; -- operator without argument types +ERROR: operator argument types must be specified in ALTER OPERATOR FAMILY +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 0 btint42cmp(int4, int2); -- invalid options parsing function +ERROR: invalid function number 0, must be between 1 and 5 +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD FUNCTION 6 btint42cmp(int4, int2); -- function number should be between 1 and 5 +ERROR: invalid function number 6, must be between 1 and 5 +ALTER OPERATOR FAMILY alt_opf4 USING btree ADD STORAGE invalid_storage; -- Ensure STORAGE is not a part of ALTER OPERATOR FAMILY +ERROR: STORAGE cannot be specified in ALTER OPERATOR FAMILY +DROP OPERATOR FAMILY alt_opf4 USING btree; +-- Should fail. Need to be SUPERUSER to do ALTER OPERATOR FAMILY .. 
ADD / DROP +BEGIN TRANSACTION; +CREATE ROLE regress_alter_generic_user5 NOSUPERUSER; +CREATE OPERATOR FAMILY alt_opf5 USING btree; +SET ROLE regress_alter_generic_user5; +ALTER OPERATOR FAMILY alt_opf5 USING btree ADD OPERATOR 1 < (int4, int2), FUNCTION 1 btint42cmp(int4, int2); +ERROR: must be superuser to alter an operator family +RESET ROLE; +ERROR: current transaction is aborted, commands ignored until end of transaction block +DROP OPERATOR FAMILY alt_opf5 USING btree; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- Should fail. Need rights to namespace for ALTER OPERATOR FAMILY .. ADD / DROP +BEGIN TRANSACTION; +CREATE ROLE regress_alter_generic_user6; +CREATE SCHEMA alt_nsp6; +REVOKE ALL ON SCHEMA alt_nsp6 FROM regress_alter_generic_user6; +CREATE OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree; +SET ROLE regress_alter_generic_user6; +ALTER OPERATOR FAMILY alt_nsp6.alt_opf6 USING btree ADD OPERATOR 1 < (int4, int2); +ERROR: permission denied for schema alt_nsp6 +ROLLBACK; +-- Should fail. Only two arguments required for ALTER OPERATOR FAMILY ... DROP OPERATOR +CREATE OPERATOR FAMILY alt_opf7 USING btree; +ALTER OPERATOR FAMILY alt_opf7 USING btree ADD OPERATOR 1 < (int4, int2); +ALTER OPERATOR FAMILY alt_opf7 USING btree DROP OPERATOR 1 (int4, int2, int8); +ERROR: one or two argument types must be specified +DROP OPERATOR FAMILY alt_opf7 USING btree; +-- Should work. During ALTER OPERATOR FAMILY ... DROP OPERATOR +-- when left type is the same as right type, a DROP with only one argument type should work +CREATE OPERATOR FAMILY alt_opf8 USING btree; +ALTER OPERATOR FAMILY alt_opf8 USING btree ADD OPERATOR 1 < (int4, int4); +DROP OPERATOR FAMILY alt_opf8 USING btree; +-- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY +CREATE OPERATOR FAMILY alt_opf9 USING gist; +ALTER OPERATOR FAMILY alt_opf9 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; +DROP OPERATOR FAMILY alt_opf9 USING gist; +-- Should fail. Ensure correct ordering methods in ALTER OPERATOR FAMILY ... ADD OPERATOR .. FOR ORDER BY +CREATE OPERATOR FAMILY alt_opf10 USING btree; +ALTER OPERATOR FAMILY alt_opf10 USING btree ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; +ERROR: access method "btree" does not support ordering operators +DROP OPERATOR FAMILY alt_opf10 USING btree; +-- Should work. Textbook case of ALTER OPERATOR FAMILY ... ADD OPERATOR with FOR ORDER BY +CREATE OPERATOR FAMILY alt_opf11 USING gist; +ALTER OPERATOR FAMILY alt_opf11 USING gist ADD OPERATOR 1 < (int4, int4) FOR ORDER BY float_ops; +ALTER OPERATOR FAMILY alt_opf11 USING gist DROP OPERATOR 1 (int4, int4); +DROP OPERATOR FAMILY alt_opf11 USING gist; +-- Should fail. btree comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... ADD FUNCTION +BEGIN TRANSACTION; +CREATE OPERATOR FAMILY alt_opf12 USING btree; +CREATE FUNCTION fn_opf12 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ALTER OPERATOR FAMILY alt_opf12 USING btree ADD FUNCTION 1 fn_opf12(int4, int2); +ERROR: btree comparison functions must return integer +DROP OPERATOR FAMILY alt_opf12 USING btree; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- Should fail. hash comparison functions should return INTEGER in ALTER OPERATOR FAMILY ... 
ADD FUNCTION +BEGIN TRANSACTION; +CREATE OPERATOR FAMILY alt_opf13 USING hash; +CREATE FUNCTION fn_opf13 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ALTER OPERATOR FAMILY alt_opf13 USING hash ADD FUNCTION 1 fn_opf13(int4); +ERROR: hash function 1 must return integer +DROP OPERATOR FAMILY alt_opf13 USING hash; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- Should fail. btree comparison functions should have two arguments in ALTER OPERATOR FAMILY ... ADD FUNCTION +BEGIN TRANSACTION; +CREATE OPERATOR FAMILY alt_opf14 USING btree; +CREATE FUNCTION fn_opf14 (int4) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ALTER OPERATOR FAMILY alt_opf14 USING btree ADD FUNCTION 1 fn_opf14(int4); +ERROR: btree comparison functions must have two arguments +DROP OPERATOR FAMILY alt_opf14 USING btree; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- Should fail. hash comparison functions should have one argument in ALTER OPERATOR FAMILY ... ADD FUNCTION +BEGIN TRANSACTION; +CREATE OPERATOR FAMILY alt_opf15 USING hash; +CREATE FUNCTION fn_opf15 (int4, int2) RETURNS BIGINT AS 'SELECT NULL::BIGINT;' LANGUAGE SQL; +ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2); +ERROR: hash function 1 must have one argument +DROP OPERATOR FAMILY alt_opf15 USING hash; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- Should fail. In gist throw an error when giving different data types for function argument +-- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION +CREATE OPERATOR FAMILY alt_opf16 USING gist; +ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2); +ERROR: associated data types must be specified for index support function +DROP OPERATOR FAMILY alt_opf16 USING gist; +-- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... 
ADD FUNCTION +CREATE OPERATOR FAMILY alt_opf17 USING btree; +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement +ERROR: operator number 1 for (integer,integer) appears more than once +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested first-time +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested again in separate statement +ERROR: operator 1(integer,integer) already exists in operator family "alt_opf17" +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD + OPERATOR 1 < (int4, int2) , + OPERATOR 2 <= (int4, int2) , + OPERATOR 3 = (int4, int2) , + OPERATOR 4 >= (int4, int2) , + OPERATOR 5 > (int4, int2) , + FUNCTION 1 btint42cmp(int4, int2) , + FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears twice in same statement +ERROR: function number 1 for (integer,smallint) appears more than once +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD + OPERATOR 1 < (int4, int2) , + OPERATOR 2 <= (int4, int2) , + OPERATOR 3 = (int4, int2) , + OPERATOR 4 >= (int4, int2) , + OPERATOR 5 > (int4, int2) , + FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 appears first time +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD + OPERATOR 1 < (int4, int2) , + OPERATOR 2 <= (int4, int2) , + OPERATOR 3 = (int4, int2) , + OPERATOR 4 >= (int4, int2) , + OPERATOR 5 > (int4, int2) , + FUNCTION 1 btint42cmp(int4, int2); -- procedure 1 requested again in separate statement +ERROR: operator 1(integer,smallint) already exists in operator family "alt_opf17" +DROP OPERATOR FAMILY alt_opf17 USING btree; +-- Should fail. Ensure that DROP requests for missing OPERATOR / FUNCTIONS +-- return appropriate message in ALTER OPERATOR FAMILY ... DROP OPERATOR / FUNCTION +CREATE OPERATOR FAMILY alt_opf18 USING btree; +ALTER OPERATOR FAMILY alt_opf18 USING btree DROP OPERATOR 1 (int4, int4); +ERROR: operator 1(integer,integer) does not exist in operator family "alt_opf18" +ALTER OPERATOR FAMILY alt_opf18 USING btree ADD + OPERATOR 1 < (int4, int2) , + OPERATOR 2 <= (int4, int2) , + OPERATOR 3 = (int4, int2) , + OPERATOR 4 >= (int4, int2) , + OPERATOR 5 > (int4, int2) , + FUNCTION 1 btint42cmp(int4, int2); +-- Should fail. Not allowed to have cross-type equalimage function. +ALTER OPERATOR FAMILY alt_opf18 USING btree + ADD FUNCTION 4 (int4, int2) btequalimage(oid); +ERROR: btree equal image functions must not be cross-type +ALTER OPERATOR FAMILY alt_opf18 USING btree DROP FUNCTION 2 (int4, int4); +ERROR: function 2(integer,integer) does not exist in operator family "alt_opf18" +DROP OPERATOR FAMILY alt_opf18 USING btree; +-- Should fail. Invalid opclass options function (#5) specifications. +CREATE OPERATOR FAMILY alt_opf19 USING btree; +ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 test_opclass_options_func(internal, text[], bool); +ERROR: function test_opclass_options_func(internal, text[], boolean) does not exist +ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) btint42cmp(int4, int2); +ERROR: invalid operator class options parsing function +HINT: Valid signature of operator class options parsing function is (internal) RETURNS void. 
+ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4, int2) btint42cmp(int4, int2); +ERROR: left and right associated data types for operator class options parsing functions must match +ALTER OPERATOR FAMILY alt_opf19 USING btree ADD FUNCTION 5 (int4) test_opclass_options_func(internal); -- Ok +ALTER OPERATOR FAMILY alt_opf19 USING btree DROP FUNCTION 5 (int4, int4); +DROP OPERATOR FAMILY alt_opf19 USING btree; +-- +-- Statistics +-- +SET SESSION AUTHORIZATION regress_alter_generic_user1; +CREATE TABLE alt_regress_1 (a INTEGER, b INTEGER); +CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_1; +CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_1; +ALTER STATISTICS alt_stat1 RENAME TO alt_stat2; -- failed (name conflict) +ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp1" +ALTER STATISTICS alt_stat1 RENAME TO alt_stat3; -- OK +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- OK +ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user2; +CREATE TABLE alt_regress_2 (a INTEGER, b INTEGER); +CREATE STATISTICS alt_stat1 ON a, b FROM alt_regress_2; +CREATE STATISTICS alt_stat2 ON a, b FROM alt_regress_2; +ALTER STATISTICS alt_stat3 RENAME TO alt_stat4; -- failed (not owner) +ERROR: must be owner of statistics object alt_stat3 +ALTER STATISTICS alt_stat1 RENAME TO alt_stat4; -- OK +ALTER STATISTICS alt_stat3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of statistics object alt_stat3 +ALTER STATISTICS alt_stat2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER STATISTICS alt_stat3 SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of statistics object alt_stat3 +ALTER STATISTICS alt_stat2 SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: statistics object "alt_stat2" already exists in schema "alt_nsp2" +RESET SESSION AUTHORIZATION; +SELECT nspname, stxname, rolname + FROM pg_statistic_ext s, pg_namespace n, pg_authid a + WHERE s.stxnamespace = n.oid AND s.stxowner = a.oid + AND n.nspname in ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, stxname; + nspname | stxname | rolname +----------+-----------+----------------------------- + alt_nsp1 | alt_stat2 | regress_alter_generic_user2 + alt_nsp1 | alt_stat3 | regress_alter_generic_user1 + alt_nsp1 | alt_stat4 | regress_alter_generic_user2 + alt_nsp2 | alt_stat2 | regress_alter_generic_user3 +(4 rows) + +-- +-- Text Search Dictionary +-- +SET SESSION AUTHORIZATION regress_alter_generic_user1; +CREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); +CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); +ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict2; -- failed (name conflict) +ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp1" +ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict3; -- OK +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- OK +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user2; +CREATE 
TEXT SEARCH DICTIONARY alt_ts_dict1 (template=simple); +CREATE TEXT SEARCH DICTIONARY alt_ts_dict2 (template=simple); +ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 RENAME TO alt_ts_dict4; -- failed (not owner) +ERROR: must be owner of text search dictionary alt_ts_dict3 +ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 RENAME TO alt_ts_dict4; -- OK +ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of text search dictionary alt_ts_dict3 +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER TEXT SEARCH DICTIONARY alt_ts_dict3 SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of text search dictionary alt_ts_dict3 +ALTER TEXT SEARCH DICTIONARY alt_ts_dict2 SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: text search dictionary "alt_ts_dict2" already exists in schema "alt_nsp2" +RESET SESSION AUTHORIZATION; +SELECT nspname, dictname, rolname + FROM pg_ts_dict t, pg_namespace n, pg_authid a + WHERE t.dictnamespace = n.oid AND t.dictowner = a.oid + AND n.nspname in ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, dictname; + nspname | dictname | rolname +----------+--------------+----------------------------- + alt_nsp1 | alt_ts_dict2 | regress_alter_generic_user2 + alt_nsp1 | alt_ts_dict3 | regress_alter_generic_user1 + alt_nsp1 | alt_ts_dict4 | regress_alter_generic_user2 + alt_nsp2 | alt_ts_dict2 | regress_alter_generic_user3 +(4 rows) + +-- +-- Text Search Configuration +-- +SET SESSION AUTHORIZATION regress_alter_generic_user1; +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf2; -- failed (name conflict) +ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp1" +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf3; -- OK +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user2; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user2" +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- OK +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- OK +SET SESSION AUTHORIZATION regress_alter_generic_user2; +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (copy=english); +CREATE TEXT SEARCH CONFIGURATION alt_ts_conf2 (copy=english); +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 RENAME TO alt_ts_conf4; -- failed (not owner) +ERROR: must be owner of text search configuration alt_ts_conf3 +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 RENAME TO alt_ts_conf4; -- OK +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 OWNER TO regress_alter_generic_user2; -- failed (not owner) +ERROR: must be owner of text search configuration alt_ts_conf3 +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 OWNER TO regress_alter_generic_user3; -- failed (no role membership) +ERROR: must be able to SET ROLE "regress_alter_generic_user3" +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf3 SET SCHEMA alt_nsp2; -- failed (not owner) +ERROR: must be owner of text search configuration alt_ts_conf3 +ALTER TEXT SEARCH CONFIGURATION alt_ts_conf2 SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: text search configuration "alt_ts_conf2" already exists in schema "alt_nsp2" +RESET SESSION AUTHORIZATION; +SELECT nspname, cfgname, rolname + FROM 
pg_ts_config t, pg_namespace n, pg_authid a + WHERE t.cfgnamespace = n.oid AND t.cfgowner = a.oid + AND n.nspname in ('alt_nsp1', 'alt_nsp2') + ORDER BY nspname, cfgname; + nspname | cfgname | rolname +----------+--------------+----------------------------- + alt_nsp1 | alt_ts_conf2 | regress_alter_generic_user2 + alt_nsp1 | alt_ts_conf3 | regress_alter_generic_user1 + alt_nsp1 | alt_ts_conf4 | regress_alter_generic_user2 + alt_nsp2 | alt_ts_conf2 | regress_alter_generic_user3 +(4 rows) + +-- +-- Text Search Template +-- +CREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (lexize=dsimple_lexize); +CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); +ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp2; -- failed (name conflict) +ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp1" +ALTER TEXT SEARCH TEMPLATE alt_ts_temp1 RENAME TO alt_ts_temp3; -- OK +ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- OK +CREATE TEXT SEARCH TEMPLATE alt_ts_temp2 (lexize=dsimple_lexize); +ALTER TEXT SEARCH TEMPLATE alt_ts_temp2 SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: text search template "alt_ts_temp2" already exists in schema "alt_nsp2" +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH TEMPLATE tstemp_case ("Init" = init_function); +ERROR: text search template parameter "Init" not recognized +SELECT nspname, tmplname + FROM pg_ts_template t, pg_namespace n + WHERE t.tmplnamespace = n.oid AND nspname like 'alt_nsp%' + ORDER BY nspname, tmplname; + nspname | tmplname +----------+-------------- + alt_nsp1 | alt_ts_temp2 + alt_nsp1 | alt_ts_temp3 + alt_nsp2 | alt_ts_temp2 +(3 rows) + +-- +-- Text Search Parser +-- +CREATE TEXT SEARCH PARSER alt_ts_prs1 + (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +CREATE TEXT SEARCH PARSER alt_ts_prs2 + (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs2; -- failed (name conflict) +ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp1" +ALTER TEXT SEARCH PARSER alt_ts_prs1 RENAME TO alt_ts_prs3; -- OK +ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- OK +CREATE TEXT SEARCH PARSER alt_ts_prs2 + (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +ALTER TEXT SEARCH PARSER alt_ts_prs2 SET SCHEMA alt_nsp2; -- failed (name conflict) +ERROR: text search parser "alt_ts_prs2" already exists in schema "alt_nsp2" +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH PARSER tspars_case ("Start" = start_function); +ERROR: text search parser parameter "Start" not recognized +SELECT nspname, prsname + FROM pg_ts_parser t, pg_namespace n + WHERE t.prsnamespace = n.oid AND nspname like 'alt_nsp%' + ORDER BY nspname, prsname; + nspname | prsname +----------+------------- + alt_nsp1 | alt_ts_prs2 + alt_nsp1 | alt_ts_prs3 + alt_nsp2 | alt_ts_prs2 +(3 rows) + +--- +--- Cleanup resources +--- +DROP FOREIGN DATA WRAPPER alt_fdw2 CASCADE; +NOTICE: drop cascades to server alt_fserv2 +DROP FOREIGN DATA WRAPPER alt_fdw3 CASCADE; +NOTICE: drop cascades to server alt_fserv3 +DROP LANGUAGE alt_lang2 CASCADE; +DROP LANGUAGE alt_lang3 CASCADE; +DROP SCHEMA alt_nsp1 CASCADE; +NOTICE: drop cascades to 28 other objects +DETAIL: drop cascades to function alt_func3(integer) +drop cascades to function alt_agg3(integer) +drop cascades to function alt_func4(integer) +drop cascades to function 
alt_func2(integer) +drop cascades to function alt_agg4(integer) +drop cascades to function alt_agg2(integer) +drop cascades to conversion alt_conv3 +drop cascades to conversion alt_conv4 +drop cascades to conversion alt_conv2 +drop cascades to operator @+@(integer,integer) +drop cascades to operator @-@(integer,integer) +drop cascades to operator family alt_opf3 for access method hash +drop cascades to operator family alt_opc1 for access method hash +drop cascades to operator family alt_opc2 for access method hash +drop cascades to operator family alt_opf4 for access method hash +drop cascades to operator family alt_opf2 for access method hash +drop cascades to table alt_regress_1 +drop cascades to table alt_regress_2 +drop cascades to text search dictionary alt_ts_dict3 +drop cascades to text search dictionary alt_ts_dict4 +drop cascades to text search dictionary alt_ts_dict2 +drop cascades to text search configuration alt_ts_conf3 +drop cascades to text search configuration alt_ts_conf4 +drop cascades to text search configuration alt_ts_conf2 +drop cascades to text search template alt_ts_temp3 +drop cascades to text search template alt_ts_temp2 +drop cascades to text search parser alt_ts_prs3 +drop cascades to text search parser alt_ts_prs2 +DROP SCHEMA alt_nsp2 CASCADE; +NOTICE: drop cascades to 9 other objects +DETAIL: drop cascades to function alt_nsp2.alt_func2(integer) +drop cascades to function alt_nsp2.alt_agg2(integer) +drop cascades to conversion alt_nsp2.alt_conv2 +drop cascades to operator alt_nsp2.@-@(integer,integer) +drop cascades to operator family alt_nsp2.alt_opf2 for access method hash +drop cascades to text search dictionary alt_nsp2.alt_ts_dict2 +drop cascades to text search configuration alt_nsp2.alt_ts_conf2 +drop cascades to text search template alt_nsp2.alt_ts_temp2 +drop cascades to text search parser alt_nsp2.alt_ts_prs2 +DROP USER regress_alter_generic_user1; +DROP USER regress_alter_generic_user2; +DROP USER regress_alter_generic_user3; diff --git a/src/test/regress/expected/alter_operator.out b/src/test/regress/expected/alter_operator.out new file mode 100644 index 0000000..71bd484 --- /dev/null +++ b/src/test/regress/expected/alter_operator.out @@ -0,0 +1,139 @@ +CREATE FUNCTION alter_op_test_fn(boolean, boolean) +RETURNS boolean AS $$ SELECT NULL::BOOLEAN; $$ LANGUAGE sql IMMUTABLE; +CREATE FUNCTION customcontsel(internal, oid, internal, integer) +RETURNS float8 AS 'contsel' LANGUAGE internal STABLE STRICT; +CREATE OPERATOR === ( + LEFTARG = boolean, + RIGHTARG = boolean, + PROCEDURE = alter_op_test_fn, + COMMUTATOR = ===, + NEGATOR = !==, + RESTRICT = customcontsel, + JOIN = contjoinsel, + HASHES, MERGES +); +SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype +FROM pg_depend +WHERE classid = 'pg_operator'::regclass AND + objid = '===(bool,bool)'::regoperator +ORDER BY 1; + ref | deptype +-------------------------------------------------------+--------- + function alter_op_test_fn(boolean,boolean) | n + function customcontsel(internal,oid,internal,integer) | n + schema public | n +(3 rows) + +-- +-- Reset and set params +-- +ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); +ALTER OPERATOR === (boolean, boolean) SET (JOIN = NONE); +SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' + AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; + oprrest | oprjoin +---------+--------- + - | - +(1 row) + +SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype +FROM pg_depend +WHERE 
classid = 'pg_operator'::regclass AND + objid = '===(bool,bool)'::regoperator +ORDER BY 1; + ref | deptype +--------------------------------------------+--------- + function alter_op_test_fn(boolean,boolean) | n + schema public | n +(2 rows) + +ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = contsel); +ALTER OPERATOR === (boolean, boolean) SET (JOIN = contjoinsel); +SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' + AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; + oprrest | oprjoin +---------+------------- + contsel | contjoinsel +(1 row) + +SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype +FROM pg_depend +WHERE classid = 'pg_operator'::regclass AND + objid = '===(bool,bool)'::regoperator +ORDER BY 1; + ref | deptype +--------------------------------------------+--------- + function alter_op_test_fn(boolean,boolean) | n + schema public | n +(2 rows) + +ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE, JOIN = NONE); +SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' + AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; + oprrest | oprjoin +---------+--------- + - | - +(1 row) + +SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype +FROM pg_depend +WHERE classid = 'pg_operator'::regclass AND + objid = '===(bool,bool)'::regoperator +ORDER BY 1; + ref | deptype +--------------------------------------------+--------- + function alter_op_test_fn(boolean,boolean) | n + schema public | n +(2 rows) + +ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = customcontsel, JOIN = contjoinsel); +SELECT oprrest, oprjoin FROM pg_operator WHERE oprname = '===' + AND oprleft = 'boolean'::regtype AND oprright = 'boolean'::regtype; + oprrest | oprjoin +---------------+------------- + customcontsel | contjoinsel +(1 row) + +SELECT pg_describe_object(refclassid,refobjid,refobjsubid) as ref, deptype +FROM pg_depend +WHERE classid = 'pg_operator'::regclass AND + objid = '===(bool,bool)'::regoperator +ORDER BY 1; + ref | deptype +-------------------------------------------------------+--------- + function alter_op_test_fn(boolean,boolean) | n + function customcontsel(internal,oid,internal,integer) | n + schema public | n +(3 rows) + +-- +-- Test invalid options. +-- +ALTER OPERATOR === (boolean, boolean) SET (COMMUTATOR = ====); +ERROR: operator attribute "commutator" cannot be changed +ALTER OPERATOR === (boolean, boolean) SET (NEGATOR = ====); +ERROR: operator attribute "negator" cannot be changed +ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = non_existent_func); +ERROR: function non_existent_func(internal, oid, internal, integer) does not exist +ALTER OPERATOR === (boolean, boolean) SET (JOIN = non_existent_func); +ERROR: function non_existent_func(internal, oid, internal, smallint, internal) does not exist +ALTER OPERATOR === (boolean, boolean) SET (COMMUTATOR = !==); +ERROR: operator attribute "commutator" cannot be changed +ALTER OPERATOR === (boolean, boolean) SET (NEGATOR = !==); +ERROR: operator attribute "negator" cannot be changed +-- invalid: non-lowercase quoted identifiers +ALTER OPERATOR & (bit, bit) SET ("Restrict" = _int_contsel, "Join" = _int_contjoinsel); +ERROR: operator attribute "Restrict" not recognized +-- +-- Test permission check. Must be owner to ALTER OPERATOR. 
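+-- Illustrative aside, not captured output: ownership (or superuser) is what
+-- the check below exercises; if the new role were first made the operator's
+-- owner, e.g.
+--   ALTER OPERATOR === (boolean, boolean) OWNER TO regress_alter_op_user;
+-- then the SET (RESTRICT = NONE) attempt below would succeed rather than fail.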
+-- +CREATE USER regress_alter_op_user; +SET SESSION AUTHORIZATION regress_alter_op_user; +ALTER OPERATOR === (boolean, boolean) SET (RESTRICT = NONE); +ERROR: must be owner of operator === +-- Clean up +RESET SESSION AUTHORIZATION; +DROP USER regress_alter_op_user; +DROP OPERATOR === (boolean, boolean); +DROP FUNCTION customcontsel(internal, oid, internal, integer); +DROP FUNCTION alter_op_test_fn(boolean, boolean); diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out new file mode 100644 index 0000000..05351cb --- /dev/null +++ b/src/test/regress/expected/alter_table.out @@ -0,0 +1,4665 @@ +-- +-- ALTER_TABLE +-- +-- Clean up in case a prior regression run failed +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_alter_table_user1; +RESET client_min_messages; +CREATE USER regress_alter_table_user1; +-- +-- add attribute +-- +CREATE TABLE attmp (initial int4); +COMMENT ON TABLE attmp_wrong IS 'table comment'; +ERROR: relation "attmp_wrong" does not exist +COMMENT ON TABLE attmp IS 'table comment'; +COMMENT ON TABLE attmp IS NULL; +ALTER TABLE attmp ADD COLUMN xmin integer; -- fails +ERROR: column name "xmin" conflicts with a system column name +ALTER TABLE attmp ADD COLUMN a int4 default 3; +ALTER TABLE attmp ADD COLUMN b name; +ALTER TABLE attmp ADD COLUMN c text; +ALTER TABLE attmp ADD COLUMN d float8; +ALTER TABLE attmp ADD COLUMN e float4; +ALTER TABLE attmp ADD COLUMN f int2; +ALTER TABLE attmp ADD COLUMN g polygon; +ALTER TABLE attmp ADD COLUMN i char; +ALTER TABLE attmp ADD COLUMN k int4; +ALTER TABLE attmp ADD COLUMN l tid; +ALTER TABLE attmp ADD COLUMN m xid; +ALTER TABLE attmp ADD COLUMN n oidvector; +--ALTER TABLE attmp ADD COLUMN o lock; +ALTER TABLE attmp ADD COLUMN p boolean; +ALTER TABLE attmp ADD COLUMN q point; +ALTER TABLE attmp ADD COLUMN r lseg; +ALTER TABLE attmp ADD COLUMN s path; +ALTER TABLE attmp ADD COLUMN t box; +ALTER TABLE attmp ADD COLUMN v timestamp; +ALTER TABLE attmp ADD COLUMN w interval; +ALTER TABLE attmp ADD COLUMN x float8[]; +ALTER TABLE attmp ADD COLUMN y float4[]; +ALTER TABLE attmp ADD COLUMN z int2[]; +INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'c', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); +SELECT * FROM attmp; + initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z +---------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+----------- + | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} +(1 row) + +DROP TABLE attmp; +-- the wolf bug - schema mods caused inconsistent row descriptors +CREATE TABLE attmp ( + initial int4 +); +ALTER TABLE attmp ADD COLUMN a int4; +ALTER TABLE attmp ADD COLUMN b name; +ALTER TABLE attmp ADD COLUMN c text; +ALTER TABLE attmp ADD COLUMN d float8; +ALTER TABLE attmp ADD COLUMN e float4; +ALTER TABLE attmp 
ADD COLUMN f int2; +ALTER TABLE attmp ADD COLUMN g polygon; +ALTER TABLE attmp ADD COLUMN i char; +ALTER TABLE attmp ADD COLUMN k int4; +ALTER TABLE attmp ADD COLUMN l tid; +ALTER TABLE attmp ADD COLUMN m xid; +ALTER TABLE attmp ADD COLUMN n oidvector; +--ALTER TABLE attmp ADD COLUMN o lock; +ALTER TABLE attmp ADD COLUMN p boolean; +ALTER TABLE attmp ADD COLUMN q point; +ALTER TABLE attmp ADD COLUMN r lseg; +ALTER TABLE attmp ADD COLUMN s path; +ALTER TABLE attmp ADD COLUMN t box; +ALTER TABLE attmp ADD COLUMN v timestamp; +ALTER TABLE attmp ADD COLUMN w interval; +ALTER TABLE attmp ADD COLUMN x float8[]; +ALTER TABLE attmp ADD COLUMN y float4[]; +ALTER TABLE attmp ADD COLUMN z int2[]; +INSERT INTO attmp (a, b, c, d, e, f, g, i, k, l, m, n, p, q, r, s, t, + v, w, x, y, z) + VALUES (4, 'name', 'text', 4.1, 4.1, 2, '(4.1,4.1,3.1,3.1)', + 'c', + 314159, '(1,1)', '512', + '1 2 3 4 5 6 7 8', true, '(1.1,1.1)', '(4.1,4.1,3.1,3.1)', + '(0,2,4.1,4.1,3.1,3.1)', '(4.1,4.1,3.1,3.1)', + 'epoch', '01:00:10', '{1.0,2.0,3.0,4.0}', '{1.0,2.0,3.0,4.0}', '{1,2,3,4}'); +SELECT * FROM attmp; + initial | a | b | c | d | e | f | g | i | k | l | m | n | p | q | r | s | t | v | w | x | y | z +---------+---+------+------+-----+-----+---+-----------------------+---+--------+-------+-----+-----------------+---+-----------+-----------------------+-----------------------------+---------------------+--------------------------+------------------+-----------+-----------+----------- + | 4 | name | text | 4.1 | 4.1 | 2 | ((4.1,4.1),(3.1,3.1)) | c | 314159 | (1,1) | 512 | 1 2 3 4 5 6 7 8 | t | (1.1,1.1) | [(4.1,4.1),(3.1,3.1)] | ((0,2),(4.1,4.1),(3.1,3.1)) | (4.1,4.1),(3.1,3.1) | Thu Jan 01 00:00:00 1970 | @ 1 hour 10 secs | {1,2,3,4} | {1,2,3,4} | {1,2,3,4} +(1 row) + +CREATE INDEX attmp_idx ON attmp (a, (d + e), b); +ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; +ERROR: column number must be in range from 1 to 32767 +LINE 1: ALTER INDEX attmp_idx ALTER COLUMN 0 SET STATISTICS 1000; + ^ +ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000; +ERROR: cannot alter statistics on non-expression column "a" of index "attmp_idx" +HINT: Alter statistics on table column instead. +ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000; +\d+ attmp_idx + Index "public.attmp_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+------------------+------+------------+---------+-------------- + a | integer | yes | a | plain | + expr | double precision | yes | (d + e) | plain | 1000 + b | cstring | yes | b | plain | +btree, for table "public.attmp" + +ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000; +ERROR: cannot alter statistics on non-expression column "b" of index "attmp_idx" +HINT: Alter statistics on table column instead. 
+ALTER INDEX attmp_idx ALTER COLUMN 4 SET STATISTICS 1000; +ERROR: column number 4 of relation "attmp_idx" does not exist +ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS -1; +DROP TABLE attmp; +-- +-- rename - check on both non-temp and temp tables +-- +CREATE TABLE attmp (regtable int); +CREATE TEMP TABLE attmp (attmptable int); +ALTER TABLE attmp RENAME TO attmp_new; +SELECT * FROM attmp; + regtable +---------- +(0 rows) + +SELECT * FROM attmp_new; + attmptable +------------ +(0 rows) + +ALTER TABLE attmp RENAME TO attmp_new2; +SELECT * FROM attmp; -- should fail +ERROR: relation "attmp" does not exist +LINE 1: SELECT * FROM attmp; + ^ +SELECT * FROM attmp_new; + attmptable +------------ +(0 rows) + +SELECT * FROM attmp_new2; + regtable +---------- +(0 rows) + +DROP TABLE attmp_new; +DROP TABLE attmp_new2; +-- check rename of partitioned tables and indexes also +CREATE TABLE part_attmp (a int primary key) partition by range (a); +CREATE TABLE part_attmp1 PARTITION OF part_attmp FOR VALUES FROM (0) TO (100); +ALTER INDEX part_attmp_pkey RENAME TO part_attmp_index; +ALTER INDEX part_attmp1_pkey RENAME TO part_attmp1_index; +ALTER TABLE part_attmp RENAME TO part_at2tmp; +ALTER TABLE part_attmp1 RENAME TO part_at2tmp1; +SET ROLE regress_alter_table_user1; +ALTER INDEX part_attmp_index RENAME TO fail; +ERROR: must be owner of index part_attmp_index +ALTER INDEX part_attmp1_index RENAME TO fail; +ERROR: must be owner of index part_attmp1_index +ALTER TABLE part_at2tmp RENAME TO fail; +ERROR: must be owner of table part_at2tmp +ALTER TABLE part_at2tmp1 RENAME TO fail; +ERROR: must be owner of table part_at2tmp1 +RESET ROLE; +DROP TABLE part_at2tmp; +-- +-- check renaming to a table's array type's autogenerated name +-- (the array type's name should get out of the way) +-- +CREATE TABLE attmp_array (id int); +CREATE TABLE attmp_array2 (id int); +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; + typname +-------------- + _attmp_array +(1 row) + +SELECT typname FROM pg_type WHERE oid = 'attmp_array2[]'::regtype; + typname +--------------- + _attmp_array2 +(1 row) + +ALTER TABLE attmp_array2 RENAME TO _attmp_array; +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; + typname +--------------- + __attmp_array +(1 row) + +SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; + typname +----------------- + __attmp_array_1 +(1 row) + +DROP TABLE _attmp_array; +DROP TABLE attmp_array; +-- renaming to table's own array type's name is an interesting corner case +CREATE TABLE attmp_array (id int); +SELECT typname FROM pg_type WHERE oid = 'attmp_array[]'::regtype; + typname +-------------- + _attmp_array +(1 row) + +ALTER TABLE attmp_array RENAME TO _attmp_array; +SELECT typname FROM pg_type WHERE oid = '_attmp_array[]'::regtype; + typname +--------------- + __attmp_array +(1 row) + +DROP TABLE _attmp_array; +-- ALTER TABLE ... 
RENAME on non-table relations +-- renaming indexes (FIXME: this should probably test the index's functionality) +ALTER INDEX IF EXISTS __onek_unique1 RENAME TO attmp_onek_unique1; +NOTICE: relation "__onek_unique1" does not exist, skipping +ALTER INDEX IF EXISTS __attmp_onek_unique1 RENAME TO onek_unique1; +NOTICE: relation "__attmp_onek_unique1" does not exist, skipping +ALTER INDEX onek_unique1 RENAME TO attmp_onek_unique1; +ALTER INDEX attmp_onek_unique1 RENAME TO onek_unique1; +SET ROLE regress_alter_table_user1; +ALTER INDEX onek_unique1 RENAME TO fail; -- permission denied +ERROR: must be owner of index onek_unique1 +RESET ROLE; +-- rename statements with mismatching statement and object types +CREATE TABLE alter_idx_rename_test (a INT); +CREATE INDEX alter_idx_rename_test_idx ON alter_idx_rename_test (a); +CREATE TABLE alter_idx_rename_test_parted (a INT) PARTITION BY LIST (a); +CREATE INDEX alter_idx_rename_test_parted_idx ON alter_idx_rename_test_parted (a); +BEGIN; +ALTER INDEX alter_idx_rename_test RENAME TO alter_idx_rename_test_2; +ALTER INDEX alter_idx_rename_test_parted RENAME TO alter_idx_rename_test_parted_2; +SELECT relation::regclass, mode FROM pg_locks +WHERE pid = pg_backend_pid() AND locktype = 'relation' + AND relation::regclass::text LIKE 'alter\_idx%' +ORDER BY relation::regclass::text COLLATE "C"; + relation | mode +--------------------------------+--------------------- + alter_idx_rename_test_2 | AccessExclusiveLock + alter_idx_rename_test_parted_2 | AccessExclusiveLock +(2 rows) + +COMMIT; +BEGIN; +ALTER INDEX alter_idx_rename_test_idx RENAME TO alter_idx_rename_test_idx_2; +ALTER INDEX alter_idx_rename_test_parted_idx RENAME TO alter_idx_rename_test_parted_idx_2; +SELECT relation::regclass, mode FROM pg_locks +WHERE pid = pg_backend_pid() AND locktype = 'relation' + AND relation::regclass::text LIKE 'alter\_idx%' +ORDER BY relation::regclass::text COLLATE "C"; + relation | mode +------------------------------------+-------------------------- + alter_idx_rename_test_idx_2 | ShareUpdateExclusiveLock + alter_idx_rename_test_parted_idx_2 | ShareUpdateExclusiveLock +(2 rows) + +COMMIT; +BEGIN; +ALTER TABLE alter_idx_rename_test_idx_2 RENAME TO alter_idx_rename_test_idx_3; +ALTER TABLE alter_idx_rename_test_parted_idx_2 RENAME TO alter_idx_rename_test_parted_idx_3; +SELECT relation::regclass, mode FROM pg_locks +WHERE pid = pg_backend_pid() AND locktype = 'relation' + AND relation::regclass::text LIKE 'alter\_idx%' +ORDER BY relation::regclass::text COLLATE "C"; + relation | mode +------------------------------------+--------------------- + alter_idx_rename_test_idx_3 | AccessExclusiveLock + alter_idx_rename_test_parted_idx_3 | AccessExclusiveLock +(2 rows) + +COMMIT; +DROP TABLE alter_idx_rename_test_2; +-- renaming views +CREATE VIEW attmp_view (unique1) AS SELECT unique1 FROM tenk1; +ALTER TABLE attmp_view RENAME TO attmp_view_new; +SET ROLE regress_alter_table_user1; +ALTER VIEW attmp_view_new RENAME TO fail; -- permission denied +ERROR: must be owner of view attmp_view_new +RESET ROLE; +-- hack to ensure we get an indexscan here +set enable_seqscan to off; +set enable_bitmapscan to off; +-- 5 values, sorted +SELECT unique1 FROM tenk1 WHERE unique1 < 5; + unique1 +--------- + 0 + 1 + 2 + 3 + 4 +(5 rows) + +reset enable_seqscan; +reset enable_bitmapscan; +DROP VIEW attmp_view_new; +-- toast-like relation name +alter table stud_emp rename to pg_toast_stud_emp; +alter table pg_toast_stud_emp rename to stud_emp; +-- renaming index should rename constraint as well 
+ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +ALTER INDEX onek_unique1_constraint RENAME TO onek_unique1_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; +-- renaming constraint +ALTER TABLE onek ADD CONSTRAINT onek_check_constraint CHECK (unique1 >= 0); +ALTER TABLE onek RENAME CONSTRAINT onek_check_constraint TO onek_check_constraint_foo; +ALTER TABLE onek DROP CONSTRAINT onek_check_constraint_foo; +-- renaming constraint should rename index as well +ALTER TABLE onek ADD CONSTRAINT onek_unique1_constraint UNIQUE (unique1); +DROP INDEX onek_unique1_constraint; -- to see whether it's there +ERROR: cannot drop index onek_unique1_constraint because constraint onek_unique1_constraint on table onek requires it +HINT: You can drop constraint onek_unique1_constraint on table onek instead. +ALTER TABLE onek RENAME CONSTRAINT onek_unique1_constraint TO onek_unique1_constraint_foo; +DROP INDEX onek_unique1_constraint_foo; -- to see whether it's there +ERROR: cannot drop index onek_unique1_constraint_foo because constraint onek_unique1_constraint_foo on table onek requires it +HINT: You can drop constraint onek_unique1_constraint_foo on table onek instead. +ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo; +-- renaming constraints vs. inheritance +CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int); +\d constraint_rename_test + Table "public.constraint_rename_test" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | +Check constraints: + "con1" CHECK (a > 0) + +CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test); +NOTICE: merging column "a" with inherited definition +NOTICE: merging constraint "con1" with inherited definition +\d constraint_rename_test2 + Table "public.constraint_rename_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | + d | integer | | | +Check constraints: + "con1" CHECK (a > 0) +Inherits: constraint_rename_test + +ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail +ERROR: cannot rename inherited constraint "con1" +ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail +ERROR: inherited constraint "con1" must be renamed in child tables too +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok +\d constraint_rename_test + Table "public.constraint_rename_test" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | +Check constraints: + "con1foo" CHECK (a > 0) +Number of child tables: 1 (Use \d+ to list them.) 
+ +\d constraint_rename_test2 + Table "public.constraint_rename_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | + d | integer | | | +Check constraints: + "con1foo" CHECK (a > 0) +Inherits: constraint_rename_test + +ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT; +ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok +\d constraint_rename_test + Table "public.constraint_rename_test" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | +Check constraints: + "con1foo" CHECK (a > 0) + "con2bar" CHECK (b > 0) NO INHERIT +Number of child tables: 1 (Use \d+ to list them.) + +\d constraint_rename_test2 + Table "public.constraint_rename_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | + d | integer | | | +Check constraints: + "con1foo" CHECK (a > 0) +Inherits: constraint_rename_test + +ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a); +ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok +\d constraint_rename_test + Table "public.constraint_rename_test" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | +Indexes: + "con3foo" PRIMARY KEY, btree (a) +Check constraints: + "con1foo" CHECK (a > 0) + "con2bar" CHECK (b > 0) NO INHERIT +Number of child tables: 1 (Use \d+ to list them.) 
+ +\d constraint_rename_test2 + Table "public.constraint_rename_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | + c | integer | | | + d | integer | | | +Check constraints: + "con1foo" CHECK (a > 0) +Inherits: constraint_rename_test + +DROP TABLE constraint_rename_test2; +DROP TABLE constraint_rename_test; +ALTER TABLE IF EXISTS constraint_not_exist RENAME CONSTRAINT con3 TO con3foo; -- ok +NOTICE: relation "constraint_not_exist" does not exist, skipping +ALTER TABLE IF EXISTS constraint_rename_test ADD CONSTRAINT con4 UNIQUE (a); +NOTICE: relation "constraint_rename_test" does not exist, skipping +-- renaming constraints with cache reset of target relation +CREATE TABLE constraint_rename_cache (a int, + CONSTRAINT chk_a CHECK (a > 0), + PRIMARY KEY (a)); +ALTER TABLE constraint_rename_cache + RENAME CONSTRAINT chk_a TO chk_a_new; +ALTER TABLE constraint_rename_cache + RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new; +CREATE TABLE like_constraint_rename_cache + (LIKE constraint_rename_cache INCLUDING ALL); +\d like_constraint_rename_cache + Table "public.like_constraint_rename_cache" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | +Indexes: + "like_constraint_rename_cache_pkey" PRIMARY KEY, btree (a) +Check constraints: + "chk_a_new" CHECK (a > 0) + +DROP TABLE constraint_rename_cache; +DROP TABLE like_constraint_rename_cache; +-- FOREIGN KEY CONSTRAINT adding TEST +CREATE TABLE attmp2 (a int primary key); +CREATE TABLE attmp3 (a int, b int); +CREATE TABLE attmp4 (a int, b int, unique(a,b)); +CREATE TABLE attmp5 (a int, b int); +-- Insert rows into attmp2 (pktable) +INSERT INTO attmp2 values (1); +INSERT INTO attmp2 values (2); +INSERT INTO attmp2 values (3); +INSERT INTO attmp2 values (4); +-- Insert rows into attmp3 +INSERT INTO attmp3 values (1,10); +INSERT INTO attmp3 values (1,20); +INSERT INTO attmp3 values (5,50); +-- Try (and fail) to add constraint due to invalid source columns +ALTER TABLE attmp3 add constraint attmpconstr foreign key(c) references attmp2 match full; +ERROR: column "c" referenced in foreign key constraint does not exist +-- Try (and fail) to add constraint due to invalid destination columns explicitly given +ALTER TABLE attmp3 add constraint attmpconstr foreign key(a) references attmp2(b) match full; +ERROR: column "b" referenced in foreign key constraint does not exist +-- Try (and fail) to add constraint due to invalid data +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" +DETAIL: Key (a)=(5) is not present in table "attmp2". +-- Delete failing row +DELETE FROM attmp3 where a=5; +-- Try (and succeed) +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full; +ALTER TABLE attmp3 drop constraint attmpconstr; +INSERT INTO attmp3 values (5,50); +-- Try NOT VALID and then VALIDATE CONSTRAINT, but fails. Delete failure then re-validate +ALTER TABLE attmp3 add constraint attmpconstr foreign key (a) references attmp2 match full NOT VALID; +ALTER TABLE attmp3 validate constraint attmpconstr; +ERROR: insert or update on table "attmp3" violates foreign key constraint "attmpconstr" +DETAIL: Key (a)=(5) is not present in table "attmp2". 
+-- Delete failing row +DELETE FROM attmp3 where a=5; +-- Try (and succeed) and repeat to show it works on already valid constraint +ALTER TABLE attmp3 validate constraint attmpconstr; +ALTER TABLE attmp3 validate constraint attmpconstr; +-- Try a non-verified CHECK constraint +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10); -- fail +ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row +ALTER TABLE attmp3 ADD CONSTRAINT b_greater_than_ten CHECK (b > 10) NOT VALID; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- fails +ERROR: check constraint "b_greater_than_ten" of relation "attmp3" is violated by some row +DELETE FROM attmp3 WHERE NOT b > 10; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_greater_than_ten; -- succeeds +-- Test inherited NOT VALID CHECK constraints +select * from attmp3; + a | b +---+---- + 1 | 20 +(1 row) + +CREATE TABLE attmp6 () INHERITS (attmp3); +CREATE TABLE attmp7 () INHERITS (attmp3); +INSERT INTO attmp6 VALUES (6, 30), (7, 16); +ALTER TABLE attmp3 ADD CONSTRAINT b_le_20 CHECK (b <= 20) NOT VALID; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- fails +ERROR: check constraint "b_le_20" of relation "attmp6" is violated by some row +DELETE FROM attmp6 WHERE b > 20; +ALTER TABLE attmp3 VALIDATE CONSTRAINT b_le_20; -- succeeds +-- An already validated constraint must not be revalidated +CREATE FUNCTION boo(int) RETURNS int IMMUTABLE STRICT LANGUAGE plpgsql AS $$ BEGIN RAISE NOTICE 'boo: %', $1; RETURN $1; END; $$; +INSERT INTO attmp7 VALUES (8, 18); +ALTER TABLE attmp7 ADD CONSTRAINT identity CHECK (b = boo(b)); +NOTICE: boo: 18 +ALTER TABLE attmp3 ADD CONSTRAINT IDENTITY check (b = boo(b)) NOT VALID; +NOTICE: merging constraint "identity" with inherited definition +ALTER TABLE attmp3 VALIDATE CONSTRAINT identity; +NOTICE: boo: 20 +NOTICE: boo: 16 +-- A NO INHERIT constraint should not be looked for in children during VALIDATE CONSTRAINT +create table parent_noinh_convalid (a int); +create table child_noinh_convalid () inherits (parent_noinh_convalid); +insert into parent_noinh_convalid values (1); +insert into child_noinh_convalid values (1); +alter table parent_noinh_convalid add constraint check_a_is_2 check (a = 2) no inherit not valid; +-- fail, because of the row in parent +alter table parent_noinh_convalid validate constraint check_a_is_2; +ERROR: check constraint "check_a_is_2" of relation "parent_noinh_convalid" is violated by some row +delete from only parent_noinh_convalid; +-- ok (parent itself contains no violating rows) +alter table parent_noinh_convalid validate constraint check_a_is_2; +select convalidated from pg_constraint where conrelid = 'parent_noinh_convalid'::regclass and conname = 'check_a_is_2'; + convalidated +-------------- + t +(1 row) + +-- cleanup +drop table parent_noinh_convalid, child_noinh_convalid; +-- Try (and fail) to create constraint from attmp5(a) to attmp4(a) - unique constraint on +-- attmp4 is a,b +ALTER TABLE attmp5 add constraint attmpconstr foreign key(a) references attmp4(a) match full; +ERROR: there is no unique constraint matching given keys for referenced table "attmp4" +DROP TABLE attmp7; +DROP TABLE attmp6; +DROP TABLE attmp5; +DROP TABLE attmp4; +DROP TABLE attmp3; +DROP TABLE attmp2; +-- NOT VALID with plan invalidation -- ensure we don't use a constraint for +-- exclusion until validated +set constraint_exclusion TO 'partition'; +create table nv_parent (d date, 
check (false) no inherit not valid); +-- not valid constraint added at creation time should automatically become valid +\d nv_parent + Table "public.nv_parent" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + d | date | | | +Check constraints: + "nv_parent_check" CHECK (false) NO INHERIT + +create table nv_child_2010 () inherits (nv_parent); +create table nv_child_2011 () inherits (nv_parent); +alter table nv_child_2010 add check (d between '2010-01-01'::date and '2010-12-31'::date) not valid; +alter table nv_child_2011 add check (d between '2011-01-01'::date and '2011-12-31'::date) not valid; +explain (costs off) select * from nv_parent where d between '2011-08-01' and '2011-08-31'; + QUERY PLAN +--------------------------------------------------------------------------- + Append + -> Seq Scan on nv_parent nv_parent_1 + Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) + -> Seq Scan on nv_child_2010 nv_parent_2 + Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) + -> Seq Scan on nv_child_2011 nv_parent_3 + Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) +(7 rows) + +create table nv_child_2009 (check (d between '2009-01-01'::date and '2009-12-31'::date)) inherits (nv_parent); +explain (costs off) select * from nv_parent where d between '2011-08-01'::date and '2011-08-31'::date; + QUERY PLAN +--------------------------------------------------------------------------- + Append + -> Seq Scan on nv_parent nv_parent_1 + Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) + -> Seq Scan on nv_child_2010 nv_parent_2 + Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) + -> Seq Scan on nv_child_2011 nv_parent_3 + Filter: ((d >= '08-01-2011'::date) AND (d <= '08-31-2011'::date)) +(7 rows) + +explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; + QUERY PLAN +--------------------------------------------------------------------------- + Append + -> Seq Scan on nv_parent nv_parent_1 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) + -> Seq Scan on nv_child_2010 nv_parent_2 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) + -> Seq Scan on nv_child_2011 nv_parent_3 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) + -> Seq Scan on nv_child_2009 nv_parent_4 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) +(9 rows) + +-- after validation, the constraint should be used +alter table nv_child_2011 VALIDATE CONSTRAINT nv_child_2011_d_check; +explain (costs off) select * from nv_parent where d between '2009-08-01'::date and '2009-08-31'::date; + QUERY PLAN +--------------------------------------------------------------------------- + Append + -> Seq Scan on nv_parent nv_parent_1 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) + -> Seq Scan on nv_child_2010 nv_parent_2 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) + -> Seq Scan on nv_child_2009 nv_parent_3 + Filter: ((d >= '08-01-2009'::date) AND (d <= '08-31-2009'::date)) +(7 rows) + +-- add an inherited NOT VALID constraint +alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid; +\d nv_child_2009 + Table "public.nv_child_2009" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + d | date | | | +Check constraints: + "nv_child_2009_d_check" CHECK (d >= '01-01-2009'::date AND d <= 
'12-31-2009'::date) + "nv_parent_d_check" CHECK (d >= '01-01-2001'::date AND d <= '12-31-2099'::date) NOT VALID +Inherits: nv_parent + +-- we leave nv_parent and children around to help test pg_dump logic +-- Foreign key adding test with mixed types +-- Note: these tables are TEMP to avoid name conflicts when this test +-- is run in parallel with foreign_key.sql. +CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY); +INSERT INTO PKTABLE VALUES(42); +CREATE TEMP TABLE FKTABLE (ftest1 inet); +-- This next should fail, because int=inet does not exist +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; +ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer. +-- This should also fail for the same reason, but here we +-- give the column name +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1); +ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer. +DROP TABLE FKTABLE; +-- This should succeed, even though they are different types, +-- because int=int8 exists and is a member of the integer opfamily +CREATE TEMP TABLE FKTABLE (ftest1 int8); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; +-- Check it actually works +INSERT INTO FKTABLE VALUES(42); -- should succeed +INSERT INTO FKTABLE VALUES(43); -- should fail +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(43) is not present in table "pktable". +DROP TABLE FKTABLE; +-- This should fail, because we'd have to cast numeric to int which is +-- not an implicit coercion (or use numeric=numeric, but that's not part +-- of the integer opfamily) +CREATE TEMP TABLE FKTABLE (ftest1 numeric); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; +ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: numeric and integer. +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- On the other hand, this should work because int implicitly promotes to +-- numeric, and we allow promotion on the FK side +CREATE TEMP TABLE PKTABLE (ptest1 numeric PRIMARY KEY); +INSERT INTO PKTABLE VALUES(42); +CREATE TEMP TABLE FKTABLE (ftest1 int); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable; +-- Check it actually works +INSERT INTO FKTABLE VALUES(42); -- should succeed +INSERT INTO FKTABLE VALUES(43); -- should fail +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(43) is not present in table "pktable". +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 inet, + PRIMARY KEY(ptest1, ptest2)); +-- This should fail, because we just chose really odd types +CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable; +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer. +DROP TABLE FKTABLE; +-- Again, so should this... 
+CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 timestamp); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) + references pktable(ptest1, ptest2); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer. +DROP TABLE FKTABLE; +-- This fails because we mixed up the column ordering +CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 inet); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) + references pktable(ptest2, ptest1); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest2" are of incompatible types: integer and inet. +-- As does this... +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1) + references pktable(ptest1, ptest2); +ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest2" and "ptest1" are of incompatible types: inet and integer. +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- Test that ALTER CONSTRAINT updates trigger deferrability properly +CREATE TEMP TABLE PKTABLE (ptest1 int primary key); +CREATE TEMP TABLE FKTABLE (ftest1 int); +ALTER TABLE FKTABLE ADD CONSTRAINT fknd FOREIGN KEY(ftest1) REFERENCES pktable + ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; +ALTER TABLE FKTABLE ADD CONSTRAINT fkdd FOREIGN KEY(ftest1) REFERENCES pktable + ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; +ALTER TABLE FKTABLE ADD CONSTRAINT fkdi FOREIGN KEY(ftest1) REFERENCES pktable + ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY IMMEDIATE; +ALTER TABLE FKTABLE ADD CONSTRAINT fknd2 FOREIGN KEY(ftest1) REFERENCES pktable + ON DELETE CASCADE ON UPDATE NO ACTION DEFERRABLE INITIALLY DEFERRED; +ALTER TABLE FKTABLE ALTER CONSTRAINT fknd2 NOT DEFERRABLE; +ALTER TABLE FKTABLE ADD CONSTRAINT fkdd2 FOREIGN KEY(ftest1) REFERENCES pktable + ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; +ALTER TABLE FKTABLE ALTER CONSTRAINT fkdd2 DEFERRABLE INITIALLY DEFERRED; +ALTER TABLE FKTABLE ADD CONSTRAINT fkdi2 FOREIGN KEY(ftest1) REFERENCES pktable + ON DELETE CASCADE ON UPDATE NO ACTION NOT DEFERRABLE; +ALTER TABLE FKTABLE ALTER CONSTRAINT fkdi2 DEFERRABLE INITIALLY IMMEDIATE; +SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred +FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint +WHERE tgrelid = 'pktable'::regclass +ORDER BY 1,2,3; + conname | tgfoid | tgtype | tgdeferrable | tginitdeferred +---------+------------------------+--------+--------------+---------------- + fkdd | "RI_FKey_cascade_del" | 9 | f | f + fkdd | "RI_FKey_noaction_upd" | 17 | t | t + fkdd2 | "RI_FKey_cascade_del" | 9 | f | f + fkdd2 | "RI_FKey_noaction_upd" | 17 | t | t + fkdi | "RI_FKey_cascade_del" | 9 | f | f + fkdi | "RI_FKey_noaction_upd" | 17 | t | f + fkdi2 | "RI_FKey_cascade_del" | 9 | f | f + fkdi2 | "RI_FKey_noaction_upd" | 17 | t | f + fknd | "RI_FKey_cascade_del" | 9 | f | f + fknd | "RI_FKey_noaction_upd" | 17 | f | f + fknd2 | "RI_FKey_cascade_del" | 9 | f | f + fknd2 | "RI_FKey_noaction_upd" | 17 | f | f +(12 rows) + +SELECT conname, tgfoid::regproc, tgtype, tgdeferrable, tginitdeferred +FROM pg_trigger JOIN pg_constraint con ON con.oid = tgconstraint +WHERE tgrelid = 'fktable'::regclass +ORDER BY 1,2,3; + conname | tgfoid | tgtype | tgdeferrable | tginitdeferred +---------+---------------------+--------+--------------+---------------- + fkdd | "RI_FKey_check_ins" | 5 | t | t + fkdd | "RI_FKey_check_upd" | 17 | t 
| t + fkdd2 | "RI_FKey_check_ins" | 5 | t | t + fkdd2 | "RI_FKey_check_upd" | 17 | t | t + fkdi | "RI_FKey_check_ins" | 5 | t | f + fkdi | "RI_FKey_check_upd" | 17 | t | f + fkdi2 | "RI_FKey_check_ins" | 5 | t | f + fkdi2 | "RI_FKey_check_upd" | 17 | t | f + fknd | "RI_FKey_check_ins" | 5 | f | f + fknd | "RI_FKey_check_upd" | 17 | f | f + fknd2 | "RI_FKey_check_ins" | 5 | f | f + fknd2 | "RI_FKey_check_upd" | 17 | f | f +(12 rows) + +-- temp tables should go away by themselves, need not drop them. +-- test check constraint adding +create table atacc1 ( test int ); +-- add a check constraint +alter table atacc1 add constraint atacc_test1 check (test>3); +-- should fail +insert into atacc1 (test) values (2); +ERROR: new row for relation "atacc1" violates check constraint "atacc_test1" +DETAIL: Failing row contains (2). +-- should succeed +insert into atacc1 (test) values (4); +drop table atacc1; +-- let's do one where the check fails when added +create table atacc1 ( test int ); +-- insert a soon to be failing row +insert into atacc1 (test) values (2); +-- add a check constraint (fails) +alter table atacc1 add constraint atacc_test1 check (test>3); +ERROR: check constraint "atacc_test1" of relation "atacc1" is violated by some row +insert into atacc1 (test) values (4); +drop table atacc1; +-- let's do one where the check fails because the column doesn't exist +create table atacc1 ( test int ); +-- add a check constraint (fails) +alter table atacc1 add constraint atacc_test1 check (test1>3); +ERROR: column "test1" does not exist +HINT: Perhaps you meant to reference the column "atacc1.test". +drop table atacc1; +-- something a little more complicated +create table atacc1 ( test int, test2 int, test3 int); +-- add a check constraint (fails) +alter table atacc1 add constraint atacc_test1 check (test+test2<test3*4); +-- should fail +insert into atacc1 (test, test2, test3) values (4, 4, 2); +ERROR: new row for relation "atacc1" violates check constraint "atacc_test1" +DETAIL: Failing row contains (4, 4, 2). +-- should succeed +insert into atacc1 (test, test2, test3) values (4, 4, 5); +drop table atacc1; +-- lets do some naming tests +create table atacc1 (test int check (test>3), test2 int); +alter table atacc1 add check (test2>test); +-- should fail for $2 +insert into atacc1 (test2, test) values (3, 4); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_check" +DETAIL: Failing row contains (4, 3). +drop table atacc1; +-- inheritance related tests +create table atacc1 (test int); +create table atacc2 (test2 int); +create table atacc3 (test3 int) inherits (atacc1, atacc2); +alter table atacc2 add constraint foo check (test2>0); +-- fail and then succeed on atacc2 +insert into atacc2 (test2) values (-3); +ERROR: new row for relation "atacc2" violates check constraint "foo" +DETAIL: Failing row contains (-3). +insert into atacc2 (test2) values (3); +-- fail and then succeed on atacc3 +insert into atacc3 (test2) values (-3); +ERROR: new row for relation "atacc3" violates check constraint "foo" +DETAIL: Failing row contains (null, -3, null).
+insert into atacc3 (test2) values (3); +drop table atacc3; +drop table atacc2; +drop table atacc1; +-- same things with one created with INHERIT +create table atacc1 (test int); +create table atacc2 (test2 int); +create table atacc3 (test3 int) inherits (atacc1, atacc2); +alter table atacc3 no inherit atacc2; +-- fail +alter table atacc3 no inherit atacc2; +ERROR: relation "atacc2" is not a parent of relation "atacc3" +-- make sure it really isn't a child +insert into atacc3 (test2) values (3); +select test2 from atacc2; + test2 +------- +(0 rows) + +-- fail due to missing constraint +alter table atacc2 add constraint foo check (test2>0); +alter table atacc3 inherit atacc2; +ERROR: child table is missing constraint "foo" +-- fail due to missing column +alter table atacc3 rename test2 to testx; +alter table atacc3 inherit atacc2; +ERROR: child table is missing column "test2" +-- fail due to mismatched data type +alter table atacc3 add test2 bool; +alter table atacc3 inherit atacc2; +ERROR: child table "atacc3" has different type for column "test2" +alter table atacc3 drop test2; +-- succeed +alter table atacc3 add test2 int; +update atacc3 set test2 = 4 where test2 is null; +alter table atacc3 add constraint foo check (test2>0); +alter table atacc3 inherit atacc2; +-- fail due to duplicates and circular inheritance +alter table atacc3 inherit atacc2; +ERROR: relation "atacc2" would be inherited from more than once +alter table atacc2 inherit atacc3; +ERROR: circular inheritance not allowed +DETAIL: "atacc3" is already a child of "atacc2". +alter table atacc2 inherit atacc2; +ERROR: circular inheritance not allowed +DETAIL: "atacc2" is already a child of "atacc2". +-- test that we really are a child now (should see 4 not 3 and cascade should go through) +select test2 from atacc2; + test2 +------- + 4 +(1 row) + +drop table atacc2 cascade; +NOTICE: drop cascades to table atacc3 +drop table atacc1; +-- adding only to a parent is allowed as of 9.2 +create table atacc1 (test int); +create table atacc2 (test2 int) inherits (atacc1); +-- ok: +alter table atacc1 add constraint foo check (test>0) no inherit; +-- check constraint is not there on child +insert into atacc2 (test) values (-3); +-- check constraint is there on parent +insert into atacc1 (test) values (-3); +ERROR: new row for relation "atacc1" violates check constraint "foo" +DETAIL: Failing row contains (-3). +insert into atacc1 (test) values (3); +-- fail, violating row: +alter table atacc2 add constraint foo check (test>0) no inherit; +ERROR: check constraint "foo" of relation "atacc2" is violated by some row +drop table atacc2; +drop table atacc1; +-- test unique constraint adding +create table atacc1 ( test int ) ; +-- add a unique constraint +alter table atacc1 add constraint atacc_test1 unique (test); +-- insert first value +insert into atacc1 (test) values (2); +-- should fail +insert into atacc1 (test) values (2); +ERROR: duplicate key value violates unique constraint "atacc_test1" +DETAIL: Key (test)=(2) already exists. +-- should succeed +insert into atacc1 (test) values (4); +-- try to create duplicates via alter table using - should fail +alter table atacc1 alter column test type integer using 0; +ERROR: could not create unique index "atacc_test1" +DETAIL: Key (test)=(0) is duplicated. 
+drop table atacc1; +-- let's do one where the unique constraint fails when added +create table atacc1 ( test int ); +-- insert soon to be failing rows +insert into atacc1 (test) values (2); +insert into atacc1 (test) values (2); +-- add a unique constraint (fails) +alter table atacc1 add constraint atacc_test1 unique (test); +ERROR: could not create unique index "atacc_test1" +DETAIL: Key (test)=(2) is duplicated. +insert into atacc1 (test) values (3); +drop table atacc1; +-- let's do one where the unique constraint fails +-- because the column doesn't exist +create table atacc1 ( test int ); +-- add a unique constraint (fails) +alter table atacc1 add constraint atacc_test1 unique (test1); +ERROR: column "test1" named in key does not exist +drop table atacc1; +-- something a little more complicated +create table atacc1 ( test int, test2 int); +-- add a unique constraint +alter table atacc1 add constraint atacc_test1 unique (test, test2); +-- insert initial value +insert into atacc1 (test,test2) values (4,4); +-- should fail +insert into atacc1 (test,test2) values (4,4); +ERROR: duplicate key value violates unique constraint "atacc_test1" +DETAIL: Key (test, test2)=(4, 4) already exists. +-- should all succeed +insert into atacc1 (test,test2) values (4,5); +insert into atacc1 (test,test2) values (5,4); +insert into atacc1 (test,test2) values (5,5); +drop table atacc1; +-- lets do some naming tests +create table atacc1 (test int, test2 int, unique(test)); +alter table atacc1 add unique (test2); +-- should fail for @@ second one @@ +insert into atacc1 (test2, test) values (3, 3); +insert into atacc1 (test2, test) values (2, 3); +ERROR: duplicate key value violates unique constraint "atacc1_test_key" +DETAIL: Key (test)=(3) already exists. +drop table atacc1; +-- test primary key constraint adding +create table atacc1 ( id serial, test int) ; +-- add a primary key constraint +alter table atacc1 add constraint atacc_test1 primary key (test); +-- insert first value +insert into atacc1 (test) values (2); +-- should fail +insert into atacc1 (test) values (2); +ERROR: duplicate key value violates unique constraint "atacc_test1" +DETAIL: Key (test)=(2) already exists. +-- should succeed +insert into atacc1 (test) values (4); +-- inserting NULL should fail +insert into atacc1 (test) values(NULL); +ERROR: null value in column "test" of relation "atacc1" violates not-null constraint +DETAIL: Failing row contains (4, null). +-- try adding a second primary key (should fail) +alter table atacc1 add constraint atacc_oid1 primary key(id); +ERROR: multiple primary keys for table "atacc1" are not allowed +-- drop first primary key constraint +alter table atacc1 drop constraint atacc_test1 restrict; +-- try adding a primary key on oid (should succeed) +alter table atacc1 add constraint atacc_oid1 primary key(id); +drop table atacc1; +-- let's do one where the primary key constraint fails when added +create table atacc1 ( test int ); +-- insert soon to be failing rows +insert into atacc1 (test) values (2); +insert into atacc1 (test) values (2); +-- add a primary key (fails) +alter table atacc1 add constraint atacc_test1 primary key (test); +ERROR: could not create unique index "atacc_test1" +DETAIL: Key (test)=(2) is duplicated. 
+insert into atacc1 (test) values (3); +drop table atacc1; +-- let's do another one where the primary key constraint fails when added +create table atacc1 ( test int ); +-- insert soon to be failing row +insert into atacc1 (test) values (NULL); +-- add a primary key (fails) +alter table atacc1 add constraint atacc_test1 primary key (test); +ERROR: column "test" of relation "atacc1" contains null values +insert into atacc1 (test) values (3); +drop table atacc1; +-- let's do one where the primary key constraint fails +-- because the column doesn't exist +create table atacc1 ( test int ); +-- add a primary key constraint (fails) +alter table atacc1 add constraint atacc_test1 primary key (test1); +ERROR: column "test1" of relation "atacc1" does not exist +drop table atacc1; +-- adding a new column as primary key to a non-empty table. +-- should fail unless the column has a non-null default value. +create table atacc1 ( test int ); +insert into atacc1 (test) values (0); +-- add a primary key column without a default (fails). +alter table atacc1 add column test2 int primary key; +ERROR: column "test2" of relation "atacc1" contains null values +-- now add a primary key column with a default (succeeds). +alter table atacc1 add column test2 int default 0 primary key; +drop table atacc1; +-- this combination used to have order-of-execution problems (bug #15580) +create table atacc1 (a int); +insert into atacc1 values(1); +alter table atacc1 + add column b float8 not null default random(), + add primary key(a); +drop table atacc1; +-- additionally, we've seen issues with foreign key validation not being +-- properly delayed until after a table rewrite. Check that works ok. +create table atacc1 (a int primary key); +alter table atacc1 add constraint atacc1_fkey foreign key (a) references atacc1 (a) not valid; +alter table atacc1 validate constraint atacc1_fkey, alter a type bigint; +drop table atacc1; +-- we've also seen issues with check constraints being validated at the wrong +-- time when there's a pending table rewrite. +create table atacc1 (a bigint, b int); +insert into atacc1 values(1,1); +alter table atacc1 add constraint atacc1_chk check(b = 1) not valid; +alter table atacc1 validate constraint atacc1_chk, alter a type int; +drop table atacc1; +-- same as above, but ensure the constraint violation is detected +create table atacc1 (a bigint, b int); +insert into atacc1 values(1,2); +alter table atacc1 add constraint atacc1_chk check(b = 1) not valid; +alter table atacc1 validate constraint atacc1_chk, alter a type int; +ERROR: check constraint "atacc1_chk" of relation "atacc1" is violated by some row +drop table atacc1; +-- something a little more complicated +create table atacc1 ( test int, test2 int); +-- add a primary key constraint +alter table atacc1 add constraint atacc_test1 primary key (test, test2); +-- try adding a second primary key - should fail +alter table atacc1 add constraint atacc_test2 primary key (test); +ERROR: multiple primary keys for table "atacc1" are not allowed +-- insert initial value +insert into atacc1 (test,test2) values (4,4); +-- should fail +insert into atacc1 (test,test2) values (4,4); +ERROR: duplicate key value violates unique constraint "atacc_test1" +DETAIL: Key (test, test2)=(4, 4) already exists. +insert into atacc1 (test,test2) values (NULL,3); +ERROR: null value in column "test" of relation "atacc1" violates not-null constraint +DETAIL: Failing row contains (null, 3). 
+insert into atacc1 (test,test2) values (3, NULL); +ERROR: null value in column "test2" of relation "atacc1" violates not-null constraint +DETAIL: Failing row contains (3, null). +insert into atacc1 (test,test2) values (NULL,NULL); +ERROR: null value in column "test" of relation "atacc1" violates not-null constraint +DETAIL: Failing row contains (null, null). +-- should all succeed +insert into atacc1 (test,test2) values (4,5); +insert into atacc1 (test,test2) values (5,4); +insert into atacc1 (test,test2) values (5,5); +drop table atacc1; +-- lets do some naming tests +create table atacc1 (test int, test2 int, primary key(test)); +-- only first should succeed +insert into atacc1 (test2, test) values (3, 3); +insert into atacc1 (test2, test) values (2, 3); +ERROR: duplicate key value violates unique constraint "atacc1_pkey" +DETAIL: Key (test)=(3) already exists. +insert into atacc1 (test2, test) values (1, NULL); +ERROR: null value in column "test" of relation "atacc1" violates not-null constraint +DETAIL: Failing row contains (null, 1). +drop table atacc1; +-- alter table / alter column [set/drop] not null tests +-- try altering system catalogs, should fail +alter table pg_class alter column relname drop not null; +ERROR: permission denied: "pg_class" is a system catalog +alter table pg_class alter relname set not null; +ERROR: permission denied: "pg_class" is a system catalog +-- try altering non-existent table, should fail +alter table non_existent alter column bar set not null; +ERROR: relation "non_existent" does not exist +alter table non_existent alter column bar drop not null; +ERROR: relation "non_existent" does not exist +-- test setting columns to null and not null and vice versa +-- test checking for null values and primary key +create table atacc1 (test int not null); +alter table atacc1 add constraint "atacc1_pkey" primary key (test); +alter table atacc1 alter column test drop not null; +ERROR: column "test" is in a primary key +alter table atacc1 drop constraint "atacc1_pkey"; +alter table atacc1 alter column test drop not null; +insert into atacc1 values (null); +alter table atacc1 alter test set not null; +ERROR: column "test" of relation "atacc1" contains null values +delete from atacc1; +alter table atacc1 alter test set not null; +-- try altering a non-existent column, should fail +alter table atacc1 alter bar set not null; +ERROR: column "bar" of relation "atacc1" does not exist +alter table atacc1 alter bar drop not null; +ERROR: column "bar" of relation "atacc1" does not exist +-- try creating a view and altering that, should fail +create view myview as select * from atacc1; +alter table myview alter column test drop not null; +ERROR: ALTER action ALTER COLUMN ... DROP NOT NULL cannot be performed on relation "myview" +DETAIL: This operation is not supported for views. +alter table myview alter column test set not null; +ERROR: ALTER action ALTER COLUMN ... SET NOT NULL cannot be performed on relation "myview" +DETAIL: This operation is not supported for views. 
+drop view myview; +drop table atacc1; +-- set not null verified by constraints +create table atacc1 (test_a int, test_b int); +insert into atacc1 values (null, 1); +-- constraint not cover all values, should fail +alter table atacc1 add constraint atacc1_constr_or check(test_a is not null or test_b < 10); +alter table atacc1 alter test_a set not null; +ERROR: column "test_a" of relation "atacc1" contains null values +alter table atacc1 drop constraint atacc1_constr_or; +-- not valid constraint, should fail +alter table atacc1 add constraint atacc1_constr_invalid check(test_a is not null) not valid; +alter table atacc1 alter test_a set not null; +ERROR: column "test_a" of relation "atacc1" contains null values +alter table atacc1 drop constraint atacc1_constr_invalid; +-- with valid constraint +update atacc1 set test_a = 1; +alter table atacc1 add constraint atacc1_constr_a_valid check(test_a is not null); +alter table atacc1 alter test_a set not null; +delete from atacc1; +insert into atacc1 values (2, null); +alter table atacc1 alter test_a drop not null; +-- test multiple set not null at same time +-- test_a checked by atacc1_constr_a_valid, test_b should fail by table scan +alter table atacc1 alter test_a set not null, alter test_b set not null; +ERROR: column "test_b" of relation "atacc1" contains null values +-- commands order has no importance +alter table atacc1 alter test_b set not null, alter test_a set not null; +ERROR: column "test_b" of relation "atacc1" contains null values +-- valid one by table scan, one by check constraints +update atacc1 set test_b = 1; +alter table atacc1 alter test_b set not null, alter test_a set not null; +alter table atacc1 alter test_a drop not null, alter test_b drop not null; +-- both column has check constraints +alter table atacc1 add constraint atacc1_constr_b_valid check(test_b is not null); +alter table atacc1 alter test_b set not null, alter test_a set not null; +drop table atacc1; +-- test inheritance +create table parent (a int); +create table child (b varchar(255)) inherits (parent); +alter table parent alter a set not null; +insert into parent values (NULL); +ERROR: null value in column "a" of relation "parent" violates not-null constraint +DETAIL: Failing row contains (null). +insert into child (a, b) values (NULL, 'foo'); +ERROR: null value in column "a" of relation "child" violates not-null constraint +DETAIL: Failing row contains (null, foo). +alter table parent alter a drop not null; +insert into parent values (NULL); +insert into child (a, b) values (NULL, 'foo'); +alter table only parent alter a set not null; +ERROR: column "a" of relation "parent" contains null values +alter table child alter a set not null; +ERROR: column "a" of relation "child" contains null values +delete from parent; +alter table only parent alter a set not null; +insert into parent values (NULL); +ERROR: null value in column "a" of relation "parent" violates not-null constraint +DETAIL: Failing row contains (null). +alter table child alter a set not null; +insert into child (a, b) values (NULL, 'foo'); +ERROR: null value in column "a" of relation "child" violates not-null constraint +DETAIL: Failing row contains (null, foo). +delete from child; +alter table child alter a set not null; +insert into child (a, b) values (NULL, 'foo'); +ERROR: null value in column "a" of relation "child" violates not-null constraint +DETAIL: Failing row contains (null, foo). 
+drop table child; +drop table parent; +-- test setting and removing default values +create table def_test ( + c1 int4 default 5, + c2 text default 'initial_default' +); +insert into def_test default values; +alter table def_test alter column c1 drop default; +insert into def_test default values; +alter table def_test alter column c2 drop default; +insert into def_test default values; +alter table def_test alter column c1 set default 10; +alter table def_test alter column c2 set default 'new_default'; +insert into def_test default values; +select * from def_test; + c1 | c2 +----+----------------- + 5 | initial_default + | initial_default + | + 10 | new_default +(4 rows) + +-- set defaults to an incorrect type: this should fail +alter table def_test alter column c1 set default 'wrong_datatype'; +ERROR: invalid input syntax for type integer: "wrong_datatype" +alter table def_test alter column c2 set default 20; +-- set defaults on a non-existent column: this should fail +alter table def_test alter column c3 set default 30; +ERROR: column "c3" of relation "def_test" does not exist +-- set defaults on views: we need to create a view, add a rule +-- to allow insertions into it, and then alter the view to add +-- a default +create view def_view_test as select * from def_test; +create rule def_view_test_ins as + on insert to def_view_test + do instead insert into def_test select new.*; +insert into def_view_test default values; +alter table def_view_test alter column c1 set default 45; +insert into def_view_test default values; +alter table def_view_test alter column c2 set default 'view_default'; +insert into def_view_test default values; +select * from def_view_test; + c1 | c2 +----+----------------- + 5 | initial_default + | initial_default + | + 10 | new_default + | + 45 | + 45 | view_default +(7 rows) + +drop rule def_view_test_ins on def_view_test; +drop view def_view_test; +drop table def_test; +-- alter table / drop column tests +-- try altering system catalogs, should fail +alter table pg_class drop column relname; +ERROR: permission denied: "pg_class" is a system catalog +-- try altering non-existent table, should fail +alter table nosuchtable drop column bar; +ERROR: relation "nosuchtable" does not exist +-- test dropping columns +create table atacc1 (a int4 not null, b int4, c int4 not null, d int4); +insert into atacc1 values (1, 2, 3, 4); +alter table atacc1 drop a; +alter table atacc1 drop a; +ERROR: column "a" of relation "atacc1" does not exist +-- SELECTs +select * from atacc1; + b | c | d +---+---+--- + 2 | 3 | 4 +(1 row) + +select * from atacc1 order by a; +ERROR: column "a" does not exist +LINE 1: select * from atacc1 order by a; + ^ +select * from atacc1 order by "........pg.dropped.1........"; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: select * from atacc1 order by "........pg.dropped.1........"... + ^ +select * from atacc1 group by a; +ERROR: column "a" does not exist +LINE 1: select * from atacc1 group by a; + ^ +select * from atacc1 group by "........pg.dropped.1........"; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: select * from atacc1 group by "........pg.dropped.1........"... 
+ ^ +select atacc1.* from atacc1; + b | c | d +---+---+--- + 2 | 3 | 4 +(1 row) + +select a from atacc1; +ERROR: column "a" does not exist +LINE 1: select a from atacc1; + ^ +select atacc1.a from atacc1; +ERROR: column atacc1.a does not exist +LINE 1: select atacc1.a from atacc1; + ^ +select b,c,d from atacc1; + b | c | d +---+---+--- + 2 | 3 | 4 +(1 row) + +select a,b,c,d from atacc1; +ERROR: column "a" does not exist +LINE 1: select a,b,c,d from atacc1; + ^ +select * from atacc1 where a = 1; +ERROR: column "a" does not exist +LINE 1: select * from atacc1 where a = 1; + ^ +select "........pg.dropped.1........" from atacc1; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: select "........pg.dropped.1........" from atacc1; + ^ +select atacc1."........pg.dropped.1........" from atacc1; +ERROR: column atacc1.........pg.dropped.1........ does not exist +LINE 1: select atacc1."........pg.dropped.1........" from atacc1; + ^ +select "........pg.dropped.1........",b,c,d from atacc1; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: select "........pg.dropped.1........",b,c,d from atacc1; + ^ +select * from atacc1 where "........pg.dropped.1........" = 1; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: select * from atacc1 where "........pg.dropped.1........" = ... + ^ +-- UPDATEs +update atacc1 set a = 3; +ERROR: column "a" of relation "atacc1" does not exist +LINE 1: update atacc1 set a = 3; + ^ +update atacc1 set b = 2 where a = 3; +ERROR: column "a" does not exist +LINE 1: update atacc1 set b = 2 where a = 3; + ^ +update atacc1 set "........pg.dropped.1........" = 3; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +LINE 1: update atacc1 set "........pg.dropped.1........" = 3; + ^ +update atacc1 set b = 2 where "........pg.dropped.1........" = 3; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: update atacc1 set b = 2 where "........pg.dropped.1........"... + ^ +-- INSERTs +insert into atacc1 values (10, 11, 12, 13); +ERROR: INSERT has more expressions than target columns +LINE 1: insert into atacc1 values (10, 11, 12, 13); + ^ +insert into atacc1 values (default, 11, 12, 13); +ERROR: INSERT has more expressions than target columns +LINE 1: insert into atacc1 values (default, 11, 12, 13); + ^ +insert into atacc1 values (11, 12, 13); +insert into atacc1 (a) values (10); +ERROR: column "a" of relation "atacc1" does not exist +LINE 1: insert into atacc1 (a) values (10); + ^ +insert into atacc1 (a) values (default); +ERROR: column "a" of relation "atacc1" does not exist +LINE 1: insert into atacc1 (a) values (default); + ^ +insert into atacc1 (a,b,c,d) values (10,11,12,13); +ERROR: column "a" of relation "atacc1" does not exist +LINE 1: insert into atacc1 (a,b,c,d) values (10,11,12,13); + ^ +insert into atacc1 (a,b,c,d) values (default,11,12,13); +ERROR: column "a" of relation "atacc1" does not exist +LINE 1: insert into atacc1 (a,b,c,d) values (default,11,12,13); + ^ +insert into atacc1 (b,c,d) values (11,12,13); +insert into atacc1 ("........pg.dropped.1........") values (10); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +LINE 1: insert into atacc1 ("........pg.dropped.1........") values (... + ^ +insert into atacc1 ("........pg.dropped.1........") values (default); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +LINE 1: insert into atacc1 ("........pg.dropped.1........") values (... 
+ ^ +insert into atacc1 ("........pg.dropped.1........",b,c,d) values (10,11,12,13); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va... + ^ +insert into atacc1 ("........pg.dropped.1........",b,c,d) values (default,11,12,13); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +LINE 1: insert into atacc1 ("........pg.dropped.1........",b,c,d) va... + ^ +-- DELETEs +delete from atacc1 where a = 3; +ERROR: column "a" does not exist +LINE 1: delete from atacc1 where a = 3; + ^ +delete from atacc1 where "........pg.dropped.1........" = 3; +ERROR: column "........pg.dropped.1........" does not exist +LINE 1: delete from atacc1 where "........pg.dropped.1........" = 3; + ^ +delete from atacc1; +-- try dropping a non-existent column, should fail +alter table atacc1 drop bar; +ERROR: column "bar" of relation "atacc1" does not exist +-- try removing an oid column, should succeed (as it's nonexistent) +alter table atacc1 SET WITHOUT OIDS; +-- try adding an oid column, should fail (not supported) +alter table atacc1 SET WITH OIDS; +ERROR: syntax error at or near "WITH" +LINE 1: alter table atacc1 SET WITH OIDS; + ^ +-- try dropping the xmin column, should fail +alter table atacc1 drop xmin; +ERROR: cannot drop system column "xmin" +-- try creating a view and altering that, should fail +create view myview as select * from atacc1; +select * from myview; + b | c | d +---+---+--- +(0 rows) + +alter table myview drop d; +ERROR: ALTER action DROP COLUMN cannot be performed on relation "myview" +DETAIL: This operation is not supported for views. +drop view myview; +-- test some commands to make sure they fail on the dropped column +analyze atacc1(a); +ERROR: column "a" of relation "atacc1" does not exist +analyze atacc1("........pg.dropped.1........"); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +vacuum analyze atacc1(a); +ERROR: column "a" of relation "atacc1" does not exist +vacuum analyze atacc1("........pg.dropped.1........"); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +comment on column atacc1.a is 'testing'; +ERROR: column "a" of relation "atacc1" does not exist +comment on column atacc1."........pg.dropped.1........" is 'testing'; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 alter a set storage plain; +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 alter "........pg.dropped.1........" set storage plain; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 alter a set statistics 0; +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 alter "........pg.dropped.1........" set statistics 0; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 alter a set default 3; +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 alter "........pg.dropped.1........" set default 3; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 alter a drop default; +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 alter "........pg.dropped.1........" drop default; +ERROR: column "........pg.dropped.1........" 
of relation "atacc1" does not exist +alter table atacc1 alter a set not null; +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 alter "........pg.dropped.1........" set not null; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 alter a drop not null; +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 alter "........pg.dropped.1........" drop not null; +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 rename a to x; +ERROR: column "a" does not exist +alter table atacc1 rename "........pg.dropped.1........" to x; +ERROR: column "........pg.dropped.1........" does not exist +alter table atacc1 add primary key(a); +ERROR: column "a" of relation "atacc1" does not exist +alter table atacc1 add primary key("........pg.dropped.1........"); +ERROR: column "........pg.dropped.1........" of relation "atacc1" does not exist +alter table atacc1 add unique(a); +ERROR: column "a" named in key does not exist +alter table atacc1 add unique("........pg.dropped.1........"); +ERROR: column "........pg.dropped.1........" named in key does not exist +alter table atacc1 add check (a > 3); +ERROR: column "a" does not exist +alter table atacc1 add check ("........pg.dropped.1........" > 3); +ERROR: column "........pg.dropped.1........" does not exist +create table atacc2 (id int4 unique); +alter table atacc1 add foreign key (a) references atacc2(id); +ERROR: column "a" referenced in foreign key constraint does not exist +alter table atacc1 add foreign key ("........pg.dropped.1........") references atacc2(id); +ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist +alter table atacc2 add foreign key (id) references atacc1(a); +ERROR: column "a" referenced in foreign key constraint does not exist +alter table atacc2 add foreign key (id) references atacc1("........pg.dropped.1........"); +ERROR: column "........pg.dropped.1........" referenced in foreign key constraint does not exist +drop table atacc2; +create index "testing_idx" on atacc1(a); +ERROR: column "a" does not exist +create index "testing_idx" on atacc1("........pg.dropped.1........"); +ERROR: column "........pg.dropped.1........" does not exist +-- test create as and select into +insert into atacc1 values (21, 22, 23); +create table attest1 as select * from atacc1; +select * from attest1; + b | c | d +----+----+---- + 21 | 22 | 23 +(1 row) + +drop table attest1; +select * into attest2 from atacc1; +select * from attest2; + b | c | d +----+----+---- + 21 | 22 | 23 +(1 row) + +drop table attest2; +-- try dropping all columns +alter table atacc1 drop c; +alter table atacc1 drop d; +alter table atacc1 drop b; +select * from atacc1; +-- +(1 row) + +drop table atacc1; +-- test constraint error reporting in presence of dropped columns +create table atacc1 (id serial primary key, value int check (value < 10)); +insert into atacc1(value) values (100); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check" +DETAIL: Failing row contains (1, 100). +alter table atacc1 drop column value; +alter table atacc1 add column value int check (value < 10); +insert into atacc1(value) values (100); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_value_check" +DETAIL: Failing row contains (2, 100). 
+insert into atacc1(id, value) values (null, 0); +ERROR: null value in column "id" of relation "atacc1" violates not-null constraint +DETAIL: Failing row contains (null, 0). +drop table atacc1; +-- test inheritance +create table parent (a int, b int, c int); +insert into parent values (1, 2, 3); +alter table parent drop a; +create table child (d varchar(255)) inherits (parent); +insert into child values (12, 13, 'testing'); +select * from parent; + b | c +----+---- + 2 | 3 + 12 | 13 +(2 rows) + +select * from child; + b | c | d +----+----+--------- + 12 | 13 | testing +(1 row) + +alter table parent drop c; +select * from parent; + b +---- + 2 + 12 +(2 rows) + +select * from child; + b | d +----+--------- + 12 | testing +(1 row) + +drop table child; +drop table parent; +-- check error cases for inheritance column merging +create table parent (a float8, b numeric(10,4), c text collate "C"); +create table child (a float4) inherits (parent); -- fail +NOTICE: merging column "a" with inherited definition +ERROR: column "a" has a type conflict +DETAIL: double precision versus real +create table child (b decimal(10,7)) inherits (parent); -- fail +NOTICE: moving and merging column "b" with inherited definition +DETAIL: User-specified column moved to the position of the inherited column. +ERROR: column "b" has a type conflict +DETAIL: numeric(10,4) versus numeric(10,7) +create table child (c text collate "POSIX") inherits (parent); -- fail +NOTICE: moving and merging column "c" with inherited definition +DETAIL: User-specified column moved to the position of the inherited column. +ERROR: column "c" has a collation conflict +DETAIL: "C" versus "POSIX" +create table child (a double precision, b decimal(10,4)) inherits (parent); +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "b" with inherited definition +drop table child; +drop table parent; +-- test copy in/out +create table attest (a int4, b int4, c int4); +insert into attest values (1,2,3); +alter table attest drop a; +copy attest to stdout; +2 3 +copy attest(a) to stdout; +ERROR: column "a" of relation "attest" does not exist +copy attest("........pg.dropped.1........") to stdout; +ERROR: column "........pg.dropped.1........" of relation "attest" does not exist +copy attest from stdin; +ERROR: extra data after last expected column +CONTEXT: COPY attest, line 1: "10 11 12" +select * from attest; + b | c +---+--- + 2 | 3 +(1 row) + +copy attest from stdin; +select * from attest; + b | c +----+---- + 2 | 3 + 21 | 22 +(2 rows) + +copy attest(a) from stdin; +ERROR: column "a" of relation "attest" does not exist +copy attest("........pg.dropped.1........") from stdin; +ERROR: column "........pg.dropped.1........" 
of relation "attest" does not exist +copy attest(b,c) from stdin; +select * from attest; + b | c +----+---- + 2 | 3 + 21 | 22 + 31 | 32 +(3 rows) + +drop table attest; +-- test inheritance +create table dropColumn (a int, b int, e int); +create table dropColumnChild (c int) inherits (dropColumn); +create table dropColumnAnother (d int) inherits (dropColumnChild); +-- these two should fail +alter table dropColumnchild drop column a; +ERROR: cannot drop inherited column "a" +alter table only dropColumnChild drop column b; +ERROR: cannot drop inherited column "b" +-- these three should work +alter table only dropColumn drop column e; +alter table dropColumnChild drop column c; +alter table dropColumn drop column a; +create table renameColumn (a int); +create table renameColumnChild (b int) inherits (renameColumn); +create table renameColumnAnother (c int) inherits (renameColumnChild); +-- these three should fail +alter table renameColumnChild rename column a to d; +ERROR: cannot rename inherited column "a" +alter table only renameColumnChild rename column a to d; +ERROR: inherited column "a" must be renamed in child tables too +alter table only renameColumn rename column a to d; +ERROR: inherited column "a" must be renamed in child tables too +-- these should work +alter table renameColumn rename column a to d; +alter table renameColumnChild rename column b to a; +-- these should work +alter table if exists doesnt_exist_tab rename column a to d; +NOTICE: relation "doesnt_exist_tab" does not exist, skipping +alter table if exists doesnt_exist_tab rename column b to a; +NOTICE: relation "doesnt_exist_tab" does not exist, skipping +-- this should work +alter table renameColumn add column w int; +-- this should fail +alter table only renameColumn add column x int; +ERROR: column must be added to child tables too +-- Test corner cases in dropping of inherited columns +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); +NOTICE: merging column "f1" with inherited definition +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +ERROR: cannot drop inherited column "f1" +-- should work +alter table p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +select f1 from c1; + f1 +---- +(0 rows) + +alter table c1 drop column f1; +select f1 from c1; +ERROR: column "f1" does not exist +LINE 1: select f1 from c1; + ^ +HINT: Perhaps you meant to reference the column "c1.f2". +drop table p1 cascade; +NOTICE: drop cascades to table c1 +create table p1 (f1 int, f2 int); +create table c1 () inherits(p1); +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +ERROR: cannot drop inherited column "f1" +alter table p1 drop column f1; +-- c1.f1 is dropped now, since there is no local definition for it +select f1 from c1; +ERROR: column "f1" does not exist +LINE 1: select f1 from c1; + ^ +HINT: Perhaps you meant to reference the column "c1.f2". 
+drop table p1 cascade; +NOTICE: drop cascades to table c1 +create table p1 (f1 int, f2 int); +create table c1 () inherits(p1); +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +ERROR: cannot drop inherited column "f1" +alter table only p1 drop column f1; +-- c1.f1 is NOT dropped, but must now be considered non-inherited +alter table c1 drop column f1; +drop table p1 cascade; +NOTICE: drop cascades to table c1 +create table p1 (f1 int, f2 int); +create table c1 (f1 int not null) inherits(p1); +NOTICE: merging column "f1" with inherited definition +-- should be rejected since c1.f1 is inherited +alter table c1 drop column f1; +ERROR: cannot drop inherited column "f1" +alter table only p1 drop column f1; +-- c1.f1 is still there, but no longer inherited +alter table c1 drop column f1; +drop table p1 cascade; +NOTICE: drop cascades to table c1 +create table p1(id int, name text); +create table p2(id2 int, name text, height int); +create table c1(age int) inherits(p1,p2); +NOTICE: merging multiple inherited definitions of column "name" +create table gc1() inherits (c1); +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + relname | attname | attinhcount | attislocal +---------+---------+-------------+------------ + c1 | id | 1 | f + c1 | name | 2 | f + c1 | id2 | 1 | f + c1 | height | 1 | f + c1 | age | 0 | t + gc1 | id | 1 | f + gc1 | name | 1 | f + gc1 | id2 | 1 | f + gc1 | height | 1 | f + gc1 | age | 1 | f + p1 | id | 0 | t + p1 | name | 0 | t + p2 | id2 | 0 | t + p2 | name | 0 | t + p2 | height | 0 | t +(15 rows) + +-- should work +alter table only p1 drop column name; +-- should work. Now c1.name is local and inhcount is 0. 
+alter table p2 drop column name; +-- should be rejected since its inherited +alter table gc1 drop column name; +ERROR: cannot drop inherited column "name" +-- should work, and drop gc1.name along +alter table c1 drop column name; +-- should fail: column does not exist +alter table gc1 drop column name; +ERROR: column "name" of relation "gc1" does not exist +-- should work and drop the attribute in all tables +alter table p2 drop column height; +-- IF EXISTS test +create table dropColumnExists (); +alter table dropColumnExists drop column non_existing; --fail +ERROR: column "non_existing" of relation "dropcolumnexists" does not exist +alter table dropColumnExists drop column if exists non_existing; --succeed +NOTICE: column "non_existing" of relation "dropcolumnexists" does not exist, skipping +select relname, attname, attinhcount, attislocal +from pg_class join pg_attribute on (pg_class.oid = pg_attribute.attrelid) +where relname in ('p1','p2','c1','gc1') and attnum > 0 and not attisdropped +order by relname, attnum; + relname | attname | attinhcount | attislocal +---------+---------+-------------+------------ + c1 | id | 1 | f + c1 | id2 | 1 | f + c1 | age | 0 | t + gc1 | id | 1 | f + gc1 | id2 | 1 | f + gc1 | age | 1 | f + p1 | id | 0 | t + p2 | id2 | 0 | t +(8 rows) + +drop table p1, p2 cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table c1 +drop cascades to table gc1 +-- test attinhcount tracking with merged columns +create table depth0(); +create table depth1(c text) inherits (depth0); +create table depth2() inherits (depth1); +alter table depth0 add c text; +NOTICE: merging definition of column "c" for child "depth1" +select attrelid::regclass, attname, attinhcount, attislocal +from pg_attribute +where attnum > 0 and attrelid::regclass in ('depth0', 'depth1', 'depth2') +order by attrelid::regclass::text, attnum; + attrelid | attname | attinhcount | attislocal +----------+---------+-------------+------------ + depth0 | c | 0 | t + depth1 | c | 1 | t + depth2 | c | 1 | f +(3 rows) + +-- test renumbering of child-table columns in inherited operations +create table p1 (f1 int); +create table c1 (f2 text, f3 int) inherits (p1); +alter table p1 add column a1 int check (a1 > 0); +alter table p1 add column f2 text; +NOTICE: merging definition of column "f2" for child "c1" +insert into p1 values (1,2,'abc'); +insert into c1 values(11,'xyz',33,0); -- should fail +ERROR: new row for relation "c1" violates check constraint "p1_a1_check" +DETAIL: Failing row contains (11, xyz, 33, 0). 
+insert into c1 values(11,'xyz',33,22); +select * from p1; + f1 | a1 | f2 +----+----+----- + 1 | 2 | abc + 11 | 22 | xyz +(2 rows) + +update p1 set a1 = a1 + 1, f2 = upper(f2); +select * from p1; + f1 | a1 | f2 +----+----+----- + 1 | 3 | ABC + 11 | 23 | XYZ +(2 rows) + +drop table p1 cascade; +NOTICE: drop cascades to table c1 +-- test that operations with a dropped column do not try to reference +-- its datatype +create domain mytype as text; +create temp table foo (f1 text, f2 mytype, f3 text); +insert into foo values('bb','cc','dd'); +select * from foo; + f1 | f2 | f3 +----+----+---- + bb | cc | dd +(1 row) + +drop domain mytype cascade; +NOTICE: drop cascades to column f2 of table foo +select * from foo; + f1 | f3 +----+---- + bb | dd +(1 row) + +insert into foo values('qq','rr'); +select * from foo; + f1 | f3 +----+---- + bb | dd + qq | rr +(2 rows) + +update foo set f3 = 'zz'; +select * from foo; + f1 | f3 +----+---- + bb | zz + qq | zz +(2 rows) + +select f3,max(f1) from foo group by f3; + f3 | max +----+----- + zz | qq +(1 row) + +-- Simple tests for alter table column type +alter table foo alter f1 TYPE integer; -- fails +ERROR: column "f1" cannot be cast automatically to type integer +HINT: You might need to specify "USING f1::integer". +alter table foo alter f1 TYPE varchar(10); +create table anothertab (atcol1 serial8, atcol2 boolean, + constraint anothertab_chk check (atcol1 <= 3)); +insert into anothertab (atcol1, atcol2) values (default, true); +insert into anothertab (atcol1, atcol2) values (default, false); +select * from anothertab; + atcol1 | atcol2 +--------+-------- + 1 | t + 2 | f +(2 rows) + +alter table anothertab alter column atcol1 type boolean; -- fails +ERROR: column "atcol1" cannot be cast automatically to type boolean +HINT: You might need to specify "USING atcol1::boolean". +alter table anothertab alter column atcol1 type boolean using atcol1::int; -- fails +ERROR: result of USING clause for column "atcol1" cannot be cast automatically to type boolean +HINT: You might need to add an explicit cast. +alter table anothertab alter column atcol1 type integer; +select * from anothertab; + atcol1 | atcol2 +--------+-------- + 1 | t + 2 | f +(2 rows) + +insert into anothertab (atcol1, atcol2) values (45, null); -- fails +ERROR: new row for relation "anothertab" violates check constraint "anothertab_chk" +DETAIL: Failing row contains (45, null). +insert into anothertab (atcol1, atcol2) values (default, null); +select * from anothertab; + atcol1 | atcol2 +--------+-------- + 1 | t + 2 | f + 3 | +(3 rows) + +alter table anothertab alter column atcol2 type text + using case when atcol2 is true then 'IT WAS TRUE' + when atcol2 is false then 'IT WAS FALSE' + else 'IT WAS NULL!' end; +select * from anothertab; + atcol1 | atcol2 +--------+-------------- + 1 | IT WAS TRUE + 2 | IT WAS FALSE + 3 | IT WAS NULL! +(3 rows) + +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +ERROR: default for column "atcol1" cannot be cast automatically to type boolean +alter table anothertab alter column atcol1 drop default; +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; -- fails +ERROR: operator does not exist: boolean <= integer +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
+alter table anothertab drop constraint anothertab_chk; +alter table anothertab drop constraint anothertab_chk; -- fails +ERROR: constraint "anothertab_chk" of relation "anothertab" does not exist +alter table anothertab drop constraint IF EXISTS anothertab_chk; -- succeeds +NOTICE: constraint "anothertab_chk" of relation "anothertab" does not exist, skipping +alter table anothertab alter column atcol1 type boolean + using case when atcol1 % 2 = 0 then true else false end; +select * from anothertab; + atcol1 | atcol2 +--------+-------------- + f | IT WAS TRUE + t | IT WAS FALSE + f | IT WAS NULL! +(3 rows) + +drop table anothertab; +-- Test index handling in alter table column type (cf. bugs #15835, #15865) +create table anothertab(f1 int primary key, f2 int unique, + f3 int, f4 int, f5 int); +alter table anothertab + add exclude using btree (f3 with =); +alter table anothertab + add exclude using btree (f4 with =) where (f4 is not null); +alter table anothertab + add exclude using btree (f4 with =) where (f5 > 0); +alter table anothertab + add unique(f1,f4); +create index on anothertab(f2,f3); +create unique index on anothertab(f4); +\d anothertab + Table "public.anothertab" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + f1 | integer | | not null | + f2 | integer | | | + f3 | integer | | | + f4 | integer | | | + f5 | integer | | | +Indexes: + "anothertab_pkey" PRIMARY KEY, btree (f1) + "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4) + "anothertab_f2_f3_idx" btree (f2, f3) + "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2) + "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =) + "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL) + "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0) + "anothertab_f4_idx" UNIQUE, btree (f4) + +alter table anothertab alter column f1 type bigint; +alter table anothertab + alter column f2 type bigint, + alter column f3 type bigint, + alter column f4 type bigint; +alter table anothertab alter column f5 type bigint; +\d anothertab + Table "public.anothertab" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+--------- + f1 | bigint | | not null | + f2 | bigint | | | + f3 | bigint | | | + f4 | bigint | | | + f5 | bigint | | | +Indexes: + "anothertab_pkey" PRIMARY KEY, btree (f1) + "anothertab_f1_f4_key" UNIQUE CONSTRAINT, btree (f1, f4) + "anothertab_f2_f3_idx" btree (f2, f3) + "anothertab_f2_key" UNIQUE CONSTRAINT, btree (f2) + "anothertab_f3_excl" EXCLUDE USING btree (f3 WITH =) + "anothertab_f4_excl" EXCLUDE USING btree (f4 WITH =) WHERE (f4 IS NOT NULL) + "anothertab_f4_excl1" EXCLUDE USING btree (f4 WITH =) WHERE (f5 > 0) + "anothertab_f4_idx" UNIQUE, btree (f4) + +drop table anothertab; +-- test that USING expressions are parsed before column alter type / drop steps +create table another (f1 int, f2 text, f3 text); +insert into another values(1, 'one', 'uno'); +insert into another values(2, 'two', 'due'); +insert into another values(3, 'three', 'tre'); +select * from another; + f1 | f2 | f3 +----+-------+----- + 1 | one | uno + 2 | two | due + 3 | three | tre +(3 rows) + +alter table another + alter f1 type text using f2 || ' and ' || f3 || ' more', + alter f2 type bigint using f1 * 10, + drop column f3; +select * from another; + f1 | f2 +--------------------+---- + one and uno more | 10 + two and due more | 20 + three and tre more | 30 +(3 rows) + +drop table another; +-- Create an index that skips WAL, then 
perform a SET DATA TYPE that skips +-- rewriting the index. +begin; +create table skip_wal_skip_rewrite_index (c varchar(10) primary key); +alter table skip_wal_skip_rewrite_index alter c type varchar(20); +commit; +-- We disallow changing table's row type if it's used for storage +create table at_tab1 (a int, b text); +create table at_tab2 (x int, y at_tab1); +alter table at_tab1 alter column b type varchar; -- fails +ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type +drop table at_tab2; +-- Use of row type in an expression is defended differently +create table at_tab2 (x int, y text, check((x,y)::at_tab1 = (1,'42')::at_tab1)); +alter table at_tab1 alter column b type varchar; -- allowed, but ... +insert into at_tab2 values(1,'42'); -- ... this will fail +ERROR: ROW() column has type text instead of type character varying +drop table at_tab1, at_tab2; +-- Check it for a partitioned table, too +create table at_tab1 (a int, b text) partition by list(a); +create table at_tab2 (x int, y at_tab1); +alter table at_tab1 alter column b type varchar; -- fails +ERROR: cannot alter table "at_tab1" because column "at_tab2.y" uses its row type +drop table at_tab1, at_tab2; +-- Alter column type that's part of a partitioned index +create table at_partitioned (a int, b text) partition by range (a); +create table at_part_1 partition of at_partitioned for values from (0) to (1000); +insert into at_partitioned values (512, '0.123'); +create table at_part_2 (b text, a int); +insert into at_part_2 values ('1.234', 1024); +create index on at_partitioned (b); +create index on at_partitioned (a); +\d at_part_1 + Table "public.at_part_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | +Partition of: at_partitioned FOR VALUES FROM (0) TO (1000) +Indexes: + "at_part_1_a_idx" btree (a) + "at_part_1_b_idx" btree (b) + +\d at_part_2 + Table "public.at_part_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | text | | | + a | integer | | | + +alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000); +\d at_part_2 + Table "public.at_part_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | text | | | + a | integer | | | +Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000) +Indexes: + "at_part_2_a_idx" btree (a) + "at_part_2_b_idx" btree (b) + +alter table at_partitioned alter column b type numeric using b::numeric; +\d at_part_1 + Table "public.at_part_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | numeric | | | +Partition of: at_partitioned FOR VALUES FROM (0) TO (1000) +Indexes: + "at_part_1_a_idx" btree (a) + "at_part_1_b_idx" btree (b) + +\d at_part_2 + Table "public.at_part_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | numeric | | | + a | integer | | | +Partition of: at_partitioned FOR VALUES FROM (1000) TO (2000) +Indexes: + "at_part_2_a_idx" btree (a) + "at_part_2_b_idx" btree (b) + +drop table at_partitioned; +-- Alter column type when no table rewrite is required +-- Also check that comments are preserved +create table at_partitioned(id int, name varchar(64), unique (id, name)) + partition by hash(id); +comment on constraint at_partitioned_id_name_key on at_partitioned is 'parent 
constraint'; +comment on index at_partitioned_id_name_key is 'parent index'; +create table at_partitioned_0 partition of at_partitioned + for values with (modulus 2, remainder 0); +comment on constraint at_partitioned_0_id_name_key on at_partitioned_0 is 'child 0 constraint'; +comment on index at_partitioned_0_id_name_key is 'child 0 index'; +create table at_partitioned_1 partition of at_partitioned + for values with (modulus 2, remainder 1); +comment on constraint at_partitioned_1_id_name_key on at_partitioned_1 is 'child 1 constraint'; +comment on index at_partitioned_1_id_name_key is 'child 1 index'; +insert into at_partitioned values(1, 'foo'); +insert into at_partitioned values(3, 'bar'); +create temp table old_oids as + select relname, oid as oldoid, relfilenode as oldfilenode + from pg_class where relname like 'at_partitioned%'; +select relname, + c.oid = oldoid as orig_oid, + case relfilenode + when 0 then 'none' + when c.oid then 'own' + when oldfilenode then 'orig' + else 'OTHER' + end as storage, + obj_description(c.oid, 'pg_class') as desc + from pg_class c left join old_oids using (relname) + where relname like 'at_partitioned%' + order by relname; + relname | orig_oid | storage | desc +------------------------------+----------+---------+--------------- + at_partitioned | t | none | + at_partitioned_0 | t | own | + at_partitioned_0_id_name_key | t | own | child 0 index + at_partitioned_1 | t | own | + at_partitioned_1_id_name_key | t | own | child 1 index + at_partitioned_id_name_key | t | none | parent index +(6 rows) + +select conname, obj_description(oid, 'pg_constraint') as desc + from pg_constraint where conname like 'at_partitioned%' + order by conname; + conname | desc +------------------------------+-------------------- + at_partitioned_0_id_name_key | child 0 constraint + at_partitioned_1_id_name_key | child 1 constraint + at_partitioned_id_name_key | parent constraint +(3 rows) + +alter table at_partitioned alter column name type varchar(127); +-- Note: these tests currently show the wrong behavior for comments :-( +select relname, + c.oid = oldoid as orig_oid, + case relfilenode + when 0 then 'none' + when c.oid then 'own' + when oldfilenode then 'orig' + else 'OTHER' + end as storage, + obj_description(c.oid, 'pg_class') as desc + from pg_class c left join old_oids using (relname) + where relname like 'at_partitioned%' + order by relname; + relname | orig_oid | storage | desc +------------------------------+----------+---------+-------------- + at_partitioned | t | none | + at_partitioned_0 | t | own | + at_partitioned_0_id_name_key | f | own | parent index + at_partitioned_1 | t | own | + at_partitioned_1_id_name_key | f | own | parent index + at_partitioned_id_name_key | f | none | parent index +(6 rows) + +select conname, obj_description(oid, 'pg_constraint') as desc + from pg_constraint where conname like 'at_partitioned%' + order by conname; + conname | desc +------------------------------+------------------- + at_partitioned_0_id_name_key | + at_partitioned_1_id_name_key | + at_partitioned_id_name_key | parent constraint +(3 rows) + +-- Don't remove this DROP, it exposes bug #15672 +drop table at_partitioned; +-- disallow recursive containment of row types +create temp table recur1 (f1 int); +alter table recur1 add column f2 recur1; -- fails +ERROR: composite type recur1 cannot be made a member of itself +alter table recur1 add column f2 recur1[]; -- fails +ERROR: composite type recur1 cannot be made a member of itself +create domain array_of_recur1 as 
recur1[]; +alter table recur1 add column f2 array_of_recur1; -- fails +ERROR: composite type recur1 cannot be made a member of itself +create temp table recur2 (f1 int, f2 recur1); +alter table recur1 add column f2 recur2; -- fails +ERROR: composite type recur1 cannot be made a member of itself +alter table recur1 add column f2 int; +alter table recur1 alter column f2 type recur2; -- fails +ERROR: composite type recur1 cannot be made a member of itself +-- SET STORAGE may need to add a TOAST table +create table test_storage (a text, c text storage plain); +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; + has_toast_table +----------------- + t +(1 row) + +alter table test_storage alter a set storage plain; +-- rewrite table to remove its TOAST table; need a non-constant column default +alter table test_storage add b int default random()::int; +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; + has_toast_table +----------------- + f +(1 row) + +alter table test_storage alter a set storage default; -- re-add TOAST table +select reltoastrelid <> 0 as has_toast_table + from pg_class where oid = 'test_storage'::regclass; + has_toast_table +----------------- + t +(1 row) + +-- check STORAGE correctness +create table test_storage_failed (a text, b int storage extended); +ERROR: column data type integer can only have storage PLAIN +-- test that SET STORAGE propagates to index correctly +create index test_storage_idx on test_storage (b, a); +alter table test_storage alter column a set storage external; +\d+ test_storage + Table "public.test_storage" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+-------------------+----------+--------------+------------- + a | text | | | | external | | + c | text | | | | plain | | + b | integer | | | random()::integer | plain | | +Indexes: + "test_storage_idx" btree (b, a) + +\d+ test_storage_idx + Index "public.test_storage_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+----------+-------------- + b | integer | yes | b | plain | + a | text | yes | a | external | +btree, for table "public.test_storage" + +-- ALTER COLUMN TYPE with a check constraint and a child table (bug #13779) +CREATE TABLE test_inh_check (a float check (a > 10.2), b float); +CREATE TABLE test_inh_check_child() INHERITS(test_inh_check); +\d test_inh_check + Table "public.test_inh_check" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + a | double precision | | | + b | double precision | | | +Check constraints: + "test_inh_check_a_check" CHECK (a > 10.2::double precision) +Number of child tables: 1 (Use \d+ to list them.) 
+ +\d test_inh_check_child + Table "public.test_inh_check_child" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + a | double precision | | | + b | double precision | | | +Check constraints: + "test_inh_check_a_check" CHECK (a > 10.2::double precision) +Inherits: test_inh_check + +select relname, conname, coninhcount, conislocal, connoinherit + from pg_constraint c, pg_class r + where relname like 'test_inh_check%' and c.conrelid = r.oid + order by 1, 2; + relname | conname | coninhcount | conislocal | connoinherit +----------------------+------------------------+-------------+------------+-------------- + test_inh_check | test_inh_check_a_check | 0 | t | f + test_inh_check_child | test_inh_check_a_check | 1 | f | f +(2 rows) + +ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric; +\d test_inh_check + Table "public.test_inh_check" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + a | numeric | | | + b | double precision | | | +Check constraints: + "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) +Number of child tables: 1 (Use \d+ to list them.) + +\d test_inh_check_child + Table "public.test_inh_check_child" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + a | numeric | | | + b | double precision | | | +Check constraints: + "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) +Inherits: test_inh_check + +select relname, conname, coninhcount, conislocal, connoinherit + from pg_constraint c, pg_class r + where relname like 'test_inh_check%' and c.conrelid = r.oid + order by 1, 2; + relname | conname | coninhcount | conislocal | connoinherit +----------------------+------------------------+-------------+------------+-------------- + test_inh_check | test_inh_check_a_check | 0 | t | f + test_inh_check_child | test_inh_check_a_check | 1 | f | f +(2 rows) + +-- also try noinherit, local, and local+inherited cases +ALTER TABLE test_inh_check ADD CONSTRAINT bnoinherit CHECK (b > 100) NO INHERIT; +ALTER TABLE test_inh_check_child ADD CONSTRAINT blocal CHECK (b < 1000); +ALTER TABLE test_inh_check_child ADD CONSTRAINT bmerged CHECK (b > 1); +ALTER TABLE test_inh_check ADD CONSTRAINT bmerged CHECK (b > 1); +NOTICE: merging constraint "bmerged" with inherited definition +\d test_inh_check + Table "public.test_inh_check" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + a | numeric | | | + b | double precision | | | +Check constraints: + "bmerged" CHECK (b > 1::double precision) + "bnoinherit" CHECK (b > 100::double precision) NO INHERIT + "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) +Number of child tables: 1 (Use \d+ to list them.) 
+ +\d test_inh_check_child + Table "public.test_inh_check_child" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + a | numeric | | | + b | double precision | | | +Check constraints: + "blocal" CHECK (b < 1000::double precision) + "bmerged" CHECK (b > 1::double precision) + "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) +Inherits: test_inh_check + +select relname, conname, coninhcount, conislocal, connoinherit + from pg_constraint c, pg_class r + where relname like 'test_inh_check%' and c.conrelid = r.oid + order by 1, 2; + relname | conname | coninhcount | conislocal | connoinherit +----------------------+------------------------+-------------+------------+-------------- + test_inh_check | bmerged | 0 | t | f + test_inh_check | bnoinherit | 0 | t | t + test_inh_check | test_inh_check_a_check | 0 | t | f + test_inh_check_child | blocal | 0 | t | f + test_inh_check_child | bmerged | 1 | t | f + test_inh_check_child | test_inh_check_a_check | 1 | f | f +(6 rows) + +ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric; +NOTICE: merging constraint "bmerged" with inherited definition +\d test_inh_check + Table "public.test_inh_check" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | numeric | | | + b | numeric | | | +Check constraints: + "bmerged" CHECK (b::double precision > 1::double precision) + "bnoinherit" CHECK (b::double precision > 100::double precision) NO INHERIT + "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) +Number of child tables: 1 (Use \d+ to list them.) + +\d test_inh_check_child + Table "public.test_inh_check_child" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | numeric | | | + b | numeric | | | +Check constraints: + "blocal" CHECK (b::double precision < 1000::double precision) + "bmerged" CHECK (b::double precision > 1::double precision) + "test_inh_check_a_check" CHECK (a::double precision > 10.2::double precision) +Inherits: test_inh_check + +select relname, conname, coninhcount, conislocal, connoinherit + from pg_constraint c, pg_class r + where relname like 'test_inh_check%' and c.conrelid = r.oid + order by 1, 2; + relname | conname | coninhcount | conislocal | connoinherit +----------------------+------------------------+-------------+------------+-------------- + test_inh_check | bmerged | 0 | t | f + test_inh_check | bnoinherit | 0 | t | t + test_inh_check | test_inh_check_a_check | 0 | t | f + test_inh_check_child | blocal | 0 | t | f + test_inh_check_child | bmerged | 1 | t | f + test_inh_check_child | test_inh_check_a_check | 1 | f | f +(6 rows) + +-- ALTER COLUMN TYPE with different schema in children +-- Bug at https://postgr.es/m/20170102225618.GA10071@telsasoft.com +CREATE TABLE test_type_diff (f1 int); +CREATE TABLE test_type_diff_c (extra smallint) INHERITS (test_type_diff); +ALTER TABLE test_type_diff ADD COLUMN f2 int; +INSERT INTO test_type_diff_c VALUES (1, 2, 3); +ALTER TABLE test_type_diff ALTER COLUMN f2 TYPE bigint USING f2::bigint; +CREATE TABLE test_type_diff2 (int_two int2, int_four int4, int_eight int8); +CREATE TABLE test_type_diff2_c1 (int_four int4, int_eight int8, int_two int2); +CREATE TABLE test_type_diff2_c2 (int_eight int8, int_two int2, int_four int4); +CREATE TABLE test_type_diff2_c3 (int_two int2, int_four int4, int_eight int8); +ALTER TABLE test_type_diff2_c1 INHERIT test_type_diff2; +ALTER 
TABLE test_type_diff2_c2 INHERIT test_type_diff2; +ALTER TABLE test_type_diff2_c3 INHERIT test_type_diff2; +INSERT INTO test_type_diff2_c1 VALUES (1, 2, 3); +INSERT INTO test_type_diff2_c2 VALUES (4, 5, 6); +INSERT INTO test_type_diff2_c3 VALUES (7, 8, 9); +ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int8 USING int_four::int8; +-- whole-row references are disallowed +ALTER TABLE test_type_diff2 ALTER COLUMN int_four TYPE int4 USING (pg_column_size(test_type_diff2)); +ERROR: cannot convert whole-row table reference +DETAIL: USING expression contains a whole-row table reference. +-- check for rollback of ANALYZE corrupting table property flags (bug #11638) +CREATE TABLE check_fk_presence_1 (id int PRIMARY KEY, t text); +CREATE TABLE check_fk_presence_2 (id int REFERENCES check_fk_presence_1, t text); +BEGIN; +ALTER TABLE check_fk_presence_2 DROP CONSTRAINT check_fk_presence_2_id_fkey; +ANALYZE check_fk_presence_2; +ROLLBACK; +\d check_fk_presence_2 + Table "public.check_fk_presence_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + t | text | | | +Foreign-key constraints: + "check_fk_presence_2_id_fkey" FOREIGN KEY (id) REFERENCES check_fk_presence_1(id) + +DROP TABLE check_fk_presence_1, check_fk_presence_2; +-- check column addition within a view (bug #14876) +create table at_base_table(id int, stuff text); +insert into at_base_table values (23, 'skidoo'); +create view at_view_1 as select * from at_base_table bt; +create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1; +\d+ at_view_1 + View "public.at_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | +View definition: + SELECT id, + stuff + FROM at_base_table bt; + +\d+ at_view_2 + View "public.at_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | + j | json | | | | extended | +View definition: + SELECT id, + stuff, + to_json(v1.*) AS j + FROM at_view_1 v1; + +explain (verbose, costs off) select * from at_view_2; + QUERY PLAN +---------------------------------------------------------- + Seq Scan on public.at_base_table bt + Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff)) +(2 rows) + +select * from at_view_2; + id | stuff | j +----+--------+---------------------------- + 23 | skidoo | {"id":23,"stuff":"skidoo"} +(1 row) + +create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt; +\d+ at_view_1 + View "public.at_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | + more | integer | | | | plain | +View definition: + SELECT id, + stuff, + 2 + 2 AS more + FROM at_base_table bt; + +\d+ at_view_2 + View "public.at_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + id | integer | | | | plain | + stuff | text | | | | extended | + j | json | | | | extended | +View definition: + SELECT id, + stuff, + to_json(v1.*) AS j + FROM at_view_1 v1; + +explain (verbose, costs off) select * from at_view_2; + 
QUERY PLAN +------------------------------------------------------------- + Seq Scan on public.at_base_table bt + Output: bt.id, bt.stuff, to_json(ROW(bt.id, bt.stuff, 4)) +(2 rows) + +select * from at_view_2; + id | stuff | j +----+--------+------------------------------------- + 23 | skidoo | {"id":23,"stuff":"skidoo","more":4} +(1 row) + +drop view at_view_2; +drop view at_view_1; +drop table at_base_table; +-- related case (bug #17811) +begin; +create temp table t1 as select * from int8_tbl; +create temp view v1 as select 1::int8 as q1; +create temp view v2 as select * from v1; +create or replace temp view v1 with (security_barrier = true) + as select * from t1; +create temp table log (q1 int8, q2 int8); +create rule v1_upd_rule as on update to v1 + do also insert into log values (new.*); +update v2 set q1 = q1 + 1 where q1 = 123; +select * from t1; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 124 | 456 + 124 | 4567890123456789 +(5 rows) + +select * from log; + q1 | q2 +-----+------------------ + 124 | 456 + 124 | 4567890123456789 +(2 rows) + +rollback; +-- check adding a column not itself requiring a rewrite, together with +-- a column requiring a default (bug #16038) +-- ensure that rewrites aren't silently optimized away, removing the +-- value of the test +CREATE FUNCTION check_ddl_rewrite(p_tablename regclass, p_ddl text) +RETURNS boolean +LANGUAGE plpgsql AS $$ +DECLARE + v_relfilenode oid; +BEGIN + v_relfilenode := relfilenode FROM pg_class WHERE oid = p_tablename; + + EXECUTE p_ddl; + + RETURN v_relfilenode <> (SELECT relfilenode FROM pg_class WHERE oid = p_tablename); +END; +$$; +CREATE TABLE rewrite_test(col text); +INSERT INTO rewrite_test VALUES ('something'); +INSERT INTO rewrite_test VALUES (NULL); +-- empty[12] don't need rewrite, but notempty[12]_rewrite will force one +SELECT check_ddl_rewrite('rewrite_test', $$ + ALTER TABLE rewrite_test + ADD COLUMN empty1 text, + ADD COLUMN notempty1_rewrite serial; +$$); + check_ddl_rewrite +------------------- + t +(1 row) + +SELECT check_ddl_rewrite('rewrite_test', $$ + ALTER TABLE rewrite_test + ADD COLUMN notempty2_rewrite serial, + ADD COLUMN empty2 text; +$$); + check_ddl_rewrite +------------------- + t +(1 row) + +-- also check that fast defaults cause no problem, first without rewrite +SELECT check_ddl_rewrite('rewrite_test', $$ + ALTER TABLE rewrite_test + ADD COLUMN empty3 text, + ADD COLUMN notempty3_norewrite int default 42; +$$); + check_ddl_rewrite +------------------- + f +(1 row) + +SELECT check_ddl_rewrite('rewrite_test', $$ + ALTER TABLE rewrite_test + ADD COLUMN notempty4_norewrite int default 42, + ADD COLUMN empty4 text; +$$); + check_ddl_rewrite +------------------- + f +(1 row) + +-- then with rewrite +SELECT check_ddl_rewrite('rewrite_test', $$ + ALTER TABLE rewrite_test + ADD COLUMN empty5 text, + ADD COLUMN notempty5_norewrite int default 42, + ADD COLUMN notempty5_rewrite serial; +$$); + check_ddl_rewrite +------------------- + t +(1 row) + +SELECT check_ddl_rewrite('rewrite_test', $$ + ALTER TABLE rewrite_test + ADD COLUMN notempty6_rewrite serial, + ADD COLUMN empty6 text, + ADD COLUMN notempty6_norewrite int default 42; +$$); + check_ddl_rewrite +------------------- + t +(1 row) + +-- cleanup +DROP FUNCTION check_ddl_rewrite(regclass, text); +DROP TABLE rewrite_test; +-- +-- lock levels +-- +drop type lockmodes; +ERROR: type "lockmodes" does not exist +create type lockmodes as enum ( + 
'SIReadLock' +,'AccessShareLock' +,'RowShareLock' +,'RowExclusiveLock' +,'ShareUpdateExclusiveLock' +,'ShareLock' +,'ShareRowExclusiveLock' +,'ExclusiveLock' +,'AccessExclusiveLock' +); +drop view my_locks; +ERROR: view "my_locks" does not exist +create or replace view my_locks as +select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode +from pg_locks l join pg_class c on l.relation = c.oid +where virtualtransaction = ( + select virtualtransaction + from pg_locks + where transactionid = pg_current_xact_id()::xid) +and locktype = 'relation' +and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') +and c.relname != 'my_locks' +group by c.relname; +create table alterlock (f1 int primary key, f2 text); +insert into alterlock values (1, 'foo'); +create table alterlock2 (f3 int primary key, f1 int); +insert into alterlock2 values (1, 1); +begin; alter table alterlock alter column f2 set statistics 150; +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock +(1 row) + +rollback; +begin; alter table alterlock cluster on alterlock_pkey; +select * from my_locks order by 1; + relname | max_lockmode +----------------+-------------------------- + alterlock | ShareUpdateExclusiveLock + alterlock_pkey | ShareUpdateExclusiveLock +(2 rows) + +commit; +begin; alter table alterlock set without cluster; +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock +(1 row) + +commit; +begin; alter table alterlock set (fillfactor = 100); +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock + pg_toast | ShareUpdateExclusiveLock +(2 rows) + +commit; +begin; alter table alterlock reset (fillfactor); +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock + pg_toast | ShareUpdateExclusiveLock +(2 rows) + +commit; +begin; alter table alterlock set (toast.autovacuum_enabled = off); +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock + pg_toast | ShareUpdateExclusiveLock +(2 rows) + +commit; +begin; alter table alterlock set (autovacuum_enabled = off); +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock + pg_toast | ShareUpdateExclusiveLock +(2 rows) + +commit; +begin; alter table alterlock alter column f2 set (n_distinct = 1); +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock +(1 row) + +rollback; +-- test that mixing options with different lock levels works as expected +begin; alter table alterlock set (autovacuum_enabled = off, fillfactor = 80); +select * from my_locks order by 1; + relname | max_lockmode +-----------+-------------------------- + alterlock | ShareUpdateExclusiveLock + pg_toast | ShareUpdateExclusiveLock +(2 rows) + +commit; +begin; alter table alterlock alter column f2 set storage extended; +select * from my_locks order by 1; + relname | max_lockmode +-----------+--------------------- + alterlock | AccessExclusiveLock +(1 row) + +rollback; +begin; alter table alterlock alter column f2 set default 'x'; +select * from my_locks 
order by 1; + relname | max_lockmode +-----------+--------------------- + alterlock | AccessExclusiveLock +(1 row) + +rollback; +begin; +create trigger ttdummy + before delete or update on alterlock + for each row + execute procedure + ttdummy (1, 1); +select * from my_locks order by 1; + relname | max_lockmode +-----------+----------------------- + alterlock | ShareRowExclusiveLock +(1 row) + +rollback; +begin; +select * from my_locks order by 1; + relname | max_lockmode +---------+-------------- +(0 rows) + +alter table alterlock2 add foreign key (f1) references alterlock (f1); +select * from my_locks order by 1; + relname | max_lockmode +-----------------+----------------------- + alterlock | ShareRowExclusiveLock + alterlock2 | ShareRowExclusiveLock + alterlock2_pkey | AccessShareLock + alterlock_pkey | AccessShareLock +(4 rows) + +rollback; +begin; +alter table alterlock2 +add constraint alterlock2nv foreign key (f1) references alterlock (f1) NOT VALID; +select * from my_locks order by 1; + relname | max_lockmode +------------+----------------------- + alterlock | ShareRowExclusiveLock + alterlock2 | ShareRowExclusiveLock +(2 rows) + +commit; +begin; +alter table alterlock2 validate constraint alterlock2nv; +select * from my_locks order by 1; + relname | max_lockmode +-----------------+-------------------------- + alterlock | RowShareLock + alterlock2 | ShareUpdateExclusiveLock + alterlock2_pkey | AccessShareLock + alterlock_pkey | AccessShareLock +(4 rows) + +rollback; +create or replace view my_locks as +select case when c.relname like 'pg_toast%' then 'pg_toast' else c.relname end, max(mode::lockmodes) as max_lockmode +from pg_locks l join pg_class c on l.relation = c.oid +where virtualtransaction = ( + select virtualtransaction + from pg_locks + where transactionid = pg_current_xact_id()::xid) +and locktype = 'relation' +and relnamespace != (select oid from pg_namespace where nspname = 'pg_catalog') +and c.relname = 'my_locks' +group by c.relname; +-- raise exception +alter table my_locks set (autovacuum_enabled = false); +ERROR: unrecognized parameter "autovacuum_enabled" +alter view my_locks set (autovacuum_enabled = false); +ERROR: unrecognized parameter "autovacuum_enabled" +alter table my_locks reset (autovacuum_enabled); +alter view my_locks reset (autovacuum_enabled); +begin; +alter view my_locks set (security_barrier=off); +select * from my_locks order by 1; + relname | max_lockmode +----------+--------------------- + my_locks | AccessExclusiveLock +(1 row) + +alter view my_locks reset (security_barrier); +rollback; +-- this test intentionally applies the ALTER TABLE command against a view, but +-- uses a view option so we expect this to succeed. 
This form of SQL is +-- accepted for historical reasons, as shown in the docs for ALTER VIEW +begin; +alter table my_locks set (security_barrier=off); +select * from my_locks order by 1; + relname | max_lockmode +----------+--------------------- + my_locks | AccessExclusiveLock +(1 row) + +alter table my_locks reset (security_barrier); +rollback; +-- cleanup +drop table alterlock2; +drop table alterlock; +drop view my_locks; +drop type lockmodes; +-- +-- alter function +-- +create function test_strict(text) returns text as + 'select coalesce($1, ''got passed a null'');' + language sql returns null on null input; +select test_strict(NULL); + test_strict +------------- + +(1 row) + +alter function test_strict(text) called on null input; +select test_strict(NULL); + test_strict +------------------- + got passed a null +(1 row) + +create function non_strict(text) returns text as + 'select coalesce($1, ''got passed a null'');' + language sql called on null input; +select non_strict(NULL); + non_strict +------------------- + got passed a null +(1 row) + +alter function non_strict(text) returns null on null input; +select non_strict(NULL); + non_strict +------------ + +(1 row) + +-- +-- alter object set schema +-- +create schema alter1; +create schema alter2; +create table alter1.t1(f1 serial primary key, f2 int check (f2 > 0)); +create view alter1.v1 as select * from alter1.t1; +create function alter1.plus1(int) returns int as 'select $1+1' language sql; +create domain alter1.posint integer check (value > 0); +create type alter1.ctype as (f1 int, f2 text); +create function alter1.same(alter1.ctype, alter1.ctype) returns boolean language sql +as 'select $1.f1 is not distinct from $2.f1 and $1.f2 is not distinct from $2.f2'; +create operator alter1.=(procedure = alter1.same, leftarg = alter1.ctype, rightarg = alter1.ctype); +create operator class alter1.ctype_hash_ops default for type alter1.ctype using hash as + operator 1 alter1.=(alter1.ctype, alter1.ctype); +create conversion alter1.latin1_to_utf8 for 'latin1' to 'utf8' from iso8859_1_to_utf8; +create text search parser alter1.prs(start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +create text search configuration alter1.cfg(parser = alter1.prs); +create text search template alter1.tmpl(init = dsimple_init, lexize = dsimple_lexize); +create text search dictionary alter1.dict(template = alter1.tmpl); +insert into alter1.t1(f2) values(11); +insert into alter1.t1(f2) values(12); +alter table alter1.t1 set schema alter1; -- no-op, same schema +alter table alter1.t1 set schema alter2; +alter table alter1.v1 set schema alter2; +alter function alter1.plus1(int) set schema alter2; +alter domain alter1.posint set schema alter2; +alter operator class alter1.ctype_hash_ops using hash set schema alter2; +alter operator family alter1.ctype_hash_ops using hash set schema alter2; +alter operator alter1.=(alter1.ctype, alter1.ctype) set schema alter2; +alter function alter1.same(alter1.ctype, alter1.ctype) set schema alter2; +alter type alter1.ctype set schema alter1; -- no-op, same schema +alter type alter1.ctype set schema alter2; +alter conversion alter1.latin1_to_utf8 set schema alter2; +alter text search parser alter1.prs set schema alter2; +alter text search configuration alter1.cfg set schema alter2; +alter text search template alter1.tmpl set schema alter2; +alter text search dictionary alter1.dict set schema alter2; +-- this should succeed because nothing is left in alter1 +drop schema alter1; +insert into 
alter2.t1(f2) values(13); +insert into alter2.t1(f2) values(14); +select * from alter2.t1; + f1 | f2 +----+---- + 1 | 11 + 2 | 12 + 3 | 13 + 4 | 14 +(4 rows) + +select * from alter2.v1; + f1 | f2 +----+---- + 1 | 11 + 2 | 12 + 3 | 13 + 4 | 14 +(4 rows) + +select alter2.plus1(41); + plus1 +------- + 42 +(1 row) + +-- clean up +drop schema alter2 cascade; +NOTICE: drop cascades to 13 other objects +DETAIL: drop cascades to table alter2.t1 +drop cascades to view alter2.v1 +drop cascades to function alter2.plus1(integer) +drop cascades to type alter2.posint +drop cascades to type alter2.ctype +drop cascades to function alter2.same(alter2.ctype,alter2.ctype) +drop cascades to operator alter2.=(alter2.ctype,alter2.ctype) +drop cascades to operator family alter2.ctype_hash_ops for access method hash +drop cascades to conversion alter2.latin1_to_utf8 +drop cascades to text search parser alter2.prs +drop cascades to text search configuration alter2.cfg +drop cascades to text search template alter2.tmpl +drop cascades to text search dictionary alter2.dict +-- +-- composite types +-- +CREATE TYPE test_type AS (a int); +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + +ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails +ERROR: relation "nosuchtype" does not exist +ALTER TYPE test_type ADD ATTRIBUTE b text; +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | + +ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails +ERROR: column "b" of relation "test_type" already exists +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar; +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+--------- + a | integer | | | + b | character varying | | | + +ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer; +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + +ALTER TYPE test_type DROP ATTRIBUTE b; +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + +ALTER TYPE test_type DROP ATTRIBUTE c; -- fails +ERROR: column "c" of relation "test_type" does not exist +ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c; +NOTICE: column "c" of relation "test_type" does not exist, skipping +ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean; +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + d | boolean | | | + +ALTER TYPE test_type RENAME ATTRIBUTE a TO aa; +ERROR: column "a" does not exist +ALTER TYPE test_type RENAME ATTRIBUTE d TO dd; +\d test_type + Composite type "public.test_type" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + dd | boolean | | | + +DROP TYPE test_type; +CREATE TYPE test_type1 AS (a int, b text); +CREATE TABLE test_tbl1 (x int, y test_type1); +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails +ERROR: cannot alter type "test_type1" because column "test_tbl1.y" uses it +DROP TABLE test_tbl1; +CREATE TABLE 
test_tbl1 (x int, y text); +CREATE INDEX test_tbl1_idx ON test_tbl1((row(x,y)::test_type1)); +ALTER TYPE test_type1 ALTER ATTRIBUTE b TYPE varchar; -- fails +ERROR: cannot alter type "test_type1" because column "test_tbl1_idx.row" uses it +DROP TABLE test_tbl1; +DROP TYPE test_type1; +CREATE TYPE test_type2 AS (a int, b text); +CREATE TABLE test_tbl2 OF test_type2; +CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2); +\d test_type2 + Composite type "public.test_type2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | + +\d test_tbl2 + Table "public.test_tbl2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | +Number of child tables: 1 (Use \d+ to list them.) +Typed table of type: test_type2 + +ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails +ERROR: cannot alter type "test_type2" because it is the type of a typed table +HINT: Use ALTER ... CASCADE to alter the typed tables too. +ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE; +\d test_type2 + Composite type "public.test_type2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | + c | text | | | + +\d test_tbl2 + Table "public.test_tbl2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | + c | text | | | +Number of child tables: 1 (Use \d+ to list them.) +Typed table of type: test_type2 + +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails +ERROR: cannot alter type "test_type2" because it is the type of a typed table +HINT: Use ALTER ... CASCADE to alter the typed tables too. +ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE; +\d test_type2 + Composite type "public.test_type2" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+--------- + a | integer | | | + b | character varying | | | + c | text | | | + +\d test_tbl2 + Table "public.test_tbl2" + Column | Type | Collation | Nullable | Default +--------+-------------------+-----------+----------+--------- + a | integer | | | + b | character varying | | | + c | text | | | +Number of child tables: 1 (Use \d+ to list them.) +Typed table of type: test_type2 + +ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails +ERROR: cannot alter type "test_type2" because it is the type of a typed table +HINT: Use ALTER ... CASCADE to alter the typed tables too. +ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE; +\d test_type2 + Composite type "public.test_type2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + c | text | | | + +\d test_tbl2 + Table "public.test_tbl2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + c | text | | | +Number of child tables: 1 (Use \d+ to list them.) +Typed table of type: test_type2 + +ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails +ERROR: cannot alter type "test_type2" because it is the type of a typed table +HINT: Use ALTER ... CASCADE to alter the typed tables too. 
+ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE; +\d test_type2 + Composite type "public.test_type2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + aa | integer | | | + c | text | | | + +\d test_tbl2 + Table "public.test_tbl2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + aa | integer | | | + c | text | | | +Number of child tables: 1 (Use \d+ to list them.) +Typed table of type: test_type2 + +\d test_tbl2_subclass + Table "public.test_tbl2_subclass" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + aa | integer | | | + c | text | | | +Inherits: test_tbl2 + +DROP TABLE test_tbl2_subclass, test_tbl2; +DROP TYPE test_type2; +CREATE TYPE test_typex AS (a int, b text); +CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0)); +ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails +ERROR: cannot drop column a of composite type test_typex because other objects depend on it +DETAIL: constraint test_tblx_y_check on table test_tblx depends on column a of composite type test_typex +HINT: Use DROP ... CASCADE to drop the dependent objects too. +ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE; +NOTICE: drop cascades to constraint test_tblx_y_check on table test_tblx +\d test_tblx + Table "public.test_tblx" + Column | Type | Collation | Nullable | Default +--------+------------+-----------+----------+--------- + x | integer | | | + y | test_typex | | | + +DROP TABLE test_tblx; +DROP TYPE test_typex; +-- This test isn't that interesting on its own, but the purpose is to leave +-- behind a table to test pg_upgrade with. The table has a composite type +-- column in it, and the composite type has a dropped attribute. 
+CREATE TYPE test_type3 AS (a int); +CREATE TABLE test_tbl3 (c) AS SELECT '(1)'::test_type3; +ALTER TYPE test_type3 DROP ATTRIBUTE a, ADD ATTRIBUTE b int; +CREATE TYPE test_type_empty AS (); +DROP TYPE test_type_empty; +-- +-- typed tables: OF / NOT OF +-- +CREATE TYPE tt_t0 AS (z inet, x int, y numeric(8,2)); +ALTER TYPE tt_t0 DROP ATTRIBUTE z; +CREATE TABLE tt0 (x int NOT NULL, y numeric(8,2)); -- OK +CREATE TABLE tt1 (x int, y bigint); -- wrong base type +CREATE TABLE tt2 (x int, y numeric(9,2)); -- wrong typmod +CREATE TABLE tt3 (y numeric(8,2), x int); -- wrong column order +CREATE TABLE tt4 (x int); -- too few columns +CREATE TABLE tt5 (x int, y numeric(8,2), z int); -- too many columns +CREATE TABLE tt6 () INHERITS (tt0); -- can't have a parent +CREATE TABLE tt7 (x int, q text, y numeric(8,2)); +ALTER TABLE tt7 DROP q; -- OK +ALTER TABLE tt0 OF tt_t0; +ALTER TABLE tt1 OF tt_t0; +ERROR: table "tt1" has different type for column "y" +ALTER TABLE tt2 OF tt_t0; +ERROR: table "tt2" has different type for column "y" +ALTER TABLE tt3 OF tt_t0; +ERROR: table has column "y" where type requires "x" +ALTER TABLE tt4 OF tt_t0; +ERROR: table is missing column "y" +ALTER TABLE tt5 OF tt_t0; +ERROR: table has extra column "z" +ALTER TABLE tt6 OF tt_t0; +ERROR: typed tables cannot inherit +ALTER TABLE tt7 OF tt_t0; +CREATE TYPE tt_t1 AS (x int, y numeric(8,2)); +ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table +ALTER TABLE tt7 NOT OF; +\d tt7 + Table "public.tt7" + Column | Type | Collation | Nullable | Default +--------+--------------+-----------+----------+--------- + x | integer | | | + y | numeric(8,2) | | | + +-- make sure we can drop a constraint on the parent but it remains on the child +CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL)); +CREATE TABLE test_drop_constr_child () INHERITS (test_drop_constr_parent); +ALTER TABLE ONLY test_drop_constr_parent DROP CONSTRAINT "test_drop_constr_parent_c_check"; +-- should fail +INSERT INTO test_drop_constr_child (c) VALUES (NULL); +ERROR: new row for relation "test_drop_constr_child" violates check constraint "test_drop_constr_parent_c_check" +DETAIL: Failing row contains (null).
+DROP TABLE test_drop_constr_parent CASCADE; +NOTICE: drop cascades to table test_drop_constr_child +-- +-- IF EXISTS test +-- +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +NOTICE: relation "tt8" does not exist, skipping +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +NOTICE: relation "tt8" does not exist, skipping +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +NOTICE: relation "tt8" does not exist, skipping +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +NOTICE: relation "tt8" does not exist, skipping +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +NOTICE: relation "tt8" does not exist, skipping +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; +NOTICE: relation "tt8" does not exist, skipping +CREATE TABLE tt8(a int); +CREATE SCHEMA alter2; +ALTER TABLE IF EXISTS tt8 ADD COLUMN f int; +ALTER TABLE IF EXISTS tt8 ADD CONSTRAINT xxx PRIMARY KEY(f); +ALTER TABLE IF EXISTS tt8 ADD CHECK (f BETWEEN 0 AND 10); +ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0; +ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1; +ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; +\d alter2.tt8 + Table "alter2.tt8" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + f1 | integer | | not null | 0 +Indexes: + "xxx" PRIMARY KEY, btree (f1) +Check constraints: + "tt8_f_check" CHECK (f1 >= 0 AND f1 <= 10) + +DROP TABLE alter2.tt8; +DROP SCHEMA alter2; +-- +-- Check conflicts between index and CHECK constraint names +-- +CREATE TABLE tt9(c integer); +ALTER TABLE tt9 ADD CHECK(c > 1); +ALTER TABLE tt9 ADD CHECK(c > 2); -- picks nonconflicting name +ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 3); +ALTER TABLE tt9 ADD CONSTRAINT foo CHECK(c > 4); -- fail, dup name +ERROR: constraint "foo" for relation "tt9" already exists +ALTER TABLE tt9 ADD UNIQUE(c); +ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key UNIQUE(c); -- fail, dup name +ERROR: relation "tt9_c_key" already exists +ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name +ERROR: constraint "foo" for relation "tt9" already exists +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name +ERROR: constraint "tt9_c_key" for relation "tt9" already exists +ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6); +ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name +\d tt9 + Table "public.tt9" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c | integer | | | +Indexes: + "tt9_c_key" UNIQUE CONSTRAINT, btree (c) + "tt9_c_key1" UNIQUE CONSTRAINT, btree (c) + "tt9_c_key3" UNIQUE CONSTRAINT, btree (c) +Check constraints: + "foo" CHECK (c > 3) + "tt9_c_check" CHECK (c > 1) + "tt9_c_check1" CHECK (c > 2) + "tt9_c_key2" CHECK (c > 6) + +DROP TABLE tt9; +-- Check that comments on constraints and indexes are not lost at ALTER TABLE. 
+CREATE TABLE comment_test ( + id int, + positive_col int CHECK (positive_col > 0), + indexed_col int, + CONSTRAINT comment_test_pk PRIMARY KEY (id)); +CREATE INDEX comment_test_index ON comment_test(indexed_col); +COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test'; +COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test'; +COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col'; +COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test'; +COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test'; +SELECT col_description('comment_test'::regclass, 1) as comment; + comment +----------------------------- + Column 'id' on comment_test +(1 row) + +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; + index | comment +--------------------+----------------------------------------------- + comment_test_index | Simple index on comment_test + comment_test_pk | Index backing the PRIMARY KEY of comment_test +(2 rows) + +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; + constraint | comment +---------------------------------+----------------------------------------------- + comment_test_pk | PRIMARY KEY constraint of comment_test + comment_test_positive_col_check | CHECK constraint on comment_test.positive_col +(2 rows) + +-- Change the datatype of all the columns. ALTER TABLE is optimized to not +-- rebuild an index if the new data type is binary compatible with the old +-- one. Check this by doing a dummy ALTER TABLE that doesn't change the +-- datatype first, to test the no-op codepath, and then one that does. +ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint; +-- Check that the comments are intact. +SELECT col_description('comment_test'::regclass, 1) as comment; + comment +----------------------------- + Column 'id' on comment_test +(1 row) + +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; + index | comment +--------------------+----------------------------------------------- + comment_test_index | Simple index on comment_test + comment_test_pk | Index backing the PRIMARY KEY of comment_test +(2 rows) + +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; + constraint | comment +---------------------------------+----------------------------------------------- + comment_test_pk | PRIMARY KEY constraint of comment_test + comment_test_positive_col_check | CHECK constraint on comment_test.positive_col +(2 rows) + +-- Check compatibility for foreign keys and comments. This is done +-- separately as rebuilding the column type of the parent leads +-- to an error and would reduce the test scope.
+CREATE TABLE comment_test_child ( + id text CONSTRAINT comment_test_child_fk REFERENCES comment_test); +CREATE INDEX comment_test_child_fk ON comment_test_child(id); +COMMENT ON COLUMN comment_test_child.id IS 'Column ''id'' on comment_test_child'; +COMMENT ON INDEX comment_test_child_fk IS 'Index backing the FOREIGN KEY of comment_test_child'; +COMMENT ON CONSTRAINT comment_test_child_fk ON comment_test_child IS 'FOREIGN KEY constraint of comment_test_child'; +-- Change column type of parent +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int USING id::integer; +ERROR: foreign key constraint "comment_test_child_fk" cannot be implemented +DETAIL: Key columns "id" and "id" are of incompatible types: text and integer. +-- Comments should be intact +SELECT col_description('comment_test_child'::regclass, 1) as comment; + comment +----------------------------------- + Column 'id' on comment_test_child +(1 row) + +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test_child'::regclass ORDER BY 1, 2; + index | comment +-----------------------+----------------------------------------------------- + comment_test_child_fk | Index backing the FOREIGN KEY of comment_test_child +(1 row) + +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test_child'::regclass ORDER BY 1, 2; + constraint | comment +-----------------------+---------------------------------------------- + comment_test_child_fk | FOREIGN KEY constraint of comment_test_child +(1 row) + +-- Check that we map relation oids to filenodes and back correctly. Only +-- display bad mappings so the test output doesn't change all the time. A +-- filenode function call can return NULL for a relation dropped concurrently +-- with the call's surrounding query, so ignore a NULL mapped_oid for +-- relations that no longer exist after all calls finish. +CREATE TEMP TABLE filenode_mapping AS +SELECT + oid, mapped_oid, reltablespace, relfilenode, relname +FROM pg_class, + pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid +WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid; +SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid +WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL; + oid | mapped_oid | reltablespace | relfilenode | relname +-----+------------+---------------+-------------+--------- +(0 rows) + +-- Checks on creating and manipulation of user defined relations in +-- pg_catalog. +SHOW allow_system_table_mods; + allow_system_table_mods +------------------------- + off +(1 row) + +-- disallowed because of search_path issues with pg_dump +CREATE TABLE pg_catalog.new_system_table(); +ERROR: permission denied to create "pg_catalog.new_system_table" +DETAIL: System catalog modifications are currently disallowed. 
+-- instead create in public first, move to catalog +CREATE TABLE new_system_table(id serial primary key, othercol text); +ALTER TABLE new_system_table SET SCHEMA pg_catalog; +ALTER TABLE new_system_table SET SCHEMA public; +ALTER TABLE new_system_table SET SCHEMA pg_catalog; +-- will be ignored -- already there: +ALTER TABLE new_system_table SET SCHEMA pg_catalog; +ALTER TABLE new_system_table RENAME TO old_system_table; +CREATE INDEX old_system_table__othercol ON old_system_table (othercol); +INSERT INTO old_system_table(othercol) VALUES ('somedata'), ('otherdata'); +UPDATE old_system_table SET id = -id; +DELETE FROM old_system_table WHERE othercol = 'somedata'; +TRUNCATE old_system_table; +ALTER TABLE old_system_table DROP CONSTRAINT new_system_table_pkey; +ALTER TABLE old_system_table DROP COLUMN othercol; +DROP TABLE old_system_table; +-- set logged +CREATE UNLOGGED TABLE unlogged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast +-- check relpersistence of an unlogged table +SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' +UNION ALL +SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1' +UNION ALL +SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1' +ORDER BY relname; + relname | relkind | relpersistence +-----------------------+---------+---------------- + unlogged1 | r | u + unlogged1 toast index | i | u + unlogged1 toast table | t | u + unlogged1_f1_seq | S | u + unlogged1_pkey | i | u +(5 rows) + +CREATE UNLOGGED TABLE unlogged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged1); -- foreign key +CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unlogged3); -- self-referencing foreign key +ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key +ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists +ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1" +ALTER TABLE unlogged1 SET LOGGED; +-- check relpersistence of an unlogged table after changing to permanent +SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' +UNION ALL +SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1' +UNION ALL +SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^unlogged1' +ORDER BY relname; + relname | relkind | relpersistence +-----------------------+---------+---------------- + unlogged1 | r | p + unlogged1 toast index | i | p + unlogged1 toast table | t | p + unlogged1_f1_seq | S | p + unlogged1_pkey | i | p +(5 rows) + +ALTER TABLE unlogged1 SET LOGGED; -- silently do nothing +DROP TABLE unlogged3; +DROP TABLE unlogged2; +DROP TABLE unlogged1; +-- set unlogged +CREATE TABLE logged1(f1 SERIAL PRIMARY KEY, f2 TEXT); -- has sequence, toast +-- check relpersistence of a permanent table +SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1' +UNION ALL +SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class 
r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1' +UNION ALL +SELECT r.relname ||' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1' +ORDER BY relname; + relname | relkind | relpersistence +---------------------+---------+---------------- + logged1 | r | p + logged1 toast index | i | p + logged1 toast table | t | p + logged1_f1_seq | S | p + logged1_pkey | i | p +(5 rows) + +CREATE TABLE logged2(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged1); -- foreign key +CREATE TABLE logged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES logged3); -- self-referencing foreign key +ALTER TABLE logged1 SET UNLOGGED; -- fails because a foreign key from a permanent table exists +ERROR: could not change table "logged1" to unlogged because it references logged table "logged2" +ALTER TABLE logged3 SET UNLOGGED; -- skip self-referencing foreign key +ALTER TABLE logged2 SET UNLOGGED; +ALTER TABLE logged1 SET UNLOGGED; +-- check relpersistence of a permanent table after changing to unlogged +SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^logged1' +UNION ALL +SELECT r.relname || ' toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^logged1' +UNION ALL +SELECT r.relname || ' toast index', ri.relkind, ri.relpersistence FROM pg_class r join pg_class t ON t.oid = r.reltoastrelid JOIN pg_index i ON i.indrelid = t.oid JOIN pg_class ri ON ri.oid = i.indexrelid WHERE r.relname ~ '^logged1' +ORDER BY relname; + relname | relkind | relpersistence +---------------------+---------+---------------- + logged1 | r | u + logged1 toast index | i | u + logged1 toast table | t | u + logged1_f1_seq | S | u + logged1_pkey | i | u +(5 rows) + +ALTER TABLE logged1 SET UNLOGGED; -- silently do nothing +DROP TABLE logged3; +DROP TABLE logged2; +DROP TABLE logged1; +-- test ADD COLUMN IF NOT EXISTS +CREATE TABLE test_add_column(c1 integer); +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + +ALTER TABLE test_add_column + ADD COLUMN c2 integer; +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + +ALTER TABLE test_add_column + ADD COLUMN c2 integer; -- fail because c2 already exists +ERROR: column "c2" of relation "test_add_column" already exists +ALTER TABLE ONLY test_add_column + ADD COLUMN c2 integer; -- fail because c2 already exists +ERROR: column "c2" of relation "test_add_column" already exists +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists +NOTICE: column "c2" of relation "test_add_column" already exists, skipping +ALTER TABLE ONLY test_add_column + ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists +NOTICE: column "c2" of relation "test_add_column" already exists, skipping +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default 
+--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + +ALTER TABLE test_add_column + ADD COLUMN c2 integer, -- fail because c2 already exists + ADD COLUMN c3 integer primary key; +ERROR: column "c2" of relation "test_add_column" already exists +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists + ADD COLUMN c3 integer primary key; +NOTICE: column "c2" of relation "test_add_column" already exists, skipping +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + c3 | integer | | not null | +Indexes: + "test_add_column_pkey" PRIMARY KEY, btree (c3) + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists + ADD COLUMN IF NOT EXISTS c3 integer primary key; -- skipping because c3 already exists +NOTICE: column "c2" of relation "test_add_column" already exists, skipping +NOTICE: column "c3" of relation "test_add_column" already exists, skipping +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + c3 | integer | | not null | +Indexes: + "test_add_column_pkey" PRIMARY KEY, btree (c3) + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists + ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists + ADD COLUMN c4 integer REFERENCES test_add_column; +NOTICE: column "c2" of relation "test_add_column" already exists, skipping +NOTICE: column "c3" of relation "test_add_column" already exists, skipping +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + c3 | integer | | not null | + c4 | integer | | | +Indexes: + "test_add_column_pkey" PRIMARY KEY, btree (c3) +Foreign-key constraints: + "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) +Referenced by: + TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c4 integer REFERENCES test_add_column; +NOTICE: column "c4" of relation "test_add_column" already exists, skipping +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + c3 | integer | | not null | + c4 | integer | | | +Indexes: + "test_add_column_pkey" PRIMARY KEY, btree (c3) +Foreign-key constraints: + "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) +Referenced by: + TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 8); +\d test_add_column + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------------------------------------------- + c1 | integer | | 
| + c2 | integer | | | + c3 | integer | | not null | + c4 | integer | | | + c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass) +Indexes: + "test_add_column_pkey" PRIMARY KEY, btree (c3) +Check constraints: + "test_add_column_c5_check" CHECK (c5 > 8) +Foreign-key constraints: + "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) +Referenced by: + TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) + +ALTER TABLE test_add_column + ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 10); +NOTICE: column "c5" of relation "test_add_column" already exists, skipping +\d test_add_column* + Table "public.test_add_column" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------------------------------------------- + c1 | integer | | | + c2 | integer | | | + c3 | integer | | not null | + c4 | integer | | | + c5 | integer | | not null | nextval('test_add_column_c5_seq'::regclass) +Indexes: + "test_add_column_pkey" PRIMARY KEY, btree (c3) +Check constraints: + "test_add_column_c5_check" CHECK (c5 > 8) +Foreign-key constraints: + "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) +Referenced by: + TABLE "test_add_column" CONSTRAINT "test_add_column_c4_fkey" FOREIGN KEY (c4) REFERENCES test_add_column(c3) + + Sequence "public.test_add_column_c5_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Owned by: public.test_add_column.c5 + + Index "public.test_add_column_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + c3 | integer | yes | c3 +primary key, btree, for table "public.test_add_column" + +DROP TABLE test_add_column; +\d test_add_column* +-- assorted cases with multiple ALTER TABLE steps +CREATE TABLE ataddindex(f1 INT); +INSERT INTO ataddindex VALUES (42), (43); +CREATE UNIQUE INDEX ataddindexi0 ON ataddindex(f1); +ALTER TABLE ataddindex + ADD PRIMARY KEY USING INDEX ataddindexi0, + ALTER f1 TYPE BIGINT; +\d ataddindex + Table "public.ataddindex" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+--------- + f1 | bigint | | not null | +Indexes: + "ataddindexi0" PRIMARY KEY, btree (f1) + +DROP TABLE ataddindex; +CREATE TABLE ataddindex(f1 VARCHAR(10)); +INSERT INTO ataddindex(f1) VALUES ('foo'), ('a'); +ALTER TABLE ataddindex + ALTER f1 SET DATA TYPE TEXT, + ADD EXCLUDE ((f1 LIKE 'a') WITH =); +\d ataddindex + Table "public.ataddindex" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + f1 | text | | | +Indexes: + "ataddindex_expr_excl" EXCLUDE USING btree ((f1 ~~ 'a'::text) WITH =) + +DROP TABLE ataddindex; +CREATE TABLE ataddindex(id int, ref_id int); +ALTER TABLE ataddindex + ADD PRIMARY KEY (id), + ADD FOREIGN KEY (ref_id) REFERENCES ataddindex; +\d ataddindex + Table "public.ataddindex" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | not null | + ref_id | integer | | | +Indexes: + "ataddindex_pkey" PRIMARY KEY, btree (id) +Foreign-key constraints: + "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) +Referenced by: + TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) + +DROP TABLE ataddindex; +CREATE TABLE ataddindex(id int, ref_id int); 
+ALTER TABLE ataddindex + ADD UNIQUE (id), + ADD FOREIGN KEY (ref_id) REFERENCES ataddindex (id); +\d ataddindex + Table "public.ataddindex" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + ref_id | integer | | | +Indexes: + "ataddindex_id_key" UNIQUE CONSTRAINT, btree (id) +Foreign-key constraints: + "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) +Referenced by: + TABLE "ataddindex" CONSTRAINT "ataddindex_ref_id_fkey" FOREIGN KEY (ref_id) REFERENCES ataddindex(id) + +DROP TABLE ataddindex; +-- unsupported constraint types for partitioned tables +CREATE TABLE partitioned ( + a int, + b int +) PARTITION BY RANGE (a, (a+b+1)); +ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&); +ERROR: exclusion constraints are not supported on partitioned tables +LINE 1: ALTER TABLE partitioned ADD EXCLUDE USING gist (a WITH &&); + ^ +-- cannot drop column that is part of the partition key +ALTER TABLE partitioned DROP COLUMN a; +ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned" +ALTER TABLE partitioned ALTER COLUMN a TYPE char(5); +ERROR: cannot alter column "a" because it is part of the partition key of relation "partitioned" +ALTER TABLE partitioned DROP COLUMN b; +ERROR: cannot drop column "b" because it is part of the partition key of relation "partitioned" +ALTER TABLE partitioned ALTER COLUMN b TYPE char(5); +ERROR: cannot alter column "b" because it is part of the partition key of relation "partitioned" +-- specifying storage parameters for partitioned tables is not supported +ALTER TABLE partitioned SET (fillfactor=100); +ERROR: cannot specify storage parameters for a partitioned table +HINT: Specify storage parameters for its leaf partitions instead. +-- partitioned table cannot participate in regular inheritance +CREATE TABLE nonpartitioned ( + a int, + b int +); +ALTER TABLE partitioned INHERIT nonpartitioned; +ERROR: cannot change inheritance of partitioned table +ALTER TABLE nonpartitioned INHERIT partitioned; +ERROR: cannot inherit from partitioned table "partitioned" +-- cannot add NO INHERIT constraint to partitioned tables +ALTER TABLE partitioned ADD CONSTRAINT chk_a CHECK (a > 0) NO INHERIT; +ERROR: cannot add NO INHERIT constraint to partitioned table "partitioned" +DROP TABLE partitioned, nonpartitioned; +-- +-- ATTACH PARTITION +-- +-- check that target table is partitioned +CREATE TABLE unparted ( + a int +); +CREATE TABLE fail_part (like unparted); +ALTER TABLE unparted ATTACH PARTITION fail_part FOR VALUES IN ('a'); +ERROR: table "unparted" is not partitioned +DROP TABLE unparted, fail_part; +-- check that partition bound is compatible +CREATE TABLE list_parted ( + a int NOT NULL, + b char(2) COLLATE "C", + CONSTRAINT check_a CHECK (a > 0) +) PARTITION BY LIST (a); +CREATE TABLE fail_part (LIKE list_parted); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) TO (10); +ERROR: invalid bound specification for a list partition +LINE 1: ...list_parted ATTACH PARTITION fail_part FOR VALUES FROM (1) T... 
+ ^ +DROP TABLE fail_part; +-- check that the table being attached exists +ALTER TABLE list_parted ATTACH PARTITION nonexistent FOR VALUES IN (1); +ERROR: relation "nonexistent" does not exist +-- check ownership of the source table +CREATE ROLE regress_test_me; +CREATE ROLE regress_test_not_me; +CREATE TABLE not_owned_by_me (LIKE list_parted); +ALTER TABLE not_owned_by_me OWNER TO regress_test_not_me; +SET SESSION AUTHORIZATION regress_test_me; +CREATE TABLE owned_by_me ( + a int +) PARTITION BY LIST (a); +ALTER TABLE owned_by_me ATTACH PARTITION not_owned_by_me FOR VALUES IN (1); +ERROR: must be owner of table not_owned_by_me +RESET SESSION AUTHORIZATION; +DROP TABLE owned_by_me, not_owned_by_me; +DROP ROLE regress_test_not_me; +DROP ROLE regress_test_me; +-- check that the table being attached is not part of regular inheritance +CREATE TABLE parent (LIKE list_parted); +CREATE TABLE child () INHERITS (parent); +ALTER TABLE list_parted ATTACH PARTITION child FOR VALUES IN (1); +ERROR: cannot attach inheritance child as partition +ALTER TABLE list_parted ATTACH PARTITION parent FOR VALUES IN (1); +ERROR: cannot attach inheritance parent as partition +DROP TABLE parent CASCADE; +NOTICE: drop cascades to table child +-- check any TEMP-ness +CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); +CREATE TABLE perm_part (a int); +ALTER TABLE temp_parted ATTACH PARTITION perm_part FOR VALUES IN (1); +ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted" +DROP TABLE temp_parted, perm_part; +-- check that the table being attached is not a typed table +CREATE TYPE mytype AS (a int); +CREATE TABLE fail_part OF mytype; +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: cannot attach a typed table as partition +DROP TYPE mytype CASCADE; +NOTICE: drop cascades to table fail_part +-- check that the table being attached has only columns present in the parent +CREATE TABLE fail_part (like list_parted, c int); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: table "fail_part" contains column "c" not found in parent "list_parted" +DETAIL: The new partition may contain only the columns present in parent. 
+DROP TABLE fail_part; +-- check that the table being attached has every column of the parent +CREATE TABLE fail_part (a int NOT NULL); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: child table is missing column "b" +DROP TABLE fail_part; +-- check that columns match in type, collation and NOT NULL status +CREATE TABLE fail_part ( + b char(3), + a int NOT NULL +); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: child table "fail_part" has different type for column "b" +ALTER TABLE fail_part ALTER b TYPE char (2) COLLATE "POSIX"; +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: child table "fail_part" has different collation for column "b" +DROP TABLE fail_part; +-- check that the table being attached has all constraints of the parent +CREATE TABLE fail_part ( + b char(2) COLLATE "C", + a int NOT NULL +); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: child table is missing constraint "check_a" +-- check that the constraint matches in definition with parent's constraint +ALTER TABLE fail_part ADD CONSTRAINT check_a CHECK (a >= 0); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: child table "fail_part" has different definition for check constraint "check_a" +DROP TABLE fail_part; +-- check the attributes and constraints after partition is attached +CREATE TABLE part_1 ( + a int NOT NULL, + b char(2) COLLATE "C", + CONSTRAINT check_a CHECK (a > 0) +); +ALTER TABLE list_parted ATTACH PARTITION part_1 FOR VALUES IN (1); +-- attislocal and conislocal are always false for merged attributes and constraints respectively. +SELECT attislocal, attinhcount FROM pg_attribute WHERE attrelid = 'part_1'::regclass AND attnum > 0; + attislocal | attinhcount +------------+------------- + f | 1 + f | 1 +(2 rows) + +SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_1'::regclass AND conname = 'check_a'; + conislocal | coninhcount +------------+------------- + f | 1 +(1 row) + +-- check that the new partition won't overlap with an existing partition +CREATE TABLE fail_part (LIKE part_1 INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); +ERROR: partition "fail_part" would overlap partition "part_1" +LINE 1: ...LE list_parted ATTACH PARTITION fail_part FOR VALUES IN (1); + ^ +DROP TABLE fail_part; +-- check that an existing table can be attached as a default partition +CREATE TABLE def_part (LIKE list_parted INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION def_part DEFAULT; +-- check attaching default partition fails if a default partition already +-- exists +CREATE TABLE fail_def_part (LIKE part_1 INCLUDING CONSTRAINTS); +ALTER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; +ERROR: partition "fail_def_part" conflicts with existing default partition "def_part" +LINE 1: ...ER TABLE list_parted ATTACH PARTITION fail_def_part DEFAULT; + ^ +-- check validation when attaching list partitions +CREATE TABLE list_parted2 ( + a int, + b char +) PARTITION BY LIST (a); +-- check that violating rows are correctly reported +CREATE TABLE part_2 (LIKE list_parted2); +INSERT INTO part_2 VALUES (3, 'a'); +ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); +ERROR: partition constraint of relation "part_2" is violated by some row +-- should be ok after deleting the bad row +DELETE FROM part_2; +ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); +-- 
check partition cannot be attached if default has some row for its values +CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; +INSERT INTO list_parted2_def VALUES (11, 'z'); +CREATE TABLE part_3 (LIKE list_parted2); +ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); +ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row +-- should be ok after deleting the bad row +DELETE FROM list_parted2_def WHERE a = 11; +ALTER TABLE list_parted2 ATTACH PARTITION part_3 FOR VALUES IN (11); +-- adding constraints that describe the desired partition constraint +-- (or more restrictive) will help skip the validation scan +CREATE TABLE part_3_4 ( + LIKE list_parted2, + CONSTRAINT check_a CHECK (a IN (3)) +); +-- however, if a list partition does not accept nulls, there should be +-- an explicit NOT NULL constraint on the partition key column for the +-- validation scan to be skipped; +ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4); +-- adding a NOT NULL constraint will cause the scan to be skipped +ALTER TABLE list_parted2 DETACH PARTITION part_3_4; +ALTER TABLE part_3_4 ALTER a SET NOT NULL; +ALTER TABLE list_parted2 ATTACH PARTITION part_3_4 FOR VALUES IN (3, 4); +-- check that the default partition scan is skipped +ALTER TABLE list_parted2_def ADD CONSTRAINT check_a CHECK (a IN (5, 6)); +CREATE TABLE part_55_66 PARTITION OF list_parted2 FOR VALUES IN (55, 66); +-- check validation when attaching range partitions +CREATE TABLE range_parted ( + a int, + b int +) PARTITION BY RANGE (a, b); +-- check that violating rows are correctly reported +CREATE TABLE part1 ( + a int NOT NULL CHECK (a = 1), + b int NOT NULL CHECK (b >= 1 AND b <= 10) +); +INSERT INTO part1 VALUES (1, 10); +-- Remember the TO bound is exclusive +ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10); +ERROR: partition constraint of relation "part1" is violated by some row +-- should be ok after deleting the bad row +DELETE FROM part1; +ALTER TABLE range_parted ATTACH PARTITION part1 FOR VALUES FROM (1, 1) TO (1, 10); +-- adding constraints that describe the desired partition constraint +-- (or more restrictive) will help skip the validation scan +CREATE TABLE part2 ( + a int NOT NULL CHECK (a = 1), + b int NOT NULL CHECK (b >= 10 AND b < 18) +); +ALTER TABLE range_parted ATTACH PARTITION part2 FOR VALUES FROM (1, 10) TO (1, 20); +-- Create default partition +CREATE TABLE partr_def1 PARTITION OF range_parted DEFAULT; +-- Only one default partition is allowed, hence the following should give an error +CREATE TABLE partr_def2 (LIKE part1 INCLUDING CONSTRAINTS); +ALTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; +ERROR: partition "partr_def2" conflicts with existing default partition "partr_def1" +LINE 1: ...LTER TABLE range_parted ATTACH PARTITION partr_def2 DEFAULT; + ^ +-- A row in the default partition that falls within the new partition's bounds +-- blocks the attach, hence the following should give an error +INSERT INTO partr_def1 VALUES (2, 10); +CREATE TABLE part3 (LIKE range_parted); +ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (2, 10) TO (2, 20); +ERROR: updated partition constraint for default partition "partr_def1" would be violated by some row +-- Attaching should succeed once no row in the default partition falls within the new partition's bounds +ALTER TABLE range_parted ATTACH partition part3 FOR VALUES FROM (3, 10) TO (3, 20); +-- check that leaf partitions are scanned when attaching a partitioned +-- table +CREATE TABLE part_5 ( + LIKE list_parted2 +)
PARTITION BY LIST (b); +-- check that violating rows are correctly reported +CREATE TABLE part_5_a PARTITION OF part_5 FOR VALUES IN ('a'); +INSERT INTO part_5_a (a, b) VALUES (6, 'a'); +ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5); +ERROR: partition constraint of relation "part_5_a" is violated by some row +-- delete the faulting row and also add a constraint to skip the scan +DELETE FROM part_5_a WHERE a NOT IN (3); +ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 5); +ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5); +ALTER TABLE list_parted2 DETACH PARTITION part_5; +ALTER TABLE part_5 DROP CONSTRAINT check_a; +-- scan should again be skipped, even though NOT NULL is now a column property +ALTER TABLE part_5 ADD CONSTRAINT check_a CHECK (a IN (5)), ALTER a SET NOT NULL; +ALTER TABLE list_parted2 ATTACH PARTITION part_5 FOR VALUES IN (5); +-- Check the case where attnos of the partitioning columns in the table being +-- attached differ from the parent's. It should not affect the constraint- +-- checking logic that allows the scan to be skipped. +CREATE TABLE part_6 ( + c int, + LIKE list_parted2, + CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 6) +); +ALTER TABLE part_6 DROP c; +ALTER TABLE list_parted2 ATTACH PARTITION part_6 FOR VALUES IN (6); +-- Similar to above, but the table being attached is a partitioned table +-- whose partition has yet another set of attnos for the root partitioning +-- columns. +CREATE TABLE part_7 ( + LIKE list_parted2, + CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7) +) PARTITION BY LIST (b); +CREATE TABLE part_7_a_null ( + c int, + d int, + e int, + LIKE list_parted2, -- 'a' will have attnum = 4 + CONSTRAINT check_b CHECK (b IS NULL OR b = 'a'), + CONSTRAINT check_a CHECK (a IS NOT NULL AND a = 7) +); +ALTER TABLE part_7_a_null DROP c, DROP d, DROP e; +ALTER TABLE part_7 ATTACH PARTITION part_7_a_null FOR VALUES IN ('a', null); +ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); +-- Same example, but check this time that the constraint correctly detects +-- violating rows +ALTER TABLE list_parted2 DETACH PARTITION part_7; +ALTER TABLE part_7 DROP CONSTRAINT check_a; -- thus, the scan won't be skipped +INSERT INTO part_7 (a, b) VALUES (8, null), (9, 'a'); +SELECT tableoid::regclass, a, b FROM part_7 order by a; + tableoid | a | b +---------------+---+--- + part_7_a_null | 8 | + part_7_a_null | 9 | a +(2 rows) + +ALTER TABLE list_parted2 ATTACH PARTITION part_7 FOR VALUES IN (7); +ERROR: partition constraint of relation "part_7_a_null" is violated by some row +-- check that leaf partitions of default partition are scanned when +-- attaching a partitioned table.
+ALTER TABLE part_5 DROP CONSTRAINT check_a; +CREATE TABLE part5_def PARTITION OF part_5 DEFAULT PARTITION BY LIST(a); +CREATE TABLE part5_def_p1 PARTITION OF part5_def FOR VALUES IN (5); +INSERT INTO part5_def_p1 VALUES (5, 'y'); +CREATE TABLE part5_p1 (LIKE part_5); +ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); +ERROR: updated partition constraint for default partition "part5_def_p1" would be violated by some row +-- should be ok after deleting the bad row +DELETE FROM part5_def_p1 WHERE b = 'y'; +ALTER TABLE part_5 ATTACH PARTITION part5_p1 FOR VALUES IN ('y'); +-- check that the table being attached is not already a partition +ALTER TABLE list_parted2 ATTACH PARTITION part_2 FOR VALUES IN (2); +ERROR: "part_2" is already a partition +-- check that circular inheritance is not allowed +ALTER TABLE part_5 ATTACH PARTITION list_parted2 FOR VALUES IN ('b'); +ERROR: circular inheritance not allowed +DETAIL: "part_5" is already a child of "list_parted2". +ALTER TABLE list_parted2 ATTACH PARTITION list_parted2 FOR VALUES IN (0); +ERROR: circular inheritance not allowed +DETAIL: "list_parted2" is already a child of "list_parted2". +-- If a partitioned table being created or an existing table being attached +-- as a partition does not have a constraint that would allow validation scan +-- to be skipped, but an individual partition does, then the partition's +-- validation scan is skipped. +CREATE TABLE quuux (a int, b text) PARTITION BY LIST (a); +CREATE TABLE quuux_default PARTITION OF quuux DEFAULT PARTITION BY LIST (b); +CREATE TABLE quuux_default1 PARTITION OF quuux_default ( + CONSTRAINT check_1 CHECK (a IS NOT NULL AND a = 1) +) FOR VALUES IN ('b'); +CREATE TABLE quuux1 (a int, b text); +ALTER TABLE quuux ATTACH PARTITION quuux1 FOR VALUES IN (1); -- validate! +CREATE TABLE quuux2 (a int, b text); +ALTER TABLE quuux ATTACH PARTITION quuux2 FOR VALUES IN (2); -- skip validation +DROP TABLE quuux1, quuux2; +-- should validate for quuux1, but not for quuux2 +CREATE TABLE quuux1 PARTITION OF quuux FOR VALUES IN (1); +CREATE TABLE quuux2 PARTITION OF quuux FOR VALUES IN (2); +DROP TABLE quuux; +-- check validation when attaching hash partitions +-- Use hand-rolled hash functions and operator class to get predictable result +-- on different machines. part_test_int4_ops is defined in insert.sql. +-- check that the new partition won't overlap with an existing partition +CREATE TABLE hash_parted ( + a int, + b int +) PARTITION BY HASH (a part_test_int4_ops); +CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 4, REMAINDER 0); +CREATE TABLE fail_part (LIKE hpart_1); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 4); +ERROR: partition "fail_part" would overlap partition "hpart_1" +LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU... + ^ +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 0); +ERROR: partition "fail_part" would overlap partition "hpart_1" +LINE 1: ...hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODU... 
+ ^ +DROP TABLE fail_part; +-- check validation when attaching hash partitions +-- check that violating rows are correctly reported +CREATE TABLE hpart_2 (LIKE hash_parted); +INSERT INTO hpart_2 VALUES (3, 0); +ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); +ERROR: partition constraint of relation "hpart_2" is violated by some row +-- should be ok after deleting the bad row +DELETE FROM hpart_2; +ALTER TABLE hash_parted ATTACH PARTITION hpart_2 FOR VALUES WITH (MODULUS 4, REMAINDER 1); +-- check that leaf partitions are scanned when attaching a partitioned +-- table +CREATE TABLE hpart_5 ( + LIKE hash_parted +) PARTITION BY LIST (b); +-- check that violating rows are correctly reported +CREATE TABLE hpart_5_a PARTITION OF hpart_5 FOR VALUES IN ('1', '2', '3'); +INSERT INTO hpart_5_a (a, b) VALUES (7, 1); +ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +ERROR: partition constraint of relation "hpart_5_a" is violated by some row +-- should be ok after deleting the bad row +DELETE FROM hpart_5_a; +ALTER TABLE hash_parted ATTACH PARTITION hpart_5 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +-- check that the table being attached has a valid modulus and remainder value +CREATE TABLE fail_part(LIKE hash_parted); +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 0, REMAINDER 1); +ERROR: modulus for hash partition must be an integer value greater than zero +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 8, REMAINDER 8); +ERROR: remainder for hash partition must be less than modulus +ALTER TABLE hash_parted ATTACH PARTITION fail_part FOR VALUES WITH (MODULUS 3, REMAINDER 2); +ERROR: every hash partition modulus must be a factor of the next larger modulus +DETAIL: The new modulus 3 is not a factor of 4, the modulus of existing partition "hpart_1".
+DROP TABLE fail_part; +-- +-- DETACH PARTITION +-- +-- check that the table is partitioned at all +CREATE TABLE regular_table (a int); +ALTER TABLE regular_table DETACH PARTITION any_name; +ERROR: table "regular_table" is not partitioned +DROP TABLE regular_table; +-- check that the partition being detached exists at all +ALTER TABLE list_parted2 DETACH PARTITION part_4; +ERROR: relation "part_4" does not exist +ALTER TABLE hash_parted DETACH PARTITION hpart_4; +ERROR: relation "hpart_4" does not exist +-- check that the partition being detached is actually a partition of the parent +CREATE TABLE not_a_part (a int); +ALTER TABLE list_parted2 DETACH PARTITION not_a_part; +ERROR: relation "not_a_part" is not a partition of relation "list_parted2" +ALTER TABLE list_parted2 DETACH PARTITION part_1; +ERROR: relation "part_1" is not a partition of relation "list_parted2" +ALTER TABLE hash_parted DETACH PARTITION not_a_part; +ERROR: relation "not_a_part" is not a partition of relation "hash_parted" +DROP TABLE not_a_part; +-- check that, after being detached, attinhcount/coninhcount is dropped to 0 and +-- attislocal/conislocal is set to true +ALTER TABLE list_parted2 DETACH PARTITION part_3_4; +SELECT attinhcount, attislocal FROM pg_attribute WHERE attrelid = 'part_3_4'::regclass AND attnum > 0; + attinhcount | attislocal +-------------+------------ + 0 | t + 0 | t +(2 rows) + +SELECT coninhcount, conislocal FROM pg_constraint WHERE conrelid = 'part_3_4'::regclass AND conname = 'check_a'; + coninhcount | conislocal +-------------+------------ + 0 | t +(1 row) + +DROP TABLE part_3_4; +-- check that a detached partition is not dropped on dropping a partitioned table +CREATE TABLE range_parted2 ( + a int +) PARTITION BY RANGE(a); +CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100); +ALTER TABLE range_parted2 DETACH PARTITION part_rp; +DROP TABLE range_parted2; +SELECT * from part_rp; + a +--- +(0 rows) + +DROP TABLE part_rp; +-- concurrent detach +CREATE TABLE range_parted2 ( + a int +) PARTITION BY RANGE(a); +CREATE TABLE part_rp PARTITION OF range_parted2 FOR VALUES FROM (0) to (100); +BEGIN; +-- doesn't work in a transaction block +ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY; +ERROR: ALTER TABLE ...
DETACH CONCURRENTLY cannot run inside a transaction block +COMMIT; +CREATE TABLE part_rpd PARTITION OF range_parted2 DEFAULT; +-- doesn't work if there's a default partition +ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY; +ERROR: cannot detach partitions concurrently when a default partition exists +-- doesn't work for the default partition +ALTER TABLE range_parted2 DETACH PARTITION part_rpd CONCURRENTLY; +ERROR: cannot detach partitions concurrently when a default partition exists +DROP TABLE part_rpd; +-- works fine +ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY; +\d+ range_parted2 + Partitioned table "public.range_parted2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition key: RANGE (a) +Number of partitions: 0 + +-- constraint should be created +\d part_rp + Table "public.part_rp" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Check constraints: + "part_rp_a_check" CHECK (a IS NOT NULL AND a >= 0 AND a < 100) + +CREATE TABLE part_rp100 PARTITION OF range_parted2 (CHECK (a>=123 AND a<133 AND a IS NOT NULL)) FOR VALUES FROM (100) to (200); +ALTER TABLE range_parted2 DETACH PARTITION part_rp100 CONCURRENTLY; +-- redundant constraint should not be created +\d part_rp100 + Table "public.part_rp100" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Check constraints: + "part_rp100_a_check" CHECK (a >= 123 AND a < 133 AND a IS NOT NULL) + +DROP TABLE range_parted2; +-- Check ALTER TABLE commands for partitioned tables and partitions +-- cannot add/drop column to/from *only* the parent +ALTER TABLE ONLY list_parted2 ADD COLUMN c int; +ERROR: column must be added to child tables too +ALTER TABLE ONLY list_parted2 DROP COLUMN b; +ERROR: cannot drop column from only the partitioned table when partitions exist +HINT: Do not specify the ONLY keyword. +-- cannot add a column to partition or drop an inherited one +ALTER TABLE part_2 ADD COLUMN c text; +ERROR: cannot add column to a partition +ALTER TABLE part_2 DROP COLUMN b; +ERROR: cannot drop inherited column "b" +-- Nor rename, alter type +ALTER TABLE part_2 RENAME COLUMN b to c; +ERROR: cannot rename inherited column "b" +ALTER TABLE part_2 ALTER COLUMN b TYPE text; +ERROR: cannot alter inherited column "b" +-- cannot add/drop NOT NULL or check constraints to *only* the parent, when +-- partitions exist +ALTER TABLE ONLY list_parted2 ALTER b SET NOT NULL; +ERROR: constraint must be added to child tables too +DETAIL: Column "b" of relation "part_2" is not already NOT NULL. +HINT: Do not specify the ONLY keyword. +ALTER TABLE ONLY list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz'); +ERROR: constraint must be added to child tables too +ALTER TABLE list_parted2 ALTER b SET NOT NULL; +ALTER TABLE ONLY list_parted2 ALTER b DROP NOT NULL; +ERROR: cannot remove constraint from only the partitioned table when partitions exist +HINT: Do not specify the ONLY keyword. +ALTER TABLE list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz'); +ALTER TABLE ONLY list_parted2 DROP CONSTRAINT check_b; +ERROR: cannot remove constraint from only the partitioned table when partitions exist +HINT: Do not specify the ONLY keyword. 
+-- It's alright though, if no partitions are yet created +CREATE TABLE parted_no_parts (a int) PARTITION BY LIST (a); +ALTER TABLE ONLY parted_no_parts ALTER a SET NOT NULL; +ALTER TABLE ONLY parted_no_parts ADD CONSTRAINT check_a CHECK (a > 0); +ALTER TABLE ONLY parted_no_parts ALTER a DROP NOT NULL; +ALTER TABLE ONLY parted_no_parts DROP CONSTRAINT check_a; +DROP TABLE parted_no_parts; +-- cannot drop inherited NOT NULL or check constraints from partition +ALTER TABLE list_parted2 ALTER b SET NOT NULL, ADD CONSTRAINT check_a2 CHECK (a > 0); +ALTER TABLE part_2 ALTER b DROP NOT NULL; +ERROR: column "b" is marked NOT NULL in parent table +ALTER TABLE part_2 DROP CONSTRAINT check_a2; +ERROR: cannot drop inherited constraint "check_a2" of relation "part_2" +-- Doesn't make sense to add NO INHERIT constraints on partitioned tables +ALTER TABLE list_parted2 add constraint check_b2 check (b <> 'zz') NO INHERIT; +ERROR: cannot add NO INHERIT constraint to partitioned table "list_parted2" +-- check that a partition cannot participate in regular inheritance +CREATE TABLE inh_test () INHERITS (part_2); +ERROR: cannot inherit from partition "part_2" +CREATE TABLE inh_test (LIKE part_2); +ALTER TABLE inh_test INHERIT part_2; +ERROR: cannot inherit from a partition +ALTER TABLE part_2 INHERIT inh_test; +ERROR: cannot change inheritance of a partition +-- cannot drop or alter type of partition key columns of lower level +-- partitioned tables; for example, part_5, which is list_parted2's +-- partition, is partitioned on b; +ALTER TABLE list_parted2 DROP COLUMN b; +ERROR: cannot drop column "b" because it is part of the partition key of relation "part_5" +ALTER TABLE list_parted2 ALTER COLUMN b TYPE text; +ERROR: cannot alter column "b" because it is part of the partition key of relation "part_5" +-- dropping non-partition key columns should be allowed on the parent table. 
+ALTER TABLE list_parted DROP COLUMN b; +SELECT * FROM list_parted; + a +--- +(0 rows) + +-- cleanup +DROP TABLE list_parted, list_parted2, range_parted; +DROP TABLE fail_def_part; +DROP TABLE hash_parted; +-- more tests for certain multi-level partitioning scenarios +create table p (a int, b int) partition by range (a, b); +create table p1 (b int, a int not null) partition by range (b); +create table p11 (like p1); +alter table p11 drop a; +alter table p11 add a int; +alter table p11 drop a; +alter table p11 add a int not null; +-- attnum for key attribute 'a' is different in p, p1, and p11 +select attrelid::regclass, attname, attnum +from pg_attribute +where attname = 'a' + and (attrelid = 'p'::regclass + or attrelid = 'p1'::regclass + or attrelid = 'p11'::regclass) +order by attrelid::regclass::text; + attrelid | attname | attnum +----------+---------+-------- + p | a | 1 + p1 | a | 2 + p11 | a | 4 +(3 rows) + +alter table p1 attach partition p11 for values from (2) to (5); +insert into p1 (a, b) values (2, 3); +-- check that partition validation scan correctly detects violating rows +alter table p attach partition p1 for values from (1, 2) to (1, 10); +ERROR: partition constraint of relation "p11" is violated by some row +-- cleanup +drop table p; +drop table p1; +-- validate constraint on partitioned tables should only scan leaf partitions +create table parted_validate_test (a int) partition by list (a); +create table parted_validate_test_1 partition of parted_validate_test for values in (0, 1); +alter table parted_validate_test add constraint parted_validate_test_chka check (a > 0) not valid; +alter table parted_validate_test validate constraint parted_validate_test_chka; +drop table parted_validate_test; +-- test alter column options +CREATE TABLE attmp(i integer); +INSERT INTO attmp VALUES (1); +ALTER TABLE attmp ALTER COLUMN i SET (n_distinct = 1, n_distinct_inherited = 2); +ALTER TABLE attmp ALTER COLUMN i RESET (n_distinct_inherited); +ANALYZE attmp; +DROP TABLE attmp; +DROP USER regress_alter_table_user1; +-- check that violating rows are correctly reported when attaching as the +-- default partition +create table defpart_attach_test (a int) partition by list (a); +create table defpart_attach_test1 partition of defpart_attach_test for values in (1); +create table defpart_attach_test_d (b int, a int); +alter table defpart_attach_test_d drop b; +insert into defpart_attach_test_d values (1), (2); +-- error because its constraint as the default partition would be violated +-- by the row containing 1 +alter table defpart_attach_test attach partition defpart_attach_test_d default; +ERROR: partition constraint of relation "defpart_attach_test_d" is violated by some row +delete from defpart_attach_test_d where a = 1; +alter table defpart_attach_test_d add check (a > 1); +-- should be attached successfully and without needing to be scanned +alter table defpart_attach_test attach partition defpart_attach_test_d default; +-- check that attaching a partition correctly reports any rows in the default +-- partition that should not be there for the new partition to be attached +-- successfully +create table defpart_attach_test_2 (like defpart_attach_test_d); +alter table defpart_attach_test attach partition defpart_attach_test_2 for values in (2); +ERROR: updated partition constraint for default partition "defpart_attach_test_d" would be violated by some row +drop table defpart_attach_test; +-- check combinations of temporary and permanent relations when attaching +-- partitions. 
+create table perm_part_parent (a int) partition by list (a); +create temp table temp_part_parent (a int) partition by list (a); +create table perm_part_child (a int); +create temp table temp_part_child (a int); +alter table temp_part_parent attach partition perm_part_child default; -- error +ERROR: cannot attach a permanent relation as partition of temporary relation "temp_part_parent" +alter table perm_part_parent attach partition temp_part_child default; -- error +ERROR: cannot attach a temporary relation as partition of permanent relation "perm_part_parent" +alter table temp_part_parent attach partition temp_part_child default; -- ok +drop table perm_part_parent cascade; +drop table temp_part_parent cascade; +-- check that attaching partitions to a table while it is being used is +-- prevented +create table tab_part_attach (a int) partition by list (a); +create or replace function func_part_attach() returns trigger + language plpgsql as $$ + begin + execute 'create table tab_part_attach_1 (a int)'; + execute 'alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)'; + return null; + end $$; +create trigger trig_part_attach before insert on tab_part_attach + for each statement execute procedure func_part_attach(); +insert into tab_part_attach values (1); +ERROR: cannot ALTER TABLE "tab_part_attach" because it is being used by active queries in this session +CONTEXT: SQL statement "alter table tab_part_attach attach partition tab_part_attach_1 for values in (1)" +PL/pgSQL function func_part_attach() line 4 at EXECUTE +drop table tab_part_attach; +drop function func_part_attach(); +-- test case where the partitioning operator is a SQL function whose +-- evaluation results in the table's relcache being rebuilt partway through +-- the execution of an ATTACH PARTITION command +create function at_test_sql_partop (int4, int4) returns int language sql +as $$ select case when $1 = $2 then 0 when $1 > $2 then 1 else -1 end; $$; +create operator class at_test_sql_partop for type int4 using btree as + operator 1 < (int4, int4), operator 2 <= (int4, int4), + operator 3 = (int4, int4), operator 4 >= (int4, int4), + operator 5 > (int4, int4), function 1 at_test_sql_partop(int4, int4); +create table at_test_sql_partop (a int) partition by range (a at_test_sql_partop); +create table at_test_sql_partop_1 (a int); +alter table at_test_sql_partop attach partition at_test_sql_partop_1 for values from (0) to (10); +drop table at_test_sql_partop; +drop operator class at_test_sql_partop using btree; +drop function at_test_sql_partop; +/* Test case for bug #16242 */ +-- We create a parent and child where the child has missing +-- non-null attribute values, and arrange to pass them through +-- tuple conversion from the child to the parent tupdesc +create table bar1 (a integer, b integer not null default 1) + partition by range (a); +create table bar2 (a integer); +insert into bar2 values (1); +alter table bar2 add column b integer not null default 1; +-- (at this point bar2 contains tuple with natts=1) +alter table bar1 attach partition bar2 default; +-- this works: +select * from bar1; + a | b +---+--- + 1 | 1 +(1 row) + +-- this exercises tuple conversion: +create function xtrig() + returns trigger language plpgsql +as $$ + declare + r record; + begin + for r in select * from old loop + raise info 'a=%, b=%', r.a, r.b; + end loop; + return NULL; + end; +$$; +create trigger xtrig + after update on bar1 + referencing old table as old + for each statement execute procedure xtrig(); 
+update bar1 set a = a + 1; +INFO: a=1, b=1 +/* End test case for bug #16242 */ +/* Test case for bug #17409 */ +create table attbl (p1 int constraint pk_attbl primary key); +create table atref (c1 int references attbl(p1)); +cluster attbl using pk_attbl; +alter table attbl alter column p1 set data type bigint; +alter table atref alter column c1 set data type bigint; +drop table attbl, atref; +create table attbl (p1 int constraint pk_attbl primary key); +alter table attbl replica identity using index pk_attbl; +create table atref (c1 int references attbl(p1)); +alter table attbl alter column p1 set data type bigint; +alter table atref alter column c1 set data type bigint; +drop table attbl, atref; +/* End test case for bug #17409 */ +-- Test that ALTER TABLE rewrite preserves a clustered index +-- for normal indexes and indexes on constraints. +create table alttype_cluster (a int); +alter table alttype_cluster add primary key (a); +create index alttype_cluster_ind on alttype_cluster (a); +alter table alttype_cluster cluster on alttype_cluster_ind; +-- Normal index remains clustered. +select indexrelid::regclass, indisclustered from pg_index + where indrelid = 'alttype_cluster'::regclass + order by indexrelid::regclass::text; + indexrelid | indisclustered +----------------------+---------------- + alttype_cluster_ind | t + alttype_cluster_pkey | f +(2 rows) + +alter table alttype_cluster alter a type bigint; +select indexrelid::regclass, indisclustered from pg_index + where indrelid = 'alttype_cluster'::regclass + order by indexrelid::regclass::text; + indexrelid | indisclustered +----------------------+---------------- + alttype_cluster_ind | t + alttype_cluster_pkey | f +(2 rows) + +-- Constraint index remains clustered. +alter table alttype_cluster cluster on alttype_cluster_pkey; +select indexrelid::regclass, indisclustered from pg_index + where indrelid = 'alttype_cluster'::regclass + order by indexrelid::regclass::text; + indexrelid | indisclustered +----------------------+---------------- + alttype_cluster_ind | f + alttype_cluster_pkey | t +(2 rows) + +alter table alttype_cluster alter a type int; +select indexrelid::regclass, indisclustered from pg_index + where indrelid = 'alttype_cluster'::regclass + order by indexrelid::regclass::text; + indexrelid | indisclustered +----------------------+---------------- + alttype_cluster_ind | f + alttype_cluster_pkey | t +(2 rows) + +drop table alttype_cluster; +-- +-- Check that attaching or detaching a partitioned partition correctly leads +-- to its partitions' constraint being updated to reflect the parent's +-- newly added/removed constraint +create table target_parted (a int, b int) partition by list (a); +create table attach_parted (a int, b int) partition by list (b); +create table attach_parted_part1 partition of attach_parted for values in (1); +-- insert a row directly into the leaf partition so that its partition +-- constraint is built and stored in the relcache +insert into attach_parted_part1 values (1, 1); +-- the following better invalidate the partition constraint of the leaf +-- partition too... +alter table target_parted attach partition attach_parted for values in (1); +-- ...such that the following insert fails +insert into attach_parted_part1 values (2, 1); +ERROR: new row for relation "attach_parted_part1" violates partition constraint +DETAIL: Failing row contains (2, 1). 
+-- ...and doesn't when the partition is detached along with its own partition +alter table target_parted detach partition attach_parted; +insert into attach_parted_part1 values (2, 1); +-- Test altering table having publication +create schema alter1; +create schema alter2; +create table alter1.t1 (a int); +set client_min_messages = 'ERROR'; +create publication pub1 for table alter1.t1, tables in schema alter2; +reset client_min_messages; +alter table alter1.t1 set schema alter2; +\d+ alter2.t1 + Table "alter2.t1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Publications: + "pub1" + +drop publication pub1; +drop schema alter1 cascade; +drop schema alter2 cascade; +NOTICE: drop cascades to table alter2.t1 diff --git a/src/test/regress/expected/amutils.out b/src/test/regress/expected/amutils.out new file mode 100644 index 0000000..7ab6113 --- /dev/null +++ b/src/test/regress/expected/amutils.out @@ -0,0 +1,254 @@ +-- +-- Test index AM property-reporting functions +-- +select prop, + pg_indexam_has_property(a.oid, prop) as "AM", + pg_index_has_property('onek_hundred'::regclass, prop) as "Index", + pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as "Column" + from pg_am a, + unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', + 'orderable', 'distance_orderable', 'returnable', + 'search_array', 'search_nulls', + 'clusterable', 'index_scan', 'bitmap_scan', + 'backward_scan', + 'can_order', 'can_unique', 'can_multi_col', + 'can_exclude', 'can_include', + 'bogus']::text[]) + with ordinality as u(prop,ord) + where a.amname = 'btree' + order by ord; + prop | AM | Index | Column +--------------------+----+-------+-------- + asc | | | t + desc | | | f + nulls_first | | | f + nulls_last | | | t + orderable | | | t + distance_orderable | | | f + returnable | | | t + search_array | | | t + search_nulls | | | t + clusterable | | t | + index_scan | | t | + bitmap_scan | | t | + backward_scan | | t | + can_order | t | | + can_unique | t | | + can_multi_col | t | | + can_exclude | t | | + can_include | t | | + bogus | | | +(19 rows) + +select prop, + pg_indexam_has_property(a.oid, prop) as "AM", + pg_index_has_property('gcircleind'::regclass, prop) as "Index", + pg_index_column_has_property('gcircleind'::regclass, 1, prop) as "Column" + from pg_am a, + unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', + 'orderable', 'distance_orderable', 'returnable', + 'search_array', 'search_nulls', + 'clusterable', 'index_scan', 'bitmap_scan', + 'backward_scan', + 'can_order', 'can_unique', 'can_multi_col', + 'can_exclude', 'can_include', + 'bogus']::text[]) + with ordinality as u(prop,ord) + where a.amname = 'gist' + order by ord; + prop | AM | Index | Column +--------------------+----+-------+-------- + asc | | | f + desc | | | f + nulls_first | | | f + nulls_last | | | f + orderable | | | f + distance_orderable | | | t + returnable | | | f + search_array | | | f + search_nulls | | | t + clusterable | | t | + index_scan | | t | + bitmap_scan | | t | + backward_scan | | f | + can_order | f | | + can_unique | f | | + can_multi_col | t | | + can_exclude | t | | + can_include | t | | + bogus | | | +(19 rows) + +select prop, + pg_index_column_has_property('onek_hundred'::regclass, 1, prop) as btree, + pg_index_column_has_property('hash_i4_index'::regclass, 1, prop) as hash, + pg_index_column_has_property('gcircleind'::regclass, 1, 
prop) as gist, + pg_index_column_has_property('sp_radix_ind'::regclass, 1, prop) as spgist_radix, + pg_index_column_has_property('sp_quad_ind'::regclass, 1, prop) as spgist_quad, + pg_index_column_has_property('botharrayidx'::regclass, 1, prop) as gin, + pg_index_column_has_property('brinidx'::regclass, 1, prop) as brin + from unnest(array['asc', 'desc', 'nulls_first', 'nulls_last', + 'orderable', 'distance_orderable', 'returnable', + 'search_array', 'search_nulls', + 'bogus']::text[]) + with ordinality as u(prop,ord) + order by ord; + prop | btree | hash | gist | spgist_radix | spgist_quad | gin | brin +--------------------+-------+------+------+--------------+-------------+-----+------ + asc | t | f | f | f | f | f | f + desc | f | f | f | f | f | f | f + nulls_first | f | f | f | f | f | f | f + nulls_last | t | f | f | f | f | f | f + orderable | t | f | f | f | f | f | f + distance_orderable | f | f | t | f | t | f | f + returnable | t | f | f | t | t | f | f + search_array | t | f | f | f | f | f | f + search_nulls | t | f | t | t | t | f | t + bogus | | | | | | | +(10 rows) + +select prop, + pg_index_has_property('onek_hundred'::regclass, prop) as btree, + pg_index_has_property('hash_i4_index'::regclass, prop) as hash, + pg_index_has_property('gcircleind'::regclass, prop) as gist, + pg_index_has_property('sp_radix_ind'::regclass, prop) as spgist, + pg_index_has_property('botharrayidx'::regclass, prop) as gin, + pg_index_has_property('brinidx'::regclass, prop) as brin + from unnest(array['clusterable', 'index_scan', 'bitmap_scan', + 'backward_scan', + 'bogus']::text[]) + with ordinality as u(prop,ord) + order by ord; + prop | btree | hash | gist | spgist | gin | brin +---------------+-------+------+------+--------+-----+------ + clusterable | t | f | t | f | f | f + index_scan | t | t | t | t | f | f + bitmap_scan | t | t | t | t | t | t + backward_scan | t | t | f | f | f | f + bogus | | | | | | +(5 rows) + +select amname, prop, pg_indexam_has_property(a.oid, prop) as p + from pg_am a, + unnest(array['can_order', 'can_unique', 'can_multi_col', + 'can_exclude', 'can_include', 'bogus']::text[]) + with ordinality as u(prop,ord) + where amtype = 'i' + order by amname, ord; + amname | prop | p +--------+---------------+--- + brin | can_order | f + brin | can_unique | f + brin | can_multi_col | t + brin | can_exclude | f + brin | can_include | f + brin | bogus | + btree | can_order | t + btree | can_unique | t + btree | can_multi_col | t + btree | can_exclude | t + btree | can_include | t + btree | bogus | + gin | can_order | f + gin | can_unique | f + gin | can_multi_col | t + gin | can_exclude | f + gin | can_include | f + gin | bogus | + gist | can_order | f + gist | can_unique | f + gist | can_multi_col | t + gist | can_exclude | t + gist | can_include | t + gist | bogus | + hash | can_order | f + hash | can_unique | f + hash | can_multi_col | f + hash | can_exclude | t + hash | can_include | f + hash | bogus | + spgist | can_order | f + spgist | can_unique | f + spgist | can_multi_col | f + spgist | can_exclude | t + spgist | can_include | t + spgist | bogus | +(36 rows) + +-- +-- additional checks for pg_index_column_has_property +-- +CREATE TEMP TABLE foo (f1 int, f2 int, f3 int, f4 int); +CREATE INDEX fooindex ON foo (f1 desc, f2 asc, f3 nulls first, f4 nulls last); +select col, prop, pg_index_column_has_property(o, col, prop) + from (values ('fooindex'::regclass)) v1(o), + (values (1,'orderable'),(2,'asc'),(3,'desc'), + (4,'nulls_first'),(5,'nulls_last'), + (6, 'bogus')) 
v2(idx,prop), + generate_series(1,4) col + order by col, idx; + col | prop | pg_index_column_has_property +-----+-------------+------------------------------ + 1 | orderable | t + 1 | asc | f + 1 | desc | t + 1 | nulls_first | t + 1 | nulls_last | f + 1 | bogus | + 2 | orderable | t + 2 | asc | t + 2 | desc | f + 2 | nulls_first | f + 2 | nulls_last | t + 2 | bogus | + 3 | orderable | t + 3 | asc | t + 3 | desc | f + 3 | nulls_first | t + 3 | nulls_last | f + 3 | bogus | + 4 | orderable | t + 4 | asc | t + 4 | desc | f + 4 | nulls_first | f + 4 | nulls_last | t + 4 | bogus | +(24 rows) + +CREATE INDEX foocover ON foo (f1) INCLUDE (f2,f3); +select col, prop, pg_index_column_has_property(o, col, prop) + from (values ('foocover'::regclass)) v1(o), + (values (1,'orderable'),(2,'asc'),(3,'desc'), + (4,'nulls_first'),(5,'nulls_last'), + (6,'distance_orderable'),(7,'returnable'), + (8, 'bogus')) v2(idx,prop), + generate_series(1,3) col + order by col, idx; + col | prop | pg_index_column_has_property +-----+--------------------+------------------------------ + 1 | orderable | t + 1 | asc | t + 1 | desc | f + 1 | nulls_first | f + 1 | nulls_last | t + 1 | distance_orderable | f + 1 | returnable | t + 1 | bogus | + 2 | orderable | f + 2 | asc | + 2 | desc | + 2 | nulls_first | + 2 | nulls_last | + 2 | distance_orderable | f + 2 | returnable | t + 2 | bogus | + 3 | orderable | f + 3 | asc | + 3 | desc | + 3 | nulls_first | + 3 | nulls_last | + 3 | distance_orderable | f + 3 | returnable | t + 3 | bogus | +(24 rows) + diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out new file mode 100644 index 0000000..9574984 --- /dev/null +++ b/src/test/regress/expected/arrays.out @@ -0,0 +1,2545 @@ +-- +-- ARRAYS +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +CREATE TABLE arrtest ( + a int2[], + b int4[][][], + c name[], + d text[][], + e float8[], + f char(5)[], + g varchar(5)[] +); +CREATE TABLE array_op_test ( + seqno int4, + i int4[], + t text[] +); +\set filename :abs_srcdir '/data/array.data' +COPY array_op_test FROM :'filename'; +ANALYZE array_op_test; +-- +-- only the 'e' array is 0-based, the others are 1-based. +-- +INSERT INTO arrtest (a[1:5], b[1:1][1:2][1:2], c, d, f, g) + VALUES ('{1,2,3,4,5}', '{{{0,0},{1,2}}}', '{}', '{}', '{}', '{}'); +UPDATE arrtest SET e[0] = '1.1'; +UPDATE arrtest SET e[1] = '2.2'; +INSERT INTO arrtest (f) + VALUES ('{"too long"}'); +ERROR: value too long for type character(5) +INSERT INTO arrtest (a, b[1:2][1:2], c, d, e, f, g) + VALUES ('{11,12,23}', '{{3,4},{4,5}}', '{"foobar"}', + '{{"elt1", "elt2"}}', '{"3.4", "6.7"}', + '{"abc","abcde"}', '{"abc","abcde"}'); +INSERT INTO arrtest (a, b[1:2], c, d[1:2]) + VALUES ('{}', '{3,4}', '{foo,bar}', '{bar,foo}'); +INSERT INTO arrtest (b[2]) VALUES(now()); -- error, type mismatch +ERROR: subscripted assignment to "b" requires type integer but expression is of type timestamp with time zone +LINE 1: INSERT INTO arrtest (b[2]) VALUES(now()); + ^ +HINT: You will need to rewrite or cast the expression. +INSERT INTO arrtest (b[1:2]) VALUES(now()); -- error, type mismatch +ERROR: subscripted assignment to "b" requires type integer[] but expression is of type timestamp with time zone +LINE 1: INSERT INTO arrtest (b[1:2]) VALUES(now()); + ^ +HINT: You will need to rewrite or cast the expression. 
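Before the SELECTs below read the data back, the two type-mismatch errors above are worth restating as a rule: in a subscripted assignment, the target's element type dictates the type required on the right-hand side (and a slice target requires an array of that type), while assigning to a subscript outside the current bounds simply extends the array, which is how the 'e' column ends up 0-based. A small sketch of the same behaviour, using a hypothetical temp table rather than arrtest:

CREATE TEMP TABLE arr_demo (vals float8[]);
INSERT INTO arr_demo DEFAULT VALUES;            -- vals starts out NULL
UPDATE arr_demo SET vals[0] = 1.1;              -- creates [0:0]={1.1}
UPDATE arr_demo SET vals[1] = 2.2;              -- extends it to [0:1]={1.1,2.2}
UPDATE arr_demo SET vals[1] = now();            -- fails: element assignment needs a float8
UPDATE arr_demo SET vals[0:1] = '{10.5,20.5}';  -- slice assignment needs a float8[]
SELECT vals, array_dims(vals) FROM arr_demo;    -- [0:1]={10.5,20.5} | [0:1]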
+SELECT * FROM arrtest; + a | b | c | d | e | f | g +-------------+-----------------+-----------+---------------+-----------------+-----------------+------------- + {1,2,3,4,5} | {{{0,0},{1,2}}} | {} | {} | [0:1]={1.1,2.2} | {} | {} + {11,12,23} | {{3,4},{4,5}} | {foobar} | {{elt1,elt2}} | {3.4,6.7} | {"abc ",abcde} | {abc,abcde} + {} | {3,4} | {foo,bar} | {bar,foo} | | | +(3 rows) + +SELECT arrtest.a[1], + arrtest.b[1][1][1], + arrtest.c[1], + arrtest.d[1][1], + arrtest.e[0] + FROM arrtest; + a | b | c | d | e +----+---+--------+------+----- + 1 | 0 | | | 1.1 + 11 | | foobar | elt1 | + | | foo | | +(3 rows) + +SELECT a[1], b[1][1][1], c[1], d[1][1], e[0] + FROM arrtest; + a | b | c | d | e +----+---+--------+------+----- + 1 | 0 | | | 1.1 + 11 | | foobar | elt1 | + | | foo | | +(3 rows) + +SELECT a[1:3], + b[1:1][1:2][1:2], + c[1:2], + d[1:1][1:2] + FROM arrtest; + a | b | c | d +------------+-----------------+-----------+--------------- + {1,2,3} | {{{0,0},{1,2}}} | {} | {} + {11,12,23} | {} | {foobar} | {{elt1,elt2}} + {} | {} | {foo,bar} | {} +(3 rows) + +SELECT array_ndims(a) AS a,array_ndims(b) AS b,array_ndims(c) AS c + FROM arrtest; + a | b | c +---+---+--- + 1 | 3 | + 1 | 2 | 1 + | 1 | 1 +(3 rows) + +SELECT array_dims(a) AS a,array_dims(b) AS b,array_dims(c) AS c + FROM arrtest; + a | b | c +-------+-----------------+------- + [1:5] | [1:1][1:2][1:2] | + [1:3] | [1:2][1:2] | [1:1] + | [1:2] | [1:2] +(3 rows) + +-- returns nothing +SELECT * + FROM arrtest + WHERE a[1] < 5 and + c = '{"foobar"}'::_name; + a | b | c | d | e | f | g +---+---+---+---+---+---+--- +(0 rows) + +UPDATE arrtest + SET a[1:2] = '{16,25}' + WHERE NOT a = '{}'::_int2; +UPDATE arrtest + SET b[1:1][1:1][1:2] = '{113, 117}', + b[1:1][1:2][2:2] = '{142, 147}' + WHERE array_dims(b) = '[1:1][1:2][1:2]'; +UPDATE arrtest + SET c[2:2] = '{"new_word"}' + WHERE array_dims(c) is not null; +SELECT a,b,c FROM arrtest; + a | b | c +---------------+-----------------------+------------------- + {16,25,3,4,5} | {{{113,142},{1,147}}} | {} + {} | {3,4} | {foo,new_word} + {16,25,23} | {{3,4},{4,5}} | {foobar,new_word} +(3 rows) + +SELECT a[1:3], + b[1:1][1:2][1:2], + c[1:2], + d[1:1][2:2] + FROM arrtest; + a | b | c | d +------------+-----------------------+-------------------+---------- + {16,25,3} | {{{113,142},{1,147}}} | {} | {} + {} | {} | {foo,new_word} | {} + {16,25,23} | {} | {foobar,new_word} | {{elt2}} +(3 rows) + +SELECT b[1:1][2][2], + d[1:1][2] + FROM arrtest; + b | d +-----------------------+--------------- + {{{113,142},{1,147}}} | {} + {} | {} + {} | {{elt1,elt2}} +(3 rows) + +INSERT INTO arrtest(a) VALUES('{1,null,3}'); +SELECT a FROM arrtest; + a +--------------- + {16,25,3,4,5} + {} + {16,25,23} + {1,NULL,3} +(4 rows) + +UPDATE arrtest SET a[4] = NULL WHERE a[2] IS NULL; +SELECT a FROM arrtest WHERE a[2] IS NULL; + a +----------------- + [4:4]={NULL} + {1,NULL,3,NULL} +(2 rows) + +DELETE FROM arrtest WHERE a[2] IS NULL AND b IS NULL; +SELECT a,b,c FROM arrtest; + a | b | c +---------------+-----------------------+------------------- + {16,25,3,4,5} | {{{113,142},{1,147}}} | {} + {16,25,23} | {{3,4},{4,5}} | {foobar,new_word} + [4:4]={NULL} | {3,4} | {foo,new_word} +(3 rows) + +-- test non-error-throwing API +SELECT pg_input_is_valid('{1,2,3}', 'integer[]'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('{1,2', 'integer[]'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('{1,zed}', 'integer[]'); + pg_input_is_valid +------------------- + f +(1 
row) + +SELECT * FROM pg_input_error_info('{1,zed}', 'integer[]'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + invalid input syntax for type integer: "zed" | | | 22P02 +(1 row) + +-- test mixed slice/scalar subscripting +select '{{1,2,3},{4,5,6},{7,8,9}}'::int[]; + int4 +--------------------------- + {{1,2,3},{4,5,6},{7,8,9}} +(1 row) + +select ('{{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2]; + int4 +--------------- + {{1,2},{4,5}} +(1 row) + +select '[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[]; + int4 +-------------------------------------- + [0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}} +(1 row) + +select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2]; + int4 +--------------- + {{5,6},{8,9}} +(1 row) + +-- +-- check subscription corner cases +-- +-- More subscripts than MAXDIM (6) +SELECT ('{}'::int[])[1][2][3][4][5][6][7]; +ERROR: number of array dimensions (7) exceeds the maximum allowed (6) +-- NULL index yields NULL when selecting +SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL][1]; + int4 +------ + +(1 row) + +SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL:1][1]; + int4 +------ + +(1 row) + +SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][1:NULL][1]; + int4 +------ + +(1 row) + +-- NULL index in assignment is an error +UPDATE arrtest + SET c[NULL] = '{"can''t assign"}' + WHERE array_dims(c) is not null; +ERROR: array subscript in assignment must not be null +UPDATE arrtest + SET c[NULL:1] = '{"can''t assign"}' + WHERE array_dims(c) is not null; +ERROR: array subscript in assignment must not be null +UPDATE arrtest + SET c[1:NULL] = '{"can''t assign"}' + WHERE array_dims(c) is not null; +ERROR: array subscript in assignment must not be null +-- Un-subscriptable type +SELECT (now())[1]; +ERROR: cannot subscript type timestamp with time zone because it does not support subscripting +LINE 1: SELECT (now())[1]; + ^ +-- test slices with empty lower and/or upper index +CREATE TEMP TABLE arrtest_s ( + a int2[], + b int2[][] +); +INSERT INTO arrtest_s VALUES ('{1,2,3,4,5}', '{{1,2,3}, {4,5,6}, {7,8,9}}'); +INSERT INTO arrtest_s VALUES ('[0:4]={1,2,3,4,5}', '[0:2][0:2]={{1,2,3}, {4,5,6}, {7,8,9}}'); +SELECT * FROM arrtest_s; + a | b +-------------------+-------------------------------------- + {1,2,3,4,5} | {{1,2,3},{4,5,6},{7,8,9}} + [0:4]={1,2,3,4,5} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}} +(2 rows) + +SELECT a[:3], b[:2][:2] FROM arrtest_s; + a | b +-----------+--------------------------- + {1,2,3} | {{1,2},{4,5}} + {1,2,3,4} | {{1,2,3},{4,5,6},{7,8,9}} +(2 rows) + +SELECT a[2:], b[2:][2:] FROM arrtest_s; + a | b +-----------+--------------- + {2,3,4,5} | {{5,6},{8,9}} + {3,4,5} | {{9}} +(2 rows) + +SELECT a[:], b[:] FROM arrtest_s; + a | b +-------------+--------------------------- + {1,2,3,4,5} | {{1,2,3},{4,5,6},{7,8,9}} + {1,2,3,4,5} | {{1,2,3},{4,5,6},{7,8,9}} +(2 rows) + +-- updates +UPDATE arrtest_s SET a[:3] = '{11, 12, 13}', b[:2][:2] = '{{11,12}, {14,15}}' + WHERE array_lower(a,1) = 1; +SELECT * FROM arrtest_s; + a | b +-------------------+-------------------------------------- + [0:4]={1,2,3,4,5} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}} + {11,12,13,4,5} | {{11,12,3},{14,15,6},{7,8,9}} +(2 rows) + +UPDATE arrtest_s SET a[3:] = '{23, 24, 25}', b[2:][2:] = '{{25,26}, {28,29}}'; +SELECT * FROM arrtest_s; + a | b +---------------------+--------------------------------------- + [0:4]={1,2,3,23,24} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,25}} + {11,12,23,24,25} | 
{{11,12,3},{14,25,26},{7,28,29}} +(2 rows) + +UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}'; +SELECT * FROM arrtest_s; + a | b +------------------------+--------------------------------------- + [0:4]={11,12,13,14,15} | [0:2][0:2]={{1,2,3},{4,5,6},{7,8,25}} + {11,12,13,14,15} | {{11,12,3},{14,25,26},{7,28,29}} +(2 rows) + +UPDATE arrtest_s SET a[:] = '{23, 24, 25}'; -- fail, too small +ERROR: source array too small +INSERT INTO arrtest_s VALUES(NULL, NULL); +UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}'; -- fail, no good with null +ERROR: array slice subscript must provide both boundaries +DETAIL: When assigning to a slice of an empty array value, slice boundaries must be fully specified. +-- we want to work with a point_tbl that includes a null +CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl; +INSERT INTO POINT_TBL(f1) VALUES (NULL); +-- check with fixed-length-array type, such as point +SELECT f1[0:1] FROM POINT_TBL; +ERROR: slices of fixed-length arrays not implemented +SELECT f1[0:] FROM POINT_TBL; +ERROR: slices of fixed-length arrays not implemented +SELECT f1[:1] FROM POINT_TBL; +ERROR: slices of fixed-length arrays not implemented +SELECT f1[:] FROM POINT_TBL; +ERROR: slices of fixed-length arrays not implemented +-- subscript assignments to fixed-width result in NULL if previous value is NULL +UPDATE point_tbl SET f1[0] = 10 WHERE f1 IS NULL RETURNING *; + f1 +---- + +(1 row) + +INSERT INTO point_tbl(f1[0]) VALUES(0) RETURNING *; + f1 +---- + +(1 row) + +-- NULL assignments get ignored +UPDATE point_tbl SET f1[0] = NULL WHERE f1::text = '(10,10)'::point::text RETURNING *; + f1 +--------- + (10,10) +(1 row) + +-- but non-NULL subscript assignments work +UPDATE point_tbl SET f1[0] = -10, f1[1] = -10 WHERE f1::text = '(10,10)'::point::text RETURNING *; + f1 +----------- + (-10,-10) +(1 row) + +-- but not to expand the range +UPDATE point_tbl SET f1[3] = 10 WHERE f1::text = '(-10,-10)'::point::text RETURNING *; +ERROR: array subscript out of range +-- +-- test array extension +-- +CREATE TEMP TABLE arrtest1 (i int[], t text[]); +insert into arrtest1 values(array[1,2,null,4], array['one','two',null,'four']); +select * from arrtest1; + i | t +--------------+--------------------- + {1,2,NULL,4} | {one,two,NULL,four} +(1 row) + +update arrtest1 set i[2] = 22, t[2] = 'twenty-two'; +select * from arrtest1; + i | t +---------------+---------------------------- + {1,22,NULL,4} | {one,twenty-two,NULL,four} +(1 row) + +update arrtest1 set i[5] = 5, t[5] = 'five'; +select * from arrtest1; + i | t +-----------------+--------------------------------- + {1,22,NULL,4,5} | {one,twenty-two,NULL,four,five} +(1 row) + +update arrtest1 set i[8] = 8, t[8] = 'eight'; +select * from arrtest1; + i | t +-----------------------------+------------------------------------------------- + {1,22,NULL,4,5,NULL,NULL,8} | {one,twenty-two,NULL,four,five,NULL,NULL,eight} +(1 row) + +update arrtest1 set i[0] = 0, t[0] = 'zero'; +select * from arrtest1; + i | t +-------------------------------------+------------------------------------------------------------ + [0:8]={0,1,22,NULL,4,5,NULL,NULL,8} | [0:8]={zero,one,twenty-two,NULL,four,five,NULL,NULL,eight} +(1 row) + +update arrtest1 set i[-3] = -3, t[-3] = 'minus-three'; +select * from arrtest1; + i | t +---------------------------------------------------+----------------------------------------------------------------------------------- + [-3:8]={-3,NULL,NULL,0,1,22,NULL,4,5,NULL,NULL,8} | 
[-3:8]={minus-three,NULL,NULL,zero,one,twenty-two,NULL,four,five,NULL,NULL,eight} +(1 row) + +update arrtest1 set i[0:2] = array[10,11,12], t[0:2] = array['ten','eleven','twelve']; +select * from arrtest1; + i | t +-----------------------------------------------------+--------------------------------------------------------------------------------- + [-3:8]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,8} | [-3:8]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,eight} +(1 row) + +update arrtest1 set i[8:10] = array[18,null,20], t[8:10] = array['p18',null,'p20']; +select * from arrtest1; + i | t +---------------------------------------------------------------+----------------------------------------------------------------------------------------- + [-3:10]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20} | [-3:10]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20} +(1 row) + +update arrtest1 set i[11:12] = array[null,22], t[11:12] = array[null,'p22']; +select * from arrtest1; + i | t +-----------------------------------------------------------------------+-------------------------------------------------------------------------------------------------- + [-3:12]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22} | [-3:12]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22} +(1 row) + +update arrtest1 set i[15:16] = array[null,26], t[15:16] = array[null,'p26']; +select * from arrtest1; + i | t +-----------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------- + [-3:16]={-3,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-3:16]={minus-three,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26} +(1 row) + +update arrtest1 set i[-5:-3] = array[-15,-14,-13], t[-5:-3] = array['m15','m14','m13']; +select * from arrtest1; + i | t +--------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------- + [-5:16]={-15,-14,-13,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-5:16]={m15,m14,m13,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26} +(1 row) + +update arrtest1 set i[-7:-6] = array[-17,null], t[-7:-6] = array['m17',null]; +select * from arrtest1; + i | t +-----------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------ + [-7:16]={-17,NULL,-15,-14,-13,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-7:16]={m17,NULL,m15,m14,m13,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26} +(1 row) + +update arrtest1 set i[-12:-10] = array[-22,null,-20], t[-12:-10] = array['m22',null,'m20']; +select * from arrtest1; + i | t +-----------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------ + 
[-12:16]={-22,NULL,-20,NULL,NULL,-17,NULL,-15,-14,-13,NULL,NULL,10,11,12,NULL,4,5,NULL,NULL,18,NULL,20,NULL,22,NULL,NULL,NULL,26} | [-12:16]={m22,NULL,m20,NULL,NULL,m17,NULL,m15,m14,m13,NULL,NULL,ten,eleven,twelve,NULL,four,five,NULL,NULL,p18,NULL,p20,NULL,p22,NULL,NULL,NULL,p26} +(1 row) + +delete from arrtest1; +insert into arrtest1 values(array[1,2,null,4], array['one','two',null,'four']); +select * from arrtest1; + i | t +--------------+--------------------- + {1,2,NULL,4} | {one,two,NULL,four} +(1 row) + +update arrtest1 set i[0:5] = array[0,1,2,null,4,5], t[0:5] = array['z','p1','p2',null,'p4','p5']; +select * from arrtest1; + i | t +------------------------+---------------------------- + [0:5]={0,1,2,NULL,4,5} | [0:5]={z,p1,p2,NULL,p4,p5} +(1 row) + +-- +-- array expressions and operators +-- +-- table creation and INSERTs +CREATE TEMP TABLE arrtest2 (i integer ARRAY[4], f float8[], n numeric[], t text[], d timestamp[]); +INSERT INTO arrtest2 VALUES( + ARRAY[[[113,142],[1,147]]], + ARRAY[1.1,1.2,1.3]::float8[], + ARRAY[1.1,1.2,1.3], + ARRAY[[['aaa','aab'],['aba','abb'],['aca','acb']],[['baa','bab'],['bba','bbb'],['bca','bcb']]], + ARRAY['19620326','19931223','19970117']::timestamp[] +); +-- some more test data +CREATE TEMP TABLE arrtest_f (f0 int, f1 text, f2 float8); +insert into arrtest_f values(1,'cat1',1.21); +insert into arrtest_f values(2,'cat1',1.24); +insert into arrtest_f values(3,'cat1',1.18); +insert into arrtest_f values(4,'cat1',1.26); +insert into arrtest_f values(5,'cat1',1.15); +insert into arrtest_f values(6,'cat2',1.15); +insert into arrtest_f values(7,'cat2',1.26); +insert into arrtest_f values(8,'cat2',1.32); +insert into arrtest_f values(9,'cat2',1.30); +CREATE TEMP TABLE arrtest_i (f0 int, f1 text, f2 int); +insert into arrtest_i values(1,'cat1',21); +insert into arrtest_i values(2,'cat1',24); +insert into arrtest_i values(3,'cat1',18); +insert into arrtest_i values(4,'cat1',26); +insert into arrtest_i values(5,'cat1',15); +insert into arrtest_i values(6,'cat2',15); +insert into arrtest_i values(7,'cat2',26); +insert into arrtest_i values(8,'cat2',32); +insert into arrtest_i values(9,'cat2',30); +-- expressions +SELECT t.f[1][3][1] AS "131", t.f[2][2][1] AS "221" FROM ( + SELECT ARRAY[[[111,112],[121,122],[131,132]],[[211,212],[221,122],[231,232]]] AS f +) AS t; + 131 | 221 +-----+----- + 131 | 221 +(1 row) + +SELECT ARRAY[[[[[['hello'],['world']]]]]]; + array +--------------------------- + {{{{{{hello},{world}}}}}} +(1 row) + +SELECT ARRAY[ARRAY['hello'],ARRAY['world']]; + array +------------------- + {{hello},{world}} +(1 row) + +SELECT ARRAY(select f2 from arrtest_f order by f2) AS "ARRAY"; + ARRAY +----------------------------------------------- + {1.15,1.15,1.18,1.21,1.24,1.26,1.26,1.3,1.32} +(1 row) + +-- with nulls +SELECT '{1,null,3}'::int[]; + int4 +------------ + {1,NULL,3} +(1 row) + +SELECT ARRAY[1,NULL,3]; + array +------------ + {1,NULL,3} +(1 row) + +-- functions +SELECT array_append(array[42], 6) AS "{42,6}"; + {42,6} +-------- + {42,6} +(1 row) + +SELECT array_prepend(6, array[42]) AS "{6,42}"; + {6,42} +-------- + {6,42} +(1 row) + +SELECT array_cat(ARRAY[1,2], ARRAY[3,4]) AS "{1,2,3,4}"; + {1,2,3,4} +----------- + {1,2,3,4} +(1 row) + +SELECT array_cat(ARRAY[1,2], ARRAY[[3,4],[5,6]]) AS "{{1,2},{3,4},{5,6}}"; + {{1,2},{3,4},{5,6}} +--------------------- + {{1,2},{3,4},{5,6}} +(1 row) + +SELECT array_cat(ARRAY[[3,4],[5,6]], ARRAY[1,2]) AS "{{3,4},{5,6},{1,2}}"; + {{3,4},{5,6},{1,2}} +--------------------- + {{3,4},{5,6},{1,2}} +(1 row) + 
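The array_append, array_prepend, and array_cat calls above follow one dimensionality rule: the two inputs must either have the same number of dimensions or differ by exactly one, in which case the lower-dimensional array is treated as a single element of the other's outer dimension. A compact sketch of that rule (the || operator, exercised further below, behaves the same way):

SELECT array_append(ARRAY[1,2], 3)          AS app,     -- {1,2,3}
       array_prepend(0, ARRAY[1,2])         AS pre,     -- {0,1,2}
       array_cat(ARRAY[1,2], ARRAY[3,4])    AS flat,    -- {1,2,3,4}
       array_cat(ARRAY[[1,2]], ARRAY[3,4])  AS nested;  -- {{1,2},{3,4}}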
+SELECT array_position(ARRAY[1,2,3,4,5], 4); + array_position +---------------- + 4 +(1 row) + +SELECT array_position(ARRAY[5,3,4,2,1], 4); + array_position +---------------- + 3 +(1 row) + +SELECT array_position(ARRAY[[1,2],[3,4]], 3); +ERROR: searching for elements in multidimensional arrays is not supported +SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'mon'); + array_position +---------------- + 2 +(1 row) + +SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], 'sat'); + array_position +---------------- + 7 +(1 row) + +SELECT array_position(ARRAY['sun','mon','tue','wed','thu','fri','sat'], NULL); + array_position +---------------- + +(1 row) + +SELECT array_position(ARRAY['sun','mon','tue','wed','thu',NULL,'fri','sat'], NULL); + array_position +---------------- + 6 +(1 row) + +SELECT array_position(ARRAY['sun','mon','tue','wed','thu',NULL,'fri','sat'], 'sat'); + array_position +---------------- + 8 +(1 row) + +SELECT array_positions(NULL, 10); + array_positions +----------------- + +(1 row) + +SELECT array_positions(NULL, NULL::int); + array_positions +----------------- + +(1 row) + +SELECT array_positions(ARRAY[1,2,3,4,5,6,1,2,3,4,5,6], 4); + array_positions +----------------- + {4,10} +(1 row) + +SELECT array_positions(ARRAY[[1,2],[3,4]], 4); +ERROR: searching for elements in multidimensional arrays is not supported +SELECT array_positions(ARRAY[1,2,3,4,5,6,1,2,3,4,5,6], NULL); + array_positions +----------------- + {} +(1 row) + +SELECT array_positions(ARRAY[1,2,3,NULL,5,6,1,2,3,NULL,5,6], NULL); + array_positions +----------------- + {4,10} +(1 row) + +SELECT array_length(array_positions(ARRAY(SELECT 'AAAAAAAAAAAAAAAAAAAAAAAAA'::text || i % 10 + FROM generate_series(1,100) g(i)), + 'AAAAAAAAAAAAAAAAAAAAAAAAA5'), 1); + array_length +-------------- + 10 +(1 row) + +DO $$ +DECLARE + o int; + a int[] := ARRAY[1,2,3,2,3,1,2]; +BEGIN + o := array_position(a, 2); + WHILE o IS NOT NULL + LOOP + RAISE NOTICE '%', o; + o := array_position(a, 2, o + 1); + END LOOP; +END +$$ LANGUAGE plpgsql; +NOTICE: 2 +NOTICE: 4 +NOTICE: 7 +SELECT array_position('[2:4]={1,2,3}'::int[], 1); + array_position +---------------- + 2 +(1 row) + +SELECT array_positions('[2:4]={1,2,3}'::int[], 1); + array_positions +----------------- + {2} +(1 row) + +SELECT + array_position(ids, (1, 1)), + array_positions(ids, (1, 1)) + FROM +(VALUES + (ARRAY[(0, 0), (1, 1)]), + (ARRAY[(1, 1)]) +) AS f (ids); + array_position | array_positions +----------------+----------------- + 2 | {2} + 1 | {1} +(2 rows) + +-- operators +SELECT a FROM arrtest WHERE b = ARRAY[[[113,142],[1,147]]]; + a +--------------- + {16,25,3,4,5} +(1 row) + +SELECT NOT ARRAY[1.1,1.2,1.3] = ARRAY[1.1,1.2,1.3] AS "FALSE"; + FALSE +------- + f +(1 row) + +SELECT ARRAY[1,2] || 3 AS "{1,2,3}"; + {1,2,3} +--------- + {1,2,3} +(1 row) + +SELECT 0 || ARRAY[1,2] AS "{0,1,2}"; + {0,1,2} +--------- + {0,1,2} +(1 row) + +SELECT ARRAY[1,2] || ARRAY[3,4] AS "{1,2,3,4}"; + {1,2,3,4} +----------- + {1,2,3,4} +(1 row) + +SELECT ARRAY[[['hello','world']]] || ARRAY[[['happy','birthday']]] AS "ARRAY"; + ARRAY +-------------------------------------- + {{{hello,world}},{{happy,birthday}}} +(1 row) + +SELECT ARRAY[[1,2],[3,4]] || ARRAY[5,6] AS "{{1,2},{3,4},{5,6}}"; + {{1,2},{3,4},{5,6}} +--------------------- + {{1,2},{3,4},{5,6}} +(1 row) + +SELECT ARRAY[0,0] || ARRAY[1,1] || ARRAY[2,2] AS "{0,0,1,1,2,2}"; + {0,0,1,1,2,2} +--------------- + {0,0,1,1,2,2} +(1 row) + +SELECT 0 || ARRAY[1,2] || 3 AS "{0,1,2,3}"; + {0,1,2,3} +----------- 
+ {0,1,2,3} +(1 row) + +SELECT ARRAY[1.1] || ARRAY[2,3,4]; + ?column? +------------- + {1.1,2,3,4} +(1 row) + +SELECT array_agg(x) || array_agg(x) FROM (VALUES (ROW(1,2)), (ROW(3,4))) v(x); + ?column? +----------------------------------- + {"(1,2)","(3,4)","(1,2)","(3,4)"} +(1 row) + +SELECT ROW(1,2) || array_agg(x) FROM (VALUES (ROW(3,4)), (ROW(5,6))) v(x); + ?column? +--------------------------- + {"(1,2)","(3,4)","(5,6)"} +(1 row) + +SELECT * FROM array_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_op_test WHERE i @> '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | 
{AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_op_test WHERE i && '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_op_test WHERE i @> '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(3 rows) + +SELECT * FROM array_op_test WHERE i && '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(11 rows) + +SELECT * FROM array_op_test WHERE i <@ '{38,34,32,89}' ORDER BY seqno; + seqno | i | t 
+-------+---------------+---------------------------------------------------------------------------------------------------------------------------- + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 101 | {} | {} +(4 rows) + +SELECT * FROM array_op_test WHERE i = '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SELECT * FROM array_op_test WHERE i @> '{}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1 | {92,75,71,52,64,83} | {AAAAAAAA44066,AAAAAA1059,AAAAAAAAAAA176,AAAAAAA48038} + 2 | {3,6} | {AAAAAA98232,AAAAAAAA79710,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAAAAAAA55798,AAAAAAAAA12793} + 3 | {37,64,95,43,3,41,13,30,11,43} | {AAAAAAAAAA48845,AAAAA75968,AAAAA95309,AAA54451,AAAAAAAAAA22292,AAAAAAA99836,A96617,AA17009,AAAAAAAAAAAAAA95246} + 4 | {71,39,99,55,33,75,45} | {AAAAAAAAA53663,AAAAAAAAAAAAAAA67062,AAAAAAAAAA64777,AAA99043,AAAAAAAAAAAAAAAAAAA91804,39557} + 5 | {50,42,77,50,4} | {AAAAAAAAAAAAAAAAA26540,AAAAAAA79710,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA176,AAAAA95309,AAAAAAAAAAA46154,AAAAAA66777,AAAAAAAAA27249,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA70104} + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 7 | {12,51,88,64,8} | {AAAAAAAAAAAAAAAAAA12591,AAAAAAAAAAAAAAAAA50407,AAAAAAAAAAAA67946} + 8 | {60,84} | {AAAAAAA81898,AAAAAA1059,AAAAAAAAAAAA81511,AAAAA961,AAAAAAAAAAAAAAAA31334,AAAAA64741,AA6416,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAA50407} + 9 | {56,52,35,27,80,44,81,22} | {AAAAAAAAAAAAAAA73034,AAAAAAAAAAAAA7929,AAAAAAA66161,AA88409,39557,A27153,AAAAAAAA9523,AAAAAAAAAAA99000} + 10 | {71,5,45} | {AAAAAAAAAAA21658,AAAAAAAAAAAA21089,AAA54451,AAAAAAAAAAAAAAAAAA54141,AAAAAAAAAAAAAA28620,AAAAAAAAAAA21658,AAAAAAAAAAA74076,AAAAAAAAA27249} + 11 | {41,86,74,48,22,74,47,50} | {AAAAAAAA9523,AAAAAAAAAAAA37562,AAAAAAAAAAAAAAAA14047,AAAAAAAAAAA46154,AAAA41702,AAAAAAAAAAAAAAAAA764,AAAAA62737,39557} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 13 | {3,52,34,23} | {AAAAAA98232,AAAA49534,AAAAAAAAAAA21658} + 14 | {78,57,19} | {AAAA8857,AAAAAAAAAAAAAAA73034,AAAAAAAA81587,AAAAAAAAAAAAAAA68526,AAAAA75968,AAAAAAAAAAAAAA65909,AAAAAAAAA10012,AAAAAAAAAAAAAA65909} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 16 | {14,63,85,11} | {AAAAAA66777} + 17 | {7,10,81,85} | {AAAAAA43678,AAAAAAA12144,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAAAAA15356} + 18 | {1} | {AAAAAAAAAAA33576,AAAAA95309,64261,AAA59323,AAAAAAAAAAAAAA95246,55847,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAAAA64374} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 20 | {72,89,70,51,54,37,8,49,79} | {AAAAAA58494} + 21 | {2,8,65,10,5,79,43} | {AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAAAAA91804,AAAAA64669,AAAAAAAAAAAAAAAA1443,AAAAAAAAAAAAAAAA23657,AAAAA12179,AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAA31334,AAAAAAAAAAAAAAAA41303,AAAAAAAAAAAAAAAAAAA85420} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 23 
| {40,90,5,38,72,40,30,10,43,55} | {A6053,AAAAAAAAAAA6119,AA44673,AAAAAAAAAAAAAAAAA764,AA17009,AAAAA17383,AAAAA70514,AAAAA33250,AAAAA95309,AAAAAAAAAAAA37562} + 24 | {94,61,99,35,48} | {AAAAAAAAAAA50956,AAAAAAAAAAA15165,AAAA85070,AAAAAAAAAAAAAAA36627,AAAAA961,AAAAAAAAAA55219} + 25 | {31,1,10,11,27,79,38} | {AAAAAAAAAAAAAAAAAA59334,45449} + 26 | {71,10,9,69,75} | {47735,AAAAAAA21462,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA91804,AAAAAAAAA72121,AAAAAAAAAAAAAAAAAAA1205,AAAAA41597,AAAA8857,AAAAAAAAAAAAAAAAAAA15356,AA17009} + 27 | {94} | {AA6416,A6053,AAAAAAA21462,AAAAAAA57334,AAAAAAAAAAAAAAAAAA12591,AA88409,AAAAAAAAAAAAA70254} + 28 | {14,33,6,34,14} | {AAAAAAAAAAAAAAA13198,AAAAAAAA69452,AAAAAAAAAAA82945,AAAAAAA12144,AAAAAAAAA72121,AAAAAAAAAA18601} + 29 | {39,21} | {AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA38885,AAAA85070,AAAAAAAAAAAAAAAAAAA70104,AAAAA66674,AAAAAAAAAAAAA62007,AAAAAAAA69452,AAAAAAA1242,AAAAAAAAAAAAAAAA1729,AAAA35194} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 31 | {80,24,18,21,54} | {AAAAAAAAAAAAAAA13198,AAAAAAAAAAAAAAAAAAA70415,A27153,AAAAAAAAA53663,AAAAAAAAAAAAAAAAA50407,A68938} + 32 | {58,79,82,80,67,75,98,10,41} | {AAAAAAAAAAAAAAAAAA61286,AAA54451,AAAAAAAAAAAAAAAAAAA87527,A96617,51533} + 33 | {74,73} | {A85417,AAAAAAA56483,AAAAA17383,AAAAAAAAAAAAA62159,AAAAAAAAAAAA52814,AAAAAAAAAAAAA85723,AAAAAAAAAAAAAAAAAA55796} + 34 | {70,45} | {AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAA28620,AAAAAAAAAA55219,AAAAAAAA23648,AAAAAAAAAA22292,AAAAAAA1242} + 35 | {23,40} | {AAAAAAAAAAAA52814,AAAA48949,AAAAAAAAA34727,AAAA8857,AAAAAAAAAAAAAAAAAAA62179,AAAAAAAAAAAAAAA68526,AAAAAAA99836,AAAAAAAA50094,AAAA91194,AAAAAAAAAAAAA73084} + 36 | {79,82,14,52,30,5,79} | {AAAAAAAAA53663,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA89194,AA88409,AAAAAAAAAAAAAAA81326,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAA33598} + 37 | {53,11,81,39,3,78,58,64,74} | {AAAAAAAAAAAAAAAAAAA17075,AAAAAAA66161,AAAAAAAA23648,AAAAAAAAAAAAAA10611} + 38 | {59,5,4,95,28} | {AAAAAAAAAAA82945,A96617,47735,AAAAA12179,AAAAA64669,AAAAAA99807,AA74433,AAAAAAAAAAAAAAAAA59387} + 39 | {82,43,99,16,74} | {AAAAAAAAAAAAAAA67062,AAAAAAA57334,AAAAAAAAAAAAAA65909,A27153,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAA64777,AAAAAAAAAAAA81511,AAAAAAAAAAAAAA65909,AAAAAAAAAAAAAA28620} + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 41 | {19,26,63,12,93,73,27,94} | {AAAAAAA79710,AAAAAAAAAA55219,AAAA41702,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAAAAA63050,AAAAAAA99836,AAAAAAAAAAAAAA8666} + 42 | {15,76,82,75,8,91} | {AAAAAAAAAAA176,AAAAAA38063,45449,AAAAAA54032,AAAAAAA81898,AA6416,AAAAAAAAAAAAAAAAAAA62179,45449,AAAAA60038,AAAAAAAA81587} + 43 | {39,87,91,97,79,28} | {AAAAAAAAAAA74076,A96617,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAAAAA55796,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAA67946} + 44 | {40,58,68,29,54} | {AAAAAAA81898,AAAAAA66777,AAAAAA98232} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 46 | {53,24} | {AAAAAAAAAAA53908,AAAAAA54032,AAAAA17383,AAAA48949,AAAAAAAAAA18601,AAAAA64669,45449,AAAAAAAAAAA98051,AAAAAAAAAAAAAAAAAA71621} + 47 | {98,23,64,12,75,61} | {AAA59323,AAAAA95309,AAAAAAAAAAAAAAAA31334,AAAAAAAAA27249,AAAAA17383,AAAAAAAAAAAA37562,AAAAAA1059,A84822,55847,AAAAA70466} + 48 | {76,14} | 
{AAAAAAAAAAAAA59671,AAAAAAAAAAAAAAAAAAA91804,AAAAAA66777,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAA73084,AAAAAAA79710,AAAAAAAAAAAAAAA40402,AAAAAAAAAAAAAAAAAAA65037} + 49 | {56,5,54,37,49} | {AA21643,AAAAAAAAAAA92631,AAAAAAAA81587} + 50 | {20,12,37,64,93} | {AAAAAAAAAA5483,AAAAAAAAAAAAAAAAAAA1205,AA6416,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAAAA47955} + 51 | {47} | {AAAAAAAAAAAAAA96505,AAAAAAAAAAAAAAAAAA36842,AAAAA95309,AAAAAAAA81587,AA6416,AAAA91194,AAAAAA58494,AAAAAA1059,AAAAAAAA69452} + 52 | {89,0} | {AAAAAAAAAAAAAAAAAA47955,AAAAAAA48038,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAA73084,AAAAA70466,AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA46154,AA66862} + 53 | {38,17} | {AAAAAAAAAAA21658} + 54 | {70,47} | {AAAAAAAAAAAAAAAAAA54141,AAAAA40681,AAAAAAA48038,AAAAAAAAAAAAAAAA29150,AAAAA41597,AAAAAAAAAAAAAAAAAA59334,AA15322} + 55 | {47,79,47,64,72,25,71,24,93} | {AAAAAAAAAAAAAAAAAA55796,AAAAA62737} + 56 | {33,7,60,54,93,90,77,85,39} | {AAAAAAAAAAAAAAAAAA32918,AA42406} + 57 | {23,45,10,42,36,21,9,96} | {AAAAAAAAAAAAAAAAAAA70415} + 58 | {92} | {AAAAAAAAAAAAAAAA98414,AAAAAAAA23648,AAAAAAAAAAAAAAAAAA55796,AA25381,AAAAAAAAAAA6119} + 59 | {9,69,46,77} | {39557,AAAAAAA89932,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAAAAAA26540,AAA20874,AA6416,AAAAAAAAAAAAAAAAAA47955} + 60 | {62,2,59,38,89} | {AAAAAAA89932,AAAAAAAAAAAAAAAAAAA15356,AA99927,AA17009,AAAAAAAAAAAAAAA35875} + 61 | {72,2,44,95,54,54,13} | {AAAAAAAAAAAAAAAAAAA91804} + 62 | {83,72,29,73} | {AAAAAAAAAAAAA15097,AAAA8857,AAAAAAAAAAAA35809,AAAAAAAAAAAA52814,AAAAAAAAAAAAAAAAAAA38885,AAAAAAAAAAAAAAAAAA24183,AAAAAA43678,A96617} + 63 | {11,4,61,87} | {AAAAAAAAA27249,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAA13198,AAA20874,39557,51533,AAAAAAAAAAA53908,AAAAAAAAAAAAAA96505,AAAAAAAA78938} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 66 | {31,23,70,52,4,33,48,25} | {AAAAAAAAAAAAAAAAA69675,AAAAAAAA50094,AAAAAAAAAAA92631,AAAA35194,39557,AAAAAAA99836} + 67 | {31,94,7,10} | {AAAAAA38063,A96617,AAAA35194,AAAAAAAAAAAA67946} + 68 | {90,43,38} | {AA75092,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAA92631,AAAAAAAAA10012,AAAAAAAAAAAAA7929,AA21643} + 69 | {67,35,99,85,72,86,44} | {AAAAAAAAAAAAAAAAAAA1205,AAAAAAAA50094,AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAAAAAAA47955} + 70 | {56,70,83} | {AAAA41702,AAAAAAAAAAA82945,AA21643,AAAAAAAAAAA99000,A27153,AA25381,AAAAAAAAAAAAAA96505,AAAAAAA1242} + 71 | {74,26} | {AAAAAAAAAAA50956,AA74433,AAAAAAA21462,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAA70254,AAAAAAAAAA43419,39557} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 73 | {88,25,96,78,65,15,29,19} | {AAA54451,AAAAAAAAA27249,AAAAAAA9228,AAAAAAAAAAAAAAA67062,AAAAAAAAAAAAAAAAAAA70415,AAAAA17383,AAAAAAAAAAAAAAAA33598} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 75 | {12,96,83,24,71,89,55} | {AAAA48949,AAAAAAAA29716,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAA29150,AAA28075,AAAAAAAAAAAAAAAAA43052} + 76 | {92,55,10,7} | {AAAAAAAAAAAAAAA67062} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 78 | {55,89,44,84,34} | 
{AAAAAAAAAAA6119,AAAAAAAAAAAAAA8666,AA99927,AA42406,AAAAAAA81898,AAAAAAA9228,AAAAAAAAAAA92631,AA21643,AAAAAAAAAAAAAA28620} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 80 | {74,89,44,80,0} | {AAAA35194,AAAAAAAA79710,AAA20874,AAAAAAAAAAAAAAAAAAA70104,AAAAAAAAAAAAA73084,AAAAAAA57334,AAAAAAA9228,AAAAAAAAAAAAA62007} + 81 | {63,77,54,48,61,53,97} | {AAAAAAAAAAAAAAA81326,AAAAAAAAAA22292,AA25381,AAAAAAAAAAA74076,AAAAAAA81898,AAAAAAAAA72121} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 83 | {14,10} | {AAAAAAAAAA22292,AAAAAAAAAAAAA70254,AAAAAAAAAAA6119} + 84 | {11,83,35,13,96,94} | {AAAAA95309,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAAA24183} + 85 | {39,60} | {AAAAAAAAAAAAAAAA55798,AAAAAAAAAA22292,AAAAAAA66161,AAAAAAA21462,AAAAAAAAAAAAAAAAAA12591,55847,AAAAAA98232,AAAAAAAAAAA46154} + 86 | {33,81,72,74,45,36,82} | {AAAAAAAA81587,AAAAAAAAAAAAAA96505,45449,AAAA80176} + 87 | {57,27,50,12,97,68} | {AAAAAAAAAAAAAAAAA26540,AAAAAAAAA10012,AAAAAAAAAAAA35809,AAAAAAAAAAAAAAAA29150,AAAAAAAAAAA82945,AAAAAA66777,31228,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAA96505} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 90 | {88,75} | {AAAAA60038,AAAAAAAA23648,AAAAAAAAAAA99000,AAAA41702,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAA68526} + 91 | {78} | {AAAAAAAAAAAAA62007,AAA99043} + 92 | {85,63,49,45} | {AAAAAAA89932,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA21089} + 93 | {11} | {AAAAAAAAAAA176,AAAAAAAAAAAAAA8666,AAAAAAAAAAAAAAA453,AAAAAAAAAAAAA85723,A68938,AAAAAAAAAAAAA9821,AAAAAAA48038,AAAAAAAAAAAAAAAAA59387,AA99927,AAAAA17383} + 94 | {98,9,85,62,88,91,60,61,38,86} | {AAAAAAAA81587,AAAAA17383,AAAAAAAA81587} + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 99 | {37,86} | {AAAAAAAAAAAAAAAAAA32918,AAAAA70514,AAAAAAAAA10012,AAAAAAAAAAAAAAAAA59387,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA15356} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} + 101 | {} | {} + 102 | {NULL} | {NULL} +(102 rows) + +SELECT * FROM array_op_test WHERE i && '{}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_op_test WHERE i <@ '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno; + seqno | i | t +-------+--------+-------- + 102 | {NULL} | {NULL} +(1 row) + +SELECT * FROM array_op_test WHERE i @> '{NULL}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_op_test WHERE i && '{NULL}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SELECT * FROM array_op_test WHERE 
t @> '{AAAAAAAA72908}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(4 rows) + +SELECT * FROM array_op_test WHERE t && '{AAAAAAAA72908}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(4 rows) + +SELECT * FROM array_op_test WHERE t @> '{AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------------------+-------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(3 rows) + +SELECT * FROM array_op_test WHERE t && '{AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------------------+-------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(3 rows) + +SELECT * FROM array_op_test WHERE t @> '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------+-------------------------------------------------------------------- + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(1 row) + +SELECT * FROM array_op_test WHERE t && '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(6 rows) + +SELECT * FROM array_op_test WHERE t <@ '{AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}' ORDER BY seqno; + seqno | i | t +-------+--------------------+----------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} 
| {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 101 | {} | {} +(3 rows) + +SELECT * FROM array_op_test WHERE t = '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SELECT * FROM array_op_test WHERE t @> '{}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1 | {92,75,71,52,64,83} | {AAAAAAAA44066,AAAAAA1059,AAAAAAAAAAA176,AAAAAAA48038} + 2 | {3,6} | {AAAAAA98232,AAAAAAAA79710,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAAAAAAA55798,AAAAAAAAA12793} + 3 | {37,64,95,43,3,41,13,30,11,43} | {AAAAAAAAAA48845,AAAAA75968,AAAAA95309,AAA54451,AAAAAAAAAA22292,AAAAAAA99836,A96617,AA17009,AAAAAAAAAAAAAA95246} + 4 | {71,39,99,55,33,75,45} | {AAAAAAAAA53663,AAAAAAAAAAAAAAA67062,AAAAAAAAAA64777,AAA99043,AAAAAAAAAAAAAAAAAAA91804,39557} + 5 | {50,42,77,50,4} | {AAAAAAAAAAAAAAAAA26540,AAAAAAA79710,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA176,AAAAA95309,AAAAAAAAAAA46154,AAAAAA66777,AAAAAAAAA27249,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA70104} + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 7 | {12,51,88,64,8} | {AAAAAAAAAAAAAAAAAA12591,AAAAAAAAAAAAAAAAA50407,AAAAAAAAAAAA67946} + 8 | {60,84} | {AAAAAAA81898,AAAAAA1059,AAAAAAAAAAAA81511,AAAAA961,AAAAAAAAAAAAAAAA31334,AAAAA64741,AA6416,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAA50407} + 9 | {56,52,35,27,80,44,81,22} | {AAAAAAAAAAAAAAA73034,AAAAAAAAAAAAA7929,AAAAAAA66161,AA88409,39557,A27153,AAAAAAAA9523,AAAAAAAAAAA99000} + 10 | {71,5,45} | {AAAAAAAAAAA21658,AAAAAAAAAAAA21089,AAA54451,AAAAAAAAAAAAAAAAAA54141,AAAAAAAAAAAAAA28620,AAAAAAAAAAA21658,AAAAAAAAAAA74076,AAAAAAAAA27249} + 11 | {41,86,74,48,22,74,47,50} | {AAAAAAAA9523,AAAAAAAAAAAA37562,AAAAAAAAAAAAAAAA14047,AAAAAAAAAAA46154,AAAA41702,AAAAAAAAAAAAAAAAA764,AAAAA62737,39557} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 13 | {3,52,34,23} | {AAAAAA98232,AAAA49534,AAAAAAAAAAA21658} + 14 | {78,57,19} | {AAAA8857,AAAAAAAAAAAAAAA73034,AAAAAAAA81587,AAAAAAAAAAAAAAA68526,AAAAA75968,AAAAAAAAAAAAAA65909,AAAAAAAAA10012,AAAAAAAAAAAAAA65909} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 16 | {14,63,85,11} | {AAAAAA66777} + 17 | {7,10,81,85} | {AAAAAA43678,AAAAAAA12144,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAAAAA15356} + 18 | {1} | {AAAAAAAAAAA33576,AAAAA95309,64261,AAA59323,AAAAAAAAAAAAAA95246,55847,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAAAA64374} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 20 | {72,89,70,51,54,37,8,49,79} | {AAAAAA58494} + 21 | {2,8,65,10,5,79,43} | {AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAAAAA91804,AAAAA64669,AAAAAAAAAAAAAAAA1443,AAAAAAAAAAAAAAAA23657,AAAAA12179,AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAA31334,AAAAAAAAAAAAAAAA41303,AAAAAAAAAAAAAAAAAAA85420} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 23 | {40,90,5,38,72,40,30,10,43,55} | {A6053,AAAAAAAAAAA6119,AA44673,AAAAAAAAAAAAAAAAA764,AA17009,AAAAA17383,AAAAA70514,AAAAA33250,AAAAA95309,AAAAAAAAAAAA37562} + 24 | {94,61,99,35,48} | {AAAAAAAAAAA50956,AAAAAAAAAAA15165,AAAA85070,AAAAAAAAAAAAAAA36627,AAAAA961,AAAAAAAAAA55219} + 25 | {31,1,10,11,27,79,38} | {AAAAAAAAAAAAAAAAAA59334,45449} + 26 | {71,10,9,69,75} | 
{47735,AAAAAAA21462,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA91804,AAAAAAAAA72121,AAAAAAAAAAAAAAAAAAA1205,AAAAA41597,AAAA8857,AAAAAAAAAAAAAAAAAAA15356,AA17009} + 27 | {94} | {AA6416,A6053,AAAAAAA21462,AAAAAAA57334,AAAAAAAAAAAAAAAAAA12591,AA88409,AAAAAAAAAAAAA70254} + 28 | {14,33,6,34,14} | {AAAAAAAAAAAAAAA13198,AAAAAAAA69452,AAAAAAAAAAA82945,AAAAAAA12144,AAAAAAAAA72121,AAAAAAAAAA18601} + 29 | {39,21} | {AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA38885,AAAA85070,AAAAAAAAAAAAAAAAAAA70104,AAAAA66674,AAAAAAAAAAAAA62007,AAAAAAAA69452,AAAAAAA1242,AAAAAAAAAAAAAAAA1729,AAAA35194} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 31 | {80,24,18,21,54} | {AAAAAAAAAAAAAAA13198,AAAAAAAAAAAAAAAAAAA70415,A27153,AAAAAAAAA53663,AAAAAAAAAAAAAAAAA50407,A68938} + 32 | {58,79,82,80,67,75,98,10,41} | {AAAAAAAAAAAAAAAAAA61286,AAA54451,AAAAAAAAAAAAAAAAAAA87527,A96617,51533} + 33 | {74,73} | {A85417,AAAAAAA56483,AAAAA17383,AAAAAAAAAAAAA62159,AAAAAAAAAAAA52814,AAAAAAAAAAAAA85723,AAAAAAAAAAAAAAAAAA55796} + 34 | {70,45} | {AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAA28620,AAAAAAAAAA55219,AAAAAAAA23648,AAAAAAAAAA22292,AAAAAAA1242} + 35 | {23,40} | {AAAAAAAAAAAA52814,AAAA48949,AAAAAAAAA34727,AAAA8857,AAAAAAAAAAAAAAAAAAA62179,AAAAAAAAAAAAAAA68526,AAAAAAA99836,AAAAAAAA50094,AAAA91194,AAAAAAAAAAAAA73084} + 36 | {79,82,14,52,30,5,79} | {AAAAAAAAA53663,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA89194,AA88409,AAAAAAAAAAAAAAA81326,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAA33598} + 37 | {53,11,81,39,3,78,58,64,74} | {AAAAAAAAAAAAAAAAAAA17075,AAAAAAA66161,AAAAAAAA23648,AAAAAAAAAAAAAA10611} + 38 | {59,5,4,95,28} | {AAAAAAAAAAA82945,A96617,47735,AAAAA12179,AAAAA64669,AAAAAA99807,AA74433,AAAAAAAAAAAAAAAAA59387} + 39 | {82,43,99,16,74} | {AAAAAAAAAAAAAAA67062,AAAAAAA57334,AAAAAAAAAAAAAA65909,A27153,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAA64777,AAAAAAAAAAAA81511,AAAAAAAAAAAAAA65909,AAAAAAAAAAAAAA28620} + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 41 | {19,26,63,12,93,73,27,94} | {AAAAAAA79710,AAAAAAAAAA55219,AAAA41702,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAAAAA63050,AAAAAAA99836,AAAAAAAAAAAAAA8666} + 42 | {15,76,82,75,8,91} | {AAAAAAAAAAA176,AAAAAA38063,45449,AAAAAA54032,AAAAAAA81898,AA6416,AAAAAAAAAAAAAAAAAAA62179,45449,AAAAA60038,AAAAAAAA81587} + 43 | {39,87,91,97,79,28} | {AAAAAAAAAAA74076,A96617,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAAAAA55796,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAA67946} + 44 | {40,58,68,29,54} | {AAAAAAA81898,AAAAAA66777,AAAAAA98232} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 46 | {53,24} | {AAAAAAAAAAA53908,AAAAAA54032,AAAAA17383,AAAA48949,AAAAAAAAAA18601,AAAAA64669,45449,AAAAAAAAAAA98051,AAAAAAAAAAAAAAAAAA71621} + 47 | {98,23,64,12,75,61} | {AAA59323,AAAAA95309,AAAAAAAAAAAAAAAA31334,AAAAAAAAA27249,AAAAA17383,AAAAAAAAAAAA37562,AAAAAA1059,A84822,55847,AAAAA70466} + 48 | {76,14} | {AAAAAAAAAAAAA59671,AAAAAAAAAAAAAAAAAAA91804,AAAAAA66777,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAA73084,AAAAAAA79710,AAAAAAAAAAAAAAA40402,AAAAAAAAAAAAAAAAAAA65037} + 49 | {56,5,54,37,49} | {AA21643,AAAAAAAAAAA92631,AAAAAAAA81587} + 50 | {20,12,37,64,93} | {AAAAAAAAAA5483,AAAAAAAAAAAAAAAAAAA1205,AA6416,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAAAA47955} + 51 | {47} | 
{AAAAAAAAAAAAAA96505,AAAAAAAAAAAAAAAAAA36842,AAAAA95309,AAAAAAAA81587,AA6416,AAAA91194,AAAAAA58494,AAAAAA1059,AAAAAAAA69452} + 52 | {89,0} | {AAAAAAAAAAAAAAAAAA47955,AAAAAAA48038,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAA73084,AAAAA70466,AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA46154,AA66862} + 53 | {38,17} | {AAAAAAAAAAA21658} + 54 | {70,47} | {AAAAAAAAAAAAAAAAAA54141,AAAAA40681,AAAAAAA48038,AAAAAAAAAAAAAAAA29150,AAAAA41597,AAAAAAAAAAAAAAAAAA59334,AA15322} + 55 | {47,79,47,64,72,25,71,24,93} | {AAAAAAAAAAAAAAAAAA55796,AAAAA62737} + 56 | {33,7,60,54,93,90,77,85,39} | {AAAAAAAAAAAAAAAAAA32918,AA42406} + 57 | {23,45,10,42,36,21,9,96} | {AAAAAAAAAAAAAAAAAAA70415} + 58 | {92} | {AAAAAAAAAAAAAAAA98414,AAAAAAAA23648,AAAAAAAAAAAAAAAAAA55796,AA25381,AAAAAAAAAAA6119} + 59 | {9,69,46,77} | {39557,AAAAAAA89932,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAAAAAA26540,AAA20874,AA6416,AAAAAAAAAAAAAAAAAA47955} + 60 | {62,2,59,38,89} | {AAAAAAA89932,AAAAAAAAAAAAAAAAAAA15356,AA99927,AA17009,AAAAAAAAAAAAAAA35875} + 61 | {72,2,44,95,54,54,13} | {AAAAAAAAAAAAAAAAAAA91804} + 62 | {83,72,29,73} | {AAAAAAAAAAAAA15097,AAAA8857,AAAAAAAAAAAA35809,AAAAAAAAAAAA52814,AAAAAAAAAAAAAAAAAAA38885,AAAAAAAAAAAAAAAAAA24183,AAAAAA43678,A96617} + 63 | {11,4,61,87} | {AAAAAAAAA27249,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAA13198,AAA20874,39557,51533,AAAAAAAAAAA53908,AAAAAAAAAAAAAA96505,AAAAAAAA78938} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 66 | {31,23,70,52,4,33,48,25} | {AAAAAAAAAAAAAAAAA69675,AAAAAAAA50094,AAAAAAAAAAA92631,AAAA35194,39557,AAAAAAA99836} + 67 | {31,94,7,10} | {AAAAAA38063,A96617,AAAA35194,AAAAAAAAAAAA67946} + 68 | {90,43,38} | {AA75092,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAA92631,AAAAAAAAA10012,AAAAAAAAAAAAA7929,AA21643} + 69 | {67,35,99,85,72,86,44} | {AAAAAAAAAAAAAAAAAAA1205,AAAAAAAA50094,AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAAAAAAA47955} + 70 | {56,70,83} | {AAAA41702,AAAAAAAAAAA82945,AA21643,AAAAAAAAAAA99000,A27153,AA25381,AAAAAAAAAAAAAA96505,AAAAAAA1242} + 71 | {74,26} | {AAAAAAAAAAA50956,AA74433,AAAAAAA21462,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAA70254,AAAAAAAAAA43419,39557} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 73 | {88,25,96,78,65,15,29,19} | {AAA54451,AAAAAAAAA27249,AAAAAAA9228,AAAAAAAAAAAAAAA67062,AAAAAAAAAAAAAAAAAAA70415,AAAAA17383,AAAAAAAAAAAAAAAA33598} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 75 | {12,96,83,24,71,89,55} | {AAAA48949,AAAAAAAA29716,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAA29150,AAA28075,AAAAAAAAAAAAAAAAA43052} + 76 | {92,55,10,7} | {AAAAAAAAAAAAAAA67062} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 78 | {55,89,44,84,34} | {AAAAAAAAAAA6119,AAAAAAAAAAAAAA8666,AA99927,AA42406,AAAAAAA81898,AAAAAAA9228,AAAAAAAAAAA92631,AA21643,AAAAAAAAAAAAAA28620} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 80 | {74,89,44,80,0} | {AAAA35194,AAAAAAAA79710,AAA20874,AAAAAAAAAAAAAAAAAAA70104,AAAAAAAAAAAAA73084,AAAAAAA57334,AAAAAAA9228,AAAAAAAAAAAAA62007} + 81 | {63,77,54,48,61,53,97} | 
{AAAAAAAAAAAAAAA81326,AAAAAAAAAA22292,AA25381,AAAAAAAAAAA74076,AAAAAAA81898,AAAAAAAAA72121} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 83 | {14,10} | {AAAAAAAAAA22292,AAAAAAAAAAAAA70254,AAAAAAAAAAA6119} + 84 | {11,83,35,13,96,94} | {AAAAA95309,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAAA24183} + 85 | {39,60} | {AAAAAAAAAAAAAAAA55798,AAAAAAAAAA22292,AAAAAAA66161,AAAAAAA21462,AAAAAAAAAAAAAAAAAA12591,55847,AAAAAA98232,AAAAAAAAAAA46154} + 86 | {33,81,72,74,45,36,82} | {AAAAAAAA81587,AAAAAAAAAAAAAA96505,45449,AAAA80176} + 87 | {57,27,50,12,97,68} | {AAAAAAAAAAAAAAAAA26540,AAAAAAAAA10012,AAAAAAAAAAAA35809,AAAAAAAAAAAAAAAA29150,AAAAAAAAAAA82945,AAAAAA66777,31228,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAA96505} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 90 | {88,75} | {AAAAA60038,AAAAAAAA23648,AAAAAAAAAAA99000,AAAA41702,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAA68526} + 91 | {78} | {AAAAAAAAAAAAA62007,AAA99043} + 92 | {85,63,49,45} | {AAAAAAA89932,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA21089} + 93 | {11} | {AAAAAAAAAAA176,AAAAAAAAAAAAAA8666,AAAAAAAAAAAAAAA453,AAAAAAAAAAAAA85723,A68938,AAAAAAAAAAAAA9821,AAAAAAA48038,AAAAAAAAAAAAAAAAA59387,AA99927,AAAAA17383} + 94 | {98,9,85,62,88,91,60,61,38,86} | {AAAAAAAA81587,AAAAA17383,AAAAAAAA81587} + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 99 | {37,86} | {AAAAAAAAAAAAAAAAAA32918,AAAAA70514,AAAAAAAAA10012,AAAAAAAAAAAAAAAAA59387,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA15356} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} + 101 | {} | {} + 102 | {NULL} | {NULL} +(102 rows) + +SELECT * FROM array_op_test WHERE t && '{}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_op_test WHERE t <@ '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +-- array casts +SELECT ARRAY[1,2,3]::text[]::int[]::float8[] AS "{1,2,3}"; + {1,2,3} +--------- + {1,2,3} +(1 row) + +SELECT pg_typeof(ARRAY[1,2,3]::text[]::int[]::float8[]) AS "double precision[]"; + double precision[] +-------------------- + double precision[] +(1 row) + +SELECT ARRAY[['a','bc'],['def','hijk']]::text[]::varchar[] AS "{{a,bc},{def,hijk}}"; + {{a,bc},{def,hijk}} +--------------------- + {{a,bc},{def,hijk}} +(1 row) + +SELECT pg_typeof(ARRAY[['a','bc'],['def','hijk']]::text[]::varchar[]) AS "character varying[]"; + character varying[] +--------------------- + character varying[] +(1 row) + +SELECT CAST(ARRAY[[[[[['a','bb','ccc']]]]]] as text[]) as "{{{{{{a,bb,ccc}}}}}}"; + {{{{{{a,bb,ccc}}}}}} +---------------------- + {{{{{{a,bb,ccc}}}}}} +(1 row) + +SELECT NULL::text[]::int[] AS "NULL"; + NULL +------ + +(1 row) + +-- scalar op any/all (array) +select 33 = any ('{1,2,3}'); + 
?column? +---------- + f +(1 row) + +select 33 = any ('{1,2,33}'); + ?column? +---------- + t +(1 row) + +select 33 = all ('{1,2,33}'); + ?column? +---------- + f +(1 row) + +select 33 >= all ('{1,2,33}'); + ?column? +---------- + t +(1 row) + +-- boundary cases +select null::int >= all ('{1,2,33}'); + ?column? +---------- + +(1 row) + +select null::int >= all ('{}'); + ?column? +---------- + t +(1 row) + +select null::int >= any ('{}'); + ?column? +---------- + f +(1 row) + +-- cross-datatype +select 33.4 = any (array[1,2,3]); + ?column? +---------- + f +(1 row) + +select 33.4 > all (array[1,2,3]); + ?column? +---------- + t +(1 row) + +-- errors +select 33 * any ('{1,2,3}'); +ERROR: op ANY/ALL (array) requires operator to yield boolean +LINE 1: select 33 * any ('{1,2,3}'); + ^ +select 33 * any (44); +ERROR: op ANY/ALL (array) requires array on right side +LINE 1: select 33 * any (44); + ^ +-- nulls +select 33 = any (null::int[]); + ?column? +---------- + +(1 row) + +select null::int = any ('{1,2,3}'); + ?column? +---------- + +(1 row) + +select 33 = any ('{1,null,3}'); + ?column? +---------- + +(1 row) + +select 33 = any ('{1,null,33}'); + ?column? +---------- + t +(1 row) + +select 33 = all (null::int[]); + ?column? +---------- + +(1 row) + +select null::int = all ('{1,2,3}'); + ?column? +---------- + +(1 row) + +select 33 = all ('{1,null,3}'); + ?column? +---------- + f +(1 row) + +select 33 = all ('{33,null,33}'); + ?column? +---------- + +(1 row) + +-- nulls later in the bitmap +SELECT -1 != ALL(ARRAY(SELECT NULLIF(g.i, 900) FROM generate_series(1,1000) g(i))); + ?column? +---------- + +(1 row) + +-- test indexes on arrays +create temp table arr_tbl (f1 int[] unique); +insert into arr_tbl values ('{1,2,3}'); +insert into arr_tbl values ('{1,2}'); +-- failure expected: +insert into arr_tbl values ('{1,2,3}'); +ERROR: duplicate key value violates unique constraint "arr_tbl_f1_key" +DETAIL: Key (f1)=({1,2,3}) already exists. +insert into arr_tbl values ('{2,3,4}'); +insert into arr_tbl values ('{1,5,3}'); +insert into arr_tbl values ('{1,2,10}'); +set enable_seqscan to off; +set enable_bitmapscan to off; +select * from arr_tbl where f1 > '{1,2,3}' and f1 <= '{1,5,3}'; + f1 +---------- + {1,2,10} + {1,5,3} +(2 rows) + +select * from arr_tbl where f1 >= '{1,2,3}' and f1 < '{1,5,3}'; + f1 +---------- + {1,2,3} + {1,2,10} +(2 rows) + +-- test ON CONFLICT DO UPDATE with arrays +create temp table arr_pk_tbl (pk int4 primary key, f1 int[]); +insert into arr_pk_tbl values (1, '{1,2,3}'); +insert into arr_pk_tbl values (1, '{3,4,5}') on conflict (pk) + do update set f1[1] = excluded.f1[1], f1[3] = excluded.f1[3] + returning pk, f1; + pk | f1 +----+--------- + 1 | {3,2,5} +(1 row) + +insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk) + do update set f1[1] = excluded.f1[1], + f1[2] = excluded.f1[2], + f1[3] = excluded.f1[3] + returning pk, f1; + pk | f1 +----+------------ + 1 | {6,7,NULL} +(1 row) + +-- note: if above selects don't produce the expected tuple order, +-- then you didn't get an indexscan plan, and something is busted. +reset enable_seqscan; +reset enable_bitmapscan; +-- test subscript overflow detection +-- The normal error message includes a platform-dependent limit, +-- so suppress it to avoid needing multiple expected-files. 
+\set VERBOSITY sqlstate +insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}'); +update arr_pk_tbl set f1[2147483647] = 42 where pk = 10; +ERROR: 54000 +update arr_pk_tbl set f1[2147483646:2147483647] = array[4,2] where pk = 10; +ERROR: 54000 +-- also exercise the expanded-array case +do $$ declare a int[]; +begin + a := '[-2147483648:-2147483647]={1,2}'::int[]; + a[2147483647] := 42; +end $$; +ERROR: 54000 +\set VERBOSITY default +-- test [not] (like|ilike) (any|all) (...) +select 'foo' like any (array['%a', '%o']); -- t + ?column? +---------- + t +(1 row) + +select 'foo' like any (array['%a', '%b']); -- f + ?column? +---------- + f +(1 row) + +select 'foo' like all (array['f%', '%o']); -- t + ?column? +---------- + t +(1 row) + +select 'foo' like all (array['f%', '%b']); -- f + ?column? +---------- + f +(1 row) + +select 'foo' not like any (array['%a', '%b']); -- t + ?column? +---------- + t +(1 row) + +select 'foo' not like all (array['%a', '%o']); -- f + ?column? +---------- + f +(1 row) + +select 'foo' ilike any (array['%A', '%O']); -- t + ?column? +---------- + t +(1 row) + +select 'foo' ilike all (array['F%', '%O']); -- t + ?column? +---------- + t +(1 row) + +-- +-- General array parser tests +-- +-- none of the following should be accepted +select '{{1,{2}},{2,3}}'::text[]; +ERROR: malformed array literal: "{{1,{2}},{2,3}}" +LINE 1: select '{{1,{2}},{2,3}}'::text[]; + ^ +DETAIL: Unexpected "{" character. +select '{{},{}}'::text[]; +ERROR: malformed array literal: "{{},{}}" +LINE 1: select '{{},{}}'::text[]; + ^ +DETAIL: Unexpected "}" character. +select E'{{1,2},\\{2,3}}'::text[]; +ERROR: malformed array literal: "{{1,2},\{2,3}}" +LINE 1: select E'{{1,2},\\{2,3}}'::text[]; + ^ +DETAIL: Unexpected "\" character. +select '{{"1 2" x},{3}}'::text[]; +ERROR: malformed array literal: "{{"1 2" x},{3}}" +LINE 1: select '{{"1 2" x},{3}}'::text[]; + ^ +DETAIL: Unexpected array element. +select '{}}'::text[]; +ERROR: malformed array literal: "{}}" +LINE 1: select '{}}'::text[]; + ^ +DETAIL: Junk after closing right brace. +select '{ }}'::text[]; +ERROR: malformed array literal: "{ }}" +LINE 1: select '{ }}'::text[]; + ^ +DETAIL: Junk after closing right brace. +select array[]; +ERROR: cannot determine type of empty array +LINE 1: select array[]; + ^ +HINT: Explicitly cast to the desired type, for example ARRAY[]::integer[]. 
+-- none of the above should be accepted +-- all of the following should be accepted +select '{}'::text[]; + text +------ + {} +(1 row) + +select '{{{1,2,3,4},{2,3,4,5}},{{3,4,5,6},{4,5,6,7}}}'::text[]; + text +----------------------------------------------- + {{{1,2,3,4},{2,3,4,5}},{{3,4,5,6},{4,5,6,7}}} +(1 row) + +select '{0 second ,0 second}'::interval[]; + interval +--------------- + {"@ 0","@ 0"} +(1 row) + +select '{ { "," } , { 3 } }'::text[]; + text +------------- + {{","},{3}} +(1 row) + +select ' { { " 0 second " , 0 second } }'::text[]; + text +------------------------------- + {{" 0 second ","0 second"}} +(1 row) + +select '{ + 0 second, + @ 1 hour @ 42 minutes @ 20 seconds + }'::interval[]; + interval +------------------------------------ + {"@ 0","@ 1 hour 42 mins 20 secs"} +(1 row) + +select array[]::text[]; + array +------- + {} +(1 row) + +select '[0:1]={1.1,2.2}'::float8[]; + float8 +----------------- + [0:1]={1.1,2.2} +(1 row) + +-- all of the above should be accepted +-- tests for array aggregates +CREATE TEMP TABLE arraggtest ( f1 INT[], f2 TEXT[][], f3 FLOAT[]); +INSERT INTO arraggtest (f1, f2, f3) VALUES +('{1,2,3,4}','{{grey,red},{blue,blue}}','{1.6, 0.0}'); +INSERT INTO arraggtest (f1, f2, f3) VALUES +('{1,2,3}','{{grey,red},{grey,blue}}','{1.6}'); +SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest; + max | min | max | min | max | min +-----------+---------+--------------------------+--------------------------+---------+------- + {1,2,3,4} | {1,2,3} | {{grey,red},{grey,blue}} | {{grey,red},{blue,blue}} | {1.6,0} | {1.6} +(1 row) + +INSERT INTO arraggtest (f1, f2, f3) VALUES +('{3,3,2,4,5,6}','{{white,yellow},{pink,orange}}','{2.1,3.3,1.8,1.7,1.6}'); +SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest; + max | min | max | min | max | min +---------------+---------+--------------------------------+--------------------------+-----------------------+------- + {3,3,2,4,5,6} | {1,2,3} | {{white,yellow},{pink,orange}} | {{grey,red},{blue,blue}} | {2.1,3.3,1.8,1.7,1.6} | {1.6} +(1 row) + +INSERT INTO arraggtest (f1, f2, f3) VALUES +('{2}','{{black,red},{green,orange}}','{1.6,2.2,2.6,0.4}'); +SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest; + max | min | max | min | max | min +---------------+---------+--------------------------------+------------------------------+-----------------------+------- + {3,3,2,4,5,6} | {1,2,3} | {{white,yellow},{pink,orange}} | {{black,red},{green,orange}} | {2.1,3.3,1.8,1.7,1.6} | {1.6} +(1 row) + +INSERT INTO arraggtest (f1, f2, f3) VALUES +('{4,2,6,7,8,1}','{{red},{black},{purple},{blue},{blue}}',NULL); +SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest; + max | min | max | min | max | min +---------------+---------+--------------------------------+------------------------------+-----------------------+------- + {4,2,6,7,8,1} | {1,2,3} | {{white,yellow},{pink,orange}} | {{black,red},{green,orange}} | {2.1,3.3,1.8,1.7,1.6} | {1.6} +(1 row) + +INSERT INTO arraggtest (f1, f2, f3) VALUES +('{}','{{pink,white,blue,red,grey,orange}}','{2.1,1.87,1.4,2.2}'); +SELECT max(f1), min(f1), max(f2), min(f2), max(f3), min(f3) FROM arraggtest; + max | min | max | min | max | min +---------------+-----+--------------------------------+------------------------------+-----------------------+------- + {4,2,6,7,8,1} | {} | {{white,yellow},{pink,orange}} | {{black,red},{green,orange}} | {2.1,3.3,1.8,1.7,1.6} | {1.6} +(1 row) + +-- A few simple tests for arrays 
of composite types +create type comptype as (f1 int, f2 text); +create table comptable (c1 comptype, c2 comptype[]); +-- XXX would like to not have to specify row() construct types here ... +insert into comptable + values (row(1,'foo'), array[row(2,'bar')::comptype, row(3,'baz')::comptype]); +-- check that implicitly named array type _comptype isn't a problem +create type _comptype as enum('fooey'); +select * from comptable; + c1 | c2 +---------+----------------------- + (1,foo) | {"(2,bar)","(3,baz)"} +(1 row) + +select c2[2].f2 from comptable; + f2 +----- + baz +(1 row) + +drop type _comptype; +drop table comptable; +drop type comptype; +create or replace function unnest1(anyarray) +returns setof anyelement as $$ +select $1[s] from generate_subscripts($1,1) g(s); +$$ language sql immutable; +create or replace function unnest2(anyarray) +returns setof anyelement as $$ +select $1[s1][s2] from generate_subscripts($1,1) g1(s1), + generate_subscripts($1,2) g2(s2); +$$ language sql immutable; +select * from unnest1(array[1,2,3]); + unnest1 +--------- + 1 + 2 + 3 +(3 rows) + +select * from unnest2(array[[1,2,3],[4,5,6]]); + unnest2 +--------- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +drop function unnest1(anyarray); +drop function unnest2(anyarray); +select array_fill(null::integer, array[3,3],array[2,2]); + array_fill +----------------------------------------------------------------- + [2:4][2:4]={{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}} +(1 row) + +select array_fill(null::integer, array[3,3]); + array_fill +------------------------------------------------------ + {{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}} +(1 row) + +select array_fill(null::text, array[3,3],array[2,2]); + array_fill +----------------------------------------------------------------- + [2:4][2:4]={{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}} +(1 row) + +select array_fill(null::text, array[3,3]); + array_fill +------------------------------------------------------ + {{NULL,NULL,NULL},{NULL,NULL,NULL},{NULL,NULL,NULL}} +(1 row) + +select array_fill(7, array[3,3],array[2,2]); + array_fill +-------------------------------------- + [2:4][2:4]={{7,7,7},{7,7,7},{7,7,7}} +(1 row) + +select array_fill(7, array[3,3]); + array_fill +--------------------------- + {{7,7,7},{7,7,7},{7,7,7}} +(1 row) + +select array_fill('juhu'::text, array[3,3],array[2,2]); + array_fill +----------------------------------------------------------------- + [2:4][2:4]={{juhu,juhu,juhu},{juhu,juhu,juhu},{juhu,juhu,juhu}} +(1 row) + +select array_fill('juhu'::text, array[3,3]); + array_fill +------------------------------------------------------ + {{juhu,juhu,juhu},{juhu,juhu,juhu},{juhu,juhu,juhu}} +(1 row) + +select a, a = '{}' as is_eq, array_dims(a) + from (select array_fill(42, array[0]) as a) ss; + a | is_eq | array_dims +----+-------+------------ + {} | t | +(1 row) + +select a, a = '{}' as is_eq, array_dims(a) + from (select array_fill(42, '{}') as a) ss; + a | is_eq | array_dims +----+-------+------------ + {} | t | +(1 row) + +select a, a = '{}' as is_eq, array_dims(a) + from (select array_fill(42, '{}', '{}') as a) ss; + a | is_eq | array_dims +----+-------+------------ + {} | t | +(1 row) + +-- raise exception +select array_fill(1, null, array[2,2]); +ERROR: dimension array or low bound array cannot be null +select array_fill(1, array[2,2], null); +ERROR: dimension array or low bound array cannot be null +select array_fill(1, array[2,2], '{}'); +ERROR: wrong number of array subscripts +DETAIL: Low bound array has different size 
than dimensions array. +select array_fill(1, array[3,3], array[1,1,1]); +ERROR: wrong number of array subscripts +DETAIL: Low bound array has different size than dimensions array. +select array_fill(1, array[1,2,null]); +ERROR: dimension values cannot be null +select array_fill(1, array[[1,2],[3,4]]); +ERROR: wrong number of array subscripts +DETAIL: Dimension array must be one dimensional. +select string_to_array('1|2|3', '|'); + string_to_array +----------------- + {1,2,3} +(1 row) + +select string_to_array('1|2|3|', '|'); + string_to_array +----------------- + {1,2,3,""} +(1 row) + +select string_to_array('1||2|3||', '||'); + string_to_array +----------------- + {1,2|3,""} +(1 row) + +select string_to_array('1|2|3', ''); + string_to_array +----------------- + {1|2|3} +(1 row) + +select string_to_array('', '|'); + string_to_array +----------------- + {} +(1 row) + +select string_to_array('1|2|3', NULL); + string_to_array +----------------- + {1,|,2,|,3} +(1 row) + +select string_to_array(NULL, '|') IS NULL; + ?column? +---------- + t +(1 row) + +select string_to_array('abc', ''); + string_to_array +----------------- + {abc} +(1 row) + +select string_to_array('abc', '', 'abc'); + string_to_array +----------------- + {NULL} +(1 row) + +select string_to_array('abc', ','); + string_to_array +----------------- + {abc} +(1 row) + +select string_to_array('abc', ',', 'abc'); + string_to_array +----------------- + {NULL} +(1 row) + +select string_to_array('1,2,3,4,,6', ','); + string_to_array +----------------- + {1,2,3,4,"",6} +(1 row) + +select string_to_array('1,2,3,4,,6', ',', ''); + string_to_array +------------------ + {1,2,3,4,NULL,6} +(1 row) + +select string_to_array('1,2,3,4,*,6', ',', '*'); + string_to_array +------------------ + {1,2,3,4,NULL,6} +(1 row) + +select v, v is null as "is null" from string_to_table('1|2|3', '|') g(v); + v | is null +---+--------- + 1 | f + 2 | f + 3 | f +(3 rows) + +select v, v is null as "is null" from string_to_table('1|2|3|', '|') g(v); + v | is null +---+--------- + 1 | f + 2 | f + 3 | f + | f +(4 rows) + +select v, v is null as "is null" from string_to_table('1||2|3||', '||') g(v); + v | is null +-----+--------- + 1 | f + 2|3 | f + | f +(3 rows) + +select v, v is null as "is null" from string_to_table('1|2|3', '') g(v); + v | is null +-------+--------- + 1|2|3 | f +(1 row) + +select v, v is null as "is null" from string_to_table('', '|') g(v); + v | is null +---+--------- +(0 rows) + +select v, v is null as "is null" from string_to_table('1|2|3', NULL) g(v); + v | is null +---+--------- + 1 | f + | | f + 2 | f + | | f + 3 | f +(5 rows) + +select v, v is null as "is null" from string_to_table(NULL, '|') g(v); + v | is null +---+--------- +(0 rows) + +select v, v is null as "is null" from string_to_table('abc', '') g(v); + v | is null +-----+--------- + abc | f +(1 row) + +select v, v is null as "is null" from string_to_table('abc', '', 'abc') g(v); + v | is null +---+--------- + | t +(1 row) + +select v, v is null as "is null" from string_to_table('abc', ',') g(v); + v | is null +-----+--------- + abc | f +(1 row) + +select v, v is null as "is null" from string_to_table('abc', ',', 'abc') g(v); + v | is null +---+--------- + | t +(1 row) + +select v, v is null as "is null" from string_to_table('1,2,3,4,,6', ',') g(v); + v | is null +---+--------- + 1 | f + 2 | f + 3 | f + 4 | f + | f + 6 | f +(6 rows) + +select v, v is null as "is null" from string_to_table('1,2,3,4,,6', ',', '') g(v); + v | is null +---+--------- + 1 | f + 2 | f + 3 | f + 4 | f + | t 
+ 6 | f +(6 rows) + +select v, v is null as "is null" from string_to_table('1,2,3,4,*,6', ',', '*') g(v); + v | is null +---+--------- + 1 | f + 2 | f + 3 | f + 4 | f + | t + 6 | f +(6 rows) + +select array_to_string(NULL::int4[], ',') IS NULL; + ?column? +---------- + t +(1 row) + +select array_to_string('{}'::int4[], ','); + array_to_string +----------------- + +(1 row) + +select array_to_string(array[1,2,3,4,NULL,6], ','); + array_to_string +----------------- + 1,2,3,4,6 +(1 row) + +select array_to_string(array[1,2,3,4,NULL,6], ',', '*'); + array_to_string +----------------- + 1,2,3,4,*,6 +(1 row) + +select array_to_string(array[1,2,3,4,NULL,6], NULL); + array_to_string +----------------- + +(1 row) + +select array_to_string(array[1,2,3,4,NULL,6], ',', NULL); + array_to_string +----------------- + 1,2,3,4,6 +(1 row) + +select array_to_string(string_to_array('1|2|3', '|'), '|'); + array_to_string +----------------- + 1|2|3 +(1 row) + +select array_length(array[1,2,3], 1); + array_length +-------------- + 3 +(1 row) + +select array_length(array[[1,2,3], [4,5,6]], 0); + array_length +-------------- + +(1 row) + +select array_length(array[[1,2,3], [4,5,6]], 1); + array_length +-------------- + 2 +(1 row) + +select array_length(array[[1,2,3], [4,5,6]], 2); + array_length +-------------- + 3 +(1 row) + +select array_length(array[[1,2,3], [4,5,6]], 3); + array_length +-------------- + +(1 row) + +select cardinality(NULL::int[]); + cardinality +------------- + +(1 row) + +select cardinality('{}'::int[]); + cardinality +------------- + 0 +(1 row) + +select cardinality(array[1,2,3]); + cardinality +------------- + 3 +(1 row) + +select cardinality('[2:4]={5,6,7}'::int[]); + cardinality +------------- + 3 +(1 row) + +select cardinality('{{1,2}}'::int[]); + cardinality +------------- + 2 +(1 row) + +select cardinality('{{1,2},{3,4},{5,6}}'::int[]); + cardinality +------------- + 6 +(1 row) + +select cardinality('{{{1,9},{5,6}},{{2,3},{3,4}}}'::int[]); + cardinality +------------- + 8 +(1 row) + +-- array_agg(anynonarray) +select array_agg(unique1) from (select unique1 from tenk1 where unique1 < 15 order by unique1) ss; + array_agg +-------------------------------------- + {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} +(1 row) + +select array_agg(ten) from (select ten from tenk1 where unique1 < 15 order by unique1) ss; + array_agg +--------------------------------- + {0,1,2,3,4,5,6,7,8,9,0,1,2,3,4} +(1 row) + +select array_agg(nullif(ten, 4)) from (select ten from tenk1 where unique1 < 15 order by unique1) ss; + array_agg +--------------------------------------- + {0,1,2,3,NULL,5,6,7,8,9,0,1,2,3,NULL} +(1 row) + +select array_agg(unique1) from tenk1 where unique1 < -15; + array_agg +----------- + +(1 row) + +-- array_agg(anyarray) +select array_agg(ar) + from (values ('{1,2}'::int[]), ('{3,4}'::int[])) v(ar); + array_agg +--------------- + {{1,2},{3,4}} +(1 row) + +select array_agg(distinct ar order by ar desc) + from (select array[i / 2] from generate_series(1,10) a(i)) b(ar); + array_agg +--------------------------- + {{5},{4},{3},{2},{1},{0}} +(1 row) + +select array_agg(ar) + from (select array_agg(array[i, i+1, i-1]) + from generate_series(1,2) a(i)) b(ar); + array_agg +--------------------- + {{{1,2,0},{2,3,1}}} +(1 row) + +select array_agg(array[i+1.2, i+1.3, i+1.4]) from generate_series(1,3) g(i); + array_agg +--------------------------------------------- + {{2.2,2.3,2.4},{3.2,3.3,3.4},{4.2,4.3,4.4}} +(1 row) + +select array_agg(array['Hello', i::text]) from generate_series(9,11) g(i); + array_agg 
+----------------------------------- + {{Hello,9},{Hello,10},{Hello,11}} +(1 row) + +select array_agg(array[i, nullif(i, 3), i+1]) from generate_series(1,4) g(i); + array_agg +-------------------------------------- + {{1,1,2},{2,2,3},{3,NULL,4},{4,4,5}} +(1 row) + +-- errors +select array_agg('{}'::int[]) from generate_series(1,2); +ERROR: cannot accumulate empty arrays +select array_agg(null::int[]) from generate_series(1,2); +ERROR: cannot accumulate null arrays +select array_agg(ar) + from (values ('{1,2}'::int[]), ('{3}'::int[])) v(ar); +ERROR: cannot accumulate arrays of different dimensionality +select unnest(array[1,2,3]); + unnest +-------- + 1 + 2 + 3 +(3 rows) + +select * from unnest(array[1,2,3]); + unnest +-------- + 1 + 2 + 3 +(3 rows) + +select unnest(array[1,2,3,4.5]::float8[]); + unnest +-------- + 1 + 2 + 3 + 4.5 +(4 rows) + +select unnest(array[1,2,3,4.5]::numeric[]); + unnest +-------- + 1 + 2 + 3 + 4.5 +(4 rows) + +select unnest(array[1,2,3,null,4,null,null,5,6]); + unnest +-------- + 1 + 2 + 3 + + 4 + + + 5 + 6 +(9 rows) + +select unnest(array[1,2,3,null,4,null,null,5,6]::text[]); + unnest +-------- + 1 + 2 + 3 + + 4 + + + 5 + 6 +(9 rows) + +select abs(unnest(array[1,2,null,-3])); + abs +----- + 1 + 2 + + 3 +(4 rows) + +select array_remove(array[1,2,2,3], 2); + array_remove +-------------- + {1,3} +(1 row) + +select array_remove(array[1,2,2,3], 5); + array_remove +-------------- + {1,2,2,3} +(1 row) + +select array_remove(array[1,NULL,NULL,3], NULL); + array_remove +-------------- + {1,3} +(1 row) + +select array_remove(array['A','CC','D','C','RR'], 'RR'); + array_remove +-------------- + {A,CC,D,C} +(1 row) + +select array_remove(array[1.0, 2.1, 3.3], 1); + array_remove +-------------- + {2.1,3.3} +(1 row) + +select array_remove('{{1,2,2},{1,4,3}}', 2); -- not allowed +ERROR: removing elements from multidimensional arrays is not supported +select array_remove(array['X','X','X'], 'X') = '{}'; + ?column? +---------- + t +(1 row) + +select array_replace(array[1,2,5,4],5,3); + array_replace +--------------- + {1,2,3,4} +(1 row) + +select array_replace(array[1,2,5,4],5,NULL); + array_replace +--------------- + {1,2,NULL,4} +(1 row) + +select array_replace(array[1,2,NULL,4,NULL],NULL,5); + array_replace +--------------- + {1,2,5,4,5} +(1 row) + +select array_replace(array['A','B','DD','B'],'B','CC'); + array_replace +--------------- + {A,CC,DD,CC} +(1 row) + +select array_replace(array[1,NULL,3],NULL,NULL); + array_replace +--------------- + {1,NULL,3} +(1 row) + +select array_replace(array['AB',NULL,'CDE'],NULL,'12'); + array_replace +--------------- + {AB,12,CDE} +(1 row) + +-- array(select array-value ...) 
+select array(select array[i,i/2] from generate_series(1,5) i); + array +--------------------------------- + {{1,0},{2,1},{3,1},{4,2},{5,2}} +(1 row) + +select array(select array['Hello', i::text] from generate_series(9,11) i); + array +----------------------------------- + {{Hello,9},{Hello,10},{Hello,11}} +(1 row) + +-- Insert/update on a column that is array of composite +create temp table t1 (f1 int8_tbl[]); +insert into t1 (f1[5].q1) values(42); +select * from t1; + f1 +----------------- + [5:5]={"(42,)"} +(1 row) + +update t1 set f1[5].q2 = 43; +select * from t1; + f1 +------------------- + [5:5]={"(42,43)"} +(1 row) + +-- Check that arrays of composites are safely detoasted when needed +create temp table src (f1 text); +insert into src + select string_agg(random()::text,'') from generate_series(1,10000); +create type textandtext as (c1 text, c2 text); +create temp table dest (f1 textandtext[]); +insert into dest select array[row(f1,f1)::textandtext] from src; +select length(fipshash((f1[1]).c2)) from dest; + length +-------- + 32 +(1 row) + +delete from src; +select length(fipshash((f1[1]).c2)) from dest; + length +-------- + 32 +(1 row) + +truncate table src; +drop table src; +select length(fipshash((f1[1]).c2)) from dest; + length +-------- + 32 +(1 row) + +drop table dest; +drop type textandtext; +-- Tests for polymorphic-array form of width_bucket() +-- this exercises the varwidth and float8 code paths +SELECT + op, + width_bucket(op::numeric, ARRAY[1, 3, 5, 10.0]::numeric[]) AS wb_n1, + width_bucket(op::numeric, ARRAY[0, 5.5, 9.99]::numeric[]) AS wb_n2, + width_bucket(op::numeric, ARRAY[-6, -5, 2.0]::numeric[]) AS wb_n3, + width_bucket(op::float8, ARRAY[1, 3, 5, 10.0]::float8[]) AS wb_f1, + width_bucket(op::float8, ARRAY[0, 5.5, 9.99]::float8[]) AS wb_f2, + width_bucket(op::float8, ARRAY[-6, -5, 2.0]::float8[]) AS wb_f3 +FROM (VALUES + (-5.2), + (-0.0000000001), + (0.000000000001), + (1), + (1.99999999999999), + (2), + (2.00000000000001), + (3), + (4), + (4.5), + (5), + (5.5), + (6), + (7), + (8), + (9), + (9.99999999999999), + (10), + (10.0000000000001) +) v(op); + op | wb_n1 | wb_n2 | wb_n3 | wb_f1 | wb_f2 | wb_f3 +------------------+-------+-------+-------+-------+-------+------- + -5.2 | 0 | 0 | 1 | 0 | 0 | 1 + -0.0000000001 | 0 | 0 | 2 | 0 | 0 | 2 + 0.000000000001 | 0 | 1 | 2 | 0 | 1 | 2 + 1 | 1 | 1 | 2 | 1 | 1 | 2 + 1.99999999999999 | 1 | 1 | 2 | 1 | 1 | 2 + 2 | 1 | 1 | 3 | 1 | 1 | 3 + 2.00000000000001 | 1 | 1 | 3 | 1 | 1 | 3 + 3 | 2 | 1 | 3 | 2 | 1 | 3 + 4 | 2 | 1 | 3 | 2 | 1 | 3 + 4.5 | 2 | 1 | 3 | 2 | 1 | 3 + 5 | 3 | 1 | 3 | 3 | 1 | 3 + 5.5 | 3 | 2 | 3 | 3 | 2 | 3 + 6 | 3 | 2 | 3 | 3 | 2 | 3 + 7 | 3 | 2 | 3 | 3 | 2 | 3 + 8 | 3 | 2 | 3 | 3 | 2 | 3 + 9 | 3 | 2 | 3 | 3 | 2 | 3 + 9.99999999999999 | 3 | 3 | 3 | 3 | 3 | 3 + 10 | 4 | 3 | 3 | 4 | 3 | 3 + 10.0000000000001 | 4 | 3 | 3 | 4 | 3 | 3 +(19 rows) + +-- ensure float8 path handles NaN properly +SELECT + op, + width_bucket(op, ARRAY[1, 3, 9, 'NaN', 'NaN']::float8[]) AS wb +FROM (VALUES + (-5.2::float8), + (4::float8), + (77::float8), + ('NaN'::float8) +) v(op); + op | wb +------+---- + -5.2 | 0 + 4 | 2 + 77 | 3 + NaN | 5 +(4 rows) + +-- these exercise the generic fixed-width code path +SELECT + op, + width_bucket(op, ARRAY[1, 3, 5, 10]) AS wb_1 +FROM generate_series(0,11) as op; + op | wb_1 +----+------ + 0 | 0 + 1 | 1 + 2 | 1 + 3 | 2 + 4 | 2 + 5 | 3 + 6 | 3 + 7 | 3 + 8 | 3 + 9 | 3 + 10 | 4 + 11 | 4 +(12 rows) + +SELECT width_bucket(now(), + array['yesterday', 'today', 'tomorrow']::timestamptz[]); + width_bucket 
+-------------- + 2 +(1 row) + +-- corner cases +SELECT width_bucket(5, ARRAY[3]); + width_bucket +-------------- + 1 +(1 row) + +SELECT width_bucket(5, '{}'); + width_bucket +-------------- + 0 +(1 row) + +-- error cases +SELECT width_bucket('5'::text, ARRAY[3, 4]::integer[]); +ERROR: function width_bucket(text, integer[]) does not exist +LINE 1: SELECT width_bucket('5'::text, ARRAY[3, 4]::integer[]); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT width_bucket(5, ARRAY[3, 4, NULL]); +ERROR: thresholds array must not contain NULLs +SELECT width_bucket(5, ARRAY[ARRAY[1, 2], ARRAY[3, 4]]); +ERROR: thresholds must be one-dimensional array +-- trim_array +SELECT arr, trim_array(arr, 2) +FROM +(VALUES ('{1,2,3,4,5,6}'::bigint[]), + ('{1,2}'), + ('[10:16]={1,2,3,4,5,6,7}'), + ('[-15:-10]={1,2,3,4,5,6}'), + ('{{1,10},{2,20},{3,30},{4,40}}')) v(arr); + arr | trim_array +-------------------------------+----------------- + {1,2,3,4,5,6} | {1,2,3,4} + {1,2} | {} + [10:16]={1,2,3,4,5,6,7} | {1,2,3,4,5} + [-15:-10]={1,2,3,4,5,6} | {1,2,3,4} + {{1,10},{2,20},{3,30},{4,40}} | {{1,10},{2,20}} +(5 rows) + +SELECT trim_array(ARRAY[1, 2, 3], -1); -- fail +ERROR: number of elements to trim must be between 0 and 3 +SELECT trim_array(ARRAY[1, 2, 3], 10); -- fail +ERROR: number of elements to trim must be between 0 and 3 +SELECT trim_array(ARRAY[]::int[], 1); -- fail +ERROR: number of elements to trim must be between 0 and 0 +-- array_shuffle +SELECT array_shuffle('{1,2,3,4,5,6}'::int[]) <@ '{1,2,3,4,5,6}'; + ?column? +---------- + t +(1 row) + +SELECT array_shuffle('{1,2,3,4,5,6}'::int[]) @> '{1,2,3,4,5,6}'; + ?column? +---------- + t +(1 row) + +SELECT array_dims(array_shuffle('[-1:2][2:3]={{1,2},{3,NULL},{5,6},{7,8}}'::int[])); + array_dims +------------- + [-1:2][2:3] +(1 row) + +SELECT array_dims(array_shuffle('{{{1,2},{3,NULL}},{{5,6},{7,8}},{{9,10},{11,12}}}'::int[])); + array_dims +----------------- + [1:3][1:2][1:2] +(1 row) + +-- array_sample +SELECT array_sample('{1,2,3,4,5,6}'::int[], 3) <@ '{1,2,3,4,5,6}'; + ?column? +---------- + t +(1 row) + +SELECT array_length(array_sample('{1,2,3,4,5,6}'::int[], 3), 1); + array_length +-------------- + 3 +(1 row) + +SELECT array_dims(array_sample('[-1:2][2:3]={{1,2},{3,NULL},{5,6},{7,8}}'::int[], 3)); + array_dims +------------ + [1:3][2:3] +(1 row) + +SELECT array_dims(array_sample('{{{1,2},{3,NULL}},{{5,6},{7,8}},{{9,10},{11,12}}}'::int[], 2)); + array_dims +----------------- + [1:2][1:2][1:2] +(1 row) + +SELECT array_sample('{1,2,3,4,5,6}'::int[], -1); -- fail +ERROR: sample size must be between 0 and 6 +SELECT array_sample('{1,2,3,4,5,6}'::int[], 7); --fail +ERROR: sample size must be between 0 and 6 diff --git a/src/test/regress/expected/async.out b/src/test/regress/expected/async.out new file mode 100644 index 0000000..19cbe38 --- /dev/null +++ b/src/test/regress/expected/async.out @@ -0,0 +1,42 @@ +-- +-- ASYNC +-- +--Should work. Send a valid message via a valid channel name +SELECT pg_notify('notify_async1','sample message1'); + pg_notify +----------- + +(1 row) + +SELECT pg_notify('notify_async1',''); + pg_notify +----------- + +(1 row) + +SELECT pg_notify('notify_async1',NULL); + pg_notify +----------- + +(1 row) + +-- Should fail. 
Send a valid message via an invalid channel name +SELECT pg_notify('','sample message1'); +ERROR: channel name cannot be empty +SELECT pg_notify(NULL,'sample message1'); +ERROR: channel name cannot be empty +SELECT pg_notify('notify_async_channel_name_too_long______________________________','sample_message1'); +ERROR: channel name too long +--Should work. Valid NOTIFY/LISTEN/UNLISTEN commands +NOTIFY notify_async2; +LISTEN notify_async2; +UNLISTEN notify_async2; +UNLISTEN *; +-- Should return zero while there are no pending notifications. +-- src/test/isolation/specs/async-notify.spec tests for actual usage. +SELECT pg_notification_queue_usage(); + pg_notification_queue_usage +----------------------------- + 0 +(1 row) + diff --git a/src/test/regress/expected/bit.out b/src/test/regress/expected/bit.out new file mode 100644 index 0000000..98c2655 --- /dev/null +++ b/src/test/regress/expected/bit.out @@ -0,0 +1,809 @@ +-- +-- BIT types +-- +-- +-- Build tables for testing +-- +CREATE TABLE BIT_TABLE(b BIT(11)); +INSERT INTO BIT_TABLE VALUES (B'10'); -- too short +ERROR: bit string length 2 does not match type bit(11) +INSERT INTO BIT_TABLE VALUES (B'00000000000'); +INSERT INTO BIT_TABLE VALUES (B'11011000000'); +INSERT INTO BIT_TABLE VALUES (B'01010101010'); +INSERT INTO BIT_TABLE VALUES (B'101011111010'); -- too long +ERROR: bit string length 12 does not match type bit(11) +--INSERT INTO BIT_TABLE VALUES ('X554'); +--INSERT INTO BIT_TABLE VALUES ('X555'); +SELECT * FROM BIT_TABLE; + b +------------- + 00000000000 + 11011000000 + 01010101010 +(3 rows) + +CREATE TABLE VARBIT_TABLE(v BIT VARYING(11)); +INSERT INTO VARBIT_TABLE VALUES (B''); +INSERT INTO VARBIT_TABLE VALUES (B'0'); +INSERT INTO VARBIT_TABLE VALUES (B'010101'); +INSERT INTO VARBIT_TABLE VALUES (B'01010101010'); +INSERT INTO VARBIT_TABLE VALUES (B'101011111010'); -- too long +ERROR: bit string too long for type bit varying(11) +--INSERT INTO VARBIT_TABLE VALUES ('X554'); +--INSERT INTO VARBIT_TABLE VALUES ('X555'); +SELECT * FROM VARBIT_TABLE; + v +------------- + + 0 + 010101 + 01010101010 +(4 rows) + +-- Concatenation +SELECT v, b, (v || b) AS concat + FROM BIT_TABLE, VARBIT_TABLE + ORDER BY 3; + v | b | concat +-------------+-------------+------------------------ + | 00000000000 | 00000000000 + 0 | 00000000000 | 000000000000 + 0 | 01010101010 | 001010101010 + 010101 | 00000000000 | 01010100000000000 + | 01010101010 | 01010101010 + 01010101010 | 00000000000 | 0101010101000000000000 + 01010101010 | 01010101010 | 0101010101001010101010 + 010101 | 01010101010 | 01010101010101010 + 01010101010 | 11011000000 | 0101010101011011000000 + 010101 | 11011000000 | 01010111011000000 + 0 | 11011000000 | 011011000000 + | 11011000000 | 11011000000 +(12 rows) + +-- Length +SELECT b, length(b) AS lb + FROM BIT_TABLE; + b | lb +-------------+---- + 00000000000 | 11 + 11011000000 | 11 + 01010101010 | 11 +(3 rows) + +SELECT v, length(v) AS lv + FROM VARBIT_TABLE; + v | lv +-------------+---- + | 0 + 0 | 1 + 010101 | 6 + 01010101010 | 11 +(4 rows) + +-- Substring +SELECT b, + SUBSTRING(b FROM 2 FOR 4) AS sub_2_4, + SUBSTRING(b FROM 7 FOR 13) AS sub_7_13, + SUBSTRING(b FROM 6) AS sub_6 + FROM BIT_TABLE; + b | sub_2_4 | sub_7_13 | sub_6 +-------------+---------+----------+-------- + 00000000000 | 0000 | 00000 | 000000 + 11011000000 | 1011 | 00000 | 000000 + 01010101010 | 1010 | 01010 | 101010 +(3 rows) + +SELECT v, + SUBSTRING(v FROM 2 FOR 4) AS sub_2_4, + SUBSTRING(v FROM 7 FOR 13) AS sub_7_13, + SUBSTRING(v FROM 6) AS sub_6 + FROM VARBIT_TABLE; + v 
| sub_2_4 | sub_7_13 | sub_6 +-------------+---------+----------+-------- + | | | + 0 | | | + 010101 | 1010 | | 1 + 01010101010 | 1010 | 01010 | 101010 +(4 rows) + +-- test overflow cases +SELECT SUBSTRING('01010101'::bit(8) FROM 2 FOR 2147483646) AS "1010101"; + 1010101 +--------- + 1010101 +(1 row) + +SELECT SUBSTRING('01010101'::bit(8) FROM -10 FOR 2147483646) AS "01010101"; + 01010101 +---------- + 01010101 +(1 row) + +SELECT SUBSTRING('01010101'::bit(8) FROM -10 FOR -2147483646) AS "error"; +ERROR: negative substring length not allowed +SELECT SUBSTRING('01010101'::varbit FROM 2 FOR 2147483646) AS "1010101"; + 1010101 +--------- + 1010101 +(1 row) + +SELECT SUBSTRING('01010101'::varbit FROM -10 FOR 2147483646) AS "01010101"; + 01010101 +---------- + 01010101 +(1 row) + +SELECT SUBSTRING('01010101'::varbit FROM -10 FOR -2147483646) AS "error"; +ERROR: negative substring length not allowed +--- Bit operations +DROP TABLE varbit_table; +CREATE TABLE varbit_table (a BIT VARYING(16), b BIT VARYING(16)); +COPY varbit_table FROM stdin; +SELECT a, b, ~a AS "~ a", a & b AS "a & b", + a | b AS "a | b", a # b AS "a # b" FROM varbit_table; + a | b | ~ a | a & b | a | b | a # b +------------------+------------------+------------------+------------------+------------------+------------------ + 00001111 | 00010000 | 11110000 | 00000000 | 00011111 | 00011111 + 00011111 | 00010001 | 11100000 | 00010001 | 00011111 | 00001110 + 00101111 | 00010010 | 11010000 | 00000010 | 00111111 | 00111101 + 00111111 | 00010011 | 11000000 | 00010011 | 00111111 | 00101100 + 10001111 | 00000100 | 01110000 | 00000100 | 10001111 | 10001011 + 0000000000001111 | 0000000000010000 | 1111111111110000 | 0000000000000000 | 0000000000011111 | 0000000000011111 + 0000000100100011 | 1111111111111111 | 1111111011011100 | 0000000100100011 | 1111111111111111 | 1111111011011100 + 0010010001101000 | 0010010001101000 | 1101101110010111 | 0010010001101000 | 0010010001101000 | 0000000000000000 + 1111101001010000 | 0000010110101111 | 0000010110101111 | 0000000000000000 | 1111111111111111 | 1111111111111111 + 0001001000110100 | 1111111111110101 | 1110110111001011 | 0001001000110100 | 1111111111110101 | 1110110111000001 +(10 rows) + +SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b", + a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM varbit_table; + a | b | a<b | a<=b | a=b | a>=b | a>b | a<>b +------------------+------------------+-----+------+-----+------+-----+------ + 00001111 | 00010000 | t | t | f | f | f | t + 00011111 | 00010001 | f | f | f | t | t | t + 00101111 | 00010010 | f | f | f | t | t | t + 00111111 | 00010011 | f | f | f | t | t | t + 10001111 | 00000100 | f | f | f | t | t | t + 0000000000001111 | 0000000000010000 | t | t | f | f | f | t + 0000000100100011 | 1111111111111111 | t | t | f | f | f | t + 0010010001101000 | 0010010001101000 | f | t | t | t | f | f + 1111101001010000 | 0000010110101111 | f | f | f | t | t | t + 0001001000110100 | 1111111111110101 | t | t | f | f | f | t +(10 rows) + +SELECT a,a<<4 AS "a<<4",b,b>>2 AS "b>>2" FROM varbit_table; + a | a<<4 | b | b>>2 +------------------+------------------+------------------+------------------ + 00001111 | 11110000 | 00010000 | 00000100 + 00011111 | 11110000 | 00010001 | 00000100 + 00101111 | 11110000 | 00010010 | 00000100 + 00111111 | 11110000 | 00010011 | 00000100 + 10001111 | 11110000 | 00000100 | 00000001 + 0000000000001111 | 0000000011110000 | 0000000000010000 | 0000000000000100 + 0000000100100011 | 0001001000110000 | 1111111111111111 | 0011111111111111 + 0010010001101000 | 0100011010000000 | 0010010001101000 |
0000100100011010 + 1111101001010000 | 1010010100000000 | 0000010110101111 | 0000000101101011 + 0001001000110100 | 0010001101000000 | 1111111111110101 | 0011111111111101 +(10 rows) + +DROP TABLE varbit_table; +--- Bit operations +DROP TABLE bit_table; +CREATE TABLE bit_table (a BIT(16), b BIT(16)); +COPY bit_table FROM stdin; +SELECT a,b,~a AS "~ a",a & b AS "a & b", + a|b AS "a | b", a # b AS "a # b" FROM bit_table; + a | b | ~ a | a & b | a | b | a # b +------------------+------------------+------------------+------------------+------------------+------------------ + 0000111100000000 | 0001000000000000 | 1111000011111111 | 0000000000000000 | 0001111100000000 | 0001111100000000 + 0001111100000000 | 0001000100000000 | 1110000011111111 | 0001000100000000 | 0001111100000000 | 0000111000000000 + 0010111100000000 | 0001001000000000 | 1101000011111111 | 0000001000000000 | 0011111100000000 | 0011110100000000 + 0011111100000000 | 0001001100000000 | 1100000011111111 | 0001001100000000 | 0011111100000000 | 0010110000000000 + 1000111100000000 | 0000010000000000 | 0111000011111111 | 0000010000000000 | 1000111100000000 | 1000101100000000 + 0000000000001111 | 0000000000010000 | 1111111111110000 | 0000000000000000 | 0000000000011111 | 0000000000011111 + 0000000100100011 | 1111111111111111 | 1111111011011100 | 0000000100100011 | 1111111111111111 | 1111111011011100 + 0010010001101000 | 0010010001101000 | 1101101110010111 | 0010010001101000 | 0010010001101000 | 0000000000000000 + 1111101001010000 | 0000010110101111 | 0000010110101111 | 0000000000000000 | 1111111111111111 | 1111111111111111 + 0001001000110100 | 1111111111110101 | 1110110111001011 | 0001001000110100 | 1111111111110101 | 1110110111000001 +(10 rows) + +SELECT a,b,a<b AS "a<b",a<=b AS "a<=b",a=b AS "a=b", + a>=b AS "a>=b",a>b AS "a>b",a<>b AS "a<>b" FROM bit_table; + a | b | a<b | a<=b | a=b | a>=b | a>b | a<>b +------------------+------------------+-----+------+-----+------+-----+------ + 0000111100000000 | 0001000000000000 | t | t | f | f | f | t + 0001111100000000 | 0001000100000000 | f | f | f | t | t | t + 0010111100000000 | 0001001000000000 | f | f | f | t | t | t + 0011111100000000 | 0001001100000000 | f | f | f | t | t | t + 1000111100000000 | 0000010000000000 | f | f | f | t | t | t + 0000000000001111 | 0000000000010000 | t | t | f | f | f | t + 0000000100100011 | 1111111111111111 | t | t | f | f | f | t + 0010010001101000 | 0010010001101000 | f | t | t | t | f | f + 1111101001010000 | 0000010110101111 | f | f | f | t | t | t + 0001001000110100 | 1111111111110101 | t | t | f | f | f | t +(10 rows) + +SELECT a,a<<4 AS "a<<4",b,b>>2 AS "b>>2" FROM bit_table; + a | a<<4 | b | b>>2 +------------------+------------------+------------------+------------------ + 0000111100000000 | 1111000000000000 | 0001000000000000 | 0000010000000000 + 0001111100000000 | 1111000000000000 | 0001000100000000 | 0000010001000000 + 0010111100000000 | 1111000000000000 | 0001001000000000 | 0000010010000000 + 0011111100000000 | 1111000000000000 | 0001001100000000 | 0000010011000000 + 1000111100000000 | 1111000000000000 | 0000010000000000 | 0000000100000000 + 0000000000001111 | 0000000011110000 | 0000000000010000 | 0000000000000100 + 0000000100100011 | 0001001000110000 | 1111111111111111 | 0011111111111111 + 0010010001101000 | 0100011010000000 | 0010010001101000 | 0000100100011010 + 1111101001010000 | 1010010100000000 | 0000010110101111 | 0000000101101011 + 0001001000110100 | 0010001101000000 | 1111111111110101 | 0011111111111101 +(10 rows) + +DROP TABLE bit_table; +-- The following should fail +select B'001' & B'10'; +ERROR: 
cannot AND bit strings of different sizes +select B'0111' | B'011'; +ERROR: cannot OR bit strings of different sizes +select B'0010' # B'011101'; +ERROR: cannot XOR bit strings of different sizes +-- More position tests, checking all the boundary cases +SELECT POSITION(B'1010' IN B'0000101'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'1010' IN B'00001010'); -- 5 + position +---------- + 5 +(1 row) + +SELECT POSITION(B'1010' IN B'00000101'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'1010' IN B'000001010'); -- 6 + position +---------- + 6 +(1 row) + +SELECT POSITION(B'' IN B'00001010'); -- 1 + position +---------- + 1 +(1 row) + +SELECT POSITION(B'0' IN B''); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'' IN B''); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'101101' IN B'001011011011011000'); -- 3 + position +---------- + 3 +(1 row) + +SELECT POSITION(B'10110110' IN B'001011011011010'); -- 3 + position +---------- + 3 +(1 row) + +SELECT POSITION(B'1011011011011' IN B'001011011011011'); -- 3 + position +---------- + 3 +(1 row) + +SELECT POSITION(B'1011011011011' IN B'00001011011011011'); -- 5 + position +---------- + 5 +(1 row) + +SELECT POSITION(B'11101011' IN B'11101011'); -- 1 + position +---------- + 1 +(1 row) + +SELECT POSITION(B'11101011' IN B'011101011'); -- 2 + position +---------- + 2 +(1 row) + +SELECT POSITION(B'11101011' IN B'00011101011'); -- 4 + position +---------- + 4 +(1 row) + +SELECT POSITION(B'11101011' IN B'0000011101011'); -- 6 + position +---------- + 6 +(1 row) + +SELECT POSITION(B'111010110' IN B'111010110'); -- 1 + position +---------- + 1 +(1 row) + +SELECT POSITION(B'111010110' IN B'0111010110'); -- 2 + position +---------- + 2 +(1 row) + +SELECT POSITION(B'111010110' IN B'000111010110'); -- 4 + position +---------- + 4 +(1 row) + +SELECT POSITION(B'111010110' IN B'00000111010110'); -- 6 + position +---------- + 6 +(1 row) + +SELECT POSITION(B'111010110' IN B'11101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'011101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'00011101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'0000011101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'111010110'); -- 1 + position +---------- + 1 +(1 row) + +SELECT POSITION(B'111010110' IN B'0111010110'); -- 2 + position +---------- + 2 +(1 row) + +SELECT POSITION(B'111010110' IN B'000111010110'); -- 4 + position +---------- + 4 +(1 row) + +SELECT POSITION(B'111010110' IN B'00000111010110'); -- 6 + position +---------- + 6 +(1 row) + +SELECT POSITION(B'111010110' IN B'000001110101111101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'0000001110101111101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'000000001110101111101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'00000000001110101111101011'); -- 0 + position +---------- + 0 +(1 row) + +SELECT POSITION(B'111010110' IN B'0000011101011111010110'); -- 14 + position +---------- + 14 +(1 row) + +SELECT POSITION(B'111010110' IN B'00000011101011111010110'); -- 15 + position +---------- + 15 +(1 row) + +SELECT POSITION(B'111010110' IN B'0000000011101011111010110'); -- 17 + position +---------- + 17 +(1 row) + +SELECT POSITION(B'111010110' IN B'000000000011101011111010110'); -- 19 + position +---------- + 19 +(1 row) + 
+SELECT POSITION(B'000000000011101011111010110' IN B'000000000011101011111010110'); -- 1 + position +---------- + 1 +(1 row) + +SELECT POSITION(B'00000000011101011111010110' IN B'000000000011101011111010110'); -- 2 + position +---------- + 2 +(1 row) + +SELECT POSITION(B'0000000000011101011111010110' IN B'000000000011101011111010110'); -- 0 + position +---------- + 0 +(1 row) + +-- Shifting +CREATE TABLE BIT_SHIFT_TABLE(b BIT(16)); +INSERT INTO BIT_SHIFT_TABLE VALUES (B'1101100000000000'); +INSERT INTO BIT_SHIFT_TABLE SELECT b>>1 FROM BIT_SHIFT_TABLE; +INSERT INTO BIT_SHIFT_TABLE SELECT b>>2 FROM BIT_SHIFT_TABLE; +INSERT INTO BIT_SHIFT_TABLE SELECT b>>4 FROM BIT_SHIFT_TABLE; +INSERT INTO BIT_SHIFT_TABLE SELECT b>>8 FROM BIT_SHIFT_TABLE; +SELECT POSITION(B'1101' IN b), + POSITION(B'11011' IN b), + b + FROM BIT_SHIFT_TABLE ; + position | position | b +----------+----------+------------------ + 1 | 1 | 1101100000000000 + 2 | 2 | 0110110000000000 + 3 | 3 | 0011011000000000 + 4 | 4 | 0001101100000000 + 5 | 5 | 0000110110000000 + 6 | 6 | 0000011011000000 + 7 | 7 | 0000001101100000 + 8 | 8 | 0000000110110000 + 9 | 9 | 0000000011011000 + 10 | 10 | 0000000001101100 + 11 | 11 | 0000000000110110 + 12 | 12 | 0000000000011011 + 13 | 0 | 0000000000001101 + 0 | 0 | 0000000000000110 + 0 | 0 | 0000000000000011 + 0 | 0 | 0000000000000001 +(16 rows) + +SELECT b, b >> 1 AS bsr, b << 1 AS bsl + FROM BIT_SHIFT_TABLE ; + b | bsr | bsl +------------------+------------------+------------------ + 1101100000000000 | 0110110000000000 | 1011000000000000 + 0110110000000000 | 0011011000000000 | 1101100000000000 + 0011011000000000 | 0001101100000000 | 0110110000000000 + 0001101100000000 | 0000110110000000 | 0011011000000000 + 0000110110000000 | 0000011011000000 | 0001101100000000 + 0000011011000000 | 0000001101100000 | 0000110110000000 + 0000001101100000 | 0000000110110000 | 0000011011000000 + 0000000110110000 | 0000000011011000 | 0000001101100000 + 0000000011011000 | 0000000001101100 | 0000000110110000 + 0000000001101100 | 0000000000110110 | 0000000011011000 + 0000000000110110 | 0000000000011011 | 0000000001101100 + 0000000000011011 | 0000000000001101 | 0000000000110110 + 0000000000001101 | 0000000000000110 | 0000000000011010 + 0000000000000110 | 0000000000000011 | 0000000000001100 + 0000000000000011 | 0000000000000001 | 0000000000000110 + 0000000000000001 | 0000000000000000 | 0000000000000010 +(16 rows) + +SELECT b, b >> 8 AS bsr8, b << 8 AS bsl8 + FROM BIT_SHIFT_TABLE ; + b | bsr8 | bsl8 +------------------+------------------+------------------ + 1101100000000000 | 0000000011011000 | 0000000000000000 + 0110110000000000 | 0000000001101100 | 0000000000000000 + 0011011000000000 | 0000000000110110 | 0000000000000000 + 0001101100000000 | 0000000000011011 | 0000000000000000 + 0000110110000000 | 0000000000001101 | 1000000000000000 + 0000011011000000 | 0000000000000110 | 1100000000000000 + 0000001101100000 | 0000000000000011 | 0110000000000000 + 0000000110110000 | 0000000000000001 | 1011000000000000 + 0000000011011000 | 0000000000000000 | 1101100000000000 + 0000000001101100 | 0000000000000000 | 0110110000000000 + 0000000000110110 | 0000000000000000 | 0011011000000000 + 0000000000011011 | 0000000000000000 | 0001101100000000 + 0000000000001101 | 0000000000000000 | 0000110100000000 + 0000000000000110 | 0000000000000000 | 0000011000000000 + 0000000000000011 | 0000000000000000 | 0000001100000000 + 0000000000000001 | 0000000000000000 | 0000000100000000 +(16 rows) + +SELECT b::bit(15), b::bit(15) >> 1 AS bsr, b::bit(15) << 1 AS bsl 
+ FROM BIT_SHIFT_TABLE ; + b | bsr | bsl +-----------------+-----------------+----------------- + 110110000000000 | 011011000000000 | 101100000000000 + 011011000000000 | 001101100000000 | 110110000000000 + 001101100000000 | 000110110000000 | 011011000000000 + 000110110000000 | 000011011000000 | 001101100000000 + 000011011000000 | 000001101100000 | 000110110000000 + 000001101100000 | 000000110110000 | 000011011000000 + 000000110110000 | 000000011011000 | 000001101100000 + 000000011011000 | 000000001101100 | 000000110110000 + 000000001101100 | 000000000110110 | 000000011011000 + 000000000110110 | 000000000011011 | 000000001101100 + 000000000011011 | 000000000001101 | 000000000110110 + 000000000001101 | 000000000000110 | 000000000011010 + 000000000000110 | 000000000000011 | 000000000001100 + 000000000000011 | 000000000000001 | 000000000000110 + 000000000000001 | 000000000000000 | 000000000000010 + 000000000000000 | 000000000000000 | 000000000000000 +(16 rows) + +SELECT b::bit(15), b::bit(15) >> 8 AS bsr8, b::bit(15) << 8 AS bsl8 + FROM BIT_SHIFT_TABLE ; + b | bsr8 | bsl8 +-----------------+-----------------+----------------- + 110110000000000 | 000000001101100 | 000000000000000 + 011011000000000 | 000000000110110 | 000000000000000 + 001101100000000 | 000000000011011 | 000000000000000 + 000110110000000 | 000000000001101 | 000000000000000 + 000011011000000 | 000000000000110 | 100000000000000 + 000001101100000 | 000000000000011 | 110000000000000 + 000000110110000 | 000000000000001 | 011000000000000 + 000000011011000 | 000000000000000 | 101100000000000 + 000000001101100 | 000000000000000 | 110110000000000 + 000000000110110 | 000000000000000 | 011011000000000 + 000000000011011 | 000000000000000 | 001101100000000 + 000000000001101 | 000000000000000 | 000110100000000 + 000000000000110 | 000000000000000 | 000011000000000 + 000000000000011 | 000000000000000 | 000001100000000 + 000000000000001 | 000000000000000 | 000000100000000 + 000000000000000 | 000000000000000 | 000000000000000 +(16 rows) + +CREATE TABLE VARBIT_SHIFT_TABLE(v BIT VARYING(20)); +INSERT INTO VARBIT_SHIFT_TABLE VALUES (B'11011'); +INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0' AS BIT VARYING(6)) >>1 FROM VARBIT_SHIFT_TABLE; +INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00' AS BIT VARYING(8)) >>2 FROM VARBIT_SHIFT_TABLE; +INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'0000' AS BIT VARYING(12)) >>4 FROM VARBIT_SHIFT_TABLE; +INSERT INTO VARBIT_SHIFT_TABLE SELECT CAST(v || B'00000000' AS BIT VARYING(20)) >>8 FROM VARBIT_SHIFT_TABLE; +SELECT POSITION(B'1101' IN v), + POSITION(B'11011' IN v), + v + FROM VARBIT_SHIFT_TABLE ; + position | position | v +----------+----------+---------------------- + 1 | 1 | 11011 + 2 | 2 | 011011 + 3 | 3 | 0011011 + 4 | 4 | 00011011 + 5 | 5 | 000011011 + 6 | 6 | 0000011011 + 7 | 7 | 00000011011 + 8 | 8 | 000000011011 + 9 | 9 | 0000000011011 + 10 | 10 | 00000000011011 + 11 | 11 | 000000000011011 + 12 | 12 | 0000000000011011 + 13 | 13 | 00000000000011011 + 14 | 14 | 000000000000011011 + 15 | 15 | 0000000000000011011 + 16 | 16 | 00000000000000011011 +(16 rows) + +SELECT v, v >> 1 AS vsr, v << 1 AS vsl + FROM VARBIT_SHIFT_TABLE ; + v | vsr | vsl +----------------------+----------------------+---------------------- + 11011 | 01101 | 10110 + 011011 | 001101 | 110110 + 0011011 | 0001101 | 0110110 + 00011011 | 00001101 | 00110110 + 000011011 | 000001101 | 000110110 + 0000011011 | 0000001101 | 0000110110 + 00000011011 | 00000001101 | 00000110110 + 000000011011 | 000000001101 | 000000110110 + 0000000011011 
| 0000000001101 | 0000000110110 + 00000000011011 | 00000000001101 | 00000000110110 + 000000000011011 | 000000000001101 | 000000000110110 + 0000000000011011 | 0000000000001101 | 0000000000110110 + 00000000000011011 | 00000000000001101 | 00000000000110110 + 000000000000011011 | 000000000000001101 | 000000000000110110 + 0000000000000011011 | 0000000000000001101 | 0000000000000110110 + 00000000000000011011 | 00000000000000001101 | 00000000000000110110 +(16 rows) + +SELECT v, v >> 8 AS vsr8, v << 8 AS vsl8 + FROM VARBIT_SHIFT_TABLE ; + v | vsr8 | vsl8 +----------------------+----------------------+---------------------- + 11011 | 00000 | 00000 + 011011 | 000000 | 000000 + 0011011 | 0000000 | 0000000 + 00011011 | 00000000 | 00000000 + 000011011 | 000000000 | 100000000 + 0000011011 | 0000000000 | 1100000000 + 00000011011 | 00000000000 | 01100000000 + 000000011011 | 000000000000 | 101100000000 + 0000000011011 | 0000000000000 | 1101100000000 + 00000000011011 | 00000000000000 | 01101100000000 + 000000000011011 | 000000000000000 | 001101100000000 + 0000000000011011 | 0000000000000000 | 0001101100000000 + 00000000000011011 | 00000000000000000 | 00001101100000000 + 000000000000011011 | 000000000000000000 | 000001101100000000 + 0000000000000011011 | 0000000000000000000 | 0000001101100000000 + 00000000000000011011 | 00000000000000000000 | 00000001101100000000 +(16 rows) + +DROP TABLE BIT_SHIFT_TABLE; +DROP TABLE VARBIT_SHIFT_TABLE; +-- Get/Set bit +SELECT get_bit(B'0101011000100', 10); + get_bit +--------- + 1 +(1 row) + +SELECT set_bit(B'0101011000100100', 15, 1); + set_bit +------------------ + 0101011000100101 +(1 row) + +SELECT set_bit(B'0101011000100100', 16, 1); -- fail +ERROR: bit index 16 out of valid range (0..15) +-- Overlay +SELECT overlay(B'0101011100' placing '001' from 2 for 3); + overlay +------------ + 0001011100 +(1 row) + +SELECT overlay(B'0101011100' placing '101' from 6); + overlay +------------ + 0101010100 +(1 row) + +SELECT overlay(B'0101011100' placing '001' from 11); + overlay +--------------- + 0101011100001 +(1 row) + +SELECT overlay(B'0101011100' placing '001' from 20); + overlay +--------------- + 0101011100001 +(1 row) + +-- bit_count +SELECT bit_count(B'0101011100'::bit(10)); + bit_count +----------- + 5 +(1 row) + +SELECT bit_count(B'1111111111'::bit(10)); + bit_count +----------- + 10 +(1 row) + +-- This table is intentionally left around to exercise pg_dump/pg_upgrade +CREATE TABLE bit_defaults( + b1 bit(4) DEFAULT '1001', + b2 bit(4) DEFAULT B'0101', + b3 bit varying(5) DEFAULT '1001', + b4 bit varying(5) DEFAULT B'0101' +); +\d bit_defaults + Table "public.bit_defaults" + Column | Type | Collation | Nullable | Default +--------+----------------+-----------+----------+--------------------- + b1 | bit(4) | | | '1001'::"bit" + b2 | bit(4) | | | '0101'::"bit" + b3 | bit varying(5) | | | '1001'::bit varying + b4 | bit varying(5) | | | '0101'::"bit" + +INSERT INTO bit_defaults DEFAULT VALUES; +TABLE bit_defaults; + b1 | b2 | b3 | b4 +------+------+------+------ + 1001 | 0101 | 1001 | 0101 +(1 row) + +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('01010001', 'bit(10)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('01010001', 'bit(10)'); + message | detail | hint | sql_error_code +-------------------------------------------------+--------+------+---------------- + bit string length 8 does not match type bit(10) | | | 22026 +(1 row) + +SELECT pg_input_is_valid('01010Z01', 'bit(8)'); + 
pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('01010Z01', 'bit(8)'); + message | detail | hint | sql_error_code +---------------------------------+--------+------+---------------- + "Z" is not a valid binary digit | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('x01010Z01', 'bit(32)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('x01010Z01', 'bit(32)'); + message | detail | hint | sql_error_code +--------------------------------------+--------+------+---------------- + "Z" is not a valid hexadecimal digit | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('01010Z01', 'varbit'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('01010Z01', 'varbit'); + message | detail | hint | sql_error_code +---------------------------------+--------+------+---------------- + "Z" is not a valid binary digit | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('x01010Z01', 'varbit'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('x01010Z01', 'varbit'); + message | detail | hint | sql_error_code +--------------------------------------+--------+------+---------------- + "Z" is not a valid hexadecimal digit | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/bitmapops.out b/src/test/regress/expected/bitmapops.out new file mode 100644 index 0000000..3570973 --- /dev/null +++ b/src/test/regress/expected/bitmapops.out @@ -0,0 +1,38 @@ +-- Test bitmap AND and OR +-- Generate enough data that we can test the lossy bitmaps. +-- There's 55 tuples per page in the table. 53 is just +-- below 55, so that an index scan with qual a = constant +-- will return at least one hit per page. 59 is just above +-- 55, so that an index scan with qual b = constant will return +-- hits on most but not all pages. 53 and 59 are prime, so that +-- there's a maximum number of a,b combinations in the table. +-- That allows us to test all the different combinations of +-- lossy and non-lossy pages with the minimum amount of data +CREATE TABLE bmscantest (a int, b int, t text); +INSERT INTO bmscantest + SELECT (r%53), (r%59), 'foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' + FROM generate_series(1,70000) r; +CREATE INDEX i_bmtest_a ON bmscantest(a); +CREATE INDEX i_bmtest_b ON bmscantest(b); +-- We want to use bitmapscans. With default settings, the planner currently +-- chooses a bitmap scan for the queries below anyway, but let's make sure. +set enable_indexscan=false; +set enable_seqscan=false; +-- Lower work_mem to trigger use of lossy bitmaps +set work_mem = 64; +-- Test bitmap-and. +SELECT count(*) FROM bmscantest WHERE a = 1 AND b = 1; + count +------- + 23 +(1 row) + +-- Test bitmap-or. +SELECT count(*) FROM bmscantest WHERE a = 1 OR b = 1; + count +------- + 2485 +(1 row) + +-- clean up +DROP TABLE bmscantest; diff --git a/src/test/regress/expected/boolean.out b/src/test/regress/expected/boolean.out new file mode 100644 index 0000000..ee9c244 --- /dev/null +++ b/src/test/regress/expected/boolean.out @@ -0,0 +1,578 @@ +-- +-- BOOLEAN +-- +-- +-- sanity check - if this fails go insane! 
+-- +SELECT 1 AS one; + one +----- + 1 +(1 row) + +-- ******************testing built-in type bool******************** +-- check bool input syntax +SELECT true AS true; + true +------ + t +(1 row) + +SELECT false AS false; + false +------- + f +(1 row) + +SELECT bool 't' AS true; + true +------ + t +(1 row) + +SELECT bool ' f ' AS false; + false +------- + f +(1 row) + +SELECT bool 'true' AS true; + true +------ + t +(1 row) + +SELECT bool 'test' AS error; +ERROR: invalid input syntax for type boolean: "test" +LINE 1: SELECT bool 'test' AS error; + ^ +SELECT bool 'false' AS false; + false +------- + f +(1 row) + +SELECT bool 'foo' AS error; +ERROR: invalid input syntax for type boolean: "foo" +LINE 1: SELECT bool 'foo' AS error; + ^ +SELECT bool 'y' AS true; + true +------ + t +(1 row) + +SELECT bool 'yes' AS true; + true +------ + t +(1 row) + +SELECT bool 'yeah' AS error; +ERROR: invalid input syntax for type boolean: "yeah" +LINE 1: SELECT bool 'yeah' AS error; + ^ +SELECT bool 'n' AS false; + false +------- + f +(1 row) + +SELECT bool 'no' AS false; + false +------- + f +(1 row) + +SELECT bool 'nay' AS error; +ERROR: invalid input syntax for type boolean: "nay" +LINE 1: SELECT bool 'nay' AS error; + ^ +SELECT bool 'on' AS true; + true +------ + t +(1 row) + +SELECT bool 'off' AS false; + false +------- + f +(1 row) + +SELECT bool 'of' AS false; + false +------- + f +(1 row) + +SELECT bool 'o' AS error; +ERROR: invalid input syntax for type boolean: "o" +LINE 1: SELECT bool 'o' AS error; + ^ +SELECT bool 'on_' AS error; +ERROR: invalid input syntax for type boolean: "on_" +LINE 1: SELECT bool 'on_' AS error; + ^ +SELECT bool 'off_' AS error; +ERROR: invalid input syntax for type boolean: "off_" +LINE 1: SELECT bool 'off_' AS error; + ^ +SELECT bool '1' AS true; + true +------ + t +(1 row) + +SELECT bool '11' AS error; +ERROR: invalid input syntax for type boolean: "11" +LINE 1: SELECT bool '11' AS error; + ^ +SELECT bool '0' AS false; + false +------- + f +(1 row) + +SELECT bool '000' AS error; +ERROR: invalid input syntax for type boolean: "000" +LINE 1: SELECT bool '000' AS error; + ^ +SELECT bool '' AS error; +ERROR: invalid input syntax for type boolean: "" +LINE 1: SELECT bool '' AS error; + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('true', 'bool'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('asdf', 'bool'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('junk', 'bool'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------+------+---------------- + invalid input syntax for type boolean: "junk" | | | 22P02 +(1 row) + +-- and, or, not in qualifications +SELECT bool 't' or bool 'f' AS true; + true +------ + t +(1 row) + +SELECT bool 't' and bool 'f' AS false; + false +------- + f +(1 row) + +SELECT not bool 'f' AS true; + true +------ + t +(1 row) + +SELECT bool 't' = bool 'f' AS false; + false +------- + f +(1 row) + +SELECT bool 't' <> bool 'f' AS true; + true +------ + t +(1 row) + +SELECT bool 't' > bool 'f' AS true; + true +------ + t +(1 row) + +SELECT bool 't' >= bool 'f' AS true; + true +------ + t +(1 row) + +SELECT bool 'f' < bool 't' AS true; + true +------ + t +(1 row) + +SELECT bool 'f' <= bool 't' AS true; + true +------ + t +(1 row) + +-- explicit casts to/from text +SELECT 'TrUe'::text::boolean AS true, 'fAlse'::text::boolean AS false; + true | false +------+------- + t | f +(1 row) + +SELECT ' true 
'::text::boolean AS true, + ' FALSE'::text::boolean AS false; + true | false +------+------- + t | f +(1 row) + +SELECT true::boolean::text AS true, false::boolean::text AS false; + true | false +------+------- + true | false +(1 row) + +SELECT ' tru e '::text::boolean AS invalid; -- error +ERROR: invalid input syntax for type boolean: " tru e " +SELECT ''::text::boolean AS invalid; -- error +ERROR: invalid input syntax for type boolean: "" +CREATE TABLE BOOLTBL1 (f1 bool); +INSERT INTO BOOLTBL1 (f1) VALUES (bool 't'); +INSERT INTO BOOLTBL1 (f1) VALUES (bool 'True'); +INSERT INTO BOOLTBL1 (f1) VALUES (bool 'true'); +-- BOOLTBL1 should be full of true's at this point +SELECT BOOLTBL1.* FROM BOOLTBL1; + f1 +---- + t + t + t +(3 rows) + +SELECT BOOLTBL1.* + FROM BOOLTBL1 + WHERE f1 = bool 'true'; + f1 +---- + t + t + t +(3 rows) + +SELECT BOOLTBL1.* + FROM BOOLTBL1 + WHERE f1 <> bool 'false'; + f1 +---- + t + t + t +(3 rows) + +SELECT BOOLTBL1.* + FROM BOOLTBL1 + WHERE booleq(bool 'false', f1); + f1 +---- +(0 rows) + +INSERT INTO BOOLTBL1 (f1) VALUES (bool 'f'); +SELECT BOOLTBL1.* + FROM BOOLTBL1 + WHERE f1 = bool 'false'; + f1 +---- + f +(1 row) + +CREATE TABLE BOOLTBL2 (f1 bool); +INSERT INTO BOOLTBL2 (f1) VALUES (bool 'f'); +INSERT INTO BOOLTBL2 (f1) VALUES (bool 'false'); +INSERT INTO BOOLTBL2 (f1) VALUES (bool 'False'); +INSERT INTO BOOLTBL2 (f1) VALUES (bool 'FALSE'); +-- This is now an invalid expression +-- For pre-v6.3 this evaluated to false - thomas 1997-10-23 +INSERT INTO BOOLTBL2 (f1) + VALUES (bool 'XXX'); +ERROR: invalid input syntax for type boolean: "XXX" +LINE 2: VALUES (bool 'XXX'); + ^ +-- BOOLTBL2 should be full of false's at this point +SELECT BOOLTBL2.* FROM BOOLTBL2; + f1 +---- + f + f + f + f +(4 rows) + +SELECT BOOLTBL1.*, BOOLTBL2.* + FROM BOOLTBL1, BOOLTBL2 + WHERE BOOLTBL2.f1 <> BOOLTBL1.f1; + f1 | f1 +----+---- + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f +(12 rows) + +SELECT BOOLTBL1.*, BOOLTBL2.* + FROM BOOLTBL1, BOOLTBL2 + WHERE boolne(BOOLTBL2.f1,BOOLTBL1.f1); + f1 | f1 +----+---- + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f +(12 rows) + +SELECT BOOLTBL1.*, BOOLTBL2.* + FROM BOOLTBL1, BOOLTBL2 + WHERE BOOLTBL2.f1 = BOOLTBL1.f1 and BOOLTBL1.f1 = bool 'false'; + f1 | f1 +----+---- + f | f + f | f + f | f + f | f +(4 rows) + +SELECT BOOLTBL1.*, BOOLTBL2.* + FROM BOOLTBL1, BOOLTBL2 + WHERE BOOLTBL2.f1 = BOOLTBL1.f1 or BOOLTBL1.f1 = bool 'true' + ORDER BY BOOLTBL1.f1, BOOLTBL2.f1; + f1 | f1 +----+---- + f | f + f | f + f | f + f | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f + t | f +(16 rows) + +-- +-- SQL syntax +-- Try all combinations to ensure that we get nothing when we expect nothing +-- - thomas 2000-01-04 +-- +SELECT f1 + FROM BOOLTBL1 + WHERE f1 IS TRUE; + f1 +---- + t + t + t +(3 rows) + +SELECT f1 + FROM BOOLTBL1 + WHERE f1 IS NOT FALSE; + f1 +---- + t + t + t +(3 rows) + +SELECT f1 + FROM BOOLTBL1 + WHERE f1 IS FALSE; + f1 +---- + f +(1 row) + +SELECT f1 + FROM BOOLTBL1 + WHERE f1 IS NOT TRUE; + f1 +---- + f +(1 row) + +SELECT f1 + FROM BOOLTBL2 + WHERE f1 IS TRUE; + f1 +---- +(0 rows) + +SELECT f1 + FROM BOOLTBL2 + WHERE f1 IS NOT FALSE; + f1 +---- +(0 rows) + +SELECT f1 + FROM BOOLTBL2 + WHERE f1 IS FALSE; + f1 +---- + f + f + f + f +(4 rows) + +SELECT f1 + FROM BOOLTBL2 + WHERE f1 IS NOT TRUE; + f1 +---- + f + f + f + f +(4 rows) + +-- +-- Tests for BooleanTest +-- +CREATE TABLE BOOLTBL3 (d text, b bool, 
o int); +INSERT INTO BOOLTBL3 (d, b, o) VALUES ('true', true, 1); +INSERT INTO BOOLTBL3 (d, b, o) VALUES ('false', false, 2); +INSERT INTO BOOLTBL3 (d, b, o) VALUES ('null', null, 3); +SELECT + d, + b IS TRUE AS istrue, + b IS NOT TRUE AS isnottrue, + b IS FALSE AS isfalse, + b IS NOT FALSE AS isnotfalse, + b IS UNKNOWN AS isunknown, + b IS NOT UNKNOWN AS isnotunknown +FROM booltbl3 ORDER BY o; + d | istrue | isnottrue | isfalse | isnotfalse | isunknown | isnotunknown +-------+--------+-----------+---------+------------+-----------+-------------- + true | t | f | f | t | f | t + false | f | t | t | f | f | t + null | f | t | f | t | t | f +(3 rows) + +-- Test to make sure short-circuiting and NULL handling is +-- correct. Use a table as source to prevent constant simplification +-- to interfer. +CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool); +INSERT INTO booltbl4 VALUES (false, true, null); +\pset null '(null)' +-- AND expression need to return null if there's any nulls and not all +-- of the value are true +SELECT istrue AND isnul AND istrue FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT istrue AND istrue AND isnul FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isnul AND istrue AND istrue FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isfalse AND isnul AND istrue FROM booltbl4; + ?column? +---------- + f +(1 row) + +SELECT istrue AND isfalse AND isnul FROM booltbl4; + ?column? +---------- + f +(1 row) + +SELECT isnul AND istrue AND isfalse FROM booltbl4; + ?column? +---------- + f +(1 row) + +-- OR expression need to return null if there's any nulls and none +-- of the value is true +SELECT isfalse OR isnul OR isfalse FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isfalse OR isfalse OR isnul FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isnul OR isfalse OR isfalse FROM booltbl4; + ?column? +---------- + (null) +(1 row) + +SELECT isfalse OR isnul OR istrue FROM booltbl4; + ?column? +---------- + t +(1 row) + +SELECT istrue OR isfalse OR isnul FROM booltbl4; + ?column? +---------- + t +(1 row) + +SELECT isnul OR istrue OR isfalse FROM booltbl4; + ?column? +---------- + t +(1 row) + +-- +-- Clean up +-- Many tables are retained by the regression test, but these do not seem +-- particularly useful so just get rid of them for now. 
+-- - thomas 1997-11-30 +-- +DROP TABLE BOOLTBL1; +DROP TABLE BOOLTBL2; +DROP TABLE BOOLTBL3; +DROP TABLE BOOLTBL4; diff --git a/src/test/regress/expected/box.out b/src/test/regress/expected/box.out new file mode 100644 index 0000000..8c9e9e3 --- /dev/null +++ b/src/test/regress/expected/box.out @@ -0,0 +1,666 @@ +-- +-- BOX +-- +-- +-- box logic +-- o +-- 3 o--|X +-- | o| +-- 2 +-+-+ | +-- | | | | +-- 1 | o-+-o +-- | | +-- 0 +---+ +-- +-- 0 1 2 3 +-- +-- boxes are specified by two points, given by four floats x1,y1,x2,y2 +CREATE TABLE BOX_TBL (f1 box); +INSERT INTO BOX_TBL (f1) VALUES ('(2.0,2.0,0.0,0.0)'); +INSERT INTO BOX_TBL (f1) VALUES ('(1.0,1.0,3.0,3.0)'); +INSERT INTO BOX_TBL (f1) VALUES ('((-8, 2), (-2, -10))'); +-- degenerate cases where the box is a line or a point +-- note that lines and points boxes all have zero area +INSERT INTO BOX_TBL (f1) VALUES ('(2.5, 2.5, 2.5,3.5)'); +INSERT INTO BOX_TBL (f1) VALUES ('(3.0, 3.0,3.0,3.0)'); +-- badly formatted box inputs +INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); +ERROR: invalid input syntax for type box: "(2.3, 4.5)" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(2.3, 4.5)'); + ^ +INSERT INTO BOX_TBL (f1) VALUES ('[1, 2, 3, 4)'); +ERROR: invalid input syntax for type box: "[1, 2, 3, 4)" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('[1, 2, 3, 4)'); + ^ +INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4]'); +ERROR: invalid input syntax for type box: "(1, 2, 3, 4]" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4]'); + ^ +INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4) x'); +ERROR: invalid input syntax for type box: "(1, 2, 3, 4) x" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('(1, 2, 3, 4) x'); + ^ +INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad'); +ERROR: invalid input syntax for type box: "asdfasdf(ad" +LINE 1: INSERT INTO BOX_TBL (f1) VALUES ('asdfasdf(ad'); + ^ +SELECT * FROM BOX_TBL; + f1 +--------------------- + (2,2),(0,0) + (3,3),(1,1) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) +(5 rows) + +SELECT b.*, area(b.f1) as barea + FROM BOX_TBL b; + f1 | barea +---------------------+------- + (2,2),(0,0) | 4 + (3,3),(1,1) | 4 + (-2,2),(-8,-10) | 72 + (2.5,3.5),(2.5,2.5) | 0 + (3,3),(3,3) | 0 +(5 rows) + +-- overlap +SELECT b.f1 + FROM BOX_TBL b + WHERE b.f1 && box '(2.5,2.5,1.0,1.0)'; + f1 +--------------------- + (2,2),(0,0) + (3,3),(1,1) + (2.5,3.5),(2.5,2.5) +(3 rows) + +-- left-or-overlap (x only) +SELECT b1.* + FROM BOX_TBL b1 + WHERE b1.f1 &< box '(2.0,2.0,2.5,2.5)'; + f1 +--------------------- + (2,2),(0,0) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) +(3 rows) + +-- right-or-overlap (x only) +SELECT b1.* + FROM BOX_TBL b1 + WHERE b1.f1 &> box '(2.0,2.0,2.5,2.5)'; + f1 +--------------------- + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) +(2 rows) + +-- left of +SELECT b.f1 + FROM BOX_TBL b + WHERE b.f1 << box '(3.0,3.0,5.0,5.0)'; + f1 +--------------------- + (2,2),(0,0) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) +(3 rows) + +-- area <= +SELECT b.f1 + FROM BOX_TBL b + WHERE b.f1 <= box '(3.0,3.0,5.0,5.0)'; + f1 +--------------------- + (2,2),(0,0) + (3,3),(1,1) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) +(4 rows) + +-- area < +SELECT b.f1 + FROM BOX_TBL b + WHERE b.f1 < box '(3.0,3.0,5.0,5.0)'; + f1 +--------------------- + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) +(2 rows) + +-- area = +SELECT b.f1 + FROM BOX_TBL b + WHERE b.f1 = box '(3.0,3.0,5.0,5.0)'; + f1 +------------- + (2,2),(0,0) + (3,3),(1,1) +(2 rows) + +-- area > +SELECT b.f1 + FROM BOX_TBL b -- zero area + WHERE b.f1 > box '(3.5,3.0,4.5,3.0)'; + f1 +----------------- + (2,2),(0,0) + 
(3,3),(1,1) + (-2,2),(-8,-10) +(3 rows) + +-- area >= +SELECT b.f1 + FROM BOX_TBL b -- zero area + WHERE b.f1 >= box '(3.5,3.0,4.5,3.0)'; + f1 +--------------------- + (2,2),(0,0) + (3,3),(1,1) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) +(5 rows) + +-- right of +SELECT b.f1 + FROM BOX_TBL b + WHERE box '(3.0,3.0,5.0,5.0)' >> b.f1; + f1 +--------------------- + (2,2),(0,0) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) +(3 rows) + +-- contained in +SELECT b.f1 + FROM BOX_TBL b + WHERE b.f1 <@ box '(0,0,3,3)'; + f1 +------------- + (2,2),(0,0) + (3,3),(1,1) + (3,3),(3,3) +(3 rows) + +-- contains +SELECT b.f1 + FROM BOX_TBL b + WHERE box '(0,0,3,3)' @> b.f1; + f1 +------------- + (2,2),(0,0) + (3,3),(1,1) + (3,3),(3,3) +(3 rows) + +-- box equality +SELECT b.f1 + FROM BOX_TBL b + WHERE box '(1,1,3,3)' ~= b.f1; + f1 +------------- + (3,3),(1,1) +(1 row) + +-- center of box, left unary operator +SELECT @@(b1.f1) AS p + FROM BOX_TBL b1; + p +--------- + (1,1) + (2,2) + (-5,-4) + (2.5,3) + (3,3) +(5 rows) + +-- wholly-contained +SELECT b1.*, b2.* + FROM BOX_TBL b1, BOX_TBL b2 + WHERE b1.f1 @> b2.f1 and not b1.f1 ~= b2.f1; + f1 | f1 +-------------+------------- + (3,3),(1,1) | (3,3),(3,3) +(1 row) + +SELECT height(f1), width(f1) FROM BOX_TBL; + height | width +--------+------- + 2 | 2 + 2 | 2 + 12 | 6 + 1 | 0 + 0 | 0 +(5 rows) + +-- +-- Test the SP-GiST index +-- +CREATE TEMPORARY TABLE box_temp (f1 box); +INSERT INTO box_temp + SELECT box(point(i, i), point(i * 2, i * 2)) + FROM generate_series(1, 50) AS i; +CREATE INDEX box_spgist ON box_temp USING spgist (f1); +INSERT INTO box_temp + VALUES (NULL), + ('(0,0)(0,100)'), + ('(-3,4.3333333333)(40,1)'), + ('(0,100)(0,infinity)'), + ('(-infinity,0)(0,infinity)'), + ('(-infinity,-infinity)(infinity,infinity)'); +SET enable_seqscan = false; +SELECT * FROM box_temp WHERE f1 << '(10,20),(30,40)'; + f1 +---------------------------- + (2,2),(1,1) + (4,4),(2,2) + (6,6),(3,3) + (8,8),(4,4) + (0,100),(0,0) + (0,Infinity),(0,100) + (0,Infinity),(-Infinity,0) +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 << '(10,20),(30,40)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 << '(30,40),(10,20)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 &< '(10,4.333334),(5,100)'; + f1 +---------------------------- + (2,2),(1,1) + (4,4),(2,2) + (6,6),(3,3) + (8,8),(4,4) + (10,10),(5,5) + (0,100),(0,0) + (0,Infinity),(0,100) + (0,Infinity),(-Infinity,0) +(8 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &< '(10,4.333334),(5,100)'; + QUERY PLAN +---------------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 &< '(10,100),(5,4.333334)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 && '(15,20),(25,30)'; + f1 +------------------------------------------- + (20,20),(10,10) + (22,22),(11,11) + (24,24),(12,12) + (26,26),(13,13) + (28,28),(14,14) + (30,30),(15,15) + (32,32),(16,16) + (34,34),(17,17) + (36,36),(18,18) + (38,38),(19,19) + (40,40),(20,20) + (42,42),(21,21) + (44,44),(22,22) + (46,46),(23,23) + (48,48),(24,24) + (50,50),(25,25) + (Infinity,Infinity),(-Infinity,-Infinity) +(17 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 && '(15,20),(25,30)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 && '(25,30),(15,20)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 &> '(40,30),(45,50)'; + f1 +------------------- 
+ (80,80),(40,40) + (82,82),(41,41) + (84,84),(42,42) + (86,86),(43,43) + (88,88),(44,44) + (90,90),(45,45) + (92,92),(46,46) + (94,94),(47,47) + (96,96),(48,48) + (98,98),(49,49) + (100,100),(50,50) +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &> '(40,30),(45,50)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 &> '(45,50),(40,30)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 >> '(30,40),(40,30)'; + f1 +------------------- + (82,82),(41,41) + (84,84),(42,42) + (86,86),(43,43) + (88,88),(44,44) + (90,90),(45,45) + (92,92),(46,46) + (94,94),(47,47) + (96,96),(48,48) + (98,98),(49,49) + (100,100),(50,50) +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 >> '(30,40),(40,30)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 >> '(40,40),(30,30)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 <<| '(10,4.33334),(5,100)'; + f1 +-------------------------- + (2,2),(1,1) + (4,4),(2,2) + (40,4.3333333333),(-3,1) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 <<| '(10,4.33334),(5,100)'; + QUERY PLAN +---------------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 <<| '(10,100),(5,4.33334)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 &<| '(10,4.3333334),(5,1)'; + f1 +-------------------------- + (2,2),(1,1) + (4,4),(2,2) + (40,4.3333333333),(-3,1) +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 &<| '(10,4.3333334),(5,1)'; + QUERY PLAN +---------------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 &<| '(10,4.3333334),(5,1)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 |&> '(49.99,49.99),(49.99,49.99)'; + f1 +---------------------- + (100,100),(50,50) + (0,Infinity),(0,100) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 |&> '(49.99,49.99),(49.99,49.99)'; + QUERY PLAN +----------------------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 |&> '(49.99,49.99),(49.99,49.99)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 |>> '(37,38),(39,40)'; + f1 +---------------------- + (82,82),(41,41) + (84,84),(42,42) + (86,86),(43,43) + (88,88),(44,44) + (90,90),(45,45) + (92,92),(46,46) + (94,94),(47,47) + (96,96),(48,48) + (98,98),(49,49) + (100,100),(50,50) + (0,Infinity),(0,100) +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 |>> '(37,38),(39,40)'; + QUERY PLAN +----------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 |>> '(39,40),(37,38)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 @> '(10,11),(15,16)'; + f1 +------------------------------------------- + (16,16),(8,8) + (18,18),(9,9) + (20,20),(10,10) + (Infinity,Infinity),(-Infinity,-Infinity) +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 @> '(10,11),(15,15)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 @> '(15,15),(10,11)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 <@ '(10,15),(30,35)'; + f1 +----------------- + (30,30),(15,15) +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 <@ '(10,15),(30,35)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 <@ 
'(30,35),(10,15)'::box) +(2 rows) + +SELECT * FROM box_temp WHERE f1 ~= '(20,20),(40,40)'; + f1 +----------------- + (40,40),(20,20) +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM box_temp WHERE f1 ~= '(20,20),(40,40)'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using box_spgist on box_temp + Index Cond: (f1 ~= '(40,40),(20,20)'::box) +(2 rows) + +RESET enable_seqscan; +DROP INDEX box_spgist; +-- +-- Test the SP-GiST index on the larger volume of data +-- +CREATE TABLE quad_box_tbl (id int, b box); +INSERT INTO quad_box_tbl + SELECT (x - 1) * 100 + y, box(point(x * 10, y * 10), point(x * 10 + 5, y * 10 + 5)) + FROM generate_series(1, 100) x, + generate_series(1, 100) y; +-- insert repeating data to test allTheSame +INSERT INTO quad_box_tbl + SELECT i, '((200, 300),(210, 310))' + FROM generate_series(10001, 11000) AS i; +INSERT INTO quad_box_tbl +VALUES + (11001, NULL), + (11002, NULL), + (11003, '((-infinity,-infinity),(infinity,infinity))'), + (11004, '((-infinity,100),(-infinity,500))'), + (11005, '((-infinity,-infinity),(700,infinity))'); +CREATE INDEX quad_box_tbl_idx ON quad_box_tbl USING spgist(b); +-- get reference results for ORDER BY distance from seq scan +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +CREATE TABLE quad_box_tbl_ord_seq1 AS +SELECT rank() OVER (ORDER BY b <-> point '123,456') n, b <-> point '123,456' dist, id +FROM quad_box_tbl; +CREATE TABLE quad_box_tbl_ord_seq2 AS +SELECT rank() OVER (ORDER BY b <-> point '123,456') n, b <-> point '123,456' dist, id +FROM quad_box_tbl WHERE b <@ box '((200,300),(500,600))'; +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = ON; +SELECT count(*) FROM quad_box_tbl WHERE b << box '((100,200),(300,500))'; + count +------- + 901 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b &< box '((100,200),(300,500))'; + count +------- + 3901 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b && box '((100,200),(300,500))'; + count +------- + 1653 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b &> box '((100,200),(300,500))'; + count +------- + 10100 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b >> box '((100,200),(300,500))'; + count +------- + 7000 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b >> box '((100,200),(300,500))'; + count +------- + 7000 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b <<| box '((100,200),(300,500))'; + count +------- + 1900 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b &<| box '((100,200),(300,500))'; + count +------- + 5901 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b |&> box '((100,200),(300,500))'; + count +------- + 9100 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b |>> box '((100,200),(300,500))'; + count +------- + 5000 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b @> box '((201,301),(202,303))'; + count +------- + 1003 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b <@ box '((100,200),(300,500))'; + count +------- + 1600 +(1 row) + +SELECT count(*) FROM quad_box_tbl WHERE b ~= box '((200,300),(205,305))'; + count +------- + 1 +(1 row) + +-- test ORDER BY distance +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY b <-> point '123,456') n, b <-> point '123,456' dist, id +FROM quad_box_tbl; + QUERY PLAN +--------------------------------------------------------- + WindowAgg + -> Index Scan using quad_box_tbl_idx on quad_box_tbl + Order By: (b <-> '(123,456)'::point) +(3 rows) + 
+CREATE TEMP TABLE quad_box_tbl_ord_idx1 AS +SELECT rank() OVER (ORDER BY b <-> point '123,456') n, b <-> point '123,456' dist, id +FROM quad_box_tbl; +SELECT * +FROM quad_box_tbl_ord_seq1 seq FULL JOIN quad_box_tbl_ord_idx1 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + n | dist | id | n | dist | id +---+------+----+---+------+---- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY b <-> point '123,456') n, b <-> point '123,456' dist, id +FROM quad_box_tbl WHERE b <@ box '((200,300),(500,600))'; + QUERY PLAN +--------------------------------------------------------- + WindowAgg + -> Index Scan using quad_box_tbl_idx on quad_box_tbl + Index Cond: (b <@ '(500,600),(200,300)'::box) + Order By: (b <-> '(123,456)'::point) +(4 rows) + +CREATE TEMP TABLE quad_box_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY b <-> point '123,456') n, b <-> point '123,456' dist, id +FROM quad_box_tbl WHERE b <@ box '((200,300),(500,600))'; +SELECT * +FROM quad_box_tbl_ord_seq2 seq FULL JOIN quad_box_tbl_ord_idx2 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + n | dist | id | n | dist | id +---+------+----+---+------+---- +(0 rows) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('200', 'box'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('200', 'box'); + message | detail | hint | sql_error_code +------------------------------------------+--------+------+---------------- + invalid input syntax for type box: "200" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('((200,300),(500, xyz))', 'box'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('((200,300),(500, xyz))', 'box'); + message | detail | hint | sql_error_code +-------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type box: "((200,300),(500, xyz))" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/brin.out b/src/test/regress/expected/brin.out new file mode 100644 index 0000000..f0b7de5 --- /dev/null +++ b/src/test/regress/expected/brin.out @@ -0,0 +1,574 @@ +CREATE TABLE brintest (byteacol bytea, + charcol "char", + namecol name, + int8col bigint, + int2col smallint, + int4col integer, + textcol text, + oidcol oid, + tidcol tid, + float4col real, + float8col double precision, + macaddrcol macaddr, + inetcol inet, + cidrcol cidr, + bpcharcol character, + datecol date, + timecol time without time zone, + timestampcol timestamp without time zone, + timestamptzcol timestamp with time zone, + intervalcol interval, + timetzcol time with time zone, + bitcol bit(10), + varbitcol bit varying(16), + numericcol numeric, + uuidcol uuid, + int4rangecol int4range, + lsncol pg_lsn, + boxcol box +) WITH (fillfactor=10, autovacuum_enabled=off); +INSERT INTO brintest SELECT + repeat(stringu1, 8)::bytea, + substr(stringu1, 1, 1)::"char", + stringu1::name, 142857 * tenthous, + thousand, + twothousand, + repeat(stringu1, 8), + unique1::oid, + format('(%s,%s)', tenthous, twenty)::tid, + (four + 1.0)/(hundred+1), + odd::float8 / (tenthous + 1), + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + inet '10.2.3.4/24' + tenthous, + cidr '10.2.3/24' + 
tenthous, + substr(stringu1, 1, 1)::bpchar, + date '1995-08-15' + tenthous, + time '01:20:30' + thousand * interval '18.5 second', + timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', + timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', + justify_days(justify_hours(tenthous * interval '12 minutes')), + timetz '01:30:20+02' + hundred * interval '15 seconds', + thousand::bit(10), + tenthous::bit(16)::varbit, + tenthous::numeric(36,30) * fivethous * even / (hundred + 1), + format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, + int4range(thousand, twothousand), + format('%s/%s%s', odd, even, tenthous)::pg_lsn, + box(point(odd, even), point(thousand, twothousand)) +FROM tenk1 ORDER BY unique2 LIMIT 100; +-- throw in some NULL's and different values +INSERT INTO brintest (inetcol, cidrcol, int4rangecol) SELECT + inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, + cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous, + 'empty'::int4range +FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; +CREATE INDEX brinidx ON brintest USING brin ( + byteacol, + charcol, + namecol, + int8col, + int2col, + int4col, + textcol, + oidcol, + tidcol, + float4col, + float8col, + macaddrcol, + inetcol inet_inclusion_ops, + inetcol inet_minmax_ops, + cidrcol inet_inclusion_ops, + cidrcol inet_minmax_ops, + bpcharcol, + datecol, + timecol, + timestampcol, + timestamptzcol, + intervalcol, + timetzcol, + bitcol, + varbitcol, + numericcol, + uuidcol, + int4rangecol, + lsncol, + boxcol +) with (pages_per_range = 1); +CREATE TABLE brinopers (colname name, typ text, + op text[], value text[], matches int[], + check (cardinality(op) = cardinality(value)), + check (cardinality(op) = cardinality(matches))); +INSERT INTO brinopers VALUES + ('byteacol', 'bytea', + '{>, >=, =, <=, <}', + '{AAAAAA, AAAAAA, BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA, ZZZZZZ, ZZZZZZ}', + '{100, 100, 1, 100, 100}'), + ('charcol', '"char"', + '{>, >=, =, <=, <}', + '{A, A, M, Z, Z}', + '{97, 100, 6, 100, 98}'), + ('namecol', 'name', + '{>, >=, =, <=, <}', + '{AAAAAA, AAAAAA, MAAAAA, ZZAAAA, ZZAAAA}', + '{100, 100, 2, 100, 100}'), + ('int2col', 'int2', + '{>, >=, =, <=, <}', + '{0, 0, 800, 999, 999}', + '{100, 100, 1, 100, 100}'), + ('int2col', 'int4', + '{>, >=, =, <=, <}', + '{0, 0, 800, 999, 1999}', + '{100, 100, 1, 100, 100}'), + ('int2col', 'int8', + '{>, >=, =, <=, <}', + '{0, 0, 800, 999, 1428427143}', + '{100, 100, 1, 100, 100}'), + ('int4col', 'int2', + '{>, >=, =, <=, <}', + '{0, 0, 800, 1999, 1999}', + '{100, 100, 1, 100, 100}'), + ('int4col', 'int4', + '{>, >=, =, <=, <}', + '{0, 0, 800, 1999, 1999}', + '{100, 100, 1, 100, 100}'), + ('int4col', 'int8', + '{>, >=, =, <=, <}', + '{0, 0, 800, 1999, 1428427143}', + '{100, 100, 1, 100, 100}'), + ('int8col', 'int2', + '{>, >=}', + '{0, 0}', + '{100, 100}'), + ('int8col', 'int4', + '{>, >=}', + '{0, 0}', + '{100, 100}'), + ('int8col', 'int8', + '{>, >=, =, <=, <}', + '{0, 0, 1257141600, 1428427143, 1428427143}', + '{100, 100, 1, 100, 100}'), + ('textcol', 'text', + '{>, >=, =, <=, <}', + '{ABABAB, ABABAB, BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA, ZZAAAA, ZZAAAA}', + '{100, 100, 1, 100, 100}'), + ('oidcol', 'oid', + '{>, >=, =, <=, <}', + '{0, 0, 8800, 9999, 9999}', + '{100, 100, 1, 100, 100}'), + ('tidcol', 'tid', + '{>, >=, =, <=, <}', + '{"(0,0)", "(0,0)", 
"(8800,0)", "(9999,19)", "(9999,19)"}', + '{100, 100, 1, 100, 100}'), + ('float4col', 'float4', + '{>, >=, =, <=, <}', + '{0.0103093, 0.0103093, 1, 1, 1}', + '{100, 100, 4, 100, 96}'), + ('float4col', 'float8', + '{>, >=, =, <=, <}', + '{0.0103093, 0.0103093, 1, 1, 1}', + '{100, 100, 4, 100, 96}'), + ('float8col', 'float4', + '{>, >=, =, <=, <}', + '{0, 0, 0, 1.98, 1.98}', + '{99, 100, 1, 100, 100}'), + ('float8col', 'float8', + '{>, >=, =, <=, <}', + '{0, 0, 0, 1.98, 1.98}', + '{99, 100, 1, 100, 100}'), + ('macaddrcol', 'macaddr', + '{>, >=, =, <=, <}', + '{00:00:01:00:00:00, 00:00:01:00:00:00, 2c:00:2d:00:16:00, ff:fe:00:00:00:00, ff:fe:00:00:00:00}', + '{99, 100, 2, 100, 100}'), + ('inetcol', 'inet', + '{&&, =, <, <=, >, >=, >>=, >>, <<=, <<}', + '{10/8, 10.2.14.231/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0, 10.2.14.231/24, 10.2.14.231/25, 10.2.14.231/8, 0/0}', + '{100, 1, 100, 100, 125, 125, 2, 2, 100, 100}'), + ('inetcol', 'inet', + '{&&, >>=, <<=, =}', + '{fe80::6e40:8ff:fea9:a673/32, fe80::6e40:8ff:fea9:8c46, fe80::6e40:8ff:fea9:a673/32, fe80::6e40:8ff:fea9:8c46}', + '{25, 1, 25, 1}'), + ('inetcol', 'cidr', + '{&&, <, <=, >, >=, >>=, >>, <<=, <<}', + '{10/8, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0, 10.2.14/24, 10.2.14/25, 10/8, 0/0}', + '{100, 100, 100, 125, 125, 2, 2, 100, 100}'), + ('inetcol', 'cidr', + '{&&, >>=, <<=, =}', + '{fe80::/32, fe80::6e40:8ff:fea9:8c46, fe80::/32, fe80::6e40:8ff:fea9:8c46}', + '{25, 1, 25, 1}'), + ('cidrcol', 'inet', + '{&&, =, <, <=, >, >=, >>=, >>, <<=, <<}', + '{10/8, 10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0, 10.2.14.231/24, 10.2.14.231/25, 10.2.14.231/8, 0/0}', + '{100, 2, 100, 100, 125, 125, 2, 2, 100, 100}'), + ('cidrcol', 'inet', + '{&&, >>=, <<=, =}', + '{fe80::6e40:8ff:fea9:a673/32, fe80::6e40:8ff:fea9:8c46, fe80::6e40:8ff:fea9:a673/32, fe80::6e40:8ff:fea9:8c46}', + '{25, 1, 25, 1}'), + ('cidrcol', 'cidr', + '{&&, =, <, <=, >, >=, >>=, >>, <<=, <<}', + '{10/8, 10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0, 10.2.14/24, 10.2.14/25, 10/8, 0/0}', + '{100, 2, 100, 100, 125, 125, 2, 2, 100, 100}'), + ('cidrcol', 'cidr', + '{&&, >>=, <<=, =}', + '{fe80::/32, fe80::6e40:8ff:fea9:8c46, fe80::/32, fe80::6e40:8ff:fea9:8c46}', + '{25, 1, 25, 1}'), + ('bpcharcol', 'bpchar', + '{>, >=, =, <=, <}', + '{A, A, W, Z, Z}', + '{97, 100, 6, 100, 98}'), + ('datecol', 'date', + '{>, >=, =, <=, <}', + '{1995-08-15, 1995-08-15, 2009-12-01, 2022-12-30, 2022-12-30}', + '{100, 100, 1, 100, 100}'), + ('timecol', 'time', + '{>, >=, =, <=, <}', + '{01:20:30, 01:20:30, 02:28:57, 06:28:31.5, 06:28:31.5}', + '{100, 100, 1, 100, 100}'), + ('timestampcol', 'timestamp', + '{>, >=, =, <=, <}', + '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', + '{100, 100, 1, 100, 100}'), + ('timestampcol', 'timestamptz', + '{>, >=, =, <=, <}', + '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', + '{100, 100, 1, 100, 100}'), + ('timestamptzcol', 'timestamptz', + '{>, >=, =, <=, <}', + '{1972-10-10 03:00:00-04, 1972-10-10 03:00:00-04, 1972-10-19 09:00:00-07, 1972-11-20 19:00:00-03, 1972-11-20 19:00:00-03}', + '{100, 100, 1, 100, 100}'), + ('intervalcol', 'interval', + '{>, >=, =, <=, <}', + '{00:00:00, 00:00:00, 1 mons 13 days 12:24, 2 mons 23 days 07:48:00, 1 year}', + '{100, 100, 1, 100, 100}'), + ('timetzcol', 'timetz', + '{>, >=, =, <=, <}', + '{01:30:20+02, 01:30:20+02, 01:35:50+02, 23:55:05+02, 23:55:05+02}', + 
'{99, 100, 2, 100, 100}'), + ('bitcol', 'bit(10)', + '{>, >=, =, <=, <}', + '{0000000010, 0000000010, 0011011110, 1111111000, 1111111000}', + '{100, 100, 1, 100, 100}'), + ('varbitcol', 'varbit(16)', + '{>, >=, =, <=, <}', + '{0000000000000100, 0000000000000100, 0001010001100110, 1111111111111000, 1111111111111000}', + '{100, 100, 1, 100, 100}'), + ('numericcol', 'numeric', + '{>, >=, =, <=, <}', + '{0.00, 0.01, 2268164.347826086956521739130434782609, 99470151.9, 99470151.9}', + '{100, 100, 1, 100, 100}'), + ('uuidcol', 'uuid', + '{>, >=, =, <=, <}', + '{00040004-0004-0004-0004-000400040004, 00040004-0004-0004-0004-000400040004, 52225222-5222-5222-5222-522252225222, 99989998-9998-9998-9998-999899989998, 99989998-9998-9998-9998-999899989998}', + '{100, 100, 1, 100, 100}'), + ('int4rangecol', 'int4range', + '{<<, &<, &&, &>, >>, @>, <@, =, <, <=, >, >=}', + '{"[10000,)","[10000,)","(,]","[3,4)","[36,44)","(1500,1501]","[3,4)","[222,1222)","[36,44)","[43,1043)","[367,4466)","[519,)"}', + '{53, 53, 53, 53, 50, 22, 72, 1, 74, 75, 34, 21}'), + ('int4rangecol', 'int4range', + '{@>, <@, =, <=, >, >=}', + '{empty, empty, empty, empty, empty, empty}', + '{125, 72, 72, 72, 53, 125}'), + ('int4rangecol', 'int4', + '{@>}', + '{1500}', + '{22}'), + ('lsncol', 'pg_lsn', + '{>, >=, =, <=, <, IS, IS NOT}', + '{0/1200, 0/1200, 44/455222, 198/1999799, 198/1999799, NULL, NULL}', + '{100, 100, 1, 100, 100, 25, 100}'), + ('boxcol', 'point', + '{@>}', + '{"(500,43)"}', + '{11}'), + ('boxcol', 'box', + '{<<, &<, &&, &>, >>, <<|, &<|, |&>, |>>, @>, <@, ~=}', + '{"((1000,2000),(3000,4000))","((1,2),(3000,4000))","((1,2),(3000,4000))","((1,2),(3000,4000))","((1,2),(3,4))","((1000,2000),(3000,4000))","((1,2000),(3,4000))","((1000,2),(3000,4))","((1,2),(3,4))","((1,2),(300,400))","((1,2),(3000,4000))","((222,1222),(44,45))"}', + '{100, 100, 100, 99, 96, 100, 100, 99, 96, 1, 99, 1}'); +DO $x$ +DECLARE + r record; + r2 record; + cond text; + idx_ctids tid[]; + ss_ctids tid[]; + count int; + plan_ok bool; + plan_line text; +BEGIN + FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers, unnest(op) WITH ORDINALITY AS oper LOOP + + -- prepare the condition + IF r.value IS NULL THEN + cond := format('%I %s %L', r.colname, r.oper, r.value); + ELSE + cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); + END IF; + + -- run the query using the brin index + SET enable_seqscan = 0; + SET enable_bitmapscan = 1; + + plan_ok := false; + FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest WHERE %s $y$, cond) LOOP + IF plan_line LIKE '%Bitmap Heap Scan on brintest%' THEN + plan_ok := true; + END IF; + END LOOP; + IF NOT plan_ok THEN + RAISE WARNING 'did not get bitmap indexscan plan for %', r; + END IF; + + EXECUTE format($y$SELECT array_agg(ctid) FROM brintest WHERE %s $y$, cond) + INTO idx_ctids; + + -- run the query using a seqscan + SET enable_seqscan = 1; + SET enable_bitmapscan = 0; + + plan_ok := false; + FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest WHERE %s $y$, cond) LOOP + IF plan_line LIKE '%Seq Scan on brintest%' THEN + plan_ok := true; + END IF; + END LOOP; + IF NOT plan_ok THEN + RAISE WARNING 'did not get seqscan plan for %', r; + END IF; + + EXECUTE format($y$SELECT array_agg(ctid) FROM brintest WHERE %s $y$, cond) + INTO ss_ctids; + + -- make sure both return the same results + count := array_length(idx_ctids, 1); + + IF NOT (count = array_length(ss_ctids, 1) AND + idx_ctids @> ss_ctids AND + idx_ctids <@ 
ss_ctids) THEN + -- report the results of each scan to make the differences obvious + RAISE WARNING 'something not right in %: count %', r, count; + SET enable_seqscan = 1; + SET enable_bitmapscan = 0; + FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest WHERE ' || cond LOOP + RAISE NOTICE 'seqscan: %', r2; + END LOOP; + + SET enable_seqscan = 0; + SET enable_bitmapscan = 1; + FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest WHERE ' || cond LOOP + RAISE NOTICE 'bitmapscan: %', r2; + END LOOP; + END IF; + + -- make sure we found expected number of matches + IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; + END LOOP; +END; +$x$; +RESET enable_seqscan; +RESET enable_bitmapscan; +INSERT INTO brintest SELECT + repeat(stringu1, 42)::bytea, + substr(stringu1, 1, 1)::"char", + stringu1::name, 142857 * tenthous, + thousand, + twothousand, + repeat(stringu1, 42), + unique1::oid, + format('(%s,%s)', tenthous, twenty)::tid, + (four + 1.0)/(hundred+1), + odd::float8 / (tenthous + 1), + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + inet '10.2.3.4' + tenthous, + cidr '10.2.3/24' + tenthous, + substr(stringu1, 1, 1)::bpchar, + date '1995-08-15' + tenthous, + time '01:20:30' + thousand * interval '18.5 second', + timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', + timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', + justify_days(justify_hours(tenthous * interval '12 minutes')), + timetz '01:30:20' + hundred * interval '15 seconds', + thousand::bit(10), + tenthous::bit(16)::varbit, + tenthous::numeric(36,30) * fivethous * even / (hundred + 1), + format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, + int4range(thousand, twothousand), + format('%s/%s%s', odd, even, tenthous)::pg_lsn, + box(point(odd, even), point(thousand, twothousand)) +FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; +SELECT brin_desummarize_range('brinidx', 0); + brin_desummarize_range +------------------------ + +(1 row) + +VACUUM brintest; -- force a summarization cycle in brinidx +UPDATE brintest SET int8col = int8col * int4col; +UPDATE brintest SET textcol = '' WHERE textcol IS NOT NULL; +-- Tests for brin_summarize_new_values +SELECT brin_summarize_new_values('brintest'); -- error, not an index +ERROR: "brintest" is not an index +SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index +ERROR: "tenk1_unique1" is not a BRIN index +SELECT brin_summarize_new_values('brinidx'); -- ok, no change expected + brin_summarize_new_values +--------------------------- + 0 +(1 row) + +-- Tests for brin_desummarize_range +SELECT brin_desummarize_range('brinidx', -1); -- error, invalid range +ERROR: block number out of range: -1 +SELECT brin_desummarize_range('brinidx', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_desummarize_range('brinidx', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_desummarize_range('brinidx', 100000000); + brin_desummarize_range +------------------------ + +(1 row) + +-- Test brin_summarize_range +CREATE TABLE brin_summarize ( + value int +) WITH (fillfactor=10, autovacuum_enabled=false); +CREATE INDEX brin_summarize_idx ON brin_summarize USING brin (value) WITH (pages_per_range=2); +-- 
Fill a few pages +DO $$ +DECLARE curtid tid; +BEGIN + LOOP + INSERT INTO brin_summarize VALUES (1) RETURNING ctid INTO curtid; + EXIT WHEN curtid > tid '(2, 0)'; + END LOOP; +END; +$$; +-- summarize one range +SELECT brin_summarize_range('brin_summarize_idx', 0); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- nothing: already summarized +SELECT brin_summarize_range('brin_summarize_idx', 1); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- summarize one range +SELECT brin_summarize_range('brin_summarize_idx', 2); + brin_summarize_range +---------------------- + 1 +(1 row) + +-- nothing: page doesn't exist in table +SELECT brin_summarize_range('brin_summarize_idx', 4294967295); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- invalid block number values +SELECT brin_summarize_range('brin_summarize_idx', -1); +ERROR: block number out of range: -1 +SELECT brin_summarize_range('brin_summarize_idx', 4294967296); +ERROR: block number out of range: 4294967296 +-- test value merging in add_value +CREATE TABLE brintest_2 (n numrange); +CREATE INDEX brinidx_2 ON brintest_2 USING brin (n); +INSERT INTO brintest_2 VALUES ('empty'); +INSERT INTO brintest_2 VALUES (numrange(0, 2^1000::numeric)); +INSERT INTO brintest_2 VALUES ('(-1, 0)'); +SELECT brin_desummarize_range('brinidx', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_summarize_range('brinidx', 0); + brin_summarize_range +---------------------- + 1 +(1 row) + +DROP TABLE brintest_2; +-- test brin cost estimates behave sanely based on correlation of values +CREATE TABLE brin_test (a INT, b INT); +INSERT INTO brin_test SELECT x/100,x%100 FROM generate_series(1,10000) x(x); +CREATE INDEX brin_test_a_idx ON brin_test USING brin (a) WITH (pages_per_range = 2); +CREATE INDEX brin_test_b_idx ON brin_test USING brin (b) WITH (pages_per_range = 2); +VACUUM ANALYZE brin_test; +-- Ensure brin index is used when columns are perfectly correlated +EXPLAIN (COSTS OFF) SELECT * FROM brin_test WHERE a = 1; + QUERY PLAN +--------------------------------------------- + Bitmap Heap Scan on brin_test + Recheck Cond: (a = 1) + -> Bitmap Index Scan on brin_test_a_idx + Index Cond: (a = 1) +(4 rows) + +-- Ensure brin index is not used when values are not correlated +EXPLAIN (COSTS OFF) SELECT * FROM brin_test WHERE b = 1; + QUERY PLAN +----------------------- + Seq Scan on brin_test + Filter: (b = 1) +(2 rows) + +-- make sure data are properly de-toasted in BRIN index +CREATE TABLE brintest_3 (a text, b text, c text, d text); +-- long random strings (~2000 chars each, so ~6kB for min/max on two +-- columns) to trigger toasting +WITH rand_value AS (SELECT string_agg(fipshash(i::text),'') AS val FROM generate_series(1,60) s(i)) +INSERT INTO brintest_3 +SELECT val, val, val, val FROM rand_value; +CREATE INDEX brin_test_toast_idx ON brintest_3 USING brin (b, c); +DELETE FROM brintest_3; +-- We need to wait a bit for all transactions to complete, so that the +-- vacuum actually removes the TOAST rows. Creating an index concurrently +-- is one way to achieve that, because it performs exactly such a wait.
+CREATE INDEX CONCURRENTLY brin_test_temp_idx ON brintest_3(a); +DROP INDEX brin_test_temp_idx; +-- vacuum the table, to discard TOAST data +VACUUM brintest_3; +-- retry insert with a different random-looking (but deterministic) value +-- the value is different, and so should replace either min or max in the +-- brin summary +WITH rand_value AS (SELECT string_agg(fipshash((-i)::text),'') AS val FROM generate_series(1,60) s(i)) +INSERT INTO brintest_3 +SELECT val, val, val, val FROM rand_value; +-- now try some queries, accessing the brin index +SET enable_seqscan = off; +EXPLAIN (COSTS OFF) +SELECT * FROM brintest_3 WHERE b < '0'; + QUERY PLAN +------------------------------------------------ + Bitmap Heap Scan on brintest_3 + Recheck Cond: (b < '0'::text) + -> Bitmap Index Scan on brin_test_toast_idx + Index Cond: (b < '0'::text) +(4 rows) + +SELECT * FROM brintest_3 WHERE b < '0'; + a | b | c | d +---+---+---+--- +(0 rows) + +DROP TABLE brintest_3; +RESET enable_seqscan; +-- test an unlogged table, mostly to get coverage of brinbuildempty +CREATE UNLOGGED TABLE brintest_unlogged (n numrange); +CREATE INDEX brinidx_unlogged ON brintest_unlogged USING brin (n); +INSERT INTO brintest_unlogged VALUES (numrange(0, 2^1000::numeric)); +DROP TABLE brintest_unlogged; diff --git a/src/test/regress/expected/brin_bloom.out b/src/test/regress/expected/brin_bloom.out new file mode 100644 index 0000000..32c56a9 --- /dev/null +++ b/src/test/regress/expected/brin_bloom.out @@ -0,0 +1,428 @@ +CREATE TABLE brintest_bloom (byteacol bytea, + charcol "char", + namecol name, + int8col bigint, + int2col smallint, + int4col integer, + textcol text, + oidcol oid, + float4col real, + float8col double precision, + macaddrcol macaddr, + inetcol inet, + cidrcol cidr, + bpcharcol character, + datecol date, + timecol time without time zone, + timestampcol timestamp without time zone, + timestamptzcol timestamp with time zone, + intervalcol interval, + timetzcol time with time zone, + numericcol numeric, + uuidcol uuid, + lsncol pg_lsn +) WITH (fillfactor=10); +INSERT INTO brintest_bloom SELECT + repeat(stringu1, 8)::bytea, + substr(stringu1, 1, 1)::"char", + stringu1::name, 142857 * tenthous, + thousand, + twothousand, + repeat(stringu1, 8), + unique1::oid, + (four + 1.0)/(hundred+1), + odd::float8 / (tenthous + 1), + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + inet '10.2.3.4/24' + tenthous, + cidr '10.2.3/24' + tenthous, + substr(stringu1, 1, 1)::bpchar, + date '1995-08-15' + tenthous, + time '01:20:30' + thousand * interval '18.5 second', + timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', + timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', + justify_days(justify_hours(tenthous * interval '12 minutes')), + timetz '01:30:20+02' + hundred * interval '15 seconds', + tenthous::numeric(36,30) * fivethous * even / (hundred + 1), + format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, + format('%s/%s%s', odd, even, tenthous)::pg_lsn +FROM tenk1 ORDER BY unique2 LIMIT 100; +-- throw in some NULL's and different values +INSERT INTO brintest_bloom (inetcol, cidrcol) SELECT + inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, + cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous +FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; +-- test bloom specific index 
options +-- ndistinct must be >= -1.0 +CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( + byteacol bytea_bloom_ops(n_distinct_per_range = -1.1) +); +ERROR: value -1.1 out of bounds for option "n_distinct_per_range" +DETAIL: Valid values are between "-1.000000" and "2147483647.000000". +-- false_positive_rate must be between 0.0001 and 0.25 +CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( + byteacol bytea_bloom_ops(false_positive_rate = 0.00009) +); +ERROR: value 0.00009 out of bounds for option "false_positive_rate" +DETAIL: Valid values are between "0.000100" and "0.250000". +CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( + byteacol bytea_bloom_ops(false_positive_rate = 0.26) +); +ERROR: value 0.26 out of bounds for option "false_positive_rate" +DETAIL: Valid values are between "0.000100" and "0.250000". +CREATE INDEX brinidx_bloom ON brintest_bloom USING brin ( + byteacol bytea_bloom_ops, + charcol char_bloom_ops, + namecol name_bloom_ops, + int8col int8_bloom_ops, + int2col int2_bloom_ops, + int4col int4_bloom_ops, + textcol text_bloom_ops, + oidcol oid_bloom_ops, + float4col float4_bloom_ops, + float8col float8_bloom_ops, + macaddrcol macaddr_bloom_ops, + inetcol inet_bloom_ops, + cidrcol inet_bloom_ops, + bpcharcol bpchar_bloom_ops, + datecol date_bloom_ops, + timecol time_bloom_ops, + timestampcol timestamp_bloom_ops, + timestamptzcol timestamptz_bloom_ops, + intervalcol interval_bloom_ops, + timetzcol timetz_bloom_ops, + numericcol numeric_bloom_ops, + uuidcol uuid_bloom_ops, + lsncol pg_lsn_bloom_ops +) with (pages_per_range = 1); +CREATE TABLE brinopers_bloom (colname name, typ text, + op text[], value text[], matches int[], + check (cardinality(op) = cardinality(value)), + check (cardinality(op) = cardinality(matches))); +INSERT INTO brinopers_bloom VALUES + ('byteacol', 'bytea', + '{=}', + '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', + '{1}'), + ('charcol', '"char"', + '{=}', + '{M}', + '{6}'), + ('namecol', 'name', + '{=}', + '{MAAAAA}', + '{2}'), + ('int2col', 'int2', + '{=}', + '{800}', + '{1}'), + ('int4col', 'int4', + '{=}', + '{800}', + '{1}'), + ('int8col', 'int8', + '{=}', + '{1257141600}', + '{1}'), + ('textcol', 'text', + '{=}', + '{BNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAABNAAAA}', + '{1}'), + ('oidcol', 'oid', + '{=}', + '{8800}', + '{1}'), + ('float4col', 'float4', + '{=}', + '{1}', + '{4}'), + ('float8col', 'float8', + '{=}', + '{0}', + '{1}'), + ('macaddrcol', 'macaddr', + '{=}', + '{2c:00:2d:00:16:00}', + '{2}'), + ('inetcol', 'inet', + '{=}', + '{10.2.14.231/24}', + '{1}'), + ('inetcol', 'cidr', + '{=}', + '{fe80::6e40:8ff:fea9:8c46}', + '{1}'), + ('cidrcol', 'inet', + '{=}', + '{10.2.14/24}', + '{2}'), + ('cidrcol', 'inet', + '{=}', + '{fe80::6e40:8ff:fea9:8c46}', + '{1}'), + ('cidrcol', 'cidr', + '{=}', + '{10.2.14/24}', + '{2}'), + ('cidrcol', 'cidr', + '{=}', + '{fe80::6e40:8ff:fea9:8c46}', + '{1}'), + ('bpcharcol', 'bpchar', + '{=}', + '{W}', + '{6}'), + ('datecol', 'date', + '{=}', + '{2009-12-01}', + '{1}'), + ('timecol', 'time', + '{=}', + '{02:28:57}', + '{1}'), + ('timestampcol', 'timestamp', + '{=}', + '{1964-03-24 19:26:45}', + '{1}'), + ('timestamptzcol', 'timestamptz', + '{=}', + '{1972-10-19 09:00:00-07}', + '{1}'), + ('intervalcol', 'interval', + '{=}', + '{1 mons 13 days 12:24}', + '{1}'), + ('timetzcol', 'timetz', + '{=}', + '{01:35:50+02}', + '{2}'), + ('numericcol', 'numeric', + '{=}', + '{2268164.347826086956521739130434782609}', + '{1}'), + ('uuidcol', 'uuid', + '{=}', + 
'{52225222-5222-5222-5222-522252225222}', + '{1}'), + ('lsncol', 'pg_lsn', + '{=, IS, IS NOT}', + '{44/455222, NULL, NULL}', + '{1, 25, 100}'); +DO $x$ +DECLARE + r record; + r2 record; + cond text; + idx_ctids tid[]; + ss_ctids tid[]; + count int; + plan_ok bool; + plan_line text; +BEGIN + FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_bloom, unnest(op) WITH ORDINALITY AS oper LOOP + + -- prepare the condition + IF r.value IS NULL THEN + cond := format('%I %s %L', r.colname, r.oper, r.value); + ELSE + cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); + END IF; + + -- run the query using the brin index + SET enable_seqscan = 0; + SET enable_bitmapscan = 1; + + plan_ok := false; + FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP + IF plan_line LIKE '%Bitmap Heap Scan on brintest_bloom%' THEN + plan_ok := true; + END IF; + END LOOP; + IF NOT plan_ok THEN + RAISE WARNING 'did not get bitmap indexscan plan for %', r; + END IF; + + EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) + INTO idx_ctids; + + -- run the query using a seqscan + SET enable_seqscan = 1; + SET enable_bitmapscan = 0; + + plan_ok := false; + FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) LOOP + IF plan_line LIKE '%Seq Scan on brintest_bloom%' THEN + plan_ok := true; + END IF; + END LOOP; + IF NOT plan_ok THEN + RAISE WARNING 'did not get seqscan plan for %', r; + END IF; + + EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_bloom WHERE %s $y$, cond) + INTO ss_ctids; + + -- make sure both return the same results + count := array_length(idx_ctids, 1); + + IF NOT (count = array_length(ss_ctids, 1) AND + idx_ctids @> ss_ctids AND + idx_ctids <@ ss_ctids) THEN + -- report the results of each scan to make the differences obvious + RAISE WARNING 'something not right in %: count %', r, count; + SET enable_seqscan = 1; + SET enable_bitmapscan = 0; + FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP + RAISE NOTICE 'seqscan: %', r2; + END LOOP; + + SET enable_seqscan = 0; + SET enable_bitmapscan = 1; + FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_bloom WHERE ' || cond LOOP + RAISE NOTICE 'bitmapscan: %', r2; + END LOOP; + END IF; + + -- make sure we found expected number of matches + IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; + END LOOP; +END; +$x$; +RESET enable_seqscan; +RESET enable_bitmapscan; +INSERT INTO brintest_bloom SELECT + repeat(stringu1, 42)::bytea, + substr(stringu1, 1, 1)::"char", + stringu1::name, 142857 * tenthous, + thousand, + twothousand, + repeat(stringu1, 42), + unique1::oid, + (four + 1.0)/(hundred+1), + odd::float8 / (tenthous + 1), + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + inet '10.2.3.4' + tenthous, + cidr '10.2.3/24' + tenthous, + substr(stringu1, 1, 1)::bpchar, + date '1995-08-15' + tenthous, + time '01:20:30' + thousand * interval '18.5 second', + timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', + timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', + justify_days(justify_hours(tenthous * interval '12 minutes')), + timetz '01:30:20' + hundred * interval '15 seconds', + tenthous::numeric(36,30) * fivethous * even / (hundred + 1), + format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 
'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, + format('%s/%s%s', odd, even, tenthous)::pg_lsn +FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; +SELECT brin_desummarize_range('brinidx_bloom', 0); + brin_desummarize_range +------------------------ + +(1 row) + +VACUUM brintest_bloom; -- force a summarization cycle in brinidx +UPDATE brintest_bloom SET int8col = int8col * int4col; +UPDATE brintest_bloom SET textcol = '' WHERE textcol IS NOT NULL; +-- Tests for brin_summarize_new_values +SELECT brin_summarize_new_values('brintest_bloom'); -- error, not an index +ERROR: "brintest_bloom" is not an index +SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index +ERROR: "tenk1_unique1" is not a BRIN index +SELECT brin_summarize_new_values('brinidx_bloom'); -- ok, no change expected + brin_summarize_new_values +--------------------------- + 0 +(1 row) + +-- Tests for brin_desummarize_range +SELECT brin_desummarize_range('brinidx_bloom', -1); -- error, invalid range +ERROR: block number out of range: -1 +SELECT brin_desummarize_range('brinidx_bloom', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_desummarize_range('brinidx_bloom', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_desummarize_range('brinidx_bloom', 100000000); + brin_desummarize_range +------------------------ + +(1 row) + +-- Test brin_summarize_range +CREATE TABLE brin_summarize_bloom ( + value int +) WITH (fillfactor=10, autovacuum_enabled=false); +CREATE INDEX brin_summarize_bloom_idx ON brin_summarize_bloom USING brin (value) WITH (pages_per_range=2); +-- Fill a few pages +DO $$ +DECLARE curtid tid; +BEGIN + LOOP + INSERT INTO brin_summarize_bloom VALUES (1) RETURNING ctid INTO curtid; + EXIT WHEN curtid > tid '(2, 0)'; + END LOOP; +END; +$$; +-- summarize one range +SELECT brin_summarize_range('brin_summarize_bloom_idx', 0); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- nothing: already summarized +SELECT brin_summarize_range('brin_summarize_bloom_idx', 1); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- summarize one range +SELECT brin_summarize_range('brin_summarize_bloom_idx', 2); + brin_summarize_range +---------------------- + 1 +(1 row) + +-- nothing: page doesn't exist in table +SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967295); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- invalid block number values +SELECT brin_summarize_range('brin_summarize_bloom_idx', -1); +ERROR: block number out of range: -1 +SELECT brin_summarize_range('brin_summarize_bloom_idx', 4294967296); +ERROR: block number out of range: 4294967296 +-- test brin cost estimates behave sanely based on correlation of values +CREATE TABLE brin_test_bloom (a INT, b INT); +INSERT INTO brin_test_bloom SELECT x/100,x%100 FROM generate_series(1,10000) x(x); +CREATE INDEX brin_test_bloom_a_idx ON brin_test_bloom USING brin (a) WITH (pages_per_range = 2); +CREATE INDEX brin_test_bloom_b_idx ON brin_test_bloom USING brin (b) WITH (pages_per_range = 2); +VACUUM ANALYZE brin_test_bloom; +-- Ensure brin index is used when columns are perfectly correlated +EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE a = 1; + QUERY PLAN +-------------------------------------------------- + Bitmap Heap Scan on brin_test_bloom + Recheck Cond: (a = 1) + -> Bitmap Index Scan on 
brin_test_bloom_a_idx + Index Cond: (a = 1) +(4 rows) + +-- Ensure brin index is not used when values are not correlated +EXPLAIN (COSTS OFF) SELECT * FROM brin_test_bloom WHERE b = 1; + QUERY PLAN +----------------------------- + Seq Scan on brin_test_bloom + Filter: (b = 1) +(2 rows) + diff --git a/src/test/regress/expected/brin_multi.out b/src/test/regress/expected/brin_multi.out new file mode 100644 index 0000000..838d0c7 --- /dev/null +++ b/src/test/regress/expected/brin_multi.out @@ -0,0 +1,589 @@ +CREATE TABLE brintest_multi ( + int8col bigint, + int2col smallint, + int4col integer, + oidcol oid, + tidcol tid, + float4col real, + float8col double precision, + macaddrcol macaddr, + macaddr8col macaddr8, + inetcol inet, + cidrcol cidr, + datecol date, + timecol time without time zone, + timestampcol timestamp without time zone, + timestamptzcol timestamp with time zone, + intervalcol interval, + timetzcol time with time zone, + numericcol numeric, + uuidcol uuid, + lsncol pg_lsn +) WITH (fillfactor=10); +INSERT INTO brintest_multi SELECT + 142857 * tenthous, + thousand, + twothousand, + unique1::oid, + format('(%s,%s)', tenthous, twenty)::tid, + (four + 1.0)/(hundred+1), + odd::float8 / (tenthous + 1), + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + substr(fipshash(unique1::text), 1, 16)::macaddr8, + inet '10.2.3.4/24' + tenthous, + cidr '10.2.3/24' + tenthous, + date '1995-08-15' + tenthous, + time '01:20:30' + thousand * interval '18.5 second', + timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', + timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', + justify_days(justify_hours(tenthous * interval '12 minutes')), + timetz '01:30:20+02' + hundred * interval '15 seconds', + tenthous::numeric(36,30) * fivethous * even / (hundred + 1), + format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, + format('%s/%s%s', odd, even, tenthous)::pg_lsn +FROM tenk1 ORDER BY unique2 LIMIT 100; +-- throw in some NULL's and different values +INSERT INTO brintest_multi (inetcol, cidrcol) SELECT + inet 'fe80::6e40:8ff:fea9:8c46' + tenthous, + cidr 'fe80::6e40:8ff:fea9:8c46' + tenthous +FROM tenk1 ORDER BY thousand, tenthous LIMIT 25; +-- test minmax-multi specific index options +-- number of values must be >= 16 +CREATE INDEX brinidx_multi ON brintest_multi USING brin ( + int8col int8_minmax_multi_ops(values_per_range = 7) +); +ERROR: value 7 out of bounds for option "values_per_range" +DETAIL: Valid values are between "8" and "256". +-- number of values must be <= 256 +CREATE INDEX brinidx_multi ON brintest_multi USING brin ( + int8col int8_minmax_multi_ops(values_per_range = 257) +); +ERROR: value 257 out of bounds for option "values_per_range" +DETAIL: Valid values are between "8" and "256". 
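+-- (values_per_range caps how many values and intervals a minmax-multi +-- summary may keep for one block range before the closest entries are +-- merged; any setting inside the bounds reported above, for example +-- values_per_range = 32, would be accepted.)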
+-- first create an index with a single page range, to force compaction +-- due to exceeding the number of values per summary +CREATE INDEX brinidx_multi ON brintest_multi USING brin ( + int8col int8_minmax_multi_ops, + int2col int2_minmax_multi_ops, + int4col int4_minmax_multi_ops, + oidcol oid_minmax_multi_ops, + tidcol tid_minmax_multi_ops, + float4col float4_minmax_multi_ops, + float8col float8_minmax_multi_ops, + macaddrcol macaddr_minmax_multi_ops, + macaddr8col macaddr8_minmax_multi_ops, + inetcol inet_minmax_multi_ops, + cidrcol inet_minmax_multi_ops, + datecol date_minmax_multi_ops, + timecol time_minmax_multi_ops, + timestampcol timestamp_minmax_multi_ops, + timestamptzcol timestamptz_minmax_multi_ops, + intervalcol interval_minmax_multi_ops, + timetzcol timetz_minmax_multi_ops, + numericcol numeric_minmax_multi_ops, + uuidcol uuid_minmax_multi_ops, + lsncol pg_lsn_minmax_multi_ops +); +DROP INDEX brinidx_multi; +CREATE INDEX brinidx_multi ON brintest_multi USING brin ( + int8col int8_minmax_multi_ops, + int2col int2_minmax_multi_ops, + int4col int4_minmax_multi_ops, + oidcol oid_minmax_multi_ops, + tidcol tid_minmax_multi_ops, + float4col float4_minmax_multi_ops, + float8col float8_minmax_multi_ops, + macaddrcol macaddr_minmax_multi_ops, + macaddr8col macaddr8_minmax_multi_ops, + inetcol inet_minmax_multi_ops, + cidrcol inet_minmax_multi_ops, + datecol date_minmax_multi_ops, + timecol time_minmax_multi_ops, + timestampcol timestamp_minmax_multi_ops, + timestamptzcol timestamptz_minmax_multi_ops, + intervalcol interval_minmax_multi_ops, + timetzcol timetz_minmax_multi_ops, + numericcol numeric_minmax_multi_ops, + uuidcol uuid_minmax_multi_ops, + lsncol pg_lsn_minmax_multi_ops +) with (pages_per_range = 1); +CREATE TABLE brinopers_multi (colname name, typ text, + op text[], value text[], matches int[], + check (cardinality(op) = cardinality(value)), + check (cardinality(op) = cardinality(matches))); +INSERT INTO brinopers_multi VALUES + ('int2col', 'int2', + '{>, >=, =, <=, <}', + '{0, 0, 800, 999, 999}', + '{100, 100, 1, 100, 100}'), + ('int2col', 'int4', + '{>, >=, =, <=, <}', + '{0, 0, 800, 999, 1999}', + '{100, 100, 1, 100, 100}'), + ('int2col', 'int8', + '{>, >=, =, <=, <}', + '{0, 0, 800, 999, 1428427143}', + '{100, 100, 1, 100, 100}'), + ('int4col', 'int2', + '{>, >=, =, <=, <}', + '{0, 0, 800, 1999, 1999}', + '{100, 100, 1, 100, 100}'), + ('int4col', 'int4', + '{>, >=, =, <=, <}', + '{0, 0, 800, 1999, 1999}', + '{100, 100, 1, 100, 100}'), + ('int4col', 'int8', + '{>, >=, =, <=, <}', + '{0, 0, 800, 1999, 1428427143}', + '{100, 100, 1, 100, 100}'), + ('int8col', 'int2', + '{>, >=}', + '{0, 0}', + '{100, 100}'), + ('int8col', 'int4', + '{>, >=}', + '{0, 0}', + '{100, 100}'), + ('int8col', 'int8', + '{>, >=, =, <=, <}', + '{0, 0, 1257141600, 1428427143, 1428427143}', + '{100, 100, 1, 100, 100}'), + ('oidcol', 'oid', + '{>, >=, =, <=, <}', + '{0, 0, 8800, 9999, 9999}', + '{100, 100, 1, 100, 100}'), + ('tidcol', 'tid', + '{>, >=, =, <=, <}', + '{"(0,0)", "(0,0)", "(8800,0)", "(9999,19)", "(9999,19)"}', + '{100, 100, 1, 100, 100}'), + ('float4col', 'float4', + '{>, >=, =, <=, <}', + '{0.0103093, 0.0103093, 1, 1, 1}', + '{100, 100, 4, 100, 96}'), + ('float4col', 'float8', + '{>, >=, =, <=, <}', + '{0.0103093, 0.0103093, 1, 1, 1}', + '{100, 100, 4, 100, 96}'), + ('float8col', 'float4', + '{>, >=, =, <=, <}', + '{0, 0, 0, 1.98, 1.98}', + '{99, 100, 1, 100, 100}'), + ('float8col', 'float8', + '{>, >=, =, <=, <}', + '{0, 0, 0, 1.98, 1.98}', + '{99, 100, 1, 100, 100}'), + 
('macaddrcol', 'macaddr', + '{>, >=, =, <=, <}', + '{00:00:01:00:00:00, 00:00:01:00:00:00, 2c:00:2d:00:16:00, ff:fe:00:00:00:00, ff:fe:00:00:00:00}', + '{99, 100, 2, 100, 100}'), + ('macaddr8col', 'macaddr8', + '{>, >=, =, <=, <}', + '{b1:d1:0e:7b:af:a4:42:12, d9:35:91:bd:f7:86:0e:1e, 72:8f:20:6c:2a:01:bf:57, 23:e8:46:63:86:07:ad:cb, 13:16:8e:6a:2e:6c:84:b4}', + '{31, 17, 1, 11, 4}'), + ('inetcol', 'inet', + '{=, <, <=, >, >=}', + '{10.2.14.231/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', + '{1, 100, 100, 125, 125}'), + ('inetcol', 'cidr', + '{<, <=, >, >=}', + '{255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', + '{100, 100, 125, 125}'), + ('cidrcol', 'inet', + '{=, <, <=, >, >=}', + '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', + '{2, 100, 100, 125, 125}'), + ('cidrcol', 'cidr', + '{=, <, <=, >, >=}', + '{10.2.14/24, 255.255.255.255, 255.255.255.255, 0.0.0.0, 0.0.0.0}', + '{2, 100, 100, 125, 125}'), + ('datecol', 'date', + '{>, >=, =, <=, <}', + '{1995-08-15, 1995-08-15, 2009-12-01, 2022-12-30, 2022-12-30}', + '{100, 100, 1, 100, 100}'), + ('timecol', 'time', + '{>, >=, =, <=, <}', + '{01:20:30, 01:20:30, 02:28:57, 06:28:31.5, 06:28:31.5}', + '{100, 100, 1, 100, 100}'), + ('timestampcol', 'timestamp', + '{>, >=, =, <=, <}', + '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', + '{100, 100, 1, 100, 100}'), + ('timestampcol', 'timestamptz', + '{>, >=, =, <=, <}', + '{1942-07-23 03:05:09, 1942-07-23 03:05:09, 1964-03-24 19:26:45, 1984-01-20 22:42:21, 1984-01-20 22:42:21}', + '{100, 100, 1, 100, 100}'), + ('timestamptzcol', 'timestamptz', + '{>, >=, =, <=, <}', + '{1972-10-10 03:00:00-04, 1972-10-10 03:00:00-04, 1972-10-19 09:00:00-07, 1972-11-20 19:00:00-03, 1972-11-20 19:00:00-03}', + '{100, 100, 1, 100, 100}'), + ('intervalcol', 'interval', + '{>, >=, =, <=, <}', + '{00:00:00, 00:00:00, 1 mons 13 days 12:24, 2 mons 23 days 07:48:00, 1 year}', + '{100, 100, 1, 100, 100}'), + ('timetzcol', 'timetz', + '{>, >=, =, <=, <}', + '{01:30:20+02, 01:30:20+02, 01:35:50+02, 23:55:05+02, 23:55:05+02}', + '{99, 100, 2, 100, 100}'), + ('numericcol', 'numeric', + '{>, >=, =, <=, <}', + '{0.00, 0.01, 2268164.347826086956521739130434782609, 99470151.9, 99470151.9}', + '{100, 100, 1, 100, 100}'), + ('uuidcol', 'uuid', + '{>, >=, =, <=, <}', + '{00040004-0004-0004-0004-000400040004, 00040004-0004-0004-0004-000400040004, 52225222-5222-5222-5222-522252225222, 99989998-9998-9998-9998-999899989998, 99989998-9998-9998-9998-999899989998}', + '{100, 100, 1, 100, 100}'), + ('lsncol', 'pg_lsn', + '{>, >=, =, <=, <, IS, IS NOT}', + '{0/1200, 0/1200, 44/455222, 198/1999799, 198/1999799, NULL, NULL}', + '{100, 100, 1, 100, 100, 25, 100}'); +DO $x$ +DECLARE + r record; + r2 record; + cond text; + idx_ctids tid[]; + ss_ctids tid[]; + count int; + plan_ok bool; + plan_line text; +BEGIN + FOR r IN SELECT colname, oper, typ, value[ordinality], matches[ordinality] FROM brinopers_multi, unnest(op) WITH ORDINALITY AS oper LOOP + + -- prepare the condition + IF r.value IS NULL THEN + cond := format('%I %s %L', r.colname, r.oper, r.value); + ELSE + cond := format('%I %s %L::%s', r.colname, r.oper, r.value, r.typ); + END IF; + + -- run the query using the brin index + SET enable_seqscan = 0; + SET enable_bitmapscan = 1; + + plan_ok := false; + FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP + IF plan_line LIKE '%Bitmap Heap Scan on brintest_multi%' THEN + plan_ok := true; + 
END IF; + END LOOP; + IF NOT plan_ok THEN + RAISE WARNING 'did not get bitmap indexscan plan for %', r; + END IF; + + EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) + INTO idx_ctids; + + -- run the query using a seqscan + SET enable_seqscan = 1; + SET enable_bitmapscan = 0; + + plan_ok := false; + FOR plan_line IN EXECUTE format($y$EXPLAIN SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) LOOP + IF plan_line LIKE '%Seq Scan on brintest_multi%' THEN + plan_ok := true; + END IF; + END LOOP; + IF NOT plan_ok THEN + RAISE WARNING 'did not get seqscan plan for %', r; + END IF; + + EXECUTE format($y$SELECT array_agg(ctid) FROM brintest_multi WHERE %s $y$, cond) + INTO ss_ctids; + + -- make sure both return the same results + count := array_length(idx_ctids, 1); + + IF NOT (count = array_length(ss_ctids, 1) AND + idx_ctids @> ss_ctids AND + idx_ctids <@ ss_ctids) THEN + -- report the results of each scan to make the differences obvious + RAISE WARNING 'something not right in %: count %', r, count; + SET enable_seqscan = 1; + SET enable_bitmapscan = 0; + FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP + RAISE NOTICE 'seqscan: %', r2; + END LOOP; + + SET enable_seqscan = 0; + SET enable_bitmapscan = 1; + FOR r2 IN EXECUTE 'SELECT ' || r.colname || ' FROM brintest_multi WHERE ' || cond LOOP + RAISE NOTICE 'bitmapscan: %', r2; + END LOOP; + END IF; + + -- make sure we found expected number of matches + IF count != r.matches THEN RAISE WARNING 'unexpected number of results % for %', count, r; END IF; + END LOOP; +END; +$x$; +RESET enable_seqscan; +RESET enable_bitmapscan; +INSERT INTO brintest_multi SELECT + 142857 * tenthous, + thousand, + twothousand, + unique1::oid, + format('(%s,%s)', tenthous, twenty)::tid, + (four + 1.0)/(hundred+1), + odd::float8 / (tenthous + 1), + format('%s:00:%s:00:%s:00', to_hex(odd), to_hex(even), to_hex(hundred))::macaddr, + substr(fipshash(unique1::text), 1, 16)::macaddr8, + inet '10.2.3.4' + tenthous, + cidr '10.2.3/24' + tenthous, + date '1995-08-15' + tenthous, + time '01:20:30' + thousand * interval '18.5 second', + timestamp '1942-07-23 03:05:09' + tenthous * interval '36.38 hours', + timestamptz '1972-10-10 03:00' + thousand * interval '1 hour', + justify_days(justify_hours(tenthous * interval '12 minutes')), + timetz '01:30:20' + hundred * interval '15 seconds', + tenthous::numeric(36,30) * fivethous * even / (hundred + 1), + format('%s%s-%s-%s-%s-%s%s%s', to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'), to_char(tenthous, 'FM0000'))::uuid, + format('%s/%s%s', odd, even, tenthous)::pg_lsn +FROM tenk1 ORDER BY unique2 LIMIT 5 OFFSET 5; +SELECT brin_desummarize_range('brinidx_multi', 0); + brin_desummarize_range +------------------------ + +(1 row) + +VACUUM brintest_multi; -- force a summarization cycle in brinidx +-- Try inserting a values with NaN, to test distance calculation. 
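+-- (minmax-multi summaries are built by repeatedly merging the values or +-- intervals that lie closest together, which relies on a per-type distance +-- function; NaN has no meaningful distance from other floats, so these +-- inserts check that the float4/float8 distance code copes with it.)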
+insert into public.brintest_multi (float4col) values (real 'nan'); +insert into public.brintest_multi (float8col) values (real 'nan'); +UPDATE brintest_multi SET int8col = int8col * int4col; +-- Test handling of inet netmasks with inet_minmax_multi_ops +CREATE TABLE brin_test_inet (a inet); +CREATE INDEX ON brin_test_inet USING brin (a inet_minmax_multi_ops); +INSERT INTO brin_test_inet VALUES ('127.0.0.1/0'); +INSERT INTO brin_test_inet VALUES ('0.0.0.0/12'); +DROP TABLE brin_test_inet; +-- Tests for brin_summarize_new_values +SELECT brin_summarize_new_values('brintest_multi'); -- error, not an index +ERROR: "brintest_multi" is not an index +SELECT brin_summarize_new_values('tenk1_unique1'); -- error, not a BRIN index +ERROR: "tenk1_unique1" is not a BRIN index +SELECT brin_summarize_new_values('brinidx_multi'); -- ok, no change expected + brin_summarize_new_values +--------------------------- + 0 +(1 row) + +-- Tests for brin_desummarize_range +SELECT brin_desummarize_range('brinidx_multi', -1); -- error, invalid range +ERROR: block number out of range: -1 +SELECT brin_desummarize_range('brinidx_multi', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_desummarize_range('brinidx_multi', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_desummarize_range('brinidx_multi', 100000000); + brin_desummarize_range +------------------------ + +(1 row) + +-- test building an index with many values, to force compaction of the buffer +CREATE TABLE brin_large_range (a int4); +INSERT INTO brin_large_range SELECT i FROM generate_series(1,10000) s(i); +CREATE INDEX brin_large_range_idx ON brin_large_range USING brin (a int4_minmax_multi_ops); +DROP TABLE brin_large_range; +-- Test brin_summarize_range +CREATE TABLE brin_summarize_multi ( + value int +) WITH (fillfactor=10, autovacuum_enabled=false); +CREATE INDEX brin_summarize_multi_idx ON brin_summarize_multi USING brin (value) WITH (pages_per_range=2); +-- Fill a few pages +DO $$ +DECLARE curtid tid; +BEGIN + LOOP + INSERT INTO brin_summarize_multi VALUES (1) RETURNING ctid INTO curtid; + EXIT WHEN curtid > tid '(2, 0)'; + END LOOP; +END; +$$; +-- summarize one range +SELECT brin_summarize_range('brin_summarize_multi_idx', 0); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- nothing: already summarized +SELECT brin_summarize_range('brin_summarize_multi_idx', 1); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- summarize one range +SELECT brin_summarize_range('brin_summarize_multi_idx', 2); + brin_summarize_range +---------------------- + 1 +(1 row) + +-- nothing: page doesn't exist in table +SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967295); + brin_summarize_range +---------------------- + 0 +(1 row) + +-- invalid block number values +SELECT brin_summarize_range('brin_summarize_multi_idx', -1); +ERROR: block number out of range: -1 +SELECT brin_summarize_range('brin_summarize_multi_idx', 4294967296); +ERROR: block number out of range: 4294967296 +-- test brin cost estimates behave sanely based on correlation of values +CREATE TABLE brin_test_multi (a INT, b INT); +INSERT INTO brin_test_multi SELECT x/100,x%100 FROM generate_series(1,10000) x(x); +CREATE INDEX brin_test_multi_a_idx ON brin_test_multi USING brin (a) WITH (pages_per_range = 2); +CREATE INDEX brin_test_multi_b_idx ON brin_test_multi USING brin (b) WITH (pages_per_range = 2); +VACUUM ANALYZE brin_test_multi; +-- Ensure brin index is used when columns are perfectly correlated 
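+-- (column a = x/100 increases with the physical row order, so each 2-page +-- block range covers a narrow slice of a and the planner expects a BRIN +-- bitmap scan to prune most of the table; column b = x%100 cycles inside +-- every range, so its summaries prune nothing and a sequential scan comes +-- out cheaper, as the next two plans show.)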
+EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE a = 1; + QUERY PLAN +--------------------------------------------------- + Bitmap Heap Scan on brin_test_multi + Recheck Cond: (a = 1) + -> Bitmap Index Scan on brin_test_multi_a_idx + Index Cond: (a = 1) +(4 rows) + +-- Ensure brin index is not used when values are not correlated +EXPLAIN (COSTS OFF) SELECT * FROM brin_test_multi WHERE b = 1; + QUERY PLAN +----------------------------- + Seq Scan on brin_test_multi + Filter: (b = 1) +(2 rows) + +-- test overflows during CREATE INDEX with extreme timestamp values +CREATE TABLE brin_timestamp_test(a TIMESTAMPTZ); +SET datestyle TO iso; +-- values close to timestamp minimum +INSERT INTO brin_timestamp_test +SELECT '4713-01-01 00:00:01 BC'::timestamptz + (i || ' seconds')::interval + FROM generate_series(1,30) s(i); +-- values close to timestamp maximum +INSERT INTO brin_timestamp_test +SELECT '294276-12-01 00:00:01'::timestamptz + (i || ' seconds')::interval + FROM generate_series(1,30) s(i); +CREATE INDEX ON brin_timestamp_test USING brin (a timestamptz_minmax_multi_ops) WITH (pages_per_range=1); +DROP TABLE brin_timestamp_test; +-- test overflows during CREATE INDEX with extreme date values +CREATE TABLE brin_date_test(a DATE); +-- insert values close to date minimum +INSERT INTO brin_date_test SELECT '4713-01-01 BC'::date + i FROM generate_series(1, 30) s(i); +-- insert values close to date maximum +INSERT INTO brin_date_test SELECT '5874897-12-01'::date + i FROM generate_series(1, 30) s(i); +CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); +SET enable_seqscan = off; +-- make sure the ranges were built correctly and 2023-01-01 eliminates all ranges +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; + QUERY PLAN +-------------------------------------------------------------------------- + Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) + Recheck Cond: (a = '2023-01-01'::date) + -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '2023-01-01'::date) +(4 rows) + +DROP TABLE brin_date_test; +RESET enable_seqscan; +-- test handling of infinite timestamp values +CREATE TABLE brin_timestamp_test(a TIMESTAMP); +INSERT INTO brin_timestamp_test VALUES ('-infinity'), ('infinity'); +INSERT INTO brin_timestamp_test +SELECT i FROM generate_series('2000-01-01'::timestamp, '2000-02-09'::timestamp, '1 day'::interval) s(i); +CREATE INDEX ON brin_timestamp_test USING brin (a timestamp_minmax_multi_ops) WITH (pages_per_range=1); +SET enable_seqscan = off; +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_timestamp_test WHERE a = '2023-01-01'::timestamp; + QUERY PLAN +------------------------------------------------------------------------------- + Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) + Recheck Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) + -> Bitmap Index Scan on brin_timestamp_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '2023-01-01 00:00:00'::timestamp without time zone) +(4 rows) + +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_timestamp_test WHERE a = '1900-01-01'::timestamp; + QUERY PLAN +------------------------------------------------------------------------------- + Bitmap Heap Scan on brin_timestamp_test (actual rows=0 loops=1) + Recheck Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) + -> Bitmap Index Scan on 
brin_timestamp_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '1900-01-01 00:00:00'::timestamp without time zone) +(4 rows) + +DROP TABLE brin_timestamp_test; +RESET enable_seqscan; +-- test handling of infinite date values +CREATE TABLE brin_date_test(a DATE); +INSERT INTO brin_date_test VALUES ('-infinity'), ('infinity'); +INSERT INTO brin_date_test SELECT '2000-01-01'::date + i FROM generate_series(1, 40) s(i); +CREATE INDEX ON brin_date_test USING brin (a date_minmax_multi_ops) WITH (pages_per_range=1); +SET enable_seqscan = off; +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_date_test WHERE a = '2023-01-01'::date; + QUERY PLAN +------------------------------------------------------------------------- + Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) + Recheck Cond: (a = '2023-01-01'::date) + -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '2023-01-01'::date) +(4 rows) + +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_date_test WHERE a = '1900-01-01'::date; + QUERY PLAN +------------------------------------------------------------------------- + Bitmap Heap Scan on brin_date_test (actual rows=0 loops=1) + Recheck Cond: (a = '1900-01-01'::date) + -> Bitmap Index Scan on brin_date_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '1900-01-01'::date) +(4 rows) + +DROP TABLE brin_date_test; +RESET enable_seqscan; +RESET datestyle; +-- test handling of overflow for interval values +CREATE TABLE brin_interval_test(a INTERVAL); +INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series(-178000000, -177999980) s(i); +INSERT INTO brin_interval_test SELECT (i || ' years')::interval FROM generate_series( 177999980, 178000000) s(i); +CREATE INDEX ON brin_interval_test USING brin (a interval_minmax_multi_ops) WITH (pages_per_range=1); +SET enable_seqscan = off; +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_interval_test WHERE a = '-30 years'::interval; + QUERY PLAN +----------------------------------------------------------------------------- + Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) + Recheck Cond: (a = '@ 30 years ago'::interval) + -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '@ 30 years ago'::interval) +(4 rows) + +EXPLAIN (ANALYZE, TIMING OFF, COSTS OFF, SUMMARY OFF) +SELECT * FROM brin_interval_test WHERE a = '30 years'::interval; + QUERY PLAN +----------------------------------------------------------------------------- + Bitmap Heap Scan on brin_interval_test (actual rows=0 loops=1) + Recheck Cond: (a = '@ 30 years'::interval) + -> Bitmap Index Scan on brin_interval_test_a_idx (actual rows=0 loops=1) + Index Cond: (a = '@ 30 years'::interval) +(4 rows) + +DROP TABLE brin_interval_test; +RESET enable_seqscan; +RESET datestyle; diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out new file mode 100644 index 0000000..93ed5e8 --- /dev/null +++ b/src/test/regress/expected/btree_index.out @@ -0,0 +1,389 @@ +-- +-- BTREE_INDEX +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +CREATE TABLE bt_i4_heap ( + seqno int4, + random int4 +); +CREATE TABLE bt_name_heap ( + seqno name, + random int4 +); +CREATE TABLE bt_txt_heap ( + seqno text, + random int4 +); +CREATE TABLE bt_f8_heap ( + seqno float8, + random int4 +); +\set filename :abs_srcdir '/data/desc.data' +COPY 
bt_i4_heap FROM :'filename'; +\set filename :abs_srcdir '/data/hash.data' +COPY bt_name_heap FROM :'filename'; +\set filename :abs_srcdir '/data/desc.data' +COPY bt_txt_heap FROM :'filename'; +\set filename :abs_srcdir '/data/hash.data' +COPY bt_f8_heap FROM :'filename'; +ANALYZE bt_i4_heap; +ANALYZE bt_name_heap; +ANALYZE bt_txt_heap; +ANALYZE bt_f8_heap; +-- +-- BTREE ascending/descending cases +-- +-- we load int4/text from pure descending data (each key is a new +-- low key) and name/f8 from pure ascending data (each key is a new +-- high key). we had a bug where new low keys would sometimes be +-- "lost". +-- +CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops); +CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops); +CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops); +CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops); +-- +-- test retrieval of min/max keys for each index +-- +SELECT b.* + FROM bt_i4_heap b + WHERE b.seqno < 1; + seqno | random +-------+------------ + 0 | 1935401906 +(1 row) + +SELECT b.* + FROM bt_i4_heap b + WHERE b.seqno >= 9999; + seqno | random +-------+------------ + 9999 | 1227676208 +(1 row) + +SELECT b.* + FROM bt_i4_heap b + WHERE b.seqno = 4500; + seqno | random +-------+------------ + 4500 | 2080851358 +(1 row) + +SELECT b.* + FROM bt_name_heap b + WHERE b.seqno < '1'::name; + seqno | random +-------+------------ + 0 | 1935401906 +(1 row) + +SELECT b.* + FROM bt_name_heap b + WHERE b.seqno >= '9999'::name; + seqno | random +-------+------------ + 9999 | 1227676208 +(1 row) + +SELECT b.* + FROM bt_name_heap b + WHERE b.seqno = '4500'::name; + seqno | random +-------+------------ + 4500 | 2080851358 +(1 row) + +SELECT b.* + FROM bt_txt_heap b + WHERE b.seqno < '1'::text; + seqno | random +-------+------------ + 0 | 1935401906 +(1 row) + +SELECT b.* + FROM bt_txt_heap b + WHERE b.seqno >= '9999'::text; + seqno | random +-------+------------ + 9999 | 1227676208 +(1 row) + +SELECT b.* + FROM bt_txt_heap b + WHERE b.seqno = '4500'::text; + seqno | random +-------+------------ + 4500 | 2080851358 +(1 row) + +SELECT b.* + FROM bt_f8_heap b + WHERE b.seqno < '1'::float8; + seqno | random +-------+------------ + 0 | 1935401906 +(1 row) + +SELECT b.* + FROM bt_f8_heap b + WHERE b.seqno >= '9999'::float8; + seqno | random +-------+------------ + 9999 | 1227676208 +(1 row) + +SELECT b.* + FROM bt_f8_heap b + WHERE b.seqno = '4500'::float8; + seqno | random +-------+------------ + 4500 | 2080851358 +(1 row) + +-- +-- Check correct optimization of LIKE (special index operator support) +-- for both indexscan and bitmapscan cases +-- +set enable_seqscan to false; +set enable_indexscan to true; +set enable_bitmapscan to false; +explain (costs off) +select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1; + QUERY PLAN +------------------------------------------------------------------------------ + Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: ((proname >= 'RI_FKey'::text) AND (proname < 'RI_FKez'::text)) + Filter: (proname ~~ 'RI\_FKey%del'::text) +(3 rows) + +select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1; + proname +------------------------ + RI_FKey_cascade_del + RI_FKey_noaction_del + RI_FKey_restrict_del + RI_FKey_setdefault_del + RI_FKey_setnull_del +(5 rows) + +explain (costs off) +select proname from pg_proc where proname ilike '00%foo' order by 1; + QUERY PLAN 
+-------------------------------------------------------------------- + Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: ((proname >= '00'::text) AND (proname < '01'::text)) + Filter: (proname ~~* '00%foo'::text) +(3 rows) + +select proname from pg_proc where proname ilike '00%foo' order by 1; + proname +--------- +(0 rows) + +explain (costs off) +select proname from pg_proc where proname ilike 'ri%foo' order by 1; + QUERY PLAN +----------------------------------------------------------------- + Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc + Filter: (proname ~~* 'ri%foo'::text) +(2 rows) + +set enable_indexscan to false; +set enable_bitmapscan to true; +explain (costs off) +select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Sort Key: proname + -> Bitmap Heap Scan on pg_proc + Filter: (proname ~~ 'RI\_FKey%del'::text) + -> Bitmap Index Scan on pg_proc_proname_args_nsp_index + Index Cond: ((proname >= 'RI_FKey'::text) AND (proname < 'RI_FKez'::text)) +(6 rows) + +select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1; + proname +------------------------ + RI_FKey_cascade_del + RI_FKey_noaction_del + RI_FKey_restrict_del + RI_FKey_setdefault_del + RI_FKey_setnull_del +(5 rows) + +explain (costs off) +select proname from pg_proc where proname ilike '00%foo' order by 1; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Sort Key: proname + -> Bitmap Heap Scan on pg_proc + Filter: (proname ~~* '00%foo'::text) + -> Bitmap Index Scan on pg_proc_proname_args_nsp_index + Index Cond: ((proname >= '00'::text) AND (proname < '01'::text)) +(6 rows) + +select proname from pg_proc where proname ilike '00%foo' order by 1; + proname +--------- +(0 rows) + +explain (costs off) +select proname from pg_proc where proname ilike 'ri%foo' order by 1; + QUERY PLAN +----------------------------------------------------------------- + Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc + Filter: (proname ~~* 'ri%foo'::text) +(2 rows) + +reset enable_seqscan; +reset enable_indexscan; +reset enable_bitmapscan; +-- Also check LIKE optimization with binary-compatible cases +create temp table btree_bpchar (f1 text collate "C"); +create index on btree_bpchar(f1 bpchar_ops) WITH (deduplicate_items=on); +insert into btree_bpchar values ('foo'), ('fool'), ('bar'), ('quux'); +-- doesn't match index: +explain (costs off) +select * from btree_bpchar where f1 like 'foo'; + QUERY PLAN +------------------------------- + Seq Scan on btree_bpchar + Filter: (f1 ~~ 'foo'::text) +(2 rows) + +select * from btree_bpchar where f1 like 'foo'; + f1 +----- + foo +(1 row) + +explain (costs off) +select * from btree_bpchar where f1 like 'foo%'; + QUERY PLAN +-------------------------------- + Seq Scan on btree_bpchar + Filter: (f1 ~~ 'foo%'::text) +(2 rows) + +select * from btree_bpchar where f1 like 'foo%'; + f1 +------ + foo + fool +(2 rows) + +-- these do match the index: +explain (costs off) +select * from btree_bpchar where f1::bpchar like 'foo'; + QUERY PLAN +---------------------------------------------------- + Bitmap Heap Scan on btree_bpchar + Filter: ((f1)::bpchar ~~ 'foo'::text) + -> Bitmap Index Scan on btree_bpchar_f1_idx + Index Cond: ((f1)::bpchar = 'foo'::bpchar) +(4 rows) + +select * from btree_bpchar where f1::bpchar like 'foo'; + f1 +----- + foo +(1 row) + 
+explain (costs off) +select * from btree_bpchar where f1::bpchar like 'foo%'; + QUERY PLAN +------------------------------------------------------------------------------------------ + Bitmap Heap Scan on btree_bpchar + Filter: ((f1)::bpchar ~~ 'foo%'::text) + -> Bitmap Index Scan on btree_bpchar_f1_idx + Index Cond: (((f1)::bpchar >= 'foo'::bpchar) AND ((f1)::bpchar < 'fop'::bpchar)) +(4 rows) + +select * from btree_bpchar where f1::bpchar like 'foo%'; + f1 +------ + foo + fool +(2 rows) + +-- get test coverage for "single value" deduplication strategy: +insert into btree_bpchar select 'foo' from generate_series(1,1500); +-- +-- Perform unique checking, with and without the use of deduplication +-- +CREATE TABLE dedup_unique_test_table (a int) WITH (autovacuum_enabled=false); +CREATE UNIQUE INDEX dedup_unique ON dedup_unique_test_table (a) WITH (deduplicate_items=on); +CREATE UNIQUE INDEX plain_unique ON dedup_unique_test_table (a) WITH (deduplicate_items=off); +-- Generate enough garbage tuples in index to ensure that even the unique index +-- with deduplication enabled has to check multiple leaf pages during unique +-- checking (at least with a BLCKSZ of 8192 or less) +DO $$ +BEGIN + FOR r IN 1..1350 LOOP + DELETE FROM dedup_unique_test_table; + INSERT INTO dedup_unique_test_table SELECT 1; + END LOOP; +END$$; +-- Exercise the LP_DEAD-bit-set tuple deletion code with a posting list tuple. +-- The implementation prefers deleting existing items to merging any duplicate +-- tuples into a posting list, so we need an explicit test to make sure we get +-- coverage (note that this test also assumes BLCKSZ is 8192 or less): +DROP INDEX plain_unique; +DELETE FROM dedup_unique_test_table WHERE a = 1; +INSERT INTO dedup_unique_test_table SELECT i FROM generate_series(0,450) i; +-- +-- Test B-tree fast path (cache rightmost leaf page) optimization. +-- +-- First create a tree that's at least three levels deep (i.e. has one level +-- between the root and leaf levels). The text inserted is long. It won't be +-- TOAST compressed because we use plain storage in the table. Only a few +-- index tuples fit on each internal page, allowing us to get a tall tree with +-- few pages. (A tall tree is required to trigger caching.) +-- +-- The text column must be the leading column in the index, since suffix +-- truncation would otherwise truncate tuples on internal pages, leaving us +-- with a short tree. +create table btree_tall_tbl(id int4, t text); +alter table btree_tall_tbl alter COLUMN t set storage plain; +create index btree_tall_idx on btree_tall_tbl (t, id) with (fillfactor = 10); +insert into btree_tall_tbl select g, repeat('x', 250) +from generate_series(1, 130) g; +-- +-- Test for multilevel page deletion +-- +CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint); +INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i; +ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d); +-- Delete most entries, and vacuum, deleting internal pages and creating "fast +-- root" +DELETE FROM delete_test_table WHERE a < 79990; +VACUUM delete_test_table; +-- +-- Test B-tree insertion with a metapage update (XLOG_BTREE_INSERT_META +-- WAL record type). This happens when a "fast root" page is split. This +-- also creates coverage for nbtree FSM page recycling. +-- +-- The vacuum above should've turned the leaf page into a fast root. We just +-- need to insert some rows to cause the fast root page to split. 
+INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,1000) i; +-- Test unsupported btree opclass parameters +create index on btree_tall_tbl (id int4_ops(foo=1)); +ERROR: operator class int4_ops has no options +-- Test case of ALTER INDEX with abuse of column names for indexes. +-- This grammar is not officially supported, but the parser allows it. +CREATE INDEX btree_tall_idx2 ON btree_tall_tbl (id); +ALTER INDEX btree_tall_idx2 ALTER COLUMN id SET (n_distinct=100); +ERROR: ALTER action ALTER COLUMN ... SET cannot be performed on relation "btree_tall_idx2" +DETAIL: This operation is not supported for indexes. +DROP INDEX btree_tall_idx2; +-- Partitioned index +CREATE TABLE btree_part (id int4) PARTITION BY RANGE (id); +CREATE INDEX btree_part_idx ON btree_part(id); +ALTER INDEX btree_part_idx ALTER COLUMN id SET (n_distinct=100); +ERROR: ALTER action ALTER COLUMN ... SET cannot be performed on relation "btree_part_idx" +DETAIL: This operation is not supported for partitioned indexes. +DROP TABLE btree_part; diff --git a/src/test/regress/expected/case.out b/src/test/regress/expected/case.out new file mode 100644 index 0000000..f5136c1 --- /dev/null +++ b/src/test/regress/expected/case.out @@ -0,0 +1,419 @@ +-- +-- CASE +-- Test the case statement +-- +CREATE TABLE CASE_TBL ( + i integer, + f double precision +); +CREATE TABLE CASE2_TBL ( + i integer, + j integer +); +INSERT INTO CASE_TBL VALUES (1, 10.1); +INSERT INTO CASE_TBL VALUES (2, 20.2); +INSERT INTO CASE_TBL VALUES (3, -30.3); +INSERT INTO CASE_TBL VALUES (4, NULL); +INSERT INTO CASE2_TBL VALUES (1, -1); +INSERT INTO CASE2_TBL VALUES (2, -2); +INSERT INTO CASE2_TBL VALUES (3, -3); +INSERT INTO CASE2_TBL VALUES (2, -4); +INSERT INTO CASE2_TBL VALUES (1, NULL); +INSERT INTO CASE2_TBL VALUES (NULL, -6); +-- +-- Simplest examples without tables +-- +SELECT '3' AS "One", + CASE + WHEN 1 < 2 THEN 3 + END AS "Simple WHEN"; + One | Simple WHEN +-----+------------- + 3 | 3 +(1 row) + +SELECT '' AS "One", + CASE + WHEN 1 > 2 THEN 3 + END AS "Simple default"; + One | Simple default +--------+---------------- + | +(1 row) + +SELECT '3' AS "One", + CASE + WHEN 1 < 2 THEN 3 + ELSE 4 + END AS "Simple ELSE"; + One | Simple ELSE +-----+------------- + 3 | 3 +(1 row) + +SELECT '4' AS "One", + CASE + WHEN 1 > 2 THEN 3 + ELSE 4 + END AS "ELSE default"; + One | ELSE default +-----+-------------- + 4 | 4 +(1 row) + +SELECT '6' AS "One", + CASE + WHEN 1 > 2 THEN 3 + WHEN 4 < 5 THEN 6 + ELSE 7 + END AS "Two WHEN with default"; + One | Two WHEN with default +-----+----------------------- + 6 | 6 +(1 row) + +SELECT '7' AS "None", + CASE WHEN random() < 0 THEN 1 + END AS "NULL on no matches"; + None | NULL on no matches +------+-------------------- + 7 | +(1 row) + +-- Constant-expression folding shouldn't evaluate unreachable subexpressions +SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END; + case +------ + 1 +(1 row) + +SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END; + case +------ + 1 +(1 row) + +-- However we do not currently suppress folding of potentially +-- reachable subexpressions +SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl; +ERROR: division by zero +-- Test for cases involving untyped literals in test expression +SELECT CASE 'a' WHEN 'a' THEN 1 ELSE 2 END; + case +------ + 1 +(1 row) + +-- +-- Examples of targets involving tables +-- +SELECT + CASE + WHEN i >= 3 THEN i + END AS ">= 3 or Null" + FROM CASE_TBL; + >= 3 or Null +-------------- + + + 3 + 4 +(4 rows) + +SELECT + CASE WHEN i >= 3 THEN 
(i + i) + ELSE i + END AS "Simplest Math" + FROM CASE_TBL; + Simplest Math +--------------- + 1 + 2 + 6 + 8 +(4 rows) + +SELECT i AS "Value", + CASE WHEN (i < 0) THEN 'small' + WHEN (i = 0) THEN 'zero' + WHEN (i = 1) THEN 'one' + WHEN (i = 2) THEN 'two' + ELSE 'big' + END AS "Category" + FROM CASE_TBL; + Value | Category +-------+---------- + 1 | one + 2 | two + 3 | big + 4 | big +(4 rows) + +SELECT + CASE WHEN ((i < 0) or (i < 0)) THEN 'small' + WHEN ((i = 0) or (i = 0)) THEN 'zero' + WHEN ((i = 1) or (i = 1)) THEN 'one' + WHEN ((i = 2) or (i = 2)) THEN 'two' + ELSE 'big' + END AS "Category" + FROM CASE_TBL; + Category +---------- + one + two + big + big +(4 rows) + +-- +-- Examples of qualifications involving tables +-- +-- +-- NULLIF() and COALESCE() +-- Shorthand forms for typical CASE constructs +-- defined in the SQL standard. +-- +SELECT * FROM CASE_TBL WHERE COALESCE(f,i) = 4; + i | f +---+--- + 4 | +(1 row) + +SELECT * FROM CASE_TBL WHERE NULLIF(f,i) = 2; + i | f +---+--- +(0 rows) + +SELECT COALESCE(a.f, b.i, b.j) + FROM CASE_TBL a, CASE2_TBL b; + coalesce +---------- + 10.1 + 20.2 + -30.3 + 1 + 10.1 + 20.2 + -30.3 + 2 + 10.1 + 20.2 + -30.3 + 3 + 10.1 + 20.2 + -30.3 + 2 + 10.1 + 20.2 + -30.3 + 1 + 10.1 + 20.2 + -30.3 + -6 +(24 rows) + +SELECT * + FROM CASE_TBL a, CASE2_TBL b + WHERE COALESCE(a.f, b.i, b.j) = 2; + i | f | i | j +---+---+---+---- + 4 | | 2 | -2 + 4 | | 2 | -4 +(2 rows) + +SELECT NULLIF(a.i,b.i) AS "NULLIF(a.i,b.i)", + NULLIF(b.i, 4) AS "NULLIF(b.i,4)" + FROM CASE_TBL a, CASE2_TBL b; + NULLIF(a.i,b.i) | NULLIF(b.i,4) +-----------------+--------------- + | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 1 | 2 + | 2 + 3 | 2 + 4 | 2 + 1 | 3 + 2 | 3 + | 3 + 4 | 3 + 1 | 2 + | 2 + 3 | 2 + 4 | 2 + | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 1 | + 2 | + 3 | + 4 | +(24 rows) + +SELECT * + FROM CASE_TBL a, CASE2_TBL b + WHERE COALESCE(f,b.i) = 2; + i | f | i | j +---+---+---+---- + 4 | | 2 | -2 + 4 | | 2 | -4 +(2 rows) + +-- Tests for constant subexpression simplification +explain (costs off) +SELECT * FROM CASE_TBL WHERE NULLIF(1, 2) = 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) +SELECT * FROM CASE_TBL WHERE NULLIF(1, 1) IS NOT NULL; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) +SELECT * FROM CASE_TBL WHERE NULLIF(1, null) = 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- +-- Examples of updates involving tables +-- +UPDATE CASE_TBL + SET i = CASE WHEN i >= 3 THEN (- i) + ELSE (2 * i) END; +SELECT * FROM CASE_TBL; + i | f +----+------- + 2 | 10.1 + 4 | 20.2 + -3 | -30.3 + -4 | +(4 rows) + +UPDATE CASE_TBL + SET i = CASE WHEN i >= 2 THEN (2 * i) + ELSE (3 * i) END; +SELECT * FROM CASE_TBL; + i | f +-----+------- + 4 | 10.1 + 8 | 20.2 + -9 | -30.3 + -12 | +(4 rows) + +UPDATE CASE_TBL + SET i = CASE WHEN b.i >= 2 THEN (2 * j) + ELSE (3 * j) END + FROM CASE2_TBL b + WHERE j = -CASE_TBL.i; +SELECT * FROM CASE_TBL; + i | f +-----+------- + 8 | 20.2 + -9 | -30.3 + -12 | + -8 | 10.1 +(4 rows) + +-- +-- Nested CASE expressions +-- +-- This test exercises a bug caused by aliasing econtext->caseValue_isNull +-- with the isNull argument of the inner CASE's CaseExpr evaluation. After +-- evaluating the vol(null) expression in the inner CASE's second WHEN-clause, +-- the isNull flag for the case test value incorrectly became true, causing +-- the third WHEN-clause not to match. 
The volatile function calls are needed +-- to prevent constant-folding in the planner, which would hide the bug. +-- Wrap this in a single transaction so the transient '=' operator doesn't +-- cause problems in concurrent sessions +BEGIN; +CREATE FUNCTION vol(text) returns text as + 'begin return $1; end' language plpgsql volatile; +SELECT CASE + (CASE vol('bar') + WHEN 'foo' THEN 'it was foo!' + WHEN vol(null) THEN 'null input' + WHEN 'bar' THEN 'it was bar!' END + ) + WHEN 'it was foo!' THEN 'foo recognized' + WHEN 'it was bar!' THEN 'bar recognized' + ELSE 'unrecognized' END; + case +---------------- + bar recognized +(1 row) + +-- In this case, we can't inline the SQL function without confusing things. +CREATE DOMAIN foodomain AS text; +CREATE FUNCTION volfoo(text) returns foodomain as + 'begin return $1::foodomain; end' language plpgsql volatile; +CREATE FUNCTION inline_eq(foodomain, foodomain) returns boolean as + 'SELECT CASE $2::text WHEN $1::text THEN true ELSE false END' language sql; +CREATE OPERATOR = (procedure = inline_eq, + leftarg = foodomain, rightarg = foodomain); +SELECT CASE volfoo('bar') WHEN 'foo'::foodomain THEN 'is foo' ELSE 'is not foo' END; + case +------------ + is not foo +(1 row) + +ROLLBACK; +-- Test multiple evaluation of a CASE arg that is a read/write object (#14472) +-- Wrap this in a single transaction so the transient '=' operator doesn't +-- cause problems in concurrent sessions +BEGIN; +CREATE DOMAIN arrdomain AS int[]; +CREATE FUNCTION make_ad(int,int) returns arrdomain as + 'declare x arrdomain; + begin + x := array[$1,$2]; + return x; + end' language plpgsql volatile; +CREATE FUNCTION ad_eq(arrdomain, arrdomain) returns boolean as + 'begin return array_eq($1, $2); end' language plpgsql; +CREATE OPERATOR = (procedure = ad_eq, + leftarg = arrdomain, rightarg = arrdomain); +SELECT CASE make_ad(1,2) + WHEN array[2,4]::arrdomain THEN 'wrong' + WHEN array[2,5]::arrdomain THEN 'still wrong' + WHEN array[1,2]::arrdomain THEN 'right' + END; + case +------- + right +(1 row) + +ROLLBACK; +-- Test interaction of CASE with ArrayCoerceExpr (bug #15471) +BEGIN; +CREATE TYPE casetestenum AS ENUM ('e', 'f', 'g'); +SELECT + CASE 'foo'::text + WHEN 'foo' THEN ARRAY['a', 'b', 'c', 'd'] || enum_range(NULL::casetestenum)::text[] + ELSE ARRAY['x', 'y'] + END; + array +----------------- + {a,b,c,d,e,f,g} +(1 row) + +ROLLBACK; +-- +-- Clean up +-- +DROP TABLE CASE_TBL; +DROP TABLE CASE2_TBL; diff --git a/src/test/regress/expected/char.out b/src/test/regress/expected/char.out new file mode 100644 index 0000000..4df596d --- /dev/null +++ b/src/test/regress/expected/char.out @@ -0,0 +1,199 @@ +-- +-- CHAR +-- +-- Per SQL standard, CHAR means character(1), that is a varlena type +-- with a constraint restricting it to one character (not byte) +SELECT char 'c' = char 'c' AS true; + true +------ + t +(1 row) + +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE CHAR_TBL(f1 char); +INSERT INTO CHAR_TBL (f1) VALUES ('a'); +INSERT INTO CHAR_TBL (f1) VALUES ('A'); +-- any of the following three input formats are acceptable +INSERT INTO CHAR_TBL (f1) VALUES ('1'); +INSERT INTO CHAR_TBL (f1) VALUES (2); +INSERT INTO CHAR_TBL (f1) VALUES ('3'); +-- zero-length char +INSERT INTO CHAR_TBL (f1) VALUES (''); +-- try char's of greater than 1 length +INSERT INTO CHAR_TBL (f1) VALUES ('cd'); +ERROR: value too long for type character(1) +INSERT INTO CHAR_TBL (f1) VALUES ('c '); +SELECT * FROM CHAR_TBL; + f1 +---- + a + A + 1 
+ 2 + 3 + + c +(7 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 <> 'a'; + f1 +---- + A + 1 + 2 + 3 + + c +(6 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 = 'a'; + f1 +---- + a +(1 row) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 < 'a'; + f1 +---- + A + 1 + 2 + 3 + +(5 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 <= 'a'; + f1 +---- + a + A + 1 + 2 + 3 + +(6 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 > 'a'; + f1 +---- + c +(1 row) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 >= 'a'; + f1 +---- + a + c +(2 rows) + +DROP TABLE CHAR_TBL; +-- +-- Now test longer arrays of char +-- +-- This char_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. +-- +INSERT INTO CHAR_TBL (f1) VALUES ('abcde'); +ERROR: value too long for type character(4) +SELECT * FROM CHAR_TBL; + f1 +------ + a + ab + abcd + abcd +(4 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('abcd ', 'char(4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('abcde', 'char(4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('abcde', 'char(4)'); + message | detail | hint | sql_error_code +--------------------------------------+--------+------+---------------- + value too long for type character(4) | | | 22001 +(1 row) + +-- +-- Also test "char", which is an ad-hoc one-byte type. It can only +-- really store ASCII characters, but we allow high-bit-set characters +-- to be accessed via bytea-like escapes. +-- +SELECT 'a'::"char"; + char +------ + a +(1 row) + +SELECT '\101'::"char"; + char +------ + A +(1 row) + +SELECT '\377'::"char"; + char +------ + \377 +(1 row) + +SELECT 'a'::"char"::text; + text +------ + a +(1 row) + +SELECT '\377'::"char"::text; + text +------ + \377 +(1 row) + +SELECT '\000'::"char"::text; + text +------ + +(1 row) + +SELECT 'a'::text::"char"; + char +------ + a +(1 row) + +SELECT '\377'::text::"char"; + char +------ + \377 +(1 row) + +SELECT ''::text::"char"; + char +------ + +(1 row) + diff --git a/src/test/regress/expected/char_1.out b/src/test/regress/expected/char_1.out new file mode 100644 index 0000000..3add81e --- /dev/null +++ b/src/test/regress/expected/char_1.out @@ -0,0 +1,199 @@ +-- +-- CHAR +-- +-- Per SQL standard, CHAR means character(1), that is a varlena type +-- with a constraint restricting it to one character (not byte) +SELECT char 'c' = char 'c' AS true; + true +------ + t +(1 row) + +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE CHAR_TBL(f1 char); +INSERT INTO CHAR_TBL (f1) VALUES ('a'); +INSERT INTO CHAR_TBL (f1) VALUES ('A'); +-- any of the following three input formats are acceptable +INSERT INTO CHAR_TBL (f1) VALUES ('1'); +INSERT INTO CHAR_TBL (f1) VALUES (2); +INSERT INTO CHAR_TBL (f1) VALUES ('3'); +-- zero-length char +INSERT INTO CHAR_TBL (f1) VALUES (''); +-- try char's of greater than 1 length +INSERT INTO CHAR_TBL (f1) VALUES ('cd'); +ERROR: value too long for type character(1) +INSERT INTO CHAR_TBL (f1) VALUES ('c '); +SELECT * FROM CHAR_TBL; + f1 +---- + a + A + 1 + 2 + 3 + + c +(7 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 <> 'a'; + f1 +---- + A + 1 + 2 + 3 + + c +(6 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 = 'a'; + f1 +---- + a +(1 row) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 < 'a'; + f1 +---- + 1 + 2 + 3 + +(4 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 <= 'a'; + f1 +---- + a + 1 + 2 + 3 + +(5 
rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 > 'a'; + f1 +---- + A + c +(2 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 >= 'a'; + f1 +---- + a + A + c +(3 rows) + +DROP TABLE CHAR_TBL; +-- +-- Now test longer arrays of char +-- +-- This char_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. +-- +INSERT INTO CHAR_TBL (f1) VALUES ('abcde'); +ERROR: value too long for type character(4) +SELECT * FROM CHAR_TBL; + f1 +------ + a + ab + abcd + abcd +(4 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('abcd ', 'char(4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('abcde', 'char(4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('abcde', 'char(4)'); + message | detail | hint | sql_error_code +--------------------------------------+--------+------+---------------- + value too long for type character(4) | | | 22001 +(1 row) + +-- +-- Also test "char", which is an ad-hoc one-byte type. It can only +-- really store ASCII characters, but we allow high-bit-set characters +-- to be accessed via bytea-like escapes. +-- +SELECT 'a'::"char"; + char +------ + a +(1 row) + +SELECT '\101'::"char"; + char +------ + A +(1 row) + +SELECT '\377'::"char"; + char +------ + \377 +(1 row) + +SELECT 'a'::"char"::text; + text +------ + a +(1 row) + +SELECT '\377'::"char"::text; + text +------ + \377 +(1 row) + +SELECT '\000'::"char"::text; + text +------ + +(1 row) + +SELECT 'a'::text::"char"; + char +------ + a +(1 row) + +SELECT '\377'::text::"char"; + char +------ + \377 +(1 row) + +SELECT ''::text::"char"; + char +------ + +(1 row) + diff --git a/src/test/regress/expected/char_2.out b/src/test/regress/expected/char_2.out new file mode 100644 index 0000000..ebde0f3 --- /dev/null +++ b/src/test/regress/expected/char_2.out @@ -0,0 +1,199 @@ +-- +-- CHAR +-- +-- Per SQL standard, CHAR means character(1), that is a varlena type +-- with a constraint restricting it to one character (not byte) +SELECT char 'c' = char 'c' AS true; + true +------ + t +(1 row) + +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE CHAR_TBL(f1 char); +INSERT INTO CHAR_TBL (f1) VALUES ('a'); +INSERT INTO CHAR_TBL (f1) VALUES ('A'); +-- any of the following three input formats are acceptable +INSERT INTO CHAR_TBL (f1) VALUES ('1'); +INSERT INTO CHAR_TBL (f1) VALUES (2); +INSERT INTO CHAR_TBL (f1) VALUES ('3'); +-- zero-length char +INSERT INTO CHAR_TBL (f1) VALUES (''); +-- try char's of greater than 1 length +INSERT INTO CHAR_TBL (f1) VALUES ('cd'); +ERROR: value too long for type character(1) +INSERT INTO CHAR_TBL (f1) VALUES ('c '); +SELECT * FROM CHAR_TBL; + f1 +---- + a + A + 1 + 2 + 3 + + c +(7 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 <> 'a'; + f1 +---- + A + 1 + 2 + 3 + + c +(6 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 = 'a'; + f1 +---- + a +(1 row) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 < 'a'; + f1 +---- + +(1 row) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 <= 'a'; + f1 +---- + a + +(2 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 > 'a'; + f1 +---- + A + 1 + 2 + 3 + c +(5 rows) + +SELECT c.* + FROM CHAR_TBL c + WHERE c.f1 >= 'a'; + f1 +---- + a + A + 1 + 2 + 3 + c +(6 rows) + +DROP TABLE CHAR_TBL; +-- +-- Now test longer arrays of char +-- +-- This char_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. 
+-- +INSERT INTO CHAR_TBL (f1) VALUES ('abcde'); +ERROR: value too long for type character(4) +SELECT * FROM CHAR_TBL; + f1 +------ + a + ab + abcd + abcd +(4 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('abcd ', 'char(4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('abcde', 'char(4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('abcde', 'char(4)'); + message | detail | hint | sql_error_code +--------------------------------------+--------+------+---------------- + value too long for type character(4) | | | 22001 +(1 row) + +-- +-- Also test "char", which is an ad-hoc one-byte type. It can only +-- really store ASCII characters, but we allow high-bit-set characters +-- to be accessed via bytea-like escapes. +-- +SELECT 'a'::"char"; + char +------ + a +(1 row) + +SELECT '\101'::"char"; + char +------ + A +(1 row) + +SELECT '\377'::"char"; + char +------ + \377 +(1 row) + +SELECT 'a'::"char"::text; + text +------ + a +(1 row) + +SELECT '\377'::"char"::text; + text +------ + \377 +(1 row) + +SELECT '\000'::"char"::text; + text +------ + +(1 row) + +SELECT 'a'::text::"char"; + char +------ + a +(1 row) + +SELECT '\377'::text::"char"; + char +------ + \377 +(1 row) + +SELECT ''::text::"char"; + char +------ + +(1 row) + diff --git a/src/test/regress/expected/circle.out b/src/test/regress/expected/circle.out new file mode 100644 index 0000000..c3b0527 --- /dev/null +++ b/src/test/regress/expected/circle.out @@ -0,0 +1,125 @@ +-- +-- CIRCLE +-- +-- Back off displayed precision a little bit to reduce platform-to-platform +-- variation in results. +SET extra_float_digits = -1; +CREATE TABLE CIRCLE_TBL (f1 circle); +INSERT INTO CIRCLE_TBL VALUES ('<(5,1),3>'); +INSERT INTO CIRCLE_TBL VALUES ('((1,2),100)'); +INSERT INTO CIRCLE_TBL VALUES (' 1 , 3 , 5 '); +INSERT INTO CIRCLE_TBL VALUES (' ( ( 1 , 2 ) , 3 ) '); +INSERT INTO CIRCLE_TBL VALUES (' ( 100 , 200 ) , 10 '); +INSERT INTO CIRCLE_TBL VALUES (' < ( 100 , 1 ) , 115 > '); +INSERT INTO CIRCLE_TBL VALUES ('<(3,5),0>'); -- Zero radius +INSERT INTO CIRCLE_TBL VALUES ('<(3,5),NaN>'); -- NaN radius +-- bad values +INSERT INTO CIRCLE_TBL VALUES ('<(-100,0),-100>'); +ERROR: invalid input syntax for type circle: "<(-100,0),-100>" +LINE 1: INSERT INTO CIRCLE_TBL VALUES ('<(-100,0),-100>'); + ^ +INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10'); +ERROR: invalid input syntax for type circle: "<(100,200),10" +LINE 1: INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10'); + ^ +INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10> x'); +ERROR: invalid input syntax for type circle: "<(100,200),10> x" +LINE 1: INSERT INTO CIRCLE_TBL VALUES ('<(100,200),10> x'); + ^ +INSERT INTO CIRCLE_TBL VALUES ('1abc,3,5'); +ERROR: invalid input syntax for type circle: "1abc,3,5" +LINE 1: INSERT INTO CIRCLE_TBL VALUES ('1abc,3,5'); + ^ +INSERT INTO CIRCLE_TBL VALUES ('(3,(1,2),3)'); +ERROR: invalid input syntax for type circle: "(3,(1,2),3)" +LINE 1: INSERT INTO CIRCLE_TBL VALUES ('(3,(1,2),3)'); + ^ +SELECT * FROM CIRCLE_TBL; + f1 +---------------- + <(5,1),3> + <(1,2),100> + <(1,3),5> + <(1,2),3> + <(100,200),10> + <(100,1),115> + <(3,5),0> + <(3,5),NaN> +(8 rows) + +SELECT center(f1) AS center + FROM CIRCLE_TBL; + center +----------- + (5,1) + (1,2) + (1,3) + (1,2) + (100,200) + (100,1) + (3,5) + (3,5) +(8 rows) + +SELECT radius(f1) AS radius + FROM CIRCLE_TBL; + radius +-------- + 3 + 100 + 5 + 3 + 10 + 115 + 0 + NaN +(8 rows) + +SELECT diameter(f1) AS diameter + FROM 
CIRCLE_TBL; + diameter +---------- + 6 + 200 + 10 + 6 + 20 + 230 + 0 + NaN +(8 rows) + +SELECT f1 FROM CIRCLE_TBL WHERE radius(f1) < 5; + f1 +----------- + <(5,1),3> + <(1,2),3> + <(3,5),0> +(3 rows) + +SELECT f1 FROM CIRCLE_TBL WHERE diameter(f1) >= 10; + f1 +---------------- + <(1,2),100> + <(1,3),5> + <(100,200),10> + <(100,1),115> + <(3,5),NaN> +(5 rows) + +SELECT c1.f1 AS one, c2.f1 AS two, (c1.f1 <-> c2.f1) AS distance + FROM CIRCLE_TBL c1, CIRCLE_TBL c2 + WHERE (c1.f1 < c2.f1) AND ((c1.f1 <-> c2.f1) > 0) + ORDER BY distance, area(c1.f1), area(c2.f1); + one | two | distance +----------------+----------------+------------------ + <(3,5),0> | <(1,2),3> | 0.60555127546399 + <(3,5),0> | <(5,1),3> | 1.4721359549996 + <(100,200),10> | <(100,1),115> | 74 + <(100,200),10> | <(1,2),100> | 111.37072977248 + <(1,3),5> | <(100,200),10> | 205.4767561445 + <(5,1),3> | <(100,200),10> | 207.51303816328 + <(3,5),0> | <(100,200),10> | 207.79348015953 + <(1,2),3> | <(100,200),10> | 208.37072977248 +(8 rows) + diff --git a/src/test/regress/expected/cluster.out b/src/test/regress/expected/cluster.out new file mode 100644 index 0000000..542c2e0 --- /dev/null +++ b/src/test/regress/expected/cluster.out @@ -0,0 +1,667 @@ +-- +-- CLUSTER +-- +CREATE TABLE clstr_tst_s (rf_a SERIAL PRIMARY KEY, + b INT); +CREATE TABLE clstr_tst (a SERIAL PRIMARY KEY, + b INT, + c TEXT, + d TEXT, + CONSTRAINT clstr_tst_con FOREIGN KEY (b) REFERENCES clstr_tst_s); +CREATE INDEX clstr_tst_b ON clstr_tst (b); +CREATE INDEX clstr_tst_c ON clstr_tst (c); +CREATE INDEX clstr_tst_c_b ON clstr_tst (c,b); +CREATE INDEX clstr_tst_b_c ON clstr_tst (b,c); +INSERT INTO clstr_tst_s (b) VALUES (0); +INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; +INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; +INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; +INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; +INSERT INTO clstr_tst_s (b) SELECT b FROM clstr_tst_s; +CREATE TABLE clstr_tst_inh () INHERITS (clstr_tst); +INSERT INTO clstr_tst (b, c) VALUES (11, 'once'); +INSERT INTO clstr_tst (b, c) VALUES (10, 'diez'); +INSERT INTO clstr_tst (b, c) VALUES (31, 'treinta y uno'); +INSERT INTO clstr_tst (b, c) VALUES (22, 'veintidos'); +INSERT INTO clstr_tst (b, c) VALUES (3, 'tres'); +INSERT INTO clstr_tst (b, c) VALUES (20, 'veinte'); +INSERT INTO clstr_tst (b, c) VALUES (23, 'veintitres'); +INSERT INTO clstr_tst (b, c) VALUES (21, 'veintiuno'); +INSERT INTO clstr_tst (b, c) VALUES (4, 'cuatro'); +INSERT INTO clstr_tst (b, c) VALUES (14, 'catorce'); +INSERT INTO clstr_tst (b, c) VALUES (2, 'dos'); +INSERT INTO clstr_tst (b, c) VALUES (18, 'dieciocho'); +INSERT INTO clstr_tst (b, c) VALUES (27, 'veintisiete'); +INSERT INTO clstr_tst (b, c) VALUES (25, 'veinticinco'); +INSERT INTO clstr_tst (b, c) VALUES (13, 'trece'); +INSERT INTO clstr_tst (b, c) VALUES (28, 'veintiocho'); +INSERT INTO clstr_tst (b, c) VALUES (32, 'treinta y dos'); +INSERT INTO clstr_tst (b, c) VALUES (5, 'cinco'); +INSERT INTO clstr_tst (b, c) VALUES (29, 'veintinueve'); +INSERT INTO clstr_tst (b, c) VALUES (1, 'uno'); +INSERT INTO clstr_tst (b, c) VALUES (24, 'veinticuatro'); +INSERT INTO clstr_tst (b, c) VALUES (30, 'treinta'); +INSERT INTO clstr_tst (b, c) VALUES (12, 'doce'); +INSERT INTO clstr_tst (b, c) VALUES (17, 'diecisiete'); +INSERT INTO clstr_tst (b, c) VALUES (9, 'nueve'); +INSERT INTO clstr_tst (b, c) VALUES (19, 'diecinueve'); +INSERT INTO clstr_tst (b, c) VALUES (26, 'veintiseis'); +INSERT INTO clstr_tst (b, c) VALUES (15, 'quince'); +INSERT INTO 
clstr_tst (b, c) VALUES (7, 'siete'); +INSERT INTO clstr_tst (b, c) VALUES (16, 'dieciseis'); +INSERT INTO clstr_tst (b, c) VALUES (8, 'ocho'); +-- This entry is needed to test that TOASTED values are copied correctly. +INSERT INTO clstr_tst (b, c, d) VALUES (6, 'seis', repeat('xyzzy', 100000)); +CLUSTER clstr_tst_c ON clstr_tst; +SELECT a,b,c,substring(d for 30), length(d) from clstr_tst; + a | b | c | substring | length +----+----+---------------+--------------------------------+-------- + 10 | 14 | catorce | | + 18 | 5 | cinco | | + 9 | 4 | cuatro | | + 26 | 19 | diecinueve | | + 12 | 18 | dieciocho | | + 30 | 16 | dieciseis | | + 24 | 17 | diecisiete | | + 2 | 10 | diez | | + 23 | 12 | doce | | + 11 | 2 | dos | | + 25 | 9 | nueve | | + 31 | 8 | ocho | | + 1 | 11 | once | | + 28 | 15 | quince | | + 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 + 29 | 7 | siete | | + 15 | 13 | trece | | + 22 | 30 | treinta | | + 17 | 32 | treinta y dos | | + 3 | 31 | treinta y uno | | + 5 | 3 | tres | | + 20 | 1 | uno | | + 6 | 20 | veinte | | + 14 | 25 | veinticinco | | + 21 | 24 | veinticuatro | | + 4 | 22 | veintidos | | + 19 | 29 | veintinueve | | + 16 | 28 | veintiocho | | + 27 | 26 | veintiseis | | + 13 | 27 | veintisiete | | + 7 | 23 | veintitres | | + 8 | 21 | veintiuno | | +(32 rows) + +SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY a; + a | b | c | substring | length +----+----+---------------+--------------------------------+-------- + 1 | 11 | once | | + 2 | 10 | diez | | + 3 | 31 | treinta y uno | | + 4 | 22 | veintidos | | + 5 | 3 | tres | | + 6 | 20 | veinte | | + 7 | 23 | veintitres | | + 8 | 21 | veintiuno | | + 9 | 4 | cuatro | | + 10 | 14 | catorce | | + 11 | 2 | dos | | + 12 | 18 | dieciocho | | + 13 | 27 | veintisiete | | + 14 | 25 | veinticinco | | + 15 | 13 | trece | | + 16 | 28 | veintiocho | | + 17 | 32 | treinta y dos | | + 18 | 5 | cinco | | + 19 | 29 | veintinueve | | + 20 | 1 | uno | | + 21 | 24 | veinticuatro | | + 22 | 30 | treinta | | + 23 | 12 | doce | | + 24 | 17 | diecisiete | | + 25 | 9 | nueve | | + 26 | 19 | diecinueve | | + 27 | 26 | veintiseis | | + 28 | 15 | quince | | + 29 | 7 | siete | | + 30 | 16 | dieciseis | | + 31 | 8 | ocho | | + 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 +(32 rows) + +SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY b; + a | b | c | substring | length +----+----+---------------+--------------------------------+-------- + 20 | 1 | uno | | + 11 | 2 | dos | | + 5 | 3 | tres | | + 9 | 4 | cuatro | | + 18 | 5 | cinco | | + 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 + 29 | 7 | siete | | + 31 | 8 | ocho | | + 25 | 9 | nueve | | + 2 | 10 | diez | | + 1 | 11 | once | | + 23 | 12 | doce | | + 15 | 13 | trece | | + 10 | 14 | catorce | | + 28 | 15 | quince | | + 30 | 16 | dieciseis | | + 24 | 17 | diecisiete | | + 12 | 18 | dieciocho | | + 26 | 19 | diecinueve | | + 6 | 20 | veinte | | + 8 | 21 | veintiuno | | + 4 | 22 | veintidos | | + 7 | 23 | veintitres | | + 21 | 24 | veinticuatro | | + 14 | 25 | veinticinco | | + 27 | 26 | veintiseis | | + 13 | 27 | veintisiete | | + 16 | 28 | veintiocho | | + 19 | 29 | veintinueve | | + 22 | 30 | treinta | | + 3 | 31 | treinta y uno | | + 17 | 32 | treinta y dos | | +(32 rows) + +SELECT a,b,c,substring(d for 30), length(d) from clstr_tst ORDER BY c; + a | b | c | substring | length +----+----+---------------+--------------------------------+-------- + 10 | 14 | catorce | | + 18 | 5 | cinco | | + 9 | 4 | cuatro | | + 26 | 19 | diecinueve 
| | + 12 | 18 | dieciocho | | + 30 | 16 | dieciseis | | + 24 | 17 | diecisiete | | + 2 | 10 | diez | | + 23 | 12 | doce | | + 11 | 2 | dos | | + 25 | 9 | nueve | | + 31 | 8 | ocho | | + 1 | 11 | once | | + 28 | 15 | quince | | + 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 + 29 | 7 | siete | | + 15 | 13 | trece | | + 22 | 30 | treinta | | + 17 | 32 | treinta y dos | | + 3 | 31 | treinta y uno | | + 5 | 3 | tres | | + 20 | 1 | uno | | + 6 | 20 | veinte | | + 14 | 25 | veinticinco | | + 21 | 24 | veinticuatro | | + 4 | 22 | veintidos | | + 19 | 29 | veintinueve | | + 16 | 28 | veintiocho | | + 27 | 26 | veintiseis | | + 13 | 27 | veintisiete | | + 7 | 23 | veintitres | | + 8 | 21 | veintiuno | | +(32 rows) + +-- Verify that inheritance link still works +INSERT INTO clstr_tst_inh VALUES (0, 100, 'in child table'); +SELECT a,b,c,substring(d for 30), length(d) from clstr_tst; + a | b | c | substring | length +----+-----+----------------+--------------------------------+-------- + 10 | 14 | catorce | | + 18 | 5 | cinco | | + 9 | 4 | cuatro | | + 26 | 19 | diecinueve | | + 12 | 18 | dieciocho | | + 30 | 16 | dieciseis | | + 24 | 17 | diecisiete | | + 2 | 10 | diez | | + 23 | 12 | doce | | + 11 | 2 | dos | | + 25 | 9 | nueve | | + 31 | 8 | ocho | | + 1 | 11 | once | | + 28 | 15 | quince | | + 32 | 6 | seis | xyzzyxyzzyxyzzyxyzzyxyzzyxyzzy | 500000 + 29 | 7 | siete | | + 15 | 13 | trece | | + 22 | 30 | treinta | | + 17 | 32 | treinta y dos | | + 3 | 31 | treinta y uno | | + 5 | 3 | tres | | + 20 | 1 | uno | | + 6 | 20 | veinte | | + 14 | 25 | veinticinco | | + 21 | 24 | veinticuatro | | + 4 | 22 | veintidos | | + 19 | 29 | veintinueve | | + 16 | 28 | veintiocho | | + 27 | 26 | veintiseis | | + 13 | 27 | veintisiete | | + 7 | 23 | veintitres | | + 8 | 21 | veintiuno | | + 0 | 100 | in child table | | +(33 rows) + +-- Verify that foreign key link still works +INSERT INTO clstr_tst (b, c) VALUES (1111, 'this should fail'); +ERROR: insert or update on table "clstr_tst" violates foreign key constraint "clstr_tst_con" +DETAIL: Key (b)=(1111) is not present in table "clstr_tst_s". 
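The clstr_tst queries above depend on CLUSTER physically rewriting the heap in the order of the chosen index while leaving inheritance and foreign-key enforcement untouched. A minimal sketch of that rewrite, using a hypothetical table that is not part of the regression suite, makes the reordering visible through ctid (the physical row position):

CREATE TABLE clstr_demo (k int PRIMARY KEY, v text);
INSERT INTO clstr_demo VALUES (3, 'c'), (1, 'a'), (2, 'b');
SELECT ctid, k FROM clstr_demo;            -- rows still lie in insertion order
CLUSTER clstr_demo USING clstr_demo_pkey;  -- rewrite the heap in index order
SELECT ctid, k FROM clstr_demo;            -- physical order now follows k
DROP TABLE clstr_demo;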
+SELECT conname FROM pg_constraint WHERE conrelid = 'clstr_tst'::regclass +ORDER BY 1; + conname +---------------- + clstr_tst_con + clstr_tst_pkey +(2 rows) + +SELECT relname, relkind, + EXISTS(SELECT 1 FROM pg_class WHERE oid = c.reltoastrelid) AS hastoast +FROM pg_class c WHERE relname LIKE 'clstr_tst%' ORDER BY relname; + relname | relkind | hastoast +----------------------+---------+---------- + clstr_tst | r | t + clstr_tst_a_seq | S | f + clstr_tst_b | i | f + clstr_tst_b_c | i | f + clstr_tst_c | i | f + clstr_tst_c_b | i | f + clstr_tst_inh | r | t + clstr_tst_pkey | i | f + clstr_tst_s | r | f + clstr_tst_s_pkey | i | f + clstr_tst_s_rf_a_seq | S | f +(11 rows) + +-- Verify that indisclustered is correctly set +SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 +WHERE pg_class.oid=indexrelid + AND indrelid=pg_class_2.oid + AND pg_class_2.relname = 'clstr_tst' + AND indisclustered; + relname +------------- + clstr_tst_c +(1 row) + +-- Try changing indisclustered +ALTER TABLE clstr_tst CLUSTER ON clstr_tst_b_c; +SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 +WHERE pg_class.oid=indexrelid + AND indrelid=pg_class_2.oid + AND pg_class_2.relname = 'clstr_tst' + AND indisclustered; + relname +--------------- + clstr_tst_b_c +(1 row) + +-- Try turning off all clustering +ALTER TABLE clstr_tst SET WITHOUT CLUSTER; +SELECT pg_class.relname FROM pg_index, pg_class, pg_class AS pg_class_2 +WHERE pg_class.oid=indexrelid + AND indrelid=pg_class_2.oid + AND pg_class_2.relname = 'clstr_tst' + AND indisclustered; + relname +--------- +(0 rows) + +-- Verify that toast tables are clusterable +CLUSTER pg_toast.pg_toast_826 USING pg_toast_826_index; +-- Verify that clustering all tables does in fact cluster the right ones +CREATE USER regress_clstr_user; +CREATE TABLE clstr_1 (a INT PRIMARY KEY); +CREATE TABLE clstr_2 (a INT PRIMARY KEY); +CREATE TABLE clstr_3 (a INT PRIMARY KEY); +ALTER TABLE clstr_1 OWNER TO regress_clstr_user; +ALTER TABLE clstr_3 OWNER TO regress_clstr_user; +GRANT SELECT ON clstr_2 TO regress_clstr_user; +INSERT INTO clstr_1 VALUES (2); +INSERT INTO clstr_1 VALUES (1); +INSERT INTO clstr_2 VALUES (2); +INSERT INTO clstr_2 VALUES (1); +INSERT INTO clstr_3 VALUES (2); +INSERT INTO clstr_3 VALUES (1); +-- "CLUSTER " on a table that hasn't been clustered +CLUSTER clstr_2; +ERROR: there is no previously clustered index for table "clstr_2" +CLUSTER clstr_1_pkey ON clstr_1; +CLUSTER clstr_2 USING clstr_2_pkey; +SELECT * FROM clstr_1 UNION ALL + SELECT * FROM clstr_2 UNION ALL + SELECT * FROM clstr_3; + a +--- + 1 + 2 + 1 + 2 + 2 + 1 +(6 rows) + +-- revert to the original state +DELETE FROM clstr_1; +DELETE FROM clstr_2; +DELETE FROM clstr_3; +INSERT INTO clstr_1 VALUES (2); +INSERT INTO clstr_1 VALUES (1); +INSERT INTO clstr_2 VALUES (2); +INSERT INTO clstr_2 VALUES (1); +INSERT INTO clstr_3 VALUES (2); +INSERT INTO clstr_3 VALUES (1); +-- this user can only cluster clstr_1 and clstr_3, but the latter +-- has not been clustered +SET SESSION AUTHORIZATION regress_clstr_user; +CLUSTER; +SELECT * FROM clstr_1 UNION ALL + SELECT * FROM clstr_2 UNION ALL + SELECT * FROM clstr_3; + a +--- + 1 + 2 + 2 + 1 + 2 + 1 +(6 rows) + +-- cluster a single table using the indisclustered bit previously set +DELETE FROM clstr_1; +INSERT INTO clstr_1 VALUES (2); +INSERT INTO clstr_1 VALUES (1); +CLUSTER clstr_1; +SELECT * FROM clstr_1; + a +--- + 1 + 2 +(2 rows) + +-- Test MVCC-safety of cluster. 
There isn't much we can do to verify the +-- results with a single backend... +CREATE TABLE clustertest (key int PRIMARY KEY); +INSERT INTO clustertest VALUES (10); +INSERT INTO clustertest VALUES (20); +INSERT INTO clustertest VALUES (30); +INSERT INTO clustertest VALUES (40); +INSERT INTO clustertest VALUES (50); +-- Use a transaction so that updates are not committed when CLUSTER sees 'em +BEGIN; +-- Test update where the old row version is found first in the scan +UPDATE clustertest SET key = 100 WHERE key = 10; +-- Test update where the new row version is found first in the scan +UPDATE clustertest SET key = 35 WHERE key = 40; +-- Test longer update chain +UPDATE clustertest SET key = 60 WHERE key = 50; +UPDATE clustertest SET key = 70 WHERE key = 60; +UPDATE clustertest SET key = 80 WHERE key = 70; +SELECT * FROM clustertest; + key +----- + 20 + 30 + 100 + 35 + 80 +(5 rows) + +CLUSTER clustertest_pkey ON clustertest; +SELECT * FROM clustertest; + key +----- + 20 + 30 + 35 + 80 + 100 +(5 rows) + +COMMIT; +SELECT * FROM clustertest; + key +----- + 20 + 30 + 35 + 80 + 100 +(5 rows) + +-- check that temp tables can be clustered +create temp table clstr_temp (col1 int primary key, col2 text); +insert into clstr_temp values (2, 'two'), (1, 'one'); +cluster clstr_temp using clstr_temp_pkey; +select * from clstr_temp; + col1 | col2 +------+------ + 1 | one + 2 | two +(2 rows) + +drop table clstr_temp; +RESET SESSION AUTHORIZATION; +-- check clustering an empty table +DROP TABLE clustertest; +CREATE TABLE clustertest (f1 int PRIMARY KEY); +CLUSTER clustertest USING clustertest_pkey; +CLUSTER clustertest; +-- Check that partitioned tables can be clustered +CREATE TABLE clstrpart (a int) PARTITION BY RANGE (a); +CREATE TABLE clstrpart1 PARTITION OF clstrpart FOR VALUES FROM (1) TO (10) PARTITION BY RANGE (a); +CREATE TABLE clstrpart11 PARTITION OF clstrpart1 FOR VALUES FROM (1) TO (5); +CREATE TABLE clstrpart12 PARTITION OF clstrpart1 FOR VALUES FROM (5) TO (10) PARTITION BY RANGE (a); +CREATE TABLE clstrpart2 PARTITION OF clstrpart FOR VALUES FROM (10) TO (20); +CREATE TABLE clstrpart3 PARTITION OF clstrpart DEFAULT PARTITION BY RANGE (a); +CREATE TABLE clstrpart33 PARTITION OF clstrpart3 DEFAULT; +CREATE INDEX clstrpart_only_idx ON ONLY clstrpart (a); +CLUSTER clstrpart USING clstrpart_only_idx; -- fails +ERROR: cannot cluster on invalid index "clstrpart_only_idx" +DROP INDEX clstrpart_only_idx; +CREATE INDEX clstrpart_idx ON clstrpart (a); +-- Check that clustering sets new relfilenodes: +CREATE TEMP TABLE old_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ; +CLUSTER clstrpart USING clstrpart_idx; +CREATE TEMP TABLE new_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ; +SELECT relname, old.level, old.relkind, old.relfilenode = new.relfilenode FROM old_cluster_info AS old JOIN new_cluster_info AS new USING (relname) ORDER BY relname COLLATE "C"; + relname | level | relkind | ?column? 
+-------------+-------+---------+---------- + clstrpart | 0 | p | t + clstrpart1 | 1 | p | t + clstrpart11 | 2 | r | f + clstrpart12 | 2 | p | t + clstrpart2 | 1 | r | f + clstrpart3 | 1 | p | t + clstrpart33 | 2 | r | f +(7 rows) + +-- Partitioned indexes aren't and can't be marked un/clustered: +\d clstrpart + Partitioned table "public.clstrpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition key: RANGE (a) +Indexes: + "clstrpart_idx" btree (a) +Number of partitions: 3 (Use \d+ to list them.) + +CLUSTER clstrpart; +ERROR: there is no previously clustered index for table "clstrpart" +ALTER TABLE clstrpart SET WITHOUT CLUSTER; +ERROR: cannot mark index clustered in partitioned table +ALTER TABLE clstrpart CLUSTER ON clstrpart_idx; +ERROR: cannot mark index clustered in partitioned table +DROP TABLE clstrpart; +-- Ownership of partitions is checked +CREATE TABLE ptnowner(i int unique) PARTITION BY LIST (i); +CREATE INDEX ptnowner_i_idx ON ptnowner(i); +CREATE TABLE ptnowner1 PARTITION OF ptnowner FOR VALUES IN (1); +CREATE ROLE regress_ptnowner; +CREATE TABLE ptnowner2 PARTITION OF ptnowner FOR VALUES IN (2); +ALTER TABLE ptnowner1 OWNER TO regress_ptnowner; +ALTER TABLE ptnowner OWNER TO regress_ptnowner; +CREATE TEMP TABLE ptnowner_oldnodes AS + SELECT oid, relname, relfilenode FROM pg_partition_tree('ptnowner') AS tree + JOIN pg_class AS c ON c.oid=tree.relid; +SET SESSION AUTHORIZATION regress_ptnowner; +CLUSTER ptnowner USING ptnowner_i_idx; +RESET SESSION AUTHORIZATION; +SELECT a.relname, a.relfilenode=b.relfilenode FROM pg_class a + JOIN ptnowner_oldnodes b USING (oid) ORDER BY a.relname COLLATE "C"; + relname | ?column? +-----------+---------- + ptnowner | t + ptnowner1 | f + ptnowner2 | t +(3 rows) + +DROP TABLE ptnowner; +DROP ROLE regress_ptnowner; +-- Test CLUSTER with external tuplesorting +create table clstr_4 as select * from tenk1; +create index cluster_sort on clstr_4 (hundred, thousand, tenthous); +-- ensure we don't use the index in CLUSTER nor the checking SELECTs +set enable_indexscan = off; +-- Use external sort: +set maintenance_work_mem = '1MB'; +cluster clstr_4 using cluster_sort; +select * from +(select hundred, lag(hundred) over () as lhundred, + thousand, lag(thousand) over () as lthousand, + tenthous, lag(tenthous) over () as ltenthous from clstr_4) ss +where row(hundred, thousand, tenthous) <= row(lhundred, lthousand, ltenthous); + hundred | lhundred | thousand | lthousand | tenthous | ltenthous +---------+----------+----------+-----------+----------+----------- +(0 rows) + +reset enable_indexscan; +reset maintenance_work_mem; +-- test CLUSTER on expression index +CREATE TABLE clstr_expression(id serial primary key, a int, b text COLLATE "C"); +INSERT INTO clstr_expression(a, b) SELECT g.i % 42, 'prefix'||g.i FROM generate_series(1, 133) g(i); +CREATE INDEX clstr_expression_minus_a ON clstr_expression ((-a), b); +CREATE INDEX clstr_expression_upper_b ON clstr_expression ((upper(b))); +-- verify indexes work before cluster +BEGIN; +SET LOCAL enable_seqscan = false; +EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; + QUERY PLAN +--------------------------------------------------------------- + Index Scan using clstr_expression_upper_b on clstr_expression + Index Cond: (upper(b) = 'PREFIX3'::text) +(2 rows) + +SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; + id | a | b +----+---+--------- + 3 | 3 | prefix3 +(1 row) + +EXPLAIN (COSTS OFF) 
SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; + QUERY PLAN +--------------------------------------------------------------- + Index Scan using clstr_expression_minus_a on clstr_expression + Index Cond: ((- a) = '-3'::integer) +(2 rows) + +SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; + id | a | b +-----+---+----------- + 129 | 3 | prefix129 + 3 | 3 | prefix3 + 45 | 3 | prefix45 + 87 | 3 | prefix87 +(4 rows) + +COMMIT; +-- and after clustering on clstr_expression_minus_a +CLUSTER clstr_expression USING clstr_expression_minus_a; +WITH rows AS + (SELECT ctid, lag(a) OVER (ORDER BY ctid) AS la, a FROM clstr_expression) +SELECT * FROM rows WHERE la < a; + ctid | la | a +------+----+--- +(0 rows) + +BEGIN; +SET LOCAL enable_seqscan = false; +EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; + QUERY PLAN +--------------------------------------------------------------- + Index Scan using clstr_expression_upper_b on clstr_expression + Index Cond: (upper(b) = 'PREFIX3'::text) +(2 rows) + +SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; + id | a | b +----+---+--------- + 3 | 3 | prefix3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; + QUERY PLAN +--------------------------------------------------------------- + Index Scan using clstr_expression_minus_a on clstr_expression + Index Cond: ((- a) = '-3'::integer) +(2 rows) + +SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; + id | a | b +-----+---+----------- + 129 | 3 | prefix129 + 3 | 3 | prefix3 + 45 | 3 | prefix45 + 87 | 3 | prefix87 +(4 rows) + +COMMIT; +-- and after clustering on clstr_expression_upper_b +CLUSTER clstr_expression USING clstr_expression_upper_b; +WITH rows AS + (SELECT ctid, lag(b) OVER (ORDER BY ctid) AS lb, b FROM clstr_expression) +SELECT * FROM rows WHERE upper(lb) > upper(b); + ctid | lb | b +------+----+--- +(0 rows) + +BEGIN; +SET LOCAL enable_seqscan = false; +EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; + QUERY PLAN +--------------------------------------------------------------- + Index Scan using clstr_expression_upper_b on clstr_expression + Index Cond: (upper(b) = 'PREFIX3'::text) +(2 rows) + +SELECT * FROM clstr_expression WHERE upper(b) = 'PREFIX3'; + id | a | b +----+---+--------- + 3 | 3 | prefix3 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; + QUERY PLAN +--------------------------------------------------------------- + Index Scan using clstr_expression_minus_a on clstr_expression + Index Cond: ((- a) = '-3'::integer) +(2 rows) + +SELECT * FROM clstr_expression WHERE -a = -3 ORDER BY -a, b; + id | a | b +-----+---+----------- + 129 | 3 | prefix129 + 3 | 3 | prefix3 + 45 | 3 | prefix45 + 87 | 3 | prefix87 +(4 rows) + +COMMIT; +-- clean up +DROP TABLE clustertest; +DROP TABLE clstr_1; +DROP TABLE clstr_2; +DROP TABLE clstr_3; +DROP TABLE clstr_4; +DROP TABLE clstr_expression; +DROP USER regress_clstr_user; diff --git a/src/test/regress/expected/collate.icu.utf8.out b/src/test/regress/expected/collate.icu.utf8.out new file mode 100644 index 0000000..97bbe53 --- /dev/null +++ b/src/test/regress/expected/collate.icu.utf8.out @@ -0,0 +1,2059 @@ +/* + * This test is for ICU collations. 
+ */ +/* skip test if not UTF8 server encoding or no ICU collations installed */ +SELECT getdatabaseencoding() <> 'UTF8' OR + (SELECT count(*) FROM pg_collation WHERE collprovider = 'i' AND collname <> 'unicode') = 0 + AS skip_test \gset +\if :skip_test +\quit +\endif +SET client_encoding TO UTF8; +CREATE SCHEMA collate_tests; +SET search_path = collate_tests; +CREATE TABLE collate_test1 ( + a int, + b text COLLATE "en-x-icu" NOT NULL +); +\d collate_test1 + Table "collate_tests.collate_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | en-x-icu | not null | + +CREATE TABLE collate_test_fail ( + a int, + b text COLLATE "ja_JP.eucjp-x-icu" +); +ERROR: collation "ja_JP.eucjp-x-icu" for encoding "UTF8" does not exist +LINE 3: b text COLLATE "ja_JP.eucjp-x-icu" + ^ +CREATE TABLE collate_test_fail ( + a int, + b text COLLATE "foo-x-icu" +); +ERROR: collation "foo-x-icu" for encoding "UTF8" does not exist +LINE 3: b text COLLATE "foo-x-icu" + ^ +CREATE TABLE collate_test_fail ( + a int COLLATE "en-x-icu", + b text +); +ERROR: collations are not supported by type integer +LINE 2: a int COLLATE "en-x-icu", + ^ +CREATE TABLE collate_test_like ( + LIKE collate_test1 +); +\d collate_test_like + Table "collate_tests.collate_test_like" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | en-x-icu | not null | + +CREATE TABLE collate_test2 ( + a int, + b text COLLATE "sv-x-icu" +); +CREATE TABLE collate_test3 ( + a int, + b text COLLATE "C" +); +INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC'); +INSERT INTO collate_test2 SELECT * FROM collate_test1; +INSERT INTO collate_test3 SELECT * FROM collate_test1; +SELECT * FROM collate_test1 WHERE b >= 'bbc'; + a | b +---+----- + 3 | bbc +(1 row) + +SELECT * FROM collate_test2 WHERE b >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test3 WHERE b >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test3 WHERE b >= 'BBC'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b >= 'bbc' COLLATE "C"; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "C"; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "en-x-icu"; +ERROR: collation mismatch between explicit collations "C" and "en-x-icu" +LINE 1: ...* FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "e... 
+ ^ +CREATE DOMAIN testdomain_sv AS text COLLATE "sv-x-icu"; +CREATE DOMAIN testdomain_i AS int COLLATE "sv-x-icu"; -- fails +ERROR: collations are not supported by type integer +CREATE TABLE collate_test4 ( + a int, + b testdomain_sv +); +INSERT INTO collate_test4 SELECT * FROM collate_test1; +SELECT a, b FROM collate_test4 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +CREATE TABLE collate_test5 ( + a int, + b testdomain_sv COLLATE "en-x-icu" +); +INSERT INTO collate_test5 SELECT * FROM collate_test1; +SELECT a, b FROM collate_test5 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test1 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test2 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test3 ORDER BY b; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- star expansion +SELECT * FROM collate_test1 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT * FROM collate_test2 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT * FROM collate_test3 ORDER BY b; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- constant expression folding +SELECT 'bbc' COLLATE "en-x-icu" > 'äbc' COLLATE "en-x-icu" AS "true"; + true +------ + t +(1 row) + +SELECT 'bbc' COLLATE "sv-x-icu" > 'äbc' COLLATE "sv-x-icu" AS "false"; + false +------- + f +(1 row) + +-- upper/lower +CREATE TABLE collate_test10 ( + a int, + x text COLLATE "en-x-icu", + y text COLLATE "tr-x-icu" +); +INSERT INTO collate_test10 VALUES (1, 'hij', 'hij'), (2, 'HIJ', 'HIJ'); +SELECT a, lower(x), lower(y), upper(x), upper(y), initcap(x), initcap(y) FROM collate_test10; + a | lower | lower | upper | upper | initcap | initcap +---+-------+-------+-------+-------+---------+--------- + 1 | hij | hij | HIJ | HÄ°J | Hij | Hij + 2 | hij | hıj | HIJ | HIJ | Hij | Hıj +(2 rows) + +SELECT a, lower(x COLLATE "C"), lower(y COLLATE "C") FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hij +(2 rows) + +SELECT a, x, y FROM collate_test10 ORDER BY lower(y), a; + a | x | y +---+-----+----- + 2 | HIJ | HIJ + 1 | hij | hij +(2 rows) + +-- LIKE/ILIKE +SELECT * FROM collate_test1 WHERE b LIKE 'abc'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b LIKE 'abc%'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b LIKE '%bc%'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b ILIKE 'abc'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ILIKE 'abc%'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ILIKE '%bc%'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(4 rows) + +SELECT 'Türkiye' COLLATE "en-x-icu" ILIKE '%KI%' AS "true"; + true +------ + t +(1 row) + +SELECT 'Türkiye' COLLATE "tr-x-icu" ILIKE '%KI%' AS "false"; + false +------- + f +(1 row) + +SELECT 'bıt' ILIKE 'BIT' COLLATE "en-x-icu" AS "false"; + false +------- + f +(1 row) + +SELECT 'bıt' ILIKE 'BIT' COLLATE "tr-x-icu" AS "true"; + true +------ + t +(1 row) + +-- The following actually exercises the selectivity 
estimation for ILIKE. +SELECT relname FROM pg_class WHERE relname ILIKE 'abc%'; + relname +--------- +(0 rows) + +-- regular expressions +SELECT * FROM collate_test1 WHERE b ~ '^abc$'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b ~ '^abc'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b ~ 'bc'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b ~* '^abc$'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ~* '^abc'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ~* 'bc'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(4 rows) + +CREATE TABLE collate_test6 ( + a int, + b text COLLATE "en-x-icu" +); +INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'), + (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '), + (9, 'äbç'), (10, 'ÄBÇ'); +SELECT b, + b ~ '^[[:alpha:]]+$' AS is_alpha, + b ~ '^[[:upper:]]+$' AS is_upper, + b ~ '^[[:lower:]]+$' AS is_lower, + b ~ '^[[:digit:]]+$' AS is_digit, + b ~ '^[[:alnum:]]+$' AS is_alnum, + b ~ '^[[:graph:]]+$' AS is_graph, + b ~ '^[[:print:]]+$' AS is_print, + b ~ '^[[:punct:]]+$' AS is_punct, + b ~ '^[[:space:]]+$' AS is_space +FROM collate_test6; + b | is_alpha | is_upper | is_lower | is_digit | is_alnum | is_graph | is_print | is_punct | is_space +-----+----------+----------+----------+----------+----------+----------+----------+----------+---------- + abc | t | f | t | f | t | t | t | f | f + ABC | t | t | f | f | t | t | t | f | f + 123 | f | f | f | t | t | t | t | f | f + ab1 | f | f | f | f | t | t | t | f | f + a1! | f | f | f | f | f | t | t | f | f + a c | f | f | f | f | f | f | t | f | f + !.; | f | f | f | f | f | t | t | t | f + | f | f | f | f | f | f | t | f | t + äbç | t | f | t | f | t | t | t | f | f + ÄBÇ | t | t | f | f | t | t | t | f | f +(10 rows) + +SELECT 'Türkiye' COLLATE "en-x-icu" ~* 'KI' AS "true"; + true +------ + t +(1 row) + +SELECT 'Türkiye' COLLATE "tr-x-icu" ~* 'KI' AS "true"; -- true with ICU + true +------ + t +(1 row) + +SELECT 'bıt' ~* 'BIT' COLLATE "en-x-icu" AS "false"; + false +------- + f +(1 row) + +SELECT 'bıt' ~* 'BIT' COLLATE "tr-x-icu" AS "false"; -- false with ICU + false +------- + f +(1 row) + +-- The following actually exercises the selectivity estimation for ~*. 
+SELECT relname FROM pg_class WHERE relname ~* '^abc'; + relname +--------- +(0 rows) + +/* not run by default because it requires tr_TR system locale +-- to_char + +SET lc_time TO 'tr_TR'; +SELECT to_char(date '2010-04-01', 'DD TMMON YYYY'); +SELECT to_char(date '2010-04-01', 'DD TMMON YYYY' COLLATE "tr-x-icu"); +*/ +-- backwards parsing +CREATE VIEW collview1 AS SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; +CREATE VIEW collview2 AS SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; +CREATE VIEW collview3 AS SELECT a, lower((x || x) COLLATE "C") FROM collate_test10; +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'collview%' ORDER BY 1; + table_name | view_definition +------------+-------------------------------------------- + collview1 | SELECT a, + + | b + + | FROM collate_test1 + + | WHERE ((b COLLATE "C") >= 'bbc'::text); + collview2 | SELECT a, + + | b + + | FROM collate_test1 + + | ORDER BY (b COLLATE "C"); + collview3 | SELECT a, + + | lower(((x || x) COLLATE "C")) AS lower+ + | FROM collate_test10; +(3 rows) + +-- collation propagation in various expression types +SELECT a, coalesce(b, 'foo') FROM collate_test1 ORDER BY 2; + a | coalesce +---+---------- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test2 ORDER BY 2; + a | coalesce +---+---------- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test3 ORDER BY 2; + a | coalesce +---+---------- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, lower(coalesce(x, 'foo')), lower(coalesce(y, 'foo')) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hıj +(2 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test1 ORDER BY 3; + a | b | greatest +---+-----+---------- + 1 | abc | CCC + 2 | äbc | CCC + 3 | bbc | CCC + 4 | ABC | CCC +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test2 ORDER BY 3; + a | b | greatest +---+-----+---------- + 1 | abc | CCC + 3 | bbc | CCC + 4 | ABC | CCC + 2 | äbc | äbc +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test3 ORDER BY 3; + a | b | greatest +---+-----+---------- + 4 | ABC | CCC + 1 | abc | abc + 3 | bbc | bbc + 2 | äbc | äbc +(4 rows) + +SELECT a, x, y, lower(greatest(x, 'foo')), lower(greatest(y, 'foo')) FROM collate_test10; + a | x | y | lower | lower +---+-----+-----+-------+------- + 1 | hij | hij | hij | hij + 2 | HIJ | HIJ | hij | hıj +(2 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test1 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 2 | äbc + 3 | bbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test2 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 3 | bbc + 2 | äbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test3 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 3 | bbc + 2 | äbc + 1 | +(4 rows) + +SELECT a, lower(nullif(x, 'foo')), lower(nullif(y, 'foo')) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hıj +(2 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test1 ORDER BY 2; + a | b +---+------ + 4 | ABC + 2 | äbc + 1 | abcd + 3 | bbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test2 ORDER BY 2; + a | b +---+------ + 4 | ABC + 1 | abcd + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test3 ORDER BY 2; + a | b +---+------ + 4 | ABC + 1 | abcd + 3 | bbc + 2 | äbc +(4 rows) + 
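The coalesce(), greatest(), nullif() and CASE queries above all follow the same derivation rule: an expression inherits the implicit collation of its collatable inputs, and an explicit COLLATE clause overrides that choice. A minimal sketch of the rule, reusing the ICU collation names from these tests but a hypothetical temp table, is:

CREATE TEMP TABLE coll_demo (t text COLLATE "en-x-icu");
INSERT INTO coll_demo VALUES ('abc'), ('ABC'), ('äbc');
-- the CASE result inherits the implicit "en-x-icu" collation of t, so the sort is ICU-based
SELECT CASE t WHEN 'abc' THEN 'abcd' ELSE t END AS v FROM coll_demo ORDER BY 1;
-- an explicit COLLATE overrides the derived collation for this ORDER BY only
SELECT t FROM coll_demo ORDER BY t COLLATE "C";
DROP TABLE coll_demo;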
+CREATE DOMAIN testdomain AS text; +SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b::testdomain FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b::testdomain FROM collate_test3 ORDER BY 2; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b::testdomain_sv FROM collate_test3 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, lower(x::testdomain), lower(y::testdomain) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hıj +(2 rows) + +SELECT min(b), max(b) FROM collate_test1; + min | max +-----+----- + abc | bbc +(1 row) + +SELECT min(b), max(b) FROM collate_test2; + min | max +-----+----- + abc | äbc +(1 row) + +SELECT min(b), max(b) FROM collate_test3; + min | max +-----+----- + ABC | äbc +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test1; + array_agg +------------------- + {abc,ABC,äbc,bbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test2; + array_agg +------------------- + {abc,ABC,bbc,äbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test3; + array_agg +------------------- + {ABC,abc,bbc,äbc} +(1 row) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 1 | abc + 4 | ABC + 4 | ABC + 2 | äbc + 2 | äbc + 3 | bbc + 3 | bbc +(8 rows) + +SELECT a, b FROM collate_test2 UNION SELECT a, b FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test3 WHERE a < 4 INTERSECT SELECT a, b FROM collate_test3 WHERE a > 1 ORDER BY 2; + a | b +---+----- + 3 | bbc + 2 | äbc +(2 rows) + +SELECT a, b FROM collate_test3 EXCEPT SELECT a, b FROM collate_test3 WHERE a < 2 ORDER BY 2; + a | b +---+----- + 4 | ABC + 3 | bbc + 2 | äbc +(3 rows) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3; -- ok + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(8 rows) + +SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en-x-icu" and "C" +LINE 1: SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collat... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b COLLATE "C" FROM collate_test1 UNION SELECT a, b FROM collate_test3 ORDER BY 2; -- ok + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en-x-icu" and "C" +LINE 1: ...ELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM col... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en-x-icu" and "C" +LINE 1: SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM colla... 
+ ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +CREATE TABLE test_u AS SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3; -- fail +ERROR: no collation was derived for column "b" with collatable type text +HINT: Use the COLLATE clause to set the collation explicitly. +-- ideally this would be a parse-time error, but for now it must be run-time: +select x < y from collate_test10; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +select x || y from collate_test10; -- ok, because || is not collation aware + ?column? +---------- + hijhij + HIJHIJ +(2 rows) + +select x, y from collate_test10 order by x || y; -- not so ok +ERROR: collation mismatch between implicit collations "en-x-icu" and "tr-x-icu" +LINE 1: select x, y from collate_test10 order by x || y; + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +-- collation mismatch between recursive and non-recursive term +WITH RECURSIVE foo(x) AS + (SELECT x FROM (VALUES('a' COLLATE "en-x-icu"),('b')) t(x) + UNION ALL + SELECT (x || 'c') COLLATE "de-x-icu" FROM foo WHERE length(x) < 10) +SELECT * FROM foo; +ERROR: recursive query "foo" column 1 has collation "en-x-icu" in non-recursive term but collation "de-x-icu" overall +LINE 2: (SELECT x FROM (VALUES('a' COLLATE "en-x-icu"),('b')) t(x... + ^ +HINT: Use the COLLATE clause to set the collation of the non-recursive term. +-- casting +SELECT CAST('42' AS text COLLATE "C"); +ERROR: syntax error at or near "COLLATE" +LINE 1: SELECT CAST('42' AS text COLLATE "C"); + ^ +SELECT a, CAST(b AS varchar) FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test3 ORDER BY 2; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- propagation of collation in SQL functions (inlined and non-inlined cases) +-- and plpgsql functions too +CREATE FUNCTION mylt (text, text) RETURNS boolean LANGUAGE sql + AS $$ select $1 < $2 $$; +CREATE FUNCTION mylt_noninline (text, text) RETURNS boolean LANGUAGE sql + AS $$ select $1 < $2 limit 1 $$; +CREATE FUNCTION mylt_plpgsql (text, text) RETURNS boolean LANGUAGE plpgsql + AS $$ begin return $1 < $2; end $$; +SELECT a.b AS a, b.b AS b, a.b < b.b AS lt, + mylt(a.b, b.b), mylt_noninline(a.b, b.b), mylt_plpgsql(a.b, b.b) +FROM collate_test1 a, collate_test1 b +ORDER BY a.b, b.b; + a | b | lt | mylt | mylt_noninline | mylt_plpgsql +-----+-----+----+------+----------------+-------------- + abc | abc | f | f | f | f + abc | ABC | t | t | t | t + abc | äbc | t | t | t | t + abc | bbc | t | t | t | t + ABC | abc | f | f | f | f + ABC | ABC | f | f | f | f + ABC | äbc | t | t | t | t + ABC | bbc | t | t | t | t + äbc | abc | f | f | f | f + äbc | ABC | f | f | f | f + äbc | äbc | f | f | f | f + äbc | bbc | t | t | t | t + bbc | abc | f | f | f | f + bbc | ABC | f | f | f | f + bbc | äbc | f | f | f | f + bbc | bbc | f | f | f | f +(16 rows) + +SELECT a.b AS a, b.b AS b, a.b < b.b COLLATE "C" AS lt, + mylt(a.b, b.b COLLATE "C"), mylt_noninline(a.b, b.b COLLATE "C"), + mylt_plpgsql(a.b, b.b COLLATE "C") +FROM collate_test1 a, collate_test1 b +ORDER BY a.b, b.b; + a | b | lt | mylt | mylt_noninline | mylt_plpgsql 
+-----+-----+----+------+----------------+-------------- + abc | abc | f | f | f | f + abc | ABC | f | f | f | f + abc | äbc | t | t | t | t + abc | bbc | t | t | t | t + ABC | abc | t | t | t | t + ABC | ABC | f | f | f | f + ABC | äbc | t | t | t | t + ABC | bbc | t | t | t | t + äbc | abc | f | f | f | f + äbc | ABC | f | f | f | f + äbc | äbc | f | f | f | f + äbc | bbc | f | f | f | f + bbc | abc | f | f | f | f + bbc | ABC | f | f | f | f + bbc | äbc | t | t | t | t + bbc | bbc | f | f | f | f +(16 rows) + +-- collation override in plpgsql +CREATE FUNCTION mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$ +declare + xx text := x; + yy text := y; +begin + return xx < yy; +end +$$; +SELECT mylt2('a', 'B' collate "en-x-icu") as t, mylt2('a', 'B' collate "C") as f; + t | f +---+--- + t | f +(1 row) + +CREATE OR REPLACE FUNCTION + mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$ +declare + xx text COLLATE "POSIX" := x; + yy text := y; +begin + return xx < yy; +end +$$; +SELECT mylt2('a', 'B') as f; + f +--- + f +(1 row) + +SELECT mylt2('a', 'B' collate "C") as fail; -- conflicting collations +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +CONTEXT: PL/pgSQL function mylt2(text,text) line 6 at RETURN +SELECT mylt2('a', 'B' collate "POSIX") as f; + f +--- + f +(1 row) + +-- polymorphism +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test1)) ORDER BY 1; + unnest +-------- + abc + ABC + äbc + bbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test2)) ORDER BY 1; + unnest +-------- + abc + ABC + bbc + äbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test3)) ORDER BY 1; + unnest +-------- + ABC + abc + bbc + äbc +(4 rows) + +CREATE FUNCTION dup (anyelement) RETURNS anyelement + AS 'select $1' LANGUAGE sql; +SELECT a, dup(b) FROM collate_test1 ORDER BY 2; + a | dup +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, dup(b) FROM collate_test2 ORDER BY 2; + a | dup +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, dup(b) FROM collate_test3 ORDER BY 2; + a | dup +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- indexes +CREATE INDEX collate_test1_idx1 ON collate_test1 (b); +CREATE INDEX collate_test1_idx2 ON collate_test1 (b COLLATE "C"); +CREATE INDEX collate_test1_idx3 ON collate_test1 ((b COLLATE "C")); -- this is different grammatically +CREATE INDEX collate_test1_idx4 ON collate_test1 (((b||'foo') COLLATE "POSIX")); +CREATE INDEX collate_test1_idx5 ON collate_test1 (a COLLATE "C"); -- fail +ERROR: collations are not supported by type integer +CREATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C")); -- fail +ERROR: collations are not supported by type integer +LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C... 
+ ^ +SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") +(4 rows) + +set enable_seqscan = off; +explain (costs off) +select * from collate_test1 where b ilike 'abc'; + QUERY PLAN +------------------------------- + Seq Scan on collate_test1 + Filter: (b ~~* 'abc'::text) +(2 rows) + +select * from collate_test1 where b ilike 'abc'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +explain (costs off) +select * from collate_test1 where b ilike 'ABC'; + QUERY PLAN +------------------------------- + Seq Scan on collate_test1 + Filter: (b ~~* 'ABC'::text) +(2 rows) + +select * from collate_test1 where b ilike 'ABC'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +reset enable_seqscan; +-- schema manipulation commands +CREATE ROLE regress_test_role; +CREATE SCHEMA test_schema; +-- We need to do this this way to cope with varying names for encodings: +SET client_min_messages TO WARNING; +SET icu_validation_level = disabled; +do $$ +BEGIN + EXECUTE 'CREATE COLLATION test0 (provider = icu, locale = ' || + quote_literal((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) || ');'; +END +$$; +CREATE COLLATION test0 FROM "C"; -- fail, duplicate name +ERROR: collation "test0" already exists +do $$ +BEGIN + EXECUTE 'CREATE COLLATION test1 (provider = icu, locale = ' || + quote_literal((SELECT CASE WHEN datlocprovider='i' THEN daticulocale ELSE datcollate END FROM pg_database WHERE datname = current_database())) || ');'; +END +$$; +RESET icu_validation_level; +RESET client_min_messages; +CREATE COLLATION test3 (provider = icu, lc_collate = 'en_US.utf8'); -- fail, needs "locale" +ERROR: parameter "locale" must be specified +SET icu_validation_level = ERROR; +CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); -- fails +ERROR: ICU locale "nonsense-nowhere" has unknown language "nonsense" +HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled". +CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); -- fails +ERROR: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR +RESET icu_validation_level; +CREATE COLLATION testx (provider = icu, locale = '@colStrength=primary;nonsense=yes'); DROP COLLATION testx; +WARNING: could not convert locale name "@colStrength=primary;nonsense=yes" to language tag: U_ILLEGAL_ARGUMENT_ERROR +CREATE COLLATION testx (provider = icu, locale = 'nonsense-nowhere'); DROP COLLATION testx; +WARNING: ICU locale "nonsense-nowhere" has unknown language "nonsense" +HINT: To disable ICU locale validation, set the parameter "icu_validation_level" to "disabled". 
+CREATE COLLATION test4 FROM nonsense; +ERROR: collation "nonsense" for encoding "UTF8" does not exist +CREATE COLLATION test5 FROM test0; +SELECT collname FROM pg_collation WHERE collname LIKE 'test%' ORDER BY 1; + collname +---------- + test0 + test1 + test5 +(3 rows) + +ALTER COLLATION test1 RENAME TO test11; +ALTER COLLATION test0 RENAME TO test11; -- fail +ERROR: collation "test11" already exists in schema "collate_tests" +ALTER COLLATION test1 RENAME TO test22; -- fail +ERROR: collation "test1" for encoding "UTF8" does not exist +ALTER COLLATION test11 OWNER TO regress_test_role; +ALTER COLLATION test11 OWNER TO nonsense; +ERROR: role "nonsense" does not exist +ALTER COLLATION test11 SET SCHEMA test_schema; +COMMENT ON COLLATION test0 IS 'US English'; +SELECT collname, nspname, obj_description(pg_collation.oid, 'pg_collation') + FROM pg_collation JOIN pg_namespace ON (collnamespace = pg_namespace.oid) + WHERE collname LIKE 'test%' + ORDER BY 1; + collname | nspname | obj_description +----------+---------------+----------------- + test0 | collate_tests | US English + test11 | test_schema | + test5 | collate_tests | +(3 rows) + +DROP COLLATION test0, test_schema.test11, test5; +DROP COLLATION test0; -- fail +ERROR: collation "test0" for encoding "UTF8" does not exist +DROP COLLATION IF EXISTS test0; +NOTICE: collation "test0" does not exist, skipping +SELECT collname FROM pg_collation WHERE collname LIKE 'test%'; + collname +---------- +(0 rows) + +DROP SCHEMA test_schema; +DROP ROLE regress_test_role; +-- ALTER +ALTER COLLATION "en-x-icu" REFRESH VERSION; +NOTICE: version has not changed +-- also test for database while we are here +SELECT current_database() AS datname \gset +ALTER DATABASE :"datname" REFRESH COLLATION VERSION; +NOTICE: version has not changed +-- dependencies +CREATE COLLATION test0 FROM "C"; +CREATE TABLE collate_dep_test1 (a int, b text COLLATE test0); +CREATE DOMAIN collate_dep_dom1 AS text COLLATE test0; +CREATE TYPE collate_dep_test2 AS (x int, y text COLLATE test0); +CREATE VIEW collate_dep_test3 AS SELECT text 'foo' COLLATE test0 AS foo; +CREATE TABLE collate_dep_test4t (a int, b text); +CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0); +DROP COLLATION test0 RESTRICT; -- fail +ERROR: cannot drop collation test0 because other objects depend on it +DETAIL: column b of table collate_dep_test1 depends on collation test0 +type collate_dep_dom1 depends on collation test0 +column y of composite type collate_dep_test2 depends on collation test0 +view collate_dep_test3 depends on collation test0 +index collate_dep_test4i depends on collation test0 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+DROP COLLATION test0 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to column b of table collate_dep_test1 +drop cascades to type collate_dep_dom1 +drop cascades to column y of composite type collate_dep_test2 +drop cascades to view collate_dep_test3 +drop cascades to index collate_dep_test4i +\d collate_dep_test1 + Table "collate_tests.collate_dep_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + +\d collate_dep_test2 + Composite type "collate_tests.collate_dep_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + x | integer | | | + +DROP TABLE collate_dep_test1, collate_dep_test4t; +DROP TYPE collate_dep_test2; +-- test range types and collations +create type textrange_c as range(subtype=text, collation="C"); +create type textrange_en_us as range(subtype=text, collation="en-x-icu"); +select textrange_c('A','Z') @> 'b'::text; + ?column? +---------- + f +(1 row) + +select textrange_en_us('A','Z') @> 'b'::text; + ?column? +---------- + t +(1 row) + +drop type textrange_c; +drop type textrange_en_us; +-- standard collations +SELECT * FROM collate_test2 ORDER BY b COLLATE UCS_BASIC; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT * FROM collate_test2 ORDER BY b COLLATE UNICODE; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +-- test ICU collation customization +-- test the attributes handled by icu_set_collation_attributes() +SET client_min_messages=WARNING; +CREATE COLLATION testcoll_ignore_accents (provider = icu, locale = '@colStrength=primary;colCaseLevel=yes'); +RESET client_min_messages; +SELECT 'aaá' > 'AAA' COLLATE "und-x-icu", 'aaá' < 'AAA' COLLATE testcoll_ignore_accents; + ?column? | ?column? +----------+---------- + t | t +(1 row) + +SET client_min_messages=WARNING; +CREATE COLLATION testcoll_backwards (provider = icu, locale = '@colBackwards=yes'); +RESET client_min_messages; +SELECT 'coté' < 'côte' COLLATE "und-x-icu", 'coté' > 'côte' COLLATE testcoll_backwards; + ?column? | ?column? +----------+---------- + t | t +(1 row) + +CREATE COLLATION testcoll_lower_first (provider = icu, locale = '@colCaseFirst=lower'); +NOTICE: using standard form "und-u-kf-lower" for ICU locale "@colCaseFirst=lower" +CREATE COLLATION testcoll_upper_first (provider = icu, locale = '@colCaseFirst=upper'); +NOTICE: using standard form "und-u-kf-upper" for ICU locale "@colCaseFirst=upper" +SELECT 'aaa' < 'AAA' COLLATE testcoll_lower_first, 'aaa' > 'AAA' COLLATE testcoll_upper_first; + ?column? | ?column? +----------+---------- + t | t +(1 row) + +CREATE COLLATION testcoll_shifted (provider = icu, locale = '@colAlternate=shifted'); +NOTICE: using standard form "und-u-ka-shifted" for ICU locale "@colAlternate=shifted" +SELECT 'de-luge' < 'deanza' COLLATE "und-x-icu", 'de-luge' > 'deanza' COLLATE testcoll_shifted; + ?column? | ?column? +----------+---------- + t | t +(1 row) + +SET client_min_messages=WARNING; +CREATE COLLATION testcoll_numeric (provider = icu, locale = '@colNumeric=yes'); +RESET client_min_messages; +SELECT 'A-21' > 'A-123' COLLATE "und-x-icu", 'A-21' < 'A-123' COLLATE testcoll_numeric; + ?column? | ?column? 
+----------+---------- + t | t +(1 row) + +CREATE COLLATION testcoll_error1 (provider = icu, locale = '@colNumeric=lower'); +NOTICE: using standard form "und-u-kn-lower" for ICU locale "@colNumeric=lower" +ERROR: could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR +-- test that attributes not handled by icu_set_collation_attributes() +-- (handled by ucol_open() directly) also work +CREATE COLLATION testcoll_de_phonebook (provider = icu, locale = 'de@collation=phonebook'); +NOTICE: using standard form "de-u-co-phonebk" for ICU locale "de@collation=phonebook" +SELECT 'Goldmann' < 'Götz' COLLATE "de-x-icu", 'Goldmann' > 'Götz' COLLATE testcoll_de_phonebook; + ?column? | ?column? +----------+---------- + t | t +(1 row) + +-- rules +CREATE COLLATION testcoll_rules1 (provider = icu, locale = '', rules = '&a < g'); +NOTICE: using standard form "und" for ICU locale "" +CREATE TABLE test7 (a text); +-- example from https://unicode-org.github.io/icu/userguide/collation/customization/#syntax +INSERT INTO test7 VALUES ('Abernathy'), ('apple'), ('bird'), ('Boston'), ('Graham'), ('green'); +SELECT * FROM test7 ORDER BY a COLLATE "en-x-icu"; + a +----------- + Abernathy + apple + bird + Boston + Graham + green +(6 rows) + +SELECT * FROM test7 ORDER BY a COLLATE testcoll_rules1; + a +----------- + Abernathy + apple + green + bird + Boston + Graham +(6 rows) + +DROP TABLE test7; +CREATE COLLATION testcoll_rulesx (provider = icu, locale = '', rules = '!!wrong!!'); +NOTICE: using standard form "und" for ICU locale "" +ERROR: could not open collator for locale "und" with rules "!!wrong!!": U_INVALID_FORMAT_ERROR +-- nondeterministic collations +CREATE COLLATION ctest_det (provider = icu, locale = '', deterministic = true); +NOTICE: using standard form "und" for ICU locale "" +CREATE COLLATION ctest_nondet (provider = icu, locale = '', deterministic = false); +NOTICE: using standard form "und" for ICU locale "" +CREATE TABLE test6 (a int, b text); +-- same string in different normal forms +INSERT INTO test6 VALUES (1, U&'\00E4bc'); +INSERT INTO test6 VALUES (2, U&'\0061\0308bc'); +SELECT * FROM test6; + a | b +---+----- + 1 | äbc + 2 | äbc +(2 rows) + +SELECT * FROM test6 WHERE b = 'äbc' COLLATE ctest_det; + a | b +---+----- + 1 | äbc +(1 row) + +SELECT * FROM test6 WHERE b = 'äbc' COLLATE ctest_nondet; + a | b +---+----- + 1 | äbc + 2 | äbc +(2 rows) + +-- same with arrays +CREATE TABLE test6a (a int, b text[]); +INSERT INTO test6a VALUES (1, ARRAY[U&'\00E4bc']); +INSERT INTO test6a VALUES (2, ARRAY[U&'\0061\0308bc']); +SELECT * FROM test6a; + a | b +---+------- + 1 | {äbc} + 2 | {äbc} +(2 rows) + +SELECT * FROM test6a WHERE b = ARRAY['äbc'] COLLATE ctest_det; + a | b +---+------- + 1 | {äbc} +(1 row) + +SELECT * FROM test6a WHERE b = ARRAY['äbc'] COLLATE ctest_nondet; + a | b +---+------- + 1 | {äbc} + 2 | {äbc} +(2 rows) + +CREATE COLLATION case_sensitive (provider = icu, locale = ''); +NOTICE: using standard form "und" for ICU locale "" +CREATE COLLATION case_insensitive (provider = icu, locale = '@colStrength=secondary', deterministic = false); +NOTICE: using standard form "und-u-ks-level2" for ICU locale "@colStrength=secondary" +SELECT 'abc' <= 'ABC' COLLATE case_sensitive, 'abc' >= 'ABC' COLLATE case_sensitive; + ?column? | ?column? +----------+---------- + t | f +(1 row) + +SELECT 'abc' <= 'ABC' COLLATE case_insensitive, 'abc' >= 'ABC' COLLATE case_insensitive; + ?column? | ?column? 
+----------+---------- + t | t +(1 row) + +-- test language tags +CREATE COLLATION lt_insensitive (provider = icu, locale = 'en-u-ks-level1', deterministic = false); +SELECT 'aBcD' COLLATE lt_insensitive = 'AbCd' COLLATE lt_insensitive; + ?column? +---------- + t +(1 row) + +CREATE COLLATION lt_upperfirst (provider = icu, locale = 'und-u-kf-upper'); +SELECT 'Z' COLLATE lt_upperfirst < 'z' COLLATE lt_upperfirst; + ?column? +---------- + t +(1 row) + +CREATE TABLE test1cs (x text COLLATE case_sensitive); +CREATE TABLE test2cs (x text COLLATE case_sensitive); +CREATE TABLE test3cs (x text COLLATE case_sensitive); +INSERT INTO test1cs VALUES ('abc'), ('def'), ('ghi'); +INSERT INTO test2cs VALUES ('ABC'), ('ghi'); +INSERT INTO test3cs VALUES ('abc'), ('ABC'), ('def'), ('ghi'); +SELECT x FROM test3cs WHERE x = 'abc'; + x +----- + abc +(1 row) + +SELECT x FROM test3cs WHERE x <> 'abc'; + x +----- + ABC + def + ghi +(3 rows) + +SELECT x FROM test3cs WHERE x LIKE 'a%'; + x +----- + abc +(1 row) + +SELECT x FROM test3cs WHERE x ILIKE 'a%'; + x +----- + abc + ABC +(2 rows) + +SELECT x FROM test3cs WHERE x SIMILAR TO 'a%'; + x +----- + abc +(1 row) + +SELECT x FROM test3cs WHERE x ~ 'a'; + x +----- + abc +(1 row) + +SELECT x FROM test1cs UNION SELECT x FROM test2cs ORDER BY x; + x +----- + abc + ABC + def + ghi +(4 rows) + +SELECT x FROM test2cs UNION SELECT x FROM test1cs ORDER BY x; + x +----- + abc + ABC + def + ghi +(4 rows) + +SELECT x FROM test1cs INTERSECT SELECT x FROM test2cs; + x +----- + ghi +(1 row) + +SELECT x FROM test2cs INTERSECT SELECT x FROM test1cs; + x +----- + ghi +(1 row) + +SELECT x FROM test1cs EXCEPT SELECT x FROM test2cs; + x +----- + abc + def +(2 rows) + +SELECT x FROM test2cs EXCEPT SELECT x FROM test1cs; + x +----- + ABC +(1 row) + +SELECT DISTINCT x FROM test3cs ORDER BY x; + x +----- + abc + ABC + def + ghi +(4 rows) + +SELECT count(DISTINCT x) FROM test3cs; + count +------- + 4 +(1 row) + +SELECT x, count(*) FROM test3cs GROUP BY x ORDER BY x; + x | count +-----+------- + abc | 1 + ABC | 1 + def | 1 + ghi | 1 +(4 rows) + +SELECT x, row_number() OVER (ORDER BY x), rank() OVER (ORDER BY x) FROM test3cs ORDER BY x; + x | row_number | rank +-----+------------+------ + abc | 1 | 1 + ABC | 2 | 2 + def | 3 | 3 + ghi | 4 | 4 +(4 rows) + +CREATE UNIQUE INDEX ON test1cs (x); -- ok +INSERT INTO test1cs VALUES ('ABC'); -- ok +CREATE UNIQUE INDEX ON test3cs (x); -- ok +SELECT string_to_array('ABC,DEF,GHI' COLLATE case_sensitive, ',', 'abc'); + string_to_array +----------------- + {ABC,DEF,GHI} +(1 row) + +SELECT string_to_array('ABCDEFGHI' COLLATE case_sensitive, NULL, 'b'); + string_to_array +--------------------- + {A,B,C,D,E,F,G,H,I} +(1 row) + +CREATE TABLE test1ci (x text COLLATE case_insensitive); +CREATE TABLE test2ci (x text COLLATE case_insensitive); +CREATE TABLE test3ci (x text COLLATE case_insensitive); +CREATE INDEX ON test3ci (x text_pattern_ops); -- error +ERROR: nondeterministic collations are not supported for operator class "text_pattern_ops" +INSERT INTO test1ci VALUES ('abc'), ('def'), ('ghi'); +INSERT INTO test2ci VALUES ('ABC'), ('ghi'); +INSERT INTO test3ci VALUES ('abc'), ('ABC'), ('def'), ('ghi'); +SELECT x FROM test3ci WHERE x = 'abc'; + x +----- + abc + ABC +(2 rows) + +SELECT x FROM test3ci WHERE x <> 'abc'; + x +----- + def + ghi +(2 rows) + +SELECT x FROM test3ci WHERE x LIKE 'a%'; +ERROR: nondeterministic collations are not supported for LIKE +SELECT x FROM test3ci WHERE x ILIKE 'a%'; +ERROR: nondeterministic collations are not supported for ILIKE 
+SELECT x FROM test3ci WHERE x SIMILAR TO 'a%'; +ERROR: nondeterministic collations are not supported for regular expressions +SELECT x FROM test3ci WHERE x ~ 'a'; +ERROR: nondeterministic collations are not supported for regular expressions +SELECT x FROM test1ci UNION SELECT x FROM test2ci ORDER BY x; + x +----- + abc + def + ghi +(3 rows) + +SELECT x FROM test2ci UNION SELECT x FROM test1ci ORDER BY x; + x +----- + ABC + def + ghi +(3 rows) + +SELECT x FROM test1ci INTERSECT SELECT x FROM test2ci ORDER BY x; + x +----- + abc + ghi +(2 rows) + +SELECT x FROM test2ci INTERSECT SELECT x FROM test1ci ORDER BY x; + x +----- + ABC + ghi +(2 rows) + +SELECT x FROM test1ci EXCEPT SELECT x FROM test2ci; + x +----- + def +(1 row) + +SELECT x FROM test2ci EXCEPT SELECT x FROM test1ci; + x +--- +(0 rows) + +SELECT DISTINCT x FROM test3ci ORDER BY x; + x +----- + abc + def + ghi +(3 rows) + +SELECT count(DISTINCT x) FROM test3ci; + count +------- + 3 +(1 row) + +SELECT x, count(*) FROM test3ci GROUP BY x ORDER BY x; + x | count +-----+------- + abc | 2 + def | 1 + ghi | 1 +(3 rows) + +SELECT x, row_number() OVER (ORDER BY x), rank() OVER (ORDER BY x) FROM test3ci ORDER BY x; + x | row_number | rank +-----+------------+------ + abc | 1 | 1 + ABC | 2 | 1 + def | 3 | 3 + ghi | 4 | 4 +(4 rows) + +CREATE UNIQUE INDEX ON test1ci (x); -- ok +INSERT INTO test1ci VALUES ('ABC'); -- error +ERROR: duplicate key value violates unique constraint "test1ci_x_idx" +DETAIL: Key (x)=(ABC) already exists. +CREATE UNIQUE INDEX ON test3ci (x); -- error +ERROR: could not create unique index "test3ci_x_idx" +DETAIL: Key (x)=(abc) is duplicated. +SELECT string_to_array('ABC,DEF,GHI' COLLATE case_insensitive, ',', 'abc'); +ERROR: nondeterministic collations are not supported for substring searches +SELECT string_to_array('ABCDEFGHI' COLLATE case_insensitive, NULL, 'b'); + string_to_array +------------------------ + {A,NULL,C,D,E,F,G,H,I} +(1 row) + +-- bpchar +CREATE TABLE test1bpci (x char(3) COLLATE case_insensitive); +CREATE TABLE test2bpci (x char(3) COLLATE case_insensitive); +CREATE TABLE test3bpci (x char(3) COLLATE case_insensitive); +CREATE INDEX ON test3bpci (x bpchar_pattern_ops); -- error +ERROR: nondeterministic collations are not supported for operator class "bpchar_pattern_ops" +INSERT INTO test1bpci VALUES ('abc'), ('def'), ('ghi'); +INSERT INTO test2bpci VALUES ('ABC'), ('ghi'); +INSERT INTO test3bpci VALUES ('abc'), ('ABC'), ('def'), ('ghi'); +SELECT x FROM test3bpci WHERE x = 'abc'; + x +----- + abc + ABC +(2 rows) + +SELECT x FROM test3bpci WHERE x <> 'abc'; + x +----- + def + ghi +(2 rows) + +SELECT x FROM test3bpci WHERE x LIKE 'a%'; +ERROR: nondeterministic collations are not supported for LIKE +SELECT x FROM test3bpci WHERE x ILIKE 'a%'; +ERROR: nondeterministic collations are not supported for ILIKE +SELECT x FROM test3bpci WHERE x SIMILAR TO 'a%'; +ERROR: nondeterministic collations are not supported for regular expressions +SELECT x FROM test3bpci WHERE x ~ 'a'; +ERROR: nondeterministic collations are not supported for regular expressions +SELECT x FROM test1bpci UNION SELECT x FROM test2bpci ORDER BY x; + x +----- + abc + def + ghi +(3 rows) + +SELECT x FROM test2bpci UNION SELECT x FROM test1bpci ORDER BY x; + x +----- + ABC + def + ghi +(3 rows) + +SELECT x FROM test1bpci INTERSECT SELECT x FROM test2bpci ORDER BY x; + x +----- + abc + ghi +(2 rows) + +SELECT x FROM test2bpci INTERSECT SELECT x FROM test1bpci ORDER BY x; + x +----- + ABC + ghi +(2 rows) + +SELECT x FROM test1bpci EXCEPT SELECT 
x FROM test2bpci;
+ x
+-----
+ def
+(1 row)
+
+SELECT x FROM test2bpci EXCEPT SELECT x FROM test1bpci;
+ x
+---
+(0 rows)
+
+SELECT DISTINCT x FROM test3bpci ORDER BY x;
+ x
+-----
+ abc
+ def
+ ghi
+(3 rows)
+
+SELECT count(DISTINCT x) FROM test3bpci;
+ count
+-------
+ 3
+(1 row)
+
+SELECT x, count(*) FROM test3bpci GROUP BY x ORDER BY x;
+ x | count
+-----+-------
+ abc | 2
+ def | 1
+ ghi | 1
+(3 rows)
+
+SELECT x, row_number() OVER (ORDER BY x), rank() OVER (ORDER BY x) FROM test3bpci ORDER BY x;
+ x | row_number | rank
+-----+------------+------
+ abc | 1 | 1
+ ABC | 2 | 1
+ def | 3 | 3
+ ghi | 4 | 4
+(4 rows)
+
+CREATE UNIQUE INDEX ON test1bpci (x); -- ok
+INSERT INTO test1bpci VALUES ('ABC'); -- error
+ERROR: duplicate key value violates unique constraint "test1bpci_x_idx"
+DETAIL: Key (x)=(ABC) already exists.
+CREATE UNIQUE INDEX ON test3bpci (x); -- error
+ERROR: could not create unique index "test3bpci_x_idx"
+DETAIL: Key (x)=(abc) is duplicated.
+SELECT string_to_array('ABC,DEF,GHI'::char(11) COLLATE case_insensitive, ',', 'abc');
+ERROR: nondeterministic collations are not supported for substring searches
+SELECT string_to_array('ABCDEFGHI'::char(9) COLLATE case_insensitive, NULL, 'b');
+ string_to_array
+------------------------
+ {A,NULL,C,D,E,F,G,H,I}
+(1 row)
+
+-- This tests the issue described in match_pattern_prefix(). In the
+-- absence of that check, the case_insensitive tests below would
+-- return no rows where they should logically return one.
+CREATE TABLE test4c (x text COLLATE "C");
+INSERT INTO test4c VALUES ('abc');
+CREATE INDEX ON test4c (x);
+SET enable_seqscan = off;
+SELECT x FROM test4c WHERE x LIKE 'ABC' COLLATE case_sensitive; -- ok, no rows
+ x
+---
+(0 rows)
+
+SELECT x FROM test4c WHERE x LIKE 'ABC%' COLLATE case_sensitive; -- ok, no rows
+ x
+---
+(0 rows)
+
+SELECT x FROM test4c WHERE x LIKE 'ABC' COLLATE case_insensitive; -- error
+ERROR: nondeterministic collations are not supported for LIKE
+SELECT x FROM test4c WHERE x LIKE 'ABC%' COLLATE case_insensitive; -- error
+ERROR: nondeterministic collations are not supported for LIKE
+RESET enable_seqscan;
+-- Unicode special case: different variants of Greek lower case sigma.
+-- A naive implementation like citext that just does lower(x) =
+-- lower(y) will do the wrong thing here, because lower('Σ') is 'σ'
+-- but upper('ς') is 'Σ'.
+SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_sensitive;
+ ?column?
+----------
+ f
+(1 row)
+
+SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_insensitive;
+ ?column?
+----------
+ t
+(1 row)
+
+-- name vs.
text comparison operators +SELECT relname FROM pg_class WHERE relname = 'PG_CLASS'::text COLLATE case_insensitive; + relname +---------- + pg_class +(1 row) + +SELECT relname FROM pg_class WHERE 'PG_CLASS'::text = relname COLLATE case_insensitive; + relname +---------- + pg_class +(1 row) + +SELECT typname FROM pg_type WHERE typname LIKE 'int_' AND typname <> 'INT2'::text + COLLATE case_insensitive ORDER BY typname; + typname +--------- + int4 + int8 +(2 rows) + +SELECT typname FROM pg_type WHERE typname LIKE 'int_' AND 'INT2'::text <> typname + COLLATE case_insensitive ORDER BY typname; + typname +--------- + int4 + int8 +(2 rows) + +-- test case adapted from subselect.sql +CREATE TEMP TABLE outer_text (f1 text COLLATE case_insensitive, f2 text); +INSERT INTO outer_text VALUES ('a', 'a'); +INSERT INTO outer_text VALUES ('b', 'a'); +INSERT INTO outer_text VALUES ('A', NULL); +INSERT INTO outer_text VALUES ('B', NULL); +CREATE TEMP TABLE inner_text (c1 text COLLATE case_insensitive, c2 text); +INSERT INTO inner_text VALUES ('a', NULL); +SELECT * FROM outer_text WHERE (f1, f2) NOT IN (SELECT * FROM inner_text); + f1 | f2 +----+---- + b | a + B | +(2 rows) + +-- accents +SET client_min_messages=WARNING; +CREATE COLLATION ignore_accents (provider = icu, locale = '@colStrength=primary;colCaseLevel=yes', deterministic = false); +RESET client_min_messages; +CREATE TABLE test4 (a int, b text); +INSERT INTO test4 VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté'); +SELECT * FROM test4 WHERE b = 'cote'; + a | b +---+------ + 1 | cote +(1 row) + +SELECT * FROM test4 WHERE b = 'cote' COLLATE ignore_accents; + a | b +---+------ + 1 | cote + 2 | côte + 3 | coté + 4 | côté +(4 rows) + +SELECT * FROM test4 WHERE b = 'Cote' COLLATE ignore_accents; -- still case-sensitive + a | b +---+--- +(0 rows) + +SELECT * FROM test4 WHERE b = 'Cote' COLLATE case_insensitive; + a | b +---+------ + 1 | cote +(1 row) + +-- foreign keys (should use collation of primary key) +-- PK is case-sensitive, FK is case-insensitive +CREATE TABLE test10pk (x text COLLATE case_sensitive PRIMARY KEY); +INSERT INTO test10pk VALUES ('abc'), ('def'), ('ghi'); +CREATE TABLE test10fk (x text COLLATE case_insensitive REFERENCES test10pk (x) ON UPDATE CASCADE ON DELETE CASCADE); +INSERT INTO test10fk VALUES ('abc'); -- ok +INSERT INTO test10fk VALUES ('ABC'); -- error +ERROR: insert or update on table "test10fk" violates foreign key constraint "test10fk_x_fkey" +DETAIL: Key (x)=(ABC) is not present in table "test10pk". +INSERT INTO test10fk VALUES ('xyz'); -- error +ERROR: insert or update on table "test10fk" violates foreign key constraint "test10fk_x_fkey" +DETAIL: Key (x)=(xyz) is not present in table "test10pk". +SELECT * FROM test10pk; + x +----- + abc + def + ghi +(3 rows) + +SELECT * FROM test10fk; + x +----- + abc +(1 row) + +-- restrict update even though the values are "equal" in the FK table +UPDATE test10fk SET x = 'ABC' WHERE x = 'abc'; -- error +ERROR: insert or update on table "test10fk" violates foreign key constraint "test10fk_x_fkey" +DETAIL: Key (x)=(ABC) is not present in table "test10pk". 
+SELECT * FROM test10fk; + x +----- + abc +(1 row) + +DELETE FROM test10pk WHERE x = 'abc'; +SELECT * FROM test10pk; + x +----- + def + ghi +(2 rows) + +SELECT * FROM test10fk; + x +--- +(0 rows) + +-- PK is case-insensitive, FK is case-sensitive +CREATE TABLE test11pk (x text COLLATE case_insensitive PRIMARY KEY); +INSERT INTO test11pk VALUES ('abc'), ('def'), ('ghi'); +CREATE TABLE test11fk (x text COLLATE case_sensitive REFERENCES test11pk (x) ON UPDATE CASCADE ON DELETE CASCADE); +INSERT INTO test11fk VALUES ('abc'); -- ok +INSERT INTO test11fk VALUES ('ABC'); -- ok +INSERT INTO test11fk VALUES ('xyz'); -- error +ERROR: insert or update on table "test11fk" violates foreign key constraint "test11fk_x_fkey" +DETAIL: Key (x)=(xyz) is not present in table "test11pk". +SELECT * FROM test11pk; + x +----- + abc + def + ghi +(3 rows) + +SELECT * FROM test11fk; + x +----- + abc + ABC +(2 rows) + +-- cascade update even though the values are "equal" in the PK table +UPDATE test11pk SET x = 'ABC' WHERE x = 'abc'; +SELECT * FROM test11fk; + x +----- + ABC + ABC +(2 rows) + +DELETE FROM test11pk WHERE x = 'abc'; +SELECT * FROM test11pk; + x +----- + def + ghi +(2 rows) + +SELECT * FROM test11fk; + x +--- +(0 rows) + +-- partitioning +CREATE TABLE test20 (a int, b text COLLATE case_insensitive) PARTITION BY LIST (b); +CREATE TABLE test20_1 PARTITION OF test20 FOR VALUES IN ('abc'); +INSERT INTO test20 VALUES (1, 'abc'); +INSERT INTO test20 VALUES (2, 'ABC'); +SELECT * FROM test20_1; + a | b +---+----- + 1 | abc + 2 | ABC +(2 rows) + +CREATE TABLE test21 (a int, b text COLLATE case_insensitive) PARTITION BY RANGE (b); +CREATE TABLE test21_1 PARTITION OF test21 FOR VALUES FROM ('ABC') TO ('DEF'); +INSERT INTO test21 VALUES (1, 'abc'); +INSERT INTO test21 VALUES (2, 'ABC'); +SELECT * FROM test21_1; + a | b +---+----- + 1 | abc + 2 | ABC +(2 rows) + +CREATE TABLE test22 (a int, b text COLLATE case_sensitive) PARTITION BY HASH (b); +CREATE TABLE test22_0 PARTITION OF test22 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE test22_1 PARTITION OF test22 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO test22 VALUES (1, 'def'); +INSERT INTO test22 VALUES (2, 'DEF'); +-- they end up in different partitions +SELECT (SELECT count(*) FROM test22_0) = (SELECT count(*) FROM test22_1); + ?column? +---------- + t +(1 row) + +-- same with arrays +CREATE TABLE test22a (a int, b text[] COLLATE case_sensitive) PARTITION BY HASH (b); +CREATE TABLE test22a_0 PARTITION OF test22a FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE test22a_1 PARTITION OF test22a FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO test22a VALUES (1, ARRAY['def']); +INSERT INTO test22a VALUES (2, ARRAY['DEF']); +-- they end up in different partitions +SELECT (SELECT count(*) FROM test22a_0) = (SELECT count(*) FROM test22a_1); + ?column? +---------- + t +(1 row) + +CREATE TABLE test23 (a int, b text COLLATE case_insensitive) PARTITION BY HASH (b); +CREATE TABLE test23_0 PARTITION OF test23 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE test23_1 PARTITION OF test23 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO test23 VALUES (1, 'def'); +INSERT INTO test23 VALUES (2, 'DEF'); +-- they end up in the same partition (but it's platform-dependent which one) +SELECT (SELECT count(*) FROM test23_0) <> (SELECT count(*) FROM test23_1); + ?column? 
+---------- + t +(1 row) + +-- same with arrays +CREATE TABLE test23a (a int, b text[] COLLATE case_insensitive) PARTITION BY HASH (b); +CREATE TABLE test23a_0 PARTITION OF test23a FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE test23a_1 PARTITION OF test23a FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO test23a VALUES (1, ARRAY['def']); +INSERT INTO test23a VALUES (2, ARRAY['DEF']); +-- they end up in the same partition (but it's platform-dependent which one) +SELECT (SELECT count(*) FROM test23a_0) <> (SELECT count(*) FROM test23a_1); + ?column? +---------- + t +(1 row) + +CREATE TABLE test30 (a int, b char(3) COLLATE case_insensitive) PARTITION BY LIST (b); +CREATE TABLE test30_1 PARTITION OF test30 FOR VALUES IN ('abc'); +INSERT INTO test30 VALUES (1, 'abc'); +INSERT INTO test30 VALUES (2, 'ABC'); +SELECT * FROM test30_1; + a | b +---+----- + 1 | abc + 2 | ABC +(2 rows) + +CREATE TABLE test31 (a int, b char(3) COLLATE case_insensitive) PARTITION BY RANGE (b); +CREATE TABLE test31_1 PARTITION OF test31 FOR VALUES FROM ('ABC') TO ('DEF'); +INSERT INTO test31 VALUES (1, 'abc'); +INSERT INTO test31 VALUES (2, 'ABC'); +SELECT * FROM test31_1; + a | b +---+----- + 1 | abc + 2 | ABC +(2 rows) + +CREATE TABLE test32 (a int, b char(3) COLLATE case_sensitive) PARTITION BY HASH (b); +CREATE TABLE test32_0 PARTITION OF test32 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE test32_1 PARTITION OF test32 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO test32 VALUES (1, 'def'); +INSERT INTO test32 VALUES (2, 'DEF'); +-- they end up in different partitions +SELECT (SELECT count(*) FROM test32_0) = (SELECT count(*) FROM test32_1); + ?column? +---------- + t +(1 row) + +CREATE TABLE test33 (a int, b char(3) COLLATE case_insensitive) PARTITION BY HASH (b); +CREATE TABLE test33_0 PARTITION OF test33 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE test33_1 PARTITION OF test33 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO test33 VALUES (1, 'def'); +INSERT INTO test33 VALUES (2, 'DEF'); +-- they end up in the same partition (but it's platform-dependent which one) +SELECT (SELECT count(*) FROM test33_0) <> (SELECT count(*) FROM test33_1); + ?column? +---------- + t +(1 row) + +-- cleanup +RESET search_path; +SET client_min_messages TO warning; +DROP SCHEMA collate_tests CASCADE; +RESET client_min_messages; +-- leave a collation for pg_upgrade test +CREATE COLLATION coll_icu_upgrade FROM "und-x-icu"; diff --git a/src/test/regress/expected/collate.icu.utf8_1.out b/src/test/regress/expected/collate.icu.utf8_1.out new file mode 100644 index 0000000..25c99c4 --- /dev/null +++ b/src/test/regress/expected/collate.icu.utf8_1.out @@ -0,0 +1,9 @@ +/* + * This test is for ICU collations. + */ +/* skip test if not UTF8 server encoding or no ICU collations installed */ +SELECT getdatabaseencoding() <> 'UTF8' OR + (SELECT count(*) FROM pg_collation WHERE collprovider = 'i' AND collname <> 'unicode') = 0 + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/collate.linux.utf8.out b/src/test/regress/expected/collate.linux.utf8.out new file mode 100644 index 0000000..01664f7 --- /dev/null +++ b/src/test/regress/expected/collate.linux.utf8.out @@ -0,0 +1,1174 @@ +/* + * This test is for Linux/glibc systems and assumes that a full set of + * locales is installed. It must be run in a database with UTF-8 encoding, + * because other encodings don't support all the characters used. 
+ */ +SELECT getdatabaseencoding() <> 'UTF8' OR + (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR + version() !~ 'linux-gnu' + AS skip_test \gset +\if :skip_test +\quit +\endif +SET client_encoding TO UTF8; +CREATE SCHEMA collate_tests; +SET search_path = collate_tests; +CREATE TABLE collate_test1 ( + a int, + b text COLLATE "en_US" NOT NULL +); +\d collate_test1 + Table "collate_tests.collate_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | en_US | not null | + +CREATE TABLE collate_test_fail ( + a int, + b text COLLATE "ja_JP.eucjp" +); +ERROR: collation "ja_JP.eucjp" for encoding "UTF8" does not exist +LINE 3: b text COLLATE "ja_JP.eucjp" + ^ +CREATE TABLE collate_test_fail ( + a int, + b text COLLATE "foo" +); +ERROR: collation "foo" for encoding "UTF8" does not exist +LINE 3: b text COLLATE "foo" + ^ +CREATE TABLE collate_test_fail ( + a int COLLATE "en_US", + b text +); +ERROR: collations are not supported by type integer +LINE 2: a int COLLATE "en_US", + ^ +CREATE TABLE collate_test_like ( + LIKE collate_test1 +); +\d collate_test_like + Table "collate_tests.collate_test_like" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | en_US | not null | + +CREATE TABLE collate_test2 ( + a int, + b text COLLATE "sv_SE" +); +CREATE TABLE collate_test3 ( + a int, + b text COLLATE "C" +); +INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC'); +INSERT INTO collate_test2 SELECT * FROM collate_test1; +INSERT INTO collate_test3 SELECT * FROM collate_test1; +SELECT * FROM collate_test1 WHERE b >= 'bbc'; + a | b +---+----- + 3 | bbc +(1 row) + +SELECT * FROM collate_test2 WHERE b >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test3 WHERE b >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test3 WHERE b >= 'BBC'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b >= 'bbc' COLLATE "C"; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "C"; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "en_US"; +ERROR: collation mismatch between explicit collations "C" and "en_US" +LINE 1: ...* FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "e... 
+ ^
+CREATE DOMAIN testdomain_sv AS text COLLATE "sv_SE";
+CREATE DOMAIN testdomain_i AS int COLLATE "sv_SE"; -- fails
+ERROR: collations are not supported by type integer
+CREATE TABLE collate_test4 (
+ a int,
+ b testdomain_sv
+);
+INSERT INTO collate_test4 SELECT * FROM collate_test1;
+SELECT a, b FROM collate_test4 ORDER BY b;
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+ 3 | bbc
+ 2 | äbc
+(4 rows)
+
+CREATE TABLE collate_test5 (
+ a int,
+ b testdomain_sv COLLATE "en_US"
+);
+INSERT INTO collate_test5 SELECT * FROM collate_test1;
+SELECT a, b FROM collate_test5 ORDER BY b;
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+ 2 | äbc
+ 3 | bbc
+(4 rows)
+
+SELECT a, b FROM collate_test1 ORDER BY b;
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+ 2 | äbc
+ 3 | bbc
+(4 rows)
+
+SELECT a, b FROM collate_test2 ORDER BY b;
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+ 3 | bbc
+ 2 | äbc
+(4 rows)
+
+SELECT a, b FROM collate_test3 ORDER BY b;
+ a | b
+---+-----
+ 4 | ABC
+ 1 | abc
+ 3 | bbc
+ 2 | äbc
+(4 rows)
+
+SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C";
+ a | b
+---+-----
+ 4 | ABC
+ 1 | abc
+ 3 | bbc
+ 2 | äbc
+(4 rows)
+
+-- star expansion
+SELECT * FROM collate_test1 ORDER BY b;
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+ 2 | äbc
+ 3 | bbc
+(4 rows)
+
+SELECT * FROM collate_test2 ORDER BY b;
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+ 3 | bbc
+ 2 | äbc
+(4 rows)
+
+SELECT * FROM collate_test3 ORDER BY b;
+ a | b
+---+-----
+ 4 | ABC
+ 1 | abc
+ 3 | bbc
+ 2 | äbc
+(4 rows)
+
+-- constant expression folding
+SELECT 'bbc' COLLATE "en_US" > 'äbc' COLLATE "en_US" AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'bbc' COLLATE "sv_SE" > 'äbc' COLLATE "sv_SE" AS "false";
+ false
+-------
+ f
+(1 row)
+
+-- upper/lower
+CREATE TABLE collate_test10 (
+ a int,
+ x text COLLATE "en_US",
+ y text COLLATE "tr_TR"
+);
+INSERT INTO collate_test10 VALUES (1, 'hij', 'hij'), (2, 'HIJ', 'HIJ');
+SELECT a, lower(x), lower(y), upper(x), upper(y), initcap(x), initcap(y) FROM collate_test10;
+ a | lower | lower | upper | upper | initcap | initcap
+---+-------+-------+-------+-------+---------+---------
+ 1 | hij | hij | HIJ | HİJ | Hij | Hij
+ 2 | hij | hıj | HIJ | HIJ | Hij | Hıj
+(2 rows)
+
+SELECT a, lower(x COLLATE "C"), lower(y COLLATE "C") FROM collate_test10;
+ a | lower | lower
+---+-------+-------
+ 1 | hij | hij
+ 2 | hij | hij
+(2 rows)
+
+SELECT a, x, y FROM collate_test10 ORDER BY lower(y), a;
+ a | x | y
+---+-----+-----
+ 2 | HIJ | HIJ
+ 1 | hij | hij
+(2 rows)
+
+-- LIKE/ILIKE
+SELECT * FROM collate_test1 WHERE b LIKE 'abc';
+ a | b
+---+-----
+ 1 | abc
+(1 row)
+
+SELECT * FROM collate_test1 WHERE b LIKE 'abc%';
+ a | b
+---+-----
+ 1 | abc
+(1 row)
+
+SELECT * FROM collate_test1 WHERE b LIKE '%bc%';
+ a | b
+---+-----
+ 1 | abc
+ 2 | äbc
+ 3 | bbc
+(3 rows)
+
+SELECT * FROM collate_test1 WHERE b ILIKE 'abc';
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+(2 rows)
+
+SELECT * FROM collate_test1 WHERE b ILIKE 'abc%';
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+(2 rows)
+
+SELECT * FROM collate_test1 WHERE b ILIKE '%bc%';
+ a | b
+---+-----
+ 1 | abc
+ 2 | äbc
+ 3 | bbc
+ 4 | ABC
+(4 rows)
+
+SELECT 'Türkiye' COLLATE "en_US" ILIKE '%KI%' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'Türkiye' COLLATE "tr_TR" ILIKE '%KI%' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'bıt' ILIKE 'BIT' COLLATE "en_US" AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'bıt' ILIKE 'BIT' COLLATE "tr_TR" AS "true";
+ true
+------
+ t
+(1 row)
+
+-- The following actually exercises the selectivity estimation for ILIKE.
+SELECT relname FROM pg_class WHERE relname ILIKE 'abc%';
+ relname
+---------
+(0 rows)
+
+-- regular expressions
+SELECT * FROM collate_test1 WHERE b ~ '^abc$';
+ a | b
+---+-----
+ 1 | abc
+(1 row)
+
+SELECT * FROM collate_test1 WHERE b ~ '^abc';
+ a | b
+---+-----
+ 1 | abc
+(1 row)
+
+SELECT * FROM collate_test1 WHERE b ~ 'bc';
+ a | b
+---+-----
+ 1 | abc
+ 2 | äbc
+ 3 | bbc
+(3 rows)
+
+SELECT * FROM collate_test1 WHERE b ~* '^abc$';
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+(2 rows)
+
+SELECT * FROM collate_test1 WHERE b ~* '^abc';
+ a | b
+---+-----
+ 1 | abc
+ 4 | ABC
+(2 rows)
+
+SELECT * FROM collate_test1 WHERE b ~* 'bc';
+ a | b
+---+-----
+ 1 | abc
+ 2 | äbc
+ 3 | bbc
+ 4 | ABC
+(4 rows)
+
+CREATE TABLE collate_test6 (
+ a int,
+ b text COLLATE "en_US"
+);
+INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'),
+ (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '),
+ (9, 'äbç'), (10, 'ÄBÇ');
+SELECT b,
+ b ~ '^[[:alpha:]]+$' AS is_alpha,
+ b ~ '^[[:upper:]]+$' AS is_upper,
+ b ~ '^[[:lower:]]+$' AS is_lower,
+ b ~ '^[[:digit:]]+$' AS is_digit,
+ b ~ '^[[:alnum:]]+$' AS is_alnum,
+ b ~ '^[[:graph:]]+$' AS is_graph,
+ b ~ '^[[:print:]]+$' AS is_print,
+ b ~ '^[[:punct:]]+$' AS is_punct,
+ b ~ '^[[:space:]]+$' AS is_space
+FROM collate_test6;
+ b | is_alpha | is_upper | is_lower | is_digit | is_alnum | is_graph | is_print | is_punct | is_space
+-----+----------+----------+----------+----------+----------+----------+----------+----------+----------
+ abc | t | f | t | f | t | t | t | f | f
+ ABC | t | t | f | f | t | t | t | f | f
+ 123 | f | f | f | t | t | t | t | f | f
+ ab1 | f | f | f | f | t | t | t | f | f
+ a1! | f | f | f | f | f | t | t | f | f
+ a c | f | f | f | f | f | f | t | f | f
+ !.; | f | f | f | f | f | t | t | t | f
+ | f | f | f | f | f | f | t | f | t
+ äbç | t | f | t | f | t | t | t | f | f
+ ÄBÇ | t | t | f | f | t | t | t | f | f
+(10 rows)
+
+SELECT 'Türkiye' COLLATE "en_US" ~* 'KI' AS "true";
+ true
+------
+ t
+(1 row)
+
+SELECT 'Türkiye' COLLATE "tr_TR" ~* 'KI' AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'bıt' ~* 'BIT' COLLATE "en_US" AS "false";
+ false
+-------
+ f
+(1 row)
+
+SELECT 'bıt' ~* 'BIT' COLLATE "tr_TR" AS "true";
+ true
+------
+ t
+(1 row)
+
+-- The following actually exercises the selectivity estimation for ~*.
+SELECT relname FROM pg_class WHERE relname ~* '^abc';
+ relname
+---------
+(0 rows)
+
+-- to_char
+SET lc_time TO 'tr_TR';
+SELECT to_char(date '2010-02-01', 'DD TMMON YYYY');
+ to_char
+-------------
+ 01 ŞUB 2010
+(1 row)
+
+SELECT to_char(date '2010-02-01', 'DD TMMON YYYY' COLLATE "tr_TR");
+ to_char
+-------------
+ 01 ŞUB 2010
+(1 row)
+
+SELECT to_char(date '2010-04-01', 'DD TMMON YYYY');
+ to_char
+-------------
+ 01 NIS 2010
+(1 row)
+
+SELECT to_char(date '2010-04-01', 'DD TMMON YYYY' COLLATE "tr_TR");
+ to_char
+-------------
+ 01 NİS 2010
+(1 row)
+
+-- to_date
+SELECT to_date('01 ŞUB 2010', 'DD TMMON YYYY');
+ to_date
+------------
+ 02-01-2010
+(1 row)
+
+SELECT to_date('01 Şub 2010', 'DD TMMON YYYY');
+ to_date
+------------
+ 02-01-2010
+(1 row)
+
+SELECT to_date('1234567890ab 2010', 'TMMONTH YYYY'); -- fail
+ERROR: invalid value "1234567890ab" for "MONTH"
+DETAIL: The given value did not match any of the allowed values for this field.
+-- backwards parsing +CREATE VIEW collview1 AS SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; +CREATE VIEW collview2 AS SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; +CREATE VIEW collview3 AS SELECT a, lower((x || x) COLLATE "C") FROM collate_test10; +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'collview%' ORDER BY 1; + table_name | view_definition +------------+-------------------------------------------- + collview1 | SELECT a, + + | b + + | FROM collate_test1 + + | WHERE ((b COLLATE "C") >= 'bbc'::text); + collview2 | SELECT a, + + | b + + | FROM collate_test1 + + | ORDER BY (b COLLATE "C"); + collview3 | SELECT a, + + | lower(((x || x) COLLATE "C")) AS lower+ + | FROM collate_test10; +(3 rows) + +-- collation propagation in various expression types +SELECT a, coalesce(b, 'foo') FROM collate_test1 ORDER BY 2; + a | coalesce +---+---------- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test2 ORDER BY 2; + a | coalesce +---+---------- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test3 ORDER BY 2; + a | coalesce +---+---------- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, lower(coalesce(x, 'foo')), lower(coalesce(y, 'foo')) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hıj +(2 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test1 ORDER BY 3; + a | b | greatest +---+-----+---------- + 1 | abc | CCC + 2 | äbc | CCC + 3 | bbc | CCC + 4 | ABC | CCC +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test2 ORDER BY 3; + a | b | greatest +---+-----+---------- + 1 | abc | CCC + 3 | bbc | CCC + 4 | ABC | CCC + 2 | äbc | äbc +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test3 ORDER BY 3; + a | b | greatest +---+-----+---------- + 4 | ABC | CCC + 1 | abc | abc + 3 | bbc | bbc + 2 | äbc | äbc +(4 rows) + +SELECT a, x, y, lower(greatest(x, 'foo')), lower(greatest(y, 'foo')) FROM collate_test10; + a | x | y | lower | lower +---+-----+-----+-------+------- + 1 | hij | hij | hij | hij + 2 | HIJ | HIJ | hij | hıj +(2 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test1 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 2 | äbc + 3 | bbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test2 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 3 | bbc + 2 | äbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test3 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 3 | bbc + 2 | äbc + 1 | +(4 rows) + +SELECT a, lower(nullif(x, 'foo')), lower(nullif(y, 'foo')) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hıj +(2 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test1 ORDER BY 2; + a | b +---+------ + 4 | ABC + 2 | äbc + 1 | abcd + 3 | bbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test2 ORDER BY 2; + a | b +---+------ + 4 | ABC + 1 | abcd + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test3 ORDER BY 2; + a | b +---+------ + 4 | ABC + 1 | abcd + 3 | bbc + 2 | äbc +(4 rows) + +CREATE DOMAIN testdomain AS text; +SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b::testdomain FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b::testdomain FROM 
collate_test3 ORDER BY 2; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b::testdomain_sv FROM collate_test3 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, lower(x::testdomain), lower(y::testdomain) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hıj +(2 rows) + +SELECT min(b), max(b) FROM collate_test1; + min | max +-----+----- + abc | bbc +(1 row) + +SELECT min(b), max(b) FROM collate_test2; + min | max +-----+----- + abc | äbc +(1 row) + +SELECT min(b), max(b) FROM collate_test3; + min | max +-----+----- + ABC | äbc +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test1; + array_agg +------------------- + {abc,ABC,äbc,bbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test2; + array_agg +------------------- + {abc,ABC,bbc,äbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test3; + array_agg +------------------- + {ABC,abc,bbc,äbc} +(1 row) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 1 | abc + 4 | ABC + 4 | ABC + 2 | äbc + 2 | äbc + 3 | bbc + 3 | bbc +(8 rows) + +SELECT a, b FROM collate_test2 UNION SELECT a, b FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test3 WHERE a < 4 INTERSECT SELECT a, b FROM collate_test3 WHERE a > 1 ORDER BY 2; + a | b +---+----- + 3 | bbc + 2 | äbc +(2 rows) + +SELECT a, b FROM collate_test3 EXCEPT SELECT a, b FROM collate_test3 WHERE a < 2 ORDER BY 2; + a | b +---+----- + 4 | ABC + 3 | bbc + 2 | äbc +(3 rows) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3; -- ok + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(8 rows) + +SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en_US" and "C" +LINE 1: SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collat... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b COLLATE "C" FROM collate_test1 UNION SELECT a, b FROM collate_test3 ORDER BY 2; -- ok + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en_US" and "C" +LINE 1: ...ELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM col... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en_US" and "C" +LINE 1: SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM colla... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +CREATE TABLE test_u AS SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3; -- fail +ERROR: no collation was derived for column "b" with collatable type text +HINT: Use the COLLATE clause to set the collation explicitly. 
+-- ideally this would be a parse-time error, but for now it must be run-time: +select x < y from collate_test10; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +select x || y from collate_test10; -- ok, because || is not collation aware + ?column? +---------- + hijhij + HIJHIJ +(2 rows) + +select x, y from collate_test10 order by x || y; -- not so ok +ERROR: collation mismatch between implicit collations "en_US" and "tr_TR" +LINE 1: select x, y from collate_test10 order by x || y; + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +-- collation mismatch between recursive and non-recursive term +WITH RECURSIVE foo(x) AS + (SELECT x FROM (VALUES('a' COLLATE "en_US"),('b')) t(x) + UNION ALL + SELECT (x || 'c') COLLATE "de_DE" FROM foo WHERE length(x) < 10) +SELECT * FROM foo; +ERROR: recursive query "foo" column 1 has collation "en_US" in non-recursive term but collation "de_DE" overall +LINE 2: (SELECT x FROM (VALUES('a' COLLATE "en_US"),('b')) t(x) + ^ +HINT: Use the COLLATE clause to set the collation of the non-recursive term. +-- casting +SELECT CAST('42' AS text COLLATE "C"); +ERROR: syntax error at or near "COLLATE" +LINE 1: SELECT CAST('42' AS text COLLATE "C"); + ^ +SELECT a, CAST(b AS varchar) FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test3 ORDER BY 2; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- propagation of collation in SQL functions (inlined and non-inlined cases) +-- and plpgsql functions too +CREATE FUNCTION mylt (text, text) RETURNS boolean LANGUAGE sql + AS $$ select $1 < $2 $$; +CREATE FUNCTION mylt_noninline (text, text) RETURNS boolean LANGUAGE sql + AS $$ select $1 < $2 limit 1 $$; +CREATE FUNCTION mylt_plpgsql (text, text) RETURNS boolean LANGUAGE plpgsql + AS $$ begin return $1 < $2; end $$; +SELECT a.b AS a, b.b AS b, a.b < b.b AS lt, + mylt(a.b, b.b), mylt_noninline(a.b, b.b), mylt_plpgsql(a.b, b.b) +FROM collate_test1 a, collate_test1 b +ORDER BY a.b, b.b; + a | b | lt | mylt | mylt_noninline | mylt_plpgsql +-----+-----+----+------+----------------+-------------- + abc | abc | f | f | f | f + abc | ABC | t | t | t | t + abc | äbc | t | t | t | t + abc | bbc | t | t | t | t + ABC | abc | f | f | f | f + ABC | ABC | f | f | f | f + ABC | äbc | t | t | t | t + ABC | bbc | t | t | t | t + äbc | abc | f | f | f | f + äbc | ABC | f | f | f | f + äbc | äbc | f | f | f | f + äbc | bbc | t | t | t | t + bbc | abc | f | f | f | f + bbc | ABC | f | f | f | f + bbc | äbc | f | f | f | f + bbc | bbc | f | f | f | f +(16 rows) + +SELECT a.b AS a, b.b AS b, a.b < b.b COLLATE "C" AS lt, + mylt(a.b, b.b COLLATE "C"), mylt_noninline(a.b, b.b COLLATE "C"), + mylt_plpgsql(a.b, b.b COLLATE "C") +FROM collate_test1 a, collate_test1 b +ORDER BY a.b, b.b; + a | b | lt | mylt | mylt_noninline | mylt_plpgsql +-----+-----+----+------+----------------+-------------- + abc | abc | f | f | f | f + abc | ABC | f | f | f | f + abc | äbc | t | t | t | t + abc | bbc | t | t | t | t + ABC | abc | t | t | t | t + ABC | ABC | f | f | f | f + ABC | äbc | t | t | t | t + ABC | bbc | t | t | t | t + äbc | abc | f | f | f | f + äbc | ABC | f | f | f | f + äbc | äbc | f | f | f | f + äbc | bbc | f | f | 
f | f + bbc | abc | f | f | f | f + bbc | ABC | f | f | f | f + bbc | äbc | t | t | t | t + bbc | bbc | f | f | f | f +(16 rows) + +-- collation override in plpgsql +CREATE FUNCTION mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$ +declare + xx text := x; + yy text := y; +begin + return xx < yy; +end +$$; +SELECT mylt2('a', 'B' collate "en_US") as t, mylt2('a', 'B' collate "C") as f; + t | f +---+--- + t | f +(1 row) + +CREATE OR REPLACE FUNCTION + mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$ +declare + xx text COLLATE "POSIX" := x; + yy text := y; +begin + return xx < yy; +end +$$; +SELECT mylt2('a', 'B') as f; + f +--- + f +(1 row) + +SELECT mylt2('a', 'B' collate "C") as fail; -- conflicting collations +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +CONTEXT: PL/pgSQL function mylt2(text,text) line 6 at RETURN +SELECT mylt2('a', 'B' collate "POSIX") as f; + f +--- + f +(1 row) + +-- polymorphism +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test1)) ORDER BY 1; + unnest +-------- + abc + ABC + äbc + bbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test2)) ORDER BY 1; + unnest +-------- + abc + ABC + bbc + äbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test3)) ORDER BY 1; + unnest +-------- + ABC + abc + bbc + äbc +(4 rows) + +CREATE FUNCTION dup (anyelement) RETURNS anyelement + AS 'select $1' LANGUAGE sql; +SELECT a, dup(b) FROM collate_test1 ORDER BY 2; + a | dup +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, dup(b) FROM collate_test2 ORDER BY 2; + a | dup +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, dup(b) FROM collate_test3 ORDER BY 2; + a | dup +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- indexes +CREATE INDEX collate_test1_idx1 ON collate_test1 (b); +CREATE INDEX collate_test1_idx2 ON collate_test1 (b COLLATE "C"); +CREATE INDEX collate_test1_idx3 ON collate_test1 ((b COLLATE "C")); -- this is different grammatically +CREATE INDEX collate_test1_idx4 ON collate_test1 (((b||'foo') COLLATE "POSIX")); +CREATE INDEX collate_test1_idx5 ON collate_test1 (a COLLATE "C"); -- fail +ERROR: collations are not supported by type integer +CREATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C")); -- fail +ERROR: collations are not supported by type integer +LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C... 
+ ^ +SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") +(4 rows) + +-- schema manipulation commands +CREATE ROLE regress_test_role; +CREATE SCHEMA test_schema; +-- We need to do this this way to cope with varying names for encodings: +do $$ +BEGIN + EXECUTE 'CREATE COLLATION test0 (locale = ' || + quote_literal((SELECT datcollate FROM pg_database WHERE datname = current_database())) || ');'; +END +$$; +CREATE COLLATION test0 FROM "C"; -- fail, duplicate name +ERROR: collation "test0" already exists +CREATE COLLATION IF NOT EXISTS test0 FROM "C"; -- ok, skipped +NOTICE: collation "test0" already exists, skipping +CREATE COLLATION IF NOT EXISTS test0 (locale = 'foo'); -- ok, skipped +NOTICE: collation "test0" for encoding "UTF8" already exists, skipping +do $$ +BEGIN + EXECUTE 'CREATE COLLATION test1 (lc_collate = ' || + quote_literal((SELECT datcollate FROM pg_database WHERE datname = current_database())) || + ', lc_ctype = ' || + quote_literal((SELECT datctype FROM pg_database WHERE datname = current_database())) || ');'; +END +$$; +CREATE COLLATION test3 (lc_collate = 'en_US.utf8'); -- fail, need lc_ctype +ERROR: parameter "lc_ctype" must be specified +CREATE COLLATION testx (locale = 'nonsense'); -- fail +ERROR: could not create locale "nonsense": No such file or directory +DETAIL: The operating system could not find any locale data for the locale name "nonsense". 
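The DO blocks above assemble their CREATE COLLATION statements dynamically because the database's datcollate string varies across platforms and cannot be written literally in the test. A minimal sketch of the same pattern using format() instead of string concatenation; the collation name demo_coll is illustrative only, and the sketch assumes datcollate names a locale the libc provider accepts:

DO $$
BEGIN
  -- datcollate is only known at run time, so the statement is built dynamically
  EXECUTE format('CREATE COLLATION demo_coll (locale = %L)',
                 (SELECT datcollate FROM pg_database
                   WHERE datname = current_database()));
END
$$;
DROP COLLATION demo_coll;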
+CREATE COLLATION test4 FROM nonsense; +ERROR: collation "nonsense" for encoding "UTF8" does not exist +CREATE COLLATION test5 FROM test0; +SELECT collname FROM pg_collation WHERE collname LIKE 'test%' ORDER BY 1; + collname +---------- + test0 + test1 + test5 +(3 rows) + +ALTER COLLATION test1 RENAME TO test11; +ALTER COLLATION test0 RENAME TO test11; -- fail +ERROR: collation "test11" for encoding "UTF8" already exists in schema "collate_tests" +ALTER COLLATION test1 RENAME TO test22; -- fail +ERROR: collation "test1" for encoding "UTF8" does not exist +ALTER COLLATION test11 OWNER TO regress_test_role; +ALTER COLLATION test11 OWNER TO nonsense; +ERROR: role "nonsense" does not exist +ALTER COLLATION test11 SET SCHEMA test_schema; +COMMENT ON COLLATION test0 IS 'US English'; +SELECT collname, nspname, obj_description(pg_collation.oid, 'pg_collation') + FROM pg_collation JOIN pg_namespace ON (collnamespace = pg_namespace.oid) + WHERE collname LIKE 'test%' + ORDER BY 1; + collname | nspname | obj_description +----------+---------------+----------------- + test0 | collate_tests | US English + test11 | test_schema | + test5 | collate_tests | +(3 rows) + +DROP COLLATION test0, test_schema.test11, test5; +DROP COLLATION test0; -- fail +ERROR: collation "test0" for encoding "UTF8" does not exist +DROP COLLATION IF EXISTS test0; +NOTICE: collation "test0" does not exist, skipping +SELECT collname FROM pg_collation WHERE collname LIKE 'test%'; + collname +---------- +(0 rows) + +DROP SCHEMA test_schema; +DROP ROLE regress_test_role; +-- ALTER +ALTER COLLATION "en_US" REFRESH VERSION; +NOTICE: version has not changed +-- also test for database while we are here +SELECT current_database() AS datname \gset +ALTER DATABASE :"datname" REFRESH COLLATION VERSION; +NOTICE: version has not changed +-- dependencies +CREATE COLLATION test0 FROM "C"; +CREATE TABLE collate_dep_test1 (a int, b text COLLATE test0); +CREATE DOMAIN collate_dep_dom1 AS text COLLATE test0; +CREATE TYPE collate_dep_test2 AS (x int, y text COLLATE test0); +CREATE VIEW collate_dep_test3 AS SELECT text 'foo' COLLATE test0 AS foo; +CREATE TABLE collate_dep_test4t (a int, b text); +CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0); +DROP COLLATION test0 RESTRICT; -- fail +ERROR: cannot drop collation test0 because other objects depend on it +DETAIL: column b of table collate_dep_test1 depends on collation test0 +type collate_dep_dom1 depends on collation test0 +column y of composite type collate_dep_test2 depends on collation test0 +view collate_dep_test3 depends on collation test0 +index collate_dep_test4i depends on collation test0 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
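The dependency list in the error above comes from PostgreSQL's dependency tracking. One way to look at the same information directly, sketched against the catalogs that back that message and assuming collation test0 still exists and is visible on the current search_path:

SELECT pg_describe_object(classid, objid, objsubid) AS dependent_object,
       deptype
FROM pg_depend
WHERE refclassid = 'pg_collation'::regclass
  AND refobjid = 'test0'::regcollation;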
+DROP COLLATION test0 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to column b of table collate_dep_test1 +drop cascades to type collate_dep_dom1 +drop cascades to column y of composite type collate_dep_test2 +drop cascades to view collate_dep_test3 +drop cascades to index collate_dep_test4i +\d collate_dep_test1 + Table "collate_tests.collate_dep_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + +\d collate_dep_test2 + Composite type "collate_tests.collate_dep_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + x | integer | | | + +DROP TABLE collate_dep_test1, collate_dep_test4t; +DROP TYPE collate_dep_test2; +-- test range types and collations +create type textrange_c as range(subtype=text, collation="C"); +create type textrange_en_us as range(subtype=text, collation="en_US"); +select textrange_c('A','Z') @> 'b'::text; + ?column? +---------- + f +(1 row) + +select textrange_en_us('A','Z') @> 'b'::text; + ?column? +---------- + t +(1 row) + +drop type textrange_c; +drop type textrange_en_us; +-- standard collations +SELECT * FROM collate_test2 ORDER BY b COLLATE UCS_BASIC; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- nondeterministic collations +-- (not supported with libc provider) +CREATE COLLATION ctest_det (locale = 'en_US.utf8', deterministic = true); +CREATE COLLATION ctest_nondet (locale = 'en_US.utf8', deterministic = false); +ERROR: nondeterministic collations not supported with this provider +-- cleanup +SET client_min_messages TO warning; +DROP SCHEMA collate_tests CASCADE; diff --git a/src/test/regress/expected/collate.linux.utf8_1.out b/src/test/regress/expected/collate.linux.utf8_1.out new file mode 100644 index 0000000..ede5fdb --- /dev/null +++ b/src/test/regress/expected/collate.linux.utf8_1.out @@ -0,0 +1,11 @@ +/* + * This test is for Linux/glibc systems and assumes that a full set of + * locales is installed. It must be run in a database with UTF-8 encoding, + * because other encodings don't support all the characters used. + */ +SELECT getdatabaseencoding() <> 'UTF8' OR + (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR + version() !~ 'linux-gnu' + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/collate.out b/src/test/regress/expected/collate.out new file mode 100644 index 0000000..0649564 --- /dev/null +++ b/src/test/regress/expected/collate.out @@ -0,0 +1,776 @@ +/* + * This test is intended to pass on all platforms supported by Postgres. + * We can therefore only assume that the default, C, and POSIX collations + * are available --- and since the regression tests are often run in a + * C-locale database, these may well all have the same behavior. But + * fortunately, the system doesn't know that and will treat them as + * incompatible collations. It is therefore at least possible to test + * parser behaviors such as collation conflict resolution. This test will, + * however, be more revealing when run in a database with non-C locale, + * since any departure from C sorting behavior will show as a failure. 
+ */ +CREATE SCHEMA collate_tests; +SET search_path = collate_tests; +CREATE TABLE collate_test1 ( + a int, + b text COLLATE "C" NOT NULL +); +\d collate_test1 + Table "collate_tests.collate_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | C | not null | + +CREATE TABLE collate_test_fail ( + a int COLLATE "C", + b text +); +ERROR: collations are not supported by type integer +LINE 2: a int COLLATE "C", + ^ +CREATE TABLE collate_test_like ( + LIKE collate_test1 +); +\d collate_test_like + Table "collate_tests.collate_test_like" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | C | not null | + +CREATE TABLE collate_test2 ( + a int, + b text COLLATE "POSIX" +); +INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'Abc'), (3, 'bbc'), (4, 'ABD'); +INSERT INTO collate_test2 SELECT * FROM collate_test1; +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'abc'; + a | b +---+----- + 1 | abc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b >= 'abc' COLLATE "C"; + a | b +---+----- + 1 | abc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'abc' COLLATE "C"; + a | b +---+----- + 1 | abc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "POSIX"; -- fail +ERROR: collation mismatch between explicit collations "C" and "POSIX" +LINE 1: ...* FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "P... + ^ +CREATE DOMAIN testdomain_p AS text COLLATE "POSIX"; +CREATE DOMAIN testdomain_i AS int COLLATE "POSIX"; -- fail +ERROR: collations are not supported by type integer +CREATE TABLE collate_test4 ( + a int, + b testdomain_p +); +INSERT INTO collate_test4 SELECT * FROM collate_test1; +SELECT a, b FROM collate_test4 ORDER BY b; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +CREATE TABLE collate_test5 ( + a int, + b testdomain_p COLLATE "C" +); +INSERT INTO collate_test5 SELECT * FROM collate_test1; +SELECT a, b FROM collate_test5 ORDER BY b; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test1 ORDER BY b; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test2 ORDER BY b; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +-- star expansion +SELECT * FROM collate_test1 ORDER BY b; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT * FROM collate_test2 ORDER BY b; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +-- constant expression folding +SELECT 'bbc' COLLATE "C" > 'Abc' COLLATE "C" AS "true"; + true +------ + t +(1 row) + +SELECT 'bbc' COLLATE "POSIX" < 'Abc' COLLATE "POSIX" AS "false"; + false +------- + f +(1 row) + +-- upper/lower +CREATE TABLE collate_test10 ( + a int, + x text COLLATE "C", + y text COLLATE "POSIX" +); +INSERT INTO collate_test10 VALUES (1, 'hij', 'hij'), (2, 'HIJ', 'HIJ'); +SELECT a, lower(x), lower(y), upper(x), upper(y), initcap(x), initcap(y) FROM collate_test10; + a | lower | lower | upper | upper | initcap | initcap +---+-------+-------+-------+-------+---------+--------- + 1 | hij | hij | HIJ | HIJ | Hij | Hij + 2 | hij | hij | HIJ | HIJ | Hij | Hij +(2 rows) + +SELECT a, lower(x COLLATE "C"), lower(y COLLATE "C") 
FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hij +(2 rows) + +SELECT a, x, y FROM collate_test10 ORDER BY lower(y), a; + a | x | y +---+-----+----- + 1 | hij | hij + 2 | HIJ | HIJ +(2 rows) + +-- backwards parsing +CREATE VIEW collview1 AS SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; +CREATE VIEW collview2 AS SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; +CREATE VIEW collview3 AS SELECT a, lower((x || x) COLLATE "POSIX") FROM collate_test10; +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'collview%' ORDER BY 1; + table_name | view_definition +------------+------------------------------------------------ + collview1 | SELECT a, + + | b + + | FROM collate_test1 + + | WHERE ((b COLLATE "C") >= 'bbc'::text); + collview2 | SELECT a, + + | b + + | FROM collate_test1 + + | ORDER BY (b COLLATE "C"); + collview3 | SELECT a, + + | lower(((x || x) COLLATE "POSIX")) AS lower+ + | FROM collate_test10; +(3 rows) + +-- collation propagation in various expression types +SELECT a, coalesce(b, 'foo') FROM collate_test1 ORDER BY 2; + a | coalesce +---+---------- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test2 ORDER BY 2; + a | coalesce +---+---------- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, lower(coalesce(x, 'foo')), lower(coalesce(y, 'foo')) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hij +(2 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test1 ORDER BY 3; + a | b | greatest +---+-----+---------- + 2 | Abc | CCC + 4 | ABD | CCC + 1 | abc | abc + 3 | bbc | bbc +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test2 ORDER BY 3; + a | b | greatest +---+-----+---------- + 2 | Abc | CCC + 4 | ABD | CCC + 1 | abc | abc + 3 | bbc | bbc +(4 rows) + +SELECT a, x, y, lower(greatest(x, 'foo')), lower(greatest(y, 'foo')) FROM collate_test10; + a | x | y | lower | lower +---+-----+-----+-------+------- + 1 | hij | hij | hij | hij + 2 | HIJ | HIJ | foo | foo +(2 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test1 ORDER BY 2; + a | nullif +---+-------- + 4 | ABD + 2 | Abc + 3 | bbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test2 ORDER BY 2; + a | nullif +---+-------- + 4 | ABD + 2 | Abc + 3 | bbc + 1 | +(4 rows) + +SELECT a, lower(nullif(x, 'foo')), lower(nullif(y, 'foo')) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hij +(2 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test1 ORDER BY 2; + a | b +---+------ + 4 | ABD + 2 | Abc + 1 | abcd + 3 | bbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test2 ORDER BY 2; + a | b +---+------ + 4 | ABD + 2 | Abc + 1 | abcd + 3 | bbc +(4 rows) + +CREATE DOMAIN testdomain AS text; +SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b::testdomain FROM collate_test2 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b::testdomain_p FROM collate_test2 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, lower(x::testdomain), lower(y::testdomain) FROM collate_test10; + a | lower | lower +---+-------+------- + 1 | hij | hij + 2 | hij | hij +(2 rows) + +SELECT min(b), max(b) FROM collate_test1; + min | max +-----+----- + ABD | bbc +(1 row) + +SELECT min(b), 
max(b) FROM collate_test2; + min | max +-----+----- + ABD | bbc +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test1; + array_agg +------------------- + {ABD,Abc,abc,bbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test2; + array_agg +------------------- + {ABD,Abc,abc,bbc} +(1 row) + +-- In aggregates, ORDER BY expressions don't affect aggregate's collation +SELECT string_agg(x COLLATE "C", y COLLATE "POSIX") FROM collate_test10; -- fail +ERROR: collation mismatch between explicit collations "C" and "POSIX" +LINE 1: SELECT string_agg(x COLLATE "C", y COLLATE "POSIX") FROM col... + ^ +SELECT array_agg(x COLLATE "C" ORDER BY y COLLATE "POSIX") FROM collate_test10; + array_agg +----------- + {HIJ,hij} +(1 row) + +SELECT array_agg(a ORDER BY x COLLATE "C", y COLLATE "POSIX") FROM collate_test10; + array_agg +----------- + {2,1} +(1 row) + +SELECT array_agg(a ORDER BY x||y) FROM collate_test10; -- fail +ERROR: collation mismatch between implicit collations "C" and "POSIX" +LINE 1: SELECT array_agg(a ORDER BY x||y) FROM collate_test10; + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test1 ORDER BY 2; + a | b +---+----- + 4 | ABD + 4 | ABD + 2 | Abc + 2 | Abc + 1 | abc + 1 | abc + 3 | bbc + 3 | bbc +(8 rows) + +SELECT a, b FROM collate_test2 UNION SELECT a, b FROM collate_test2 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test2 WHERE a < 4 INTERSECT SELECT a, b FROM collate_test2 WHERE a > 1 ORDER BY 2; + a | b +---+----- + 2 | Abc + 3 | bbc +(2 rows) + +SELECT a, b FROM collate_test2 EXCEPT SELECT a, b FROM collate_test2 WHERE a < 2 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 3 | bbc +(3 rows) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test2 ORDER BY 2; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test2; -- ok + a | b +---+----- + 1 | abc + 2 | Abc + 3 | bbc + 4 | ABD + 1 | abc + 2 | Abc + 3 | bbc + 4 | ABD +(8 rows) + +SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collate_test2 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "C" and "POSIX" +LINE 1: SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collat... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b COLLATE "C" FROM collate_test1 UNION SELECT a, b FROM collate_test2 ORDER BY 2; -- ok + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM collate_test2 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "C" and "POSIX" +LINE 1: ...ELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM col... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM collate_test2 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "C" and "POSIX" +LINE 1: SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM colla... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. 
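As the hints above say, these set-operation mismatches are resolved by attaching an explicit COLLATE clause to either branch, because an explicit collation outranks the implicit collations derived from the columns. A small sketch against the same tables, here repairing the failing EXCEPT case; the COLLATION FOR check is illustrative only:

SELECT a, b FROM collate_test1
EXCEPT
SELECT a, b COLLATE "C" FROM collate_test2
ORDER BY 2;

-- the collation actually derived for an expression can be inspected directly
SELECT collation for (b || 'x') FROM collate_test1 LIMIT 1;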
+CREATE TABLE test_u AS SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test2; -- fail +ERROR: no collation was derived for column "b" with collatable type text +HINT: Use the COLLATE clause to set the collation explicitly. +-- ideally this would be a parse-time error, but for now it must be run-time: +select x < y from collate_test10; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +select x || y from collate_test10; -- ok, because || is not collation aware + ?column? +---------- + hijhij + HIJHIJ +(2 rows) + +select x, y from collate_test10 order by x || y; -- not so ok +ERROR: collation mismatch between implicit collations "C" and "POSIX" +LINE 1: select x, y from collate_test10 order by x || y; + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +-- collation mismatch between recursive and non-recursive term +WITH RECURSIVE foo(x) AS + (SELECT x FROM (VALUES('a' COLLATE "C"),('b')) t(x) + UNION ALL + SELECT (x || 'c') COLLATE "POSIX" FROM foo WHERE length(x) < 10) +SELECT * FROM foo; +ERROR: recursive query "foo" column 1 has collation "C" in non-recursive term but collation "POSIX" overall +LINE 2: (SELECT x FROM (VALUES('a' COLLATE "C"),('b')) t(x) + ^ +HINT: Use the COLLATE clause to set the collation of the non-recursive term. +SELECT a, b, a < b as lt FROM + (VALUES ('a', 'B'), ('A', 'b' COLLATE "C")) v(a,b); + a | b | lt +---+---+---- + a | B | f + A | b | t +(2 rows) + +-- collation mismatch in subselects +SELECT * FROM collate_test10 WHERE (x, y) NOT IN (SELECT y, x FROM collate_test10); +ERROR: could not determine which collation to use for string hashing +HINT: Use the COLLATE clause to set the collation explicitly. 
+-- now it works with overrides +SELECT * FROM collate_test10 WHERE (x COLLATE "POSIX", y COLLATE "C") NOT IN (SELECT y, x FROM collate_test10); + a | x | y +---+---+--- +(0 rows) + +SELECT * FROM collate_test10 WHERE (x, y) NOT IN (SELECT y COLLATE "C", x COLLATE "POSIX" FROM collate_test10); + a | x | y +---+---+--- +(0 rows) + +-- casting +SELECT CAST('42' AS text COLLATE "C"); +ERROR: syntax error at or near "COLLATE" +LINE 1: SELECT CAST('42' AS text COLLATE "C"); + ^ +SELECT a, CAST(b AS varchar) FROM collate_test1 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test2 ORDER BY 2; + a | b +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +-- result of a SQL function +CREATE FUNCTION vc (text) RETURNS text LANGUAGE sql + AS 'select $1::varchar'; +SELECT a, b FROM collate_test1 ORDER BY a, vc(b); + a | b +---+----- + 1 | abc + 2 | Abc + 3 | bbc + 4 | ABD +(4 rows) + +-- polymorphism +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test1)) ORDER BY 1; + unnest +-------- + ABD + Abc + abc + bbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test2)) ORDER BY 1; + unnest +-------- + ABD + Abc + abc + bbc +(4 rows) + +CREATE FUNCTION dup (anyelement) RETURNS anyelement + AS 'select $1' LANGUAGE sql; +SELECT a, dup(b) FROM collate_test1 ORDER BY 2; + a | dup +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +SELECT a, dup(b) FROM collate_test2 ORDER BY 2; + a | dup +---+----- + 4 | ABD + 2 | Abc + 1 | abc + 3 | bbc +(4 rows) + +-- indexes +CREATE INDEX collate_test1_idx1 ON collate_test1 (b); +CREATE INDEX collate_test1_idx2 ON collate_test1 (b COLLATE "POSIX"); +CREATE INDEX collate_test1_idx3 ON collate_test1 ((b COLLATE "POSIX")); -- this is different grammatically +CREATE INDEX collate_test1_idx4 ON collate_test1 (((b||'foo') COLLATE "POSIX")); +CREATE INDEX collate_test1_idx5 ON collate_test1 (a COLLATE "POSIX"); -- fail +ERROR: collations are not supported by type integer +CREATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "POSIX")); -- fail +ERROR: collations are not supported by type integer +LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "P... 
+ ^ +SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "POSIX") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "POSIX") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") +(4 rows) + +-- foreign keys +-- force indexes and mergejoins to be used for FK checking queries, +-- else they might not exercise collation-dependent operators +SET enable_seqscan TO 0; +SET enable_hashjoin TO 0; +SET enable_nestloop TO 0; +CREATE TABLE collate_test20 (f1 text COLLATE "C" PRIMARY KEY); +INSERT INTO collate_test20 VALUES ('foo'), ('bar'); +CREATE TABLE collate_test21 (f2 text COLLATE "POSIX" REFERENCES collate_test20); +INSERT INTO collate_test21 VALUES ('foo'), ('bar'); +INSERT INTO collate_test21 VALUES ('baz'); -- fail +ERROR: insert or update on table "collate_test21" violates foreign key constraint "collate_test21_f2_fkey" +DETAIL: Key (f2)=(baz) is not present in table "collate_test20". +CREATE TABLE collate_test22 (f2 text COLLATE "POSIX"); +INSERT INTO collate_test22 VALUES ('foo'), ('bar'), ('baz'); +ALTER TABLE collate_test22 ADD FOREIGN KEY (f2) REFERENCES collate_test20; -- fail +ERROR: insert or update on table "collate_test22" violates foreign key constraint "collate_test22_f2_fkey" +DETAIL: Key (f2)=(baz) is not present in table "collate_test20". +DELETE FROM collate_test22 WHERE f2 = 'baz'; +ALTER TABLE collate_test22 ADD FOREIGN KEY (f2) REFERENCES collate_test20; +RESET enable_seqscan; +RESET enable_hashjoin; +RESET enable_nestloop; +-- EXPLAIN +EXPLAIN (COSTS OFF) + SELECT * FROM collate_test10 ORDER BY x, y; + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: x COLLATE "C", y COLLATE "POSIX" + -> Seq Scan on collate_test10 +(3 rows) + +EXPLAIN (COSTS OFF) + SELECT * FROM collate_test10 ORDER BY x DESC, y COLLATE "C" ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: x COLLATE "C" DESC, y COLLATE "C" NULLS FIRST + -> Seq Scan on collate_test10 +(3 rows) + +-- CREATE/DROP COLLATION +CREATE COLLATION mycoll1 FROM "C"; +CREATE COLLATION mycoll2 ( LC_COLLATE = "POSIX", LC_CTYPE = "POSIX" ); +CREATE COLLATION mycoll3 FROM "default"; -- intentionally unsupported +ERROR: collation "default" cannot be copied +DROP COLLATION mycoll1; +CREATE TABLE collate_test23 (f1 text collate mycoll2); +DROP COLLATION mycoll2; -- fail +ERROR: cannot drop collation mycoll2 because other objects depend on it +DETAIL: column f1 of table collate_test23 depends on collation mycoll2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- invalid: non-lowercase quoted identifiers +CREATE COLLATION case_coll ("Lc_Collate" = "POSIX", "Lc_Ctype" = "POSIX"); +ERROR: collation attribute "Lc_Collate" not recognized +LINE 1: CREATE COLLATION case_coll ("Lc_Collate" = "POSIX", "Lc_Ctyp... 
+ ^ +-- 9.1 bug with useless COLLATE in an expression subject to length coercion +CREATE TEMP TABLE vctable (f1 varchar(25)); +INSERT INTO vctable VALUES ('foo' COLLATE "C"); +SELECT collation for ('foo'); -- unknown type - null + pg_collation_for +------------------ + +(1 row) + +SELECT collation for ('foo'::text); + pg_collation_for +------------------ + "default" +(1 row) + +SELECT collation for ((SELECT a FROM collate_test1 LIMIT 1)); -- non-collatable type - error +ERROR: collations are not supported by type integer +SELECT collation for ((SELECT b FROM collate_test1 LIMIT 1)); + pg_collation_for +------------------ + "C" +(1 row) + +-- old bug with not dropping COLLATE when coercing to non-collatable type +CREATE VIEW collate_on_int AS +SELECT c1+1 AS c1p FROM + (SELECT ('4' COLLATE "C")::INT AS c1) ss; +\d+ collate_on_int + View "collate_tests.collate_on_int" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + c1p | integer | | | | plain | +View definition: + SELECT c1 + 1 AS c1p + FROM ( SELECT 4 AS c1) ss; + +-- Check conflicting or redundant options in CREATE COLLATION +-- LC_COLLATE +CREATE COLLATION coll_dup_chk (LC_COLLATE = "POSIX", LC_COLLATE = "NONSENSE", LC_CTYPE = "POSIX"); +ERROR: conflicting or redundant options +LINE 1: ...ATE COLLATION coll_dup_chk (LC_COLLATE = "POSIX", LC_COLLATE... + ^ +-- LC_CTYPE +CREATE COLLATION coll_dup_chk (LC_CTYPE = "POSIX", LC_CTYPE = "NONSENSE", LC_COLLATE = "POSIX"); +ERROR: conflicting or redundant options +LINE 1: ...REATE COLLATION coll_dup_chk (LC_CTYPE = "POSIX", LC_CTYPE =... + ^ +-- PROVIDER +CREATE COLLATION coll_dup_chk (PROVIDER = icu, PROVIDER = NONSENSE, LC_COLLATE = "POSIX", LC_CTYPE = "POSIX"); +ERROR: conflicting or redundant options +LINE 1: CREATE COLLATION coll_dup_chk (PROVIDER = icu, PROVIDER = NO... + ^ +-- LOCALE +CREATE COLLATION case_sensitive (LOCALE = '', LOCALE = "NONSENSE"); +ERROR: conflicting or redundant options +LINE 1: CREATE COLLATION case_sensitive (LOCALE = '', LOCALE = "NONS... + ^ +-- DETERMINISTIC +CREATE COLLATION coll_dup_chk (DETERMINISTIC = TRUE, DETERMINISTIC = NONSENSE, LOCALE = ''); +ERROR: conflicting or redundant options +LINE 1: ...ATE COLLATION coll_dup_chk (DETERMINISTIC = TRUE, DETERMINIS... + ^ +-- VERSION +CREATE COLLATION coll_dup_chk (VERSION = '1', VERSION = "NONSENSE", LOCALE = ''); +ERROR: conflicting or redundant options +LINE 1: CREATE COLLATION coll_dup_chk (VERSION = '1', VERSION = "NON... + ^ +-- LOCALE conflicts with LC_COLLATE and LC_CTYPE +CREATE COLLATION coll_dup_chk (LC_COLLATE = "POSIX", LC_CTYPE = "POSIX", LOCALE = ''); +ERROR: conflicting or redundant options +DETAIL: LOCALE cannot be specified together with LC_COLLATE or LC_CTYPE. +-- LOCALE conflicts with LC_COLLATE +CREATE COLLATION coll_dup_chk (LC_COLLATE = "POSIX", LOCALE = ''); +ERROR: conflicting or redundant options +DETAIL: LOCALE cannot be specified together with LC_COLLATE or LC_CTYPE. +-- LOCALE conflicts with LC_CTYPE +CREATE COLLATION coll_dup_chk (LC_CTYPE = "POSIX", LOCALE = ''); +ERROR: conflicting or redundant options +DETAIL: LOCALE cannot be specified together with LC_COLLATE or LC_CTYPE. +-- FROM conflicts with any other option +CREATE COLLATION coll_dup_chk (FROM = "C", VERSION = "1"); +ERROR: conflicting or redundant options +DETAIL: FROM cannot be specified together with any other options. +-- +-- Clean up. 
Many of these table names will be re-used if the user is +-- trying to run any platform-specific collation tests later, so we +-- must get rid of them. +-- +DROP SCHEMA collate_tests CASCADE; +NOTICE: drop cascades to 19 other objects +DETAIL: drop cascades to table collate_test1 +drop cascades to table collate_test_like +drop cascades to table collate_test2 +drop cascades to type testdomain_p +drop cascades to table collate_test4 +drop cascades to table collate_test5 +drop cascades to table collate_test10 +drop cascades to view collview1 +drop cascades to view collview2 +drop cascades to view collview3 +drop cascades to type testdomain +drop cascades to function vc(text) +drop cascades to function dup(anyelement) +drop cascades to table collate_test20 +drop cascades to table collate_test21 +drop cascades to table collate_test22 +drop cascades to collation mycoll2 +drop cascades to table collate_test23 +drop cascades to view collate_on_int diff --git a/src/test/regress/expected/collate.windows.win1252.out b/src/test/regress/expected/collate.windows.win1252.out new file mode 100644 index 0000000..b7b9395 --- /dev/null +++ b/src/test/regress/expected/collate.windows.win1252.out @@ -0,0 +1,1000 @@ +/* + * This test is meant to run on Windows systems that has successfully + * run pg_import_system_collations(). Also, the database must have + * WIN1252 encoding, because of the locales' own encodings. Because + * of this, some test are lost from UTF-8 version, such as Turkish + * dotted and undotted 'i'. + */ +SELECT getdatabaseencoding() <> 'WIN1252' OR + (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE') AND collencoding = pg_char_to_encoding('WIN1252')) <> 3 OR + (version() !~ 'Visual C\+\+' AND version() !~ 'mingw32' AND version() !~ 'windows') + AS skip_test \gset +\if :skip_test +\quit +\endif +SET client_encoding TO WIN1252; +CREATE SCHEMA collate_tests; +SET search_path = collate_tests; +CREATE TABLE collate_test1 ( + a int, + b text COLLATE "en_US" NOT NULL +); +\d collate_test1 + Table "collate_tests.collate_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | en_US | not null | + +CREATE TABLE collate_test_fail ( + a int, + b text COLLATE "ja_JP.eucjp" +); +ERROR: collation "ja_JP.eucjp" for encoding "WIN1252" does not exist +LINE 3: b text COLLATE "ja_JP.eucjp" + ^ +CREATE TABLE collate_test_fail ( + a int, + b text COLLATE "foo" +); +ERROR: collation "foo" for encoding "WIN1252" does not exist +LINE 3: b text COLLATE "foo" + ^ +CREATE TABLE collate_test_fail ( + a int COLLATE "en_US", + b text +); +ERROR: collations are not supported by type integer +LINE 2: a int COLLATE "en_US", + ^ +CREATE TABLE collate_test_like ( + LIKE collate_test1 +); +\d collate_test_like + Table "collate_tests.collate_test_like" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | en_US | not null | + +CREATE TABLE collate_test2 ( + a int, + b text COLLATE "sv_SE" +); +CREATE TABLE collate_test3 ( + a int, + b text COLLATE "C" +); +INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC'); +INSERT INTO collate_test2 SELECT * FROM collate_test1; +INSERT INTO collate_test3 SELECT * FROM collate_test1; +SELECT * FROM collate_test1 WHERE b >= 'bbc'; + a | b +---+----- + 3 | bbc +(1 row) + +SELECT * FROM collate_test2 WHERE b >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT 
* FROM collate_test3 WHERE b >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test3 WHERE b >= 'BBC'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b >= 'bbc' COLLATE "C"; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "C"; + a | b +---+----- + 2 | äbc + 3 | bbc +(2 rows) + +SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "en_US"; +ERROR: collation mismatch between explicit collations "C" and "en_US" +LINE 1: ...* FROM collate_test1 WHERE b COLLATE "C" >= 'bbc' COLLATE "e... + ^ +CREATE DOMAIN testdomain_sv AS text COLLATE "sv_SE"; +CREATE DOMAIN testdomain_i AS int COLLATE "sv_SE"; -- fails +ERROR: collations are not supported by type integer +CREATE TABLE collate_test4 ( + a int, + b testdomain_sv +); +INSERT INTO collate_test4 SELECT * FROM collate_test1; +SELECT a, b FROM collate_test4 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +CREATE TABLE collate_test5 ( + a int, + b testdomain_sv COLLATE "en_US" +); +INSERT INTO collate_test5 SELECT * FROM collate_test1; +SELECT a, b FROM collate_test5 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test1 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b FROM collate_test2 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test3 ORDER BY b; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- star expansion +SELECT * FROM collate_test1 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT * FROM collate_test2 ORDER BY b; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT * FROM collate_test3 ORDER BY b; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- constant expression folding +SELECT 'bbc' COLLATE "en_US" > 'äbc' COLLATE "en_US" AS "true"; + true +------ + t +(1 row) + +SELECT 'bbc' COLLATE "sv_SE" > 'äbc' COLLATE "sv_SE" AS "false"; + false +------- + f +(1 row) + +-- LIKE/ILIKE +SELECT * FROM collate_test1 WHERE b LIKE 'abc'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b LIKE 'abc%'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b LIKE '%bc%'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b ILIKE 'abc'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ILIKE 'abc%'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ILIKE '%bc%'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(4 rows) + +-- The following actually exercises the selectivity estimation for ILIKE. 
+SELECT relname FROM pg_class WHERE relname ILIKE 'abc%'; + relname +--------- +(0 rows) + +-- regular expressions +SELECT * FROM collate_test1 WHERE b ~ '^abc$'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b ~ '^abc'; + a | b +---+----- + 1 | abc +(1 row) + +SELECT * FROM collate_test1 WHERE b ~ 'bc'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc +(3 rows) + +SELECT * FROM collate_test1 WHERE b ~* '^abc$'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ~* '^abc'; + a | b +---+----- + 1 | abc + 4 | ABC +(2 rows) + +SELECT * FROM collate_test1 WHERE b ~* 'bc'; + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(4 rows) + +CREATE TABLE collate_test6 ( + a int, + b text COLLATE "en_US" +); +INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'), + (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '), + (9, 'äbç'), (10, 'ÄBÇ'); +SELECT b, + b ~ '^[[:alpha:]]+$' AS is_alpha, + b ~ '^[[:upper:]]+$' AS is_upper, + b ~ '^[[:lower:]]+$' AS is_lower, + b ~ '^[[:digit:]]+$' AS is_digit, + b ~ '^[[:alnum:]]+$' AS is_alnum, + b ~ '^[[:graph:]]+$' AS is_graph, + b ~ '^[[:print:]]+$' AS is_print, + b ~ '^[[:punct:]]+$' AS is_punct, + b ~ '^[[:space:]]+$' AS is_space +FROM collate_test6; + b | is_alpha | is_upper | is_lower | is_digit | is_alnum | is_graph | is_print | is_punct | is_space +-----+----------+----------+----------+----------+----------+----------+----------+----------+---------- + abc | t | f | t | f | t | t | t | f | f + ABC | t | t | f | f | t | t | t | f | f + 123 | f | f | f | t | t | t | t | f | f + ab1 | f | f | f | f | t | t | t | f | f + a1! | f | f | f | f | f | t | t | f | f + a c | f | f | f | f | f | f | t | f | f + !.; | f | f | f | f | f | t | t | t | f + | f | f | f | f | f | f | t | f | t + äbç | t | f | t | f | t | t | t | f | f + ÄBÇ | t | t | f | f | t | t | t | f | f +(10 rows) + +-- The following actually exercises the selectivity estimation for ~*. 
+SELECT relname FROM pg_class WHERE relname ~* '^abc'; + relname +--------- +(0 rows) + +-- backwards parsing +CREATE VIEW collview1 AS SELECT * FROM collate_test1 WHERE b COLLATE "C" >= 'bbc'; +CREATE VIEW collview2 AS SELECT a, b FROM collate_test1 ORDER BY b COLLATE "C"; +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'collview%' ORDER BY 1; + table_name | view_definition +------------+------------------------------------------- + collview1 | SELECT a, + + | b + + | FROM collate_test1 + + | WHERE ((b COLLATE "C") >= 'bbc'::text); + collview2 | SELECT a, + + | b + + | FROM collate_test1 + + | ORDER BY (b COLLATE "C"); +(2 rows) + +-- collation propagation in various expression types +SELECT a, coalesce(b, 'foo') FROM collate_test1 ORDER BY 2; + a | coalesce +---+---------- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test2 ORDER BY 2; + a | coalesce +---+---------- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, coalesce(b, 'foo') FROM collate_test3 ORDER BY 2; + a | coalesce +---+---------- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test1 ORDER BY 3; + a | b | greatest +---+-----+---------- + 1 | abc | CCC + 2 | äbc | CCC + 3 | bbc | CCC + 4 | ABC | CCC +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test2 ORDER BY 3; + a | b | greatest +---+-----+---------- + 1 | abc | CCC + 3 | bbc | CCC + 4 | ABC | CCC + 2 | äbc | äbc +(4 rows) + +SELECT a, b, greatest(b, 'CCC') FROM collate_test3 ORDER BY 3; + a | b | greatest +---+-----+---------- + 4 | ABC | CCC + 1 | abc | abc + 3 | bbc | bbc + 2 | äbc | äbc +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test1 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 2 | äbc + 3 | bbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test2 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 3 | bbc + 2 | äbc + 1 | +(4 rows) + +SELECT a, nullif(b, 'abc') FROM collate_test3 ORDER BY 2; + a | nullif +---+-------- + 4 | ABC + 3 | bbc + 2 | äbc + 1 | +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test1 ORDER BY 2; + a | b +---+------ + 4 | ABC + 2 | äbc + 1 | abcd + 3 | bbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test2 ORDER BY 2; + a | b +---+------ + 4 | ABC + 1 | abcd + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, CASE b WHEN 'abc' THEN 'abcd' ELSE b END FROM collate_test3 ORDER BY 2; + a | b +---+------ + 4 | ABC + 1 | abcd + 3 | bbc + 2 | äbc +(4 rows) + +CREATE DOMAIN testdomain AS text; +SELECT a, b::testdomain FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, b::testdomain FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b::testdomain FROM collate_test3 ORDER BY 2; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b::testdomain_sv FROM collate_test3 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT min(b), max(b) FROM collate_test1; + min | max +-----+----- + abc | bbc +(1 row) + +SELECT min(b), max(b) FROM collate_test2; + min | max +-----+----- + abc | äbc +(1 row) + +SELECT min(b), max(b) FROM collate_test3; + min | max +-----+----- + ABC | äbc +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test1; + array_agg +------------------- + {abc,ABC,äbc,bbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM 
collate_test2; + array_agg +------------------- + {abc,ABC,bbc,äbc} +(1 row) + +SELECT array_agg(b ORDER BY b) FROM collate_test3; + array_agg +------------------- + {ABC,abc,bbc,äbc} +(1 row) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 1 | abc + 4 | ABC + 4 | ABC + 2 | äbc + 2 | äbc + 3 | bbc + 3 | bbc +(8 rows) + +SELECT a, b FROM collate_test2 UNION SELECT a, b FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test3 WHERE a < 4 INTERSECT SELECT a, b FROM collate_test3 WHERE a > 1 ORDER BY 2; + a | b +---+----- + 3 | bbc + 2 | äbc +(2 rows) + +SELECT a, b FROM collate_test3 EXCEPT SELECT a, b FROM collate_test3 WHERE a < 2 ORDER BY 2; + a | b +---+----- + 4 | ABC + 3 | bbc + 2 | äbc +(3 rows) + +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. +SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3; -- ok + a | b +---+----- + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC + 1 | abc + 2 | äbc + 3 | bbc + 4 | ABC +(8 rows) + +SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en_US" and "C" +LINE 1: SELECT a, b FROM collate_test1 UNION SELECT a, b FROM collat... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b COLLATE "C" FROM collate_test1 UNION SELECT a, b FROM collate_test3 ORDER BY 2; -- ok + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en_US" and "C" +LINE 1: ...ELECT a, b FROM collate_test1 INTERSECT SELECT a, b FROM col... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM collate_test3 ORDER BY 2; -- fail +ERROR: collation mismatch between implicit collations "en_US" and "C" +LINE 1: SELECT a, b FROM collate_test1 EXCEPT SELECT a, b FROM colla... + ^ +HINT: You can choose the collation by applying the COLLATE clause to one or both expressions. +CREATE TABLE test_u AS SELECT a, b FROM collate_test1 UNION ALL SELECT a, b FROM collate_test3; -- fail +ERROR: no collation was derived for column "b" with collatable type text +HINT: Use the COLLATE clause to set the collation explicitly. +-- collation mismatch between recursive and non-recursive term +WITH RECURSIVE foo(x) AS + (SELECT x FROM (VALUES('a' COLLATE "en_US"),('b')) t(x) + UNION ALL + SELECT (x || 'c') COLLATE "de_DE" FROM foo WHERE length(x) < 10) +SELECT * FROM foo; +ERROR: recursive query "foo" column 1 has collation "en_US" in non-recursive term but collation "de_DE" overall +LINE 2: (SELECT x FROM (VALUES('a' COLLATE "en_US"),('b')) t(x) + ^ +HINT: Use the COLLATE clause to set the collation of the non-recursive term. 
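An illustrative sketch of the repair the hint suggests: give the non-recursive term the same collation the recursive term already carries, so both terms of the recursive union agree. It reuses only collations this test already requires ("en_US" and "de_DE"):

WITH RECURSIVE foo(x) AS
  (SELECT x COLLATE "de_DE"
     FROM (VALUES ('a' COLLATE "en_US"), ('b')) t(x)
   UNION ALL
   SELECT (x || 'c') COLLATE "de_DE" FROM foo WHERE length(x) < 10)
SELECT * FROM foo;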
+-- casting +SELECT CAST('42' AS text COLLATE "C"); +ERROR: syntax error at or near "COLLATE" +LINE 1: SELECT CAST('42' AS text COLLATE "C"); + ^ +SELECT a, CAST(b AS varchar) FROM collate_test1 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test2 ORDER BY 2; + a | b +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, CAST(b AS varchar) FROM collate_test3 ORDER BY 2; + a | b +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- propagation of collation in SQL functions (inlined and non-inlined cases) +-- and plpgsql functions too +CREATE FUNCTION mylt (text, text) RETURNS boolean LANGUAGE sql + AS $$ select $1 < $2 $$; +CREATE FUNCTION mylt_noninline (text, text) RETURNS boolean LANGUAGE sql + AS $$ select $1 < $2 limit 1 $$; +CREATE FUNCTION mylt_plpgsql (text, text) RETURNS boolean LANGUAGE plpgsql + AS $$ begin return $1 < $2; end $$; +SELECT a.b AS a, b.b AS b, a.b < b.b AS lt, + mylt(a.b, b.b), mylt_noninline(a.b, b.b), mylt_plpgsql(a.b, b.b) +FROM collate_test1 a, collate_test1 b +ORDER BY a.b, b.b; + a | b | lt | mylt | mylt_noninline | mylt_plpgsql +-----+-----+----+------+----------------+-------------- + abc | abc | f | f | f | f + abc | ABC | t | t | t | t + abc | äbc | t | t | t | t + abc | bbc | t | t | t | t + ABC | abc | f | f | f | f + ABC | ABC | f | f | f | f + ABC | äbc | t | t | t | t + ABC | bbc | t | t | t | t + äbc | abc | f | f | f | f + äbc | ABC | f | f | f | f + äbc | äbc | f | f | f | f + äbc | bbc | t | t | t | t + bbc | abc | f | f | f | f + bbc | ABC | f | f | f | f + bbc | äbc | f | f | f | f + bbc | bbc | f | f | f | f +(16 rows) + +SELECT a.b AS a, b.b AS b, a.b < b.b COLLATE "C" AS lt, + mylt(a.b, b.b COLLATE "C"), mylt_noninline(a.b, b.b COLLATE "C"), + mylt_plpgsql(a.b, b.b COLLATE "C") +FROM collate_test1 a, collate_test1 b +ORDER BY a.b, b.b; + a | b | lt | mylt | mylt_noninline | mylt_plpgsql +-----+-----+----+------+----------------+-------------- + abc | abc | f | f | f | f + abc | ABC | f | f | f | f + abc | äbc | t | t | t | t + abc | bbc | t | t | t | t + ABC | abc | t | t | t | t + ABC | ABC | f | f | f | f + ABC | äbc | t | t | t | t + ABC | bbc | t | t | t | t + äbc | abc | f | f | f | f + äbc | ABC | f | f | f | f + äbc | äbc | f | f | f | f + äbc | bbc | f | f | f | f + bbc | abc | f | f | f | f + bbc | ABC | f | f | f | f + bbc | äbc | t | t | t | t + bbc | bbc | f | f | f | f +(16 rows) + +-- collation override in plpgsql +CREATE FUNCTION mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$ +declare + xx text := x; + yy text := y; +begin + return xx < yy; +end +$$; +SELECT mylt2('a', 'B' collate "en_US") as t, mylt2('a', 'B' collate "C") as f; + t | f +---+--- + t | f +(1 row) + +CREATE OR REPLACE FUNCTION + mylt2 (x text, y text) RETURNS boolean LANGUAGE plpgsql AS $$ +declare + xx text COLLATE "POSIX" := x; + yy text := y; +begin + return xx < yy; +end +$$; +SELECT mylt2('a', 'B') as f; + f +--- + f +(1 row) + +SELECT mylt2('a', 'B' collate "C") as fail; -- conflicting collations +ERROR: could not determine which collation to use for string comparison +HINT: Use the COLLATE clause to set the collation explicitly. 
+CONTEXT: PL/pgSQL function mylt2(text,text) line 6 at RETURN +SELECT mylt2('a', 'B' collate "POSIX") as f; + f +--- + f +(1 row) + +-- polymorphism +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test1)) ORDER BY 1; + unnest +-------- + abc + ABC + äbc + bbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test2)) ORDER BY 1; + unnest +-------- + abc + ABC + bbc + äbc +(4 rows) + +SELECT * FROM unnest((SELECT array_agg(b ORDER BY b) FROM collate_test3)) ORDER BY 1; + unnest +-------- + ABC + abc + bbc + äbc +(4 rows) + +CREATE FUNCTION dup (anyelement) RETURNS anyelement + AS 'select $1' LANGUAGE sql; +SELECT a, dup(b) FROM collate_test1 ORDER BY 2; + a | dup +---+----- + 1 | abc + 4 | ABC + 2 | äbc + 3 | bbc +(4 rows) + +SELECT a, dup(b) FROM collate_test2 ORDER BY 2; + a | dup +---+----- + 1 | abc + 4 | ABC + 3 | bbc + 2 | äbc +(4 rows) + +SELECT a, dup(b) FROM collate_test3 ORDER BY 2; + a | dup +---+----- + 4 | ABC + 1 | abc + 3 | bbc + 2 | äbc +(4 rows) + +-- indexes +CREATE INDEX collate_test1_idx1 ON collate_test1 (b); +CREATE INDEX collate_test1_idx2 ON collate_test1 (b COLLATE "C"); +CREATE INDEX collate_test1_idx3 ON collate_test1 ((b COLLATE "C")); -- this is different grammatically +CREATE INDEX collate_test1_idx4 ON collate_test1 (((b||'foo') COLLATE "POSIX")); +CREATE INDEX collate_test1_idx5 ON collate_test1 (a COLLATE "C"); -- fail +ERROR: collations are not supported by type integer +CREATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C")); -- fail +ERROR: collations are not supported by type integer +LINE 1: ...ATE INDEX collate_test1_idx6 ON collate_test1 ((a COLLATE "C... + ^ +SELECT relname, pg_get_indexdef(oid) FROM pg_class WHERE relname LIKE 'collate_test%_idx%' ORDER BY 1; + relname | pg_get_indexdef +--------------------+------------------------------------------------------------------------------------------------------------------- + collate_test1_idx1 | CREATE INDEX collate_test1_idx1 ON collate_tests.collate_test1 USING btree (b) + collate_test1_idx2 | CREATE INDEX collate_test1_idx2 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx3 | CREATE INDEX collate_test1_idx3 ON collate_tests.collate_test1 USING btree (b COLLATE "C") + collate_test1_idx4 | CREATE INDEX collate_test1_idx4 ON collate_tests.collate_test1 USING btree (((b || 'foo'::text)) COLLATE "POSIX") +(4 rows) + +-- schema manipulation commands +CREATE ROLE regress_test_role; +CREATE SCHEMA test_schema; +-- We need to do this this way to cope with varying names for encodings: +do $$ +BEGIN + EXECUTE 'CREATE COLLATION test0 (locale = ' || + quote_literal((SELECT datcollate FROM pg_database WHERE datname = current_database())) || ');'; +END +$$; +CREATE COLLATION test0 FROM "C"; -- fail, duplicate name +ERROR: collation "test0" already exists +CREATE COLLATION IF NOT EXISTS test0 FROM "C"; -- ok, skipped +NOTICE: collation "test0" already exists, skipping +CREATE COLLATION IF NOT EXISTS test0 (locale = 'foo'); -- ok, skipped +NOTICE: collation "test0" for encoding "WIN1252" already exists, skipping +do $$ +BEGIN + EXECUTE 'CREATE COLLATION test1 (lc_collate = ' || + quote_literal((SELECT datcollate FROM pg_database WHERE datname = current_database())) || + ', lc_ctype = ' || + quote_literal((SELECT datctype FROM pg_database WHERE datname = current_database())) || ');'; +END +$$; +CREATE COLLATION test3 (lc_collate = 'en_US.utf8'); -- fail, need lc_ctype +ERROR: parameter "lc_ctype" must be specified +CREATE 
COLLATION testx (locale = 'nonsense'); -- fail +ERROR: could not create locale "nonsense": No such file or directory +DETAIL: The operating system could not find any locale data for the locale name "nonsense". +CREATE COLLATION test4 FROM nonsense; +ERROR: collation "nonsense" for encoding "WIN1252" does not exist +CREATE COLLATION test5 FROM test0; +SELECT collname FROM pg_collation WHERE collname LIKE 'test%' ORDER BY 1; + collname +---------- + test0 + test1 + test5 +(3 rows) + +ALTER COLLATION test1 RENAME TO test11; +ALTER COLLATION test0 RENAME TO test11; -- fail +ERROR: collation "test11" for encoding "WIN1252" already exists in schema "collate_tests" +ALTER COLLATION test1 RENAME TO test22; -- fail +ERROR: collation "test1" for encoding "WIN1252" does not exist +ALTER COLLATION test11 OWNER TO regress_test_role; +ALTER COLLATION test11 OWNER TO nonsense; +ERROR: role "nonsense" does not exist +ALTER COLLATION test11 SET SCHEMA test_schema; +COMMENT ON COLLATION test0 IS 'US English'; +SELECT collname, nspname, obj_description(pg_collation.oid, 'pg_collation') + FROM pg_collation JOIN pg_namespace ON (collnamespace = pg_namespace.oid) + WHERE collname LIKE 'test%' + ORDER BY 1; + collname | nspname | obj_description +----------+---------------+----------------- + test0 | collate_tests | US English + test11 | test_schema | + test5 | collate_tests | +(3 rows) + +DROP COLLATION test0, test_schema.test11, test5; +DROP COLLATION test0; -- fail +ERROR: collation "test0" for encoding "WIN1252" does not exist +DROP COLLATION IF EXISTS test0; +NOTICE: collation "test0" does not exist, skipping +SELECT collname FROM pg_collation WHERE collname LIKE 'test%'; + collname +---------- +(0 rows) + +DROP SCHEMA test_schema; +DROP ROLE regress_test_role; +-- ALTER +ALTER COLLATION "en_US" REFRESH VERSION; +NOTICE: version has not changed +-- also test for database while we are here +SELECT current_database() AS datname \gset +ALTER DATABASE :"datname" REFRESH COLLATION VERSION; +NOTICE: version has not changed +-- dependencies +CREATE COLLATION test0 FROM "C"; +CREATE TABLE collate_dep_test1 (a int, b text COLLATE test0); +CREATE DOMAIN collate_dep_dom1 AS text COLLATE test0; +CREATE TYPE collate_dep_test2 AS (x int, y text COLLATE test0); +CREATE VIEW collate_dep_test3 AS SELECT text 'foo' COLLATE test0 AS foo; +CREATE TABLE collate_dep_test4t (a int, b text); +CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0); +DROP COLLATION test0 RESTRICT; -- fail +ERROR: cannot drop collation test0 because other objects depend on it +DETAIL: column b of table collate_dep_test1 depends on collation test0 +type collate_dep_dom1 depends on collation test0 +column y of composite type collate_dep_test2 depends on collation test0 +view collate_dep_test3 depends on collation test0 +index collate_dep_test4i depends on collation test0 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
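The REFRESH VERSION commands above report that nothing changed because the version recorded in the catalogs still matches what the collation provider reports. A sketch of how the two values can be compared directly; results vary by platform, and the provider version may be NULL for libc locales that are not versioned:

SELECT collname,
       collversion                       AS recorded_version,
       pg_collation_actual_version(oid)  AS provider_version
FROM pg_collation
WHERE collname = 'en_US';

SELECT datname,
       datcollversion                              AS recorded_version,
       pg_database_collation_actual_version(oid)   AS provider_version
FROM pg_database
WHERE datname = current_database();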
+DROP COLLATION test0 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to column b of table collate_dep_test1 +drop cascades to type collate_dep_dom1 +drop cascades to column y of composite type collate_dep_test2 +drop cascades to view collate_dep_test3 +drop cascades to index collate_dep_test4i +\d collate_dep_test1 + Table "collate_tests.collate_dep_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + +\d collate_dep_test2 + Composite type "collate_tests.collate_dep_test2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + x | integer | | | + +DROP TABLE collate_dep_test1, collate_dep_test4t; +DROP TYPE collate_dep_test2; +-- test range types and collations +create type textrange_c as range(subtype=text, collation="C"); +create type textrange_en_us as range(subtype=text, collation="en_US"); +select textrange_c('A','Z') @> 'b'::text; + ?column? +---------- + f +(1 row) + +select textrange_en_us('A','Z') @> 'b'::text; + ?column? +---------- + t +(1 row) + +drop type textrange_c; +drop type textrange_en_us; +-- nondeterministic collations +-- (not supported with libc provider) +CREATE COLLATION ctest_det (locale = 'en_US', deterministic = true); +CREATE COLLATION ctest_nondet (locale = 'en_US', deterministic = false); +ERROR: nondeterministic collations not supported with this provider +-- cleanup +SET client_min_messages TO warning; +DROP SCHEMA collate_tests CASCADE; diff --git a/src/test/regress/expected/collate.windows.win1252_1.out b/src/test/regress/expected/collate.windows.win1252_1.out new file mode 100644 index 0000000..879b12a --- /dev/null +++ b/src/test/regress/expected/collate.windows.win1252_1.out @@ -0,0 +1,13 @@ +/* + * This test is meant to run on Windows systems that has successfully + * run pg_import_system_collations(). Also, the database must have + * WIN1252 encoding, because of the locales' own encodings. Because + * of this, some test are lost from UTF-8 version, such as Turkish + * dotted and undotted 'i'. 
+ */ +SELECT getdatabaseencoding() <> 'WIN1252' OR + (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE') AND collencoding = pg_char_to_encoding('WIN1252')) <> 3 OR + (version() !~ 'Visual C\+\+' AND version() !~ 'mingw32' AND version() !~ 'windows') + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/combocid.out b/src/test/regress/expected/combocid.out new file mode 100644 index 0000000..2bf080b --- /dev/null +++ b/src/test/regress/expected/combocid.out @@ -0,0 +1,169 @@ +-- +-- Tests for some likely failure cases with combo cmin/cmax mechanism +-- +CREATE TEMP TABLE combocidtest (foobar int); +BEGIN; +-- a few dummy ops to push up the CommandId counter +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest VALUES (1); +INSERT INTO combocidtest VALUES (2); +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 10 | 1 + (0,2) | 11 | 2 +(2 rows) + +SAVEPOINT s1; +UPDATE combocidtest SET foobar = foobar + 10; +-- here we should see only updated tuples +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,3) | 12 | 11 + (0,4) | 12 | 12 +(2 rows) + +ROLLBACK TO s1; +-- now we should see old tuples, but with combo CIDs starting at 0 +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 0 | 1 + (0,2) | 1 | 2 +(2 rows) + +COMMIT; +-- combo data is not there anymore, but should still see tuples +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 0 | 1 + (0,2) | 1 | 2 +(2 rows) + +-- Test combo CIDs with portals +BEGIN; +INSERT INTO combocidtest VALUES (333); +DECLARE c CURSOR FOR SELECT ctid,cmin,* FROM combocidtest; +DELETE FROM combocidtest; +FETCH ALL FROM c; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 1 | 1 + (0,2) | 1 | 2 + (0,5) | 0 | 333 +(3 rows) + +ROLLBACK; +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 1 | 1 + (0,2) | 1 | 2 +(2 rows) + +-- check behavior with locked tuples +BEGIN; +-- a few dummy ops to push up the CommandId counter +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest SELECT 1 LIMIT 0; +INSERT INTO combocidtest VALUES (444); +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 1 | 1 + (0,2) | 1 | 2 + (0,6) | 10 | 444 +(3 rows) + +SAVEPOINT s1; +-- this doesn't affect cmin +SELECT ctid,cmin,* FROM combocidtest FOR UPDATE; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 1 | 1 + (0,2) | 1 | 2 + (0,6) | 10 | 444 +(3 rows) + +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 1 | 1 + (0,2) | 1 | 2 + (0,6) | 10 | 444 +(3 rows) 
+ +-- but this does +UPDATE combocidtest SET foobar = foobar + 10; +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,7) | 12 | 11 + (0,8) | 12 | 12 + (0,9) | 12 | 454 +(3 rows) + +ROLLBACK TO s1; +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 12 | 1 + (0,2) | 12 | 2 + (0,6) | 0 | 444 +(3 rows) + +COMMIT; +SELECT ctid,cmin,* FROM combocidtest; + ctid | cmin | foobar +-------+------+-------- + (0,1) | 12 | 1 + (0,2) | 12 | 2 + (0,6) | 0 | 444 +(3 rows) + +-- test for bug reported in +-- CABRT9RC81YUf1=jsmWopcKJEro=VoeG2ou6sPwyOUTx_qteRsg@mail.gmail.com +CREATE TABLE IF NOT EXISTS testcase( + id int PRIMARY KEY, + balance numeric +); +INSERT INTO testcase VALUES (1, 0); +BEGIN; +SELECT * FROM testcase WHERE testcase.id = 1 FOR UPDATE; + id | balance +----+--------- + 1 | 0 +(1 row) + +UPDATE testcase SET balance = balance + 400 WHERE id=1; +SAVEPOINT subxact; +UPDATE testcase SET balance = balance - 100 WHERE id=1; +ROLLBACK TO SAVEPOINT subxact; +-- should return one tuple +SELECT * FROM testcase WHERE id = 1 FOR UPDATE; + id | balance +----+--------- + 1 | 400 +(1 row) + +ROLLBACK; +DROP TABLE testcase; diff --git a/src/test/regress/expected/comments.out b/src/test/regress/expected/comments.out new file mode 100644 index 0000000..33f612e --- /dev/null +++ b/src/test/regress/expected/comments.out @@ -0,0 +1,65 @@ +-- +-- COMMENTS +-- +SELECT 'trailing' AS first; -- trailing single line + first +---------- + trailing +(1 row) + +SELECT /* embedded single line */ 'embedded' AS second; + second +---------- + embedded +(1 row) + +SELECT /* both embedded and trailing single line */ 'both' AS third; -- trailing single line + third +------- + both +(1 row) + +SELECT 'before multi-line' AS fourth; + fourth +------------------- + before multi-line +(1 row) + +/* This is an example of SQL which should not execute: + * select 'multi-line'; + */ +SELECT 'after multi-line' AS fifth; + fifth +------------------ + after multi-line +(1 row) + +-- +-- Nested comments +-- +/* +SELECT 'trailing' as x1; -- inside block comment +*/ +/* This block comment surrounds a query which itself has a block comment... +SELECT /* embedded single line */ 'embedded' AS x2; +*/ +SELECT -- continued after the following block comments... +/* Deeply nested comment. + This includes a single apostrophe to make sure we aren't decoding this part as a string. +SELECT 'deep nest' AS n1; +/* Second level of nesting... +SELECT 'deeper nest' as n2; +/* Third level of nesting... +SELECT 'deepest nest' as n3; +*/ +Hoo boy. Still two deep... +*/ +Now just one deep... 
+*/ +'deeply nested example' AS sixth; + sixth +----------------------- + deeply nested example +(1 row) + +/* and this is the end of the file */ diff --git a/src/test/regress/expected/compression.out b/src/test/regress/expected/compression.out new file mode 100644 index 0000000..834b755 --- /dev/null +++ b/src/test/regress/expected/compression.out @@ -0,0 +1,362 @@ +\set HIDE_TOAST_COMPRESSION false +-- ensure we get stable results regardless of installation's default +SET default_toast_compression = 'pglz'; +-- test creating table with compression method +CREATE TABLE cmdata(f1 text COMPRESSION pglz); +CREATE INDEX idx ON cmdata(f1); +INSERT INTO cmdata VALUES(repeat('1234567890', 1000)); +\d+ cmdata + Table "public.cmdata" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | pglz | | +Indexes: + "idx" btree (f1) + +CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4); +INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004)); +\d+ cmdata1 + Table "public.cmdata1" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | lz4 | | + +-- verify stored compression method in the data +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz +(1 row) + +SELECT pg_column_compression(f1) FROM cmdata1; + pg_column_compression +----------------------- + lz4 +(1 row) + +-- decompress data slice +SELECT SUBSTR(f1, 200, 5) FROM cmdata; + substr +-------- + 01234 +(1 row) + +SELECT SUBSTR(f1, 2000, 50) FROM cmdata1; + substr +---------------------------------------------------- + 01234567890123456789012345678901234567890123456789 +(1 row) + +-- copy with table creation +SELECT * INTO cmmove1 FROM cmdata; +\d+ cmmove1 + Table "public.cmmove1" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | | | + +SELECT pg_column_compression(f1) FROM cmmove1; + pg_column_compression +----------------------- + pglz +(1 row) + +-- copy to existing table +CREATE TABLE cmmove3(f1 text COMPRESSION pglz); +INSERT INTO cmmove3 SELECT * FROM cmdata; +INSERT INTO cmmove3 SELECT * FROM cmdata1; +SELECT pg_column_compression(f1) FROM cmmove3; + pg_column_compression +----------------------- + pglz + lz4 +(2 rows) + +-- test LIKE INCLUDING COMPRESSION +CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION); +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | lz4 | | + +DROP TABLE cmdata2; +-- try setting compression for incompressible data type +CREATE TABLE cmdata2 (f1 int COMPRESSION pglz); +ERROR: column data type integer does not support compression +-- update using datum from different table +CREATE TABLE cmmove2(f1 text COMPRESSION pglz); +INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004)); +SELECT pg_column_compression(f1) FROM cmmove2; + pg_column_compression +----------------------- + pglz +(1 row) + +UPDATE cmmove2 
SET f1 = cmdata1.f1 FROM cmdata1; +SELECT pg_column_compression(f1) FROM cmmove2; + pg_column_compression +----------------------- + lz4 +(1 row) + +-- test externally stored compressed data +CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS +'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g'; +CREATE TABLE cmdata2 (f1 text COMPRESSION pglz); +INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000); +SELECT pg_column_compression(f1) FROM cmdata2; + pg_column_compression +----------------------- + pglz +(1 row) + +INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000); +SELECT pg_column_compression(f1) FROM cmdata1; + pg_column_compression +----------------------- + lz4 + lz4 +(2 rows) + +SELECT SUBSTR(f1, 200, 5) FROM cmdata1; + substr +-------- + 01234 + 79026 +(2 rows) + +SELECT SUBSTR(f1, 200, 5) FROM cmdata2; + substr +-------- + 79026 +(1 row) + +DROP TABLE cmdata2; +--test column type update varlena/non-varlena +CREATE TABLE cmdata2 (f1 int); +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+---------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | integer | | | | plain | | | + +ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | character varying | | | | extended | | | + +ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+---------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | integer | | | | plain | | | + +--changing column storage should not impact the compression method +--but the data should not be compressed +ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | character varying | | | | extended | pglz | | + +ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | character varying | | | | plain | pglz | | + +INSERT INTO cmdata2 VALUES (repeat('123456789', 800)); +SELECT pg_column_compression(f1) FROM cmdata2; + pg_column_compression +----------------------- + +(1 row) + +-- test compression with materialized view +CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1; +\d+ compressmv + Materialized view "public.compressmv" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + x | text | | | | extended | | | +View definition: + SELECT f1 AS x + FROM cmdata1; + +SELECT 
pg_column_compression(f1) FROM cmdata1; + pg_column_compression +----------------------- + lz4 + lz4 +(2 rows) + +SELECT pg_column_compression(x) FROM compressmv; + pg_column_compression +----------------------- + lz4 + lz4 +(2 rows) + +-- test compression with partition +CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1); +CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE cmpart2(f1 text COMPRESSION pglz); +ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO cmpart VALUES (repeat('123456789', 1004)); +INSERT INTO cmpart VALUES (repeat('123456789', 4004)); +SELECT pg_column_compression(f1) FROM cmpart1; + pg_column_compression +----------------------- + lz4 +(1 row) + +SELECT pg_column_compression(f1) FROM cmpart2; + pg_column_compression +----------------------- + pglz +(1 row) + +-- test compression with inheritance, error +CREATE TABLE cminh() INHERITS(cmdata, cmdata1); +NOTICE: merging multiple inherited definitions of column "f1" +ERROR: column "f1" has a compression method conflict +DETAIL: pglz versus lz4 +CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); +NOTICE: merging column "f1" with inherited definition +ERROR: column "f1" has a compression method conflict +DETAIL: pglz versus lz4 +-- test default_toast_compression GUC +SET default_toast_compression = ''; +ERROR: invalid value for parameter "default_toast_compression": "" +HINT: Available values: pglz, lz4. +SET default_toast_compression = 'I do not exist compression'; +ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression" +HINT: Available values: pglz, lz4. +SET default_toast_compression = 'lz4'; +SET default_toast_compression = 'pglz'; +-- test alter compression method +ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4; +INSERT INTO cmdata VALUES (repeat('123456789', 4004)); +\d+ cmdata + Table "public.cmdata" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | lz4 | | +Indexes: + "idx" btree (f1) + +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz + lz4 +(2 rows) + +ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | character varying | | | | plain | | | + +-- test alter compression method for materialized views +ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4; +\d+ compressmv + Materialized view "public.compressmv" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + x | text | | | | extended | lz4 | | +View definition: + SELECT f1 AS x + FROM cmdata1; + +-- test alter compression method for partitioned tables +ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz; +ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4; +-- new data should be compressed with the current compression method +INSERT INTO cmpart VALUES (repeat('123456789', 1004)); +INSERT INTO cmpart VALUES 
(repeat('123456789', 4004)); +SELECT pg_column_compression(f1) FROM cmpart1; + pg_column_compression +----------------------- + lz4 + pglz +(2 rows) + +SELECT pg_column_compression(f1) FROM cmpart2; + pg_column_compression +----------------------- + pglz + lz4 +(2 rows) + +-- VACUUM FULL does not recompress +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz + lz4 +(2 rows) + +VACUUM FULL cmdata; +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz + lz4 +(2 rows) + +-- test expression index +DROP TABLE cmdata2; +CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4); +CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2)); +INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM +generate_series(1, 50) g), VERSION()); +-- check data is ok +SELECT length(f1) FROM cmdata; + length +-------- + 10000 + 36036 +(2 rows) + +SELECT length(f1) FROM cmdata1; + length +-------- + 10040 + 12449 +(2 rows) + +SELECT length(f1) FROM cmmove1; + length +-------- + 10000 +(1 row) + +SELECT length(f1) FROM cmmove2; + length +-------- + 10040 +(1 row) + +SELECT length(f1) FROM cmmove3; + length +-------- + 10000 + 10040 +(2 rows) + +CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails +ERROR: invalid compression method "i_do_not_exist_compression" +CREATE TABLE badcompresstbl (a text); +ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails +ERROR: invalid compression method "i_do_not_exist_compression" +DROP TABLE badcompresstbl; +\set HIDE_TOAST_COMPRESSION true diff --git a/src/test/regress/expected/compression_1.out b/src/test/regress/expected/compression_1.out new file mode 100644 index 0000000..ddcd137 --- /dev/null +++ b/src/test/regress/expected/compression_1.out @@ -0,0 +1,356 @@ +\set HIDE_TOAST_COMPRESSION false +-- ensure we get stable results regardless of installation's default +SET default_toast_compression = 'pglz'; +-- test creating table with compression method +CREATE TABLE cmdata(f1 text COMPRESSION pglz); +CREATE INDEX idx ON cmdata(f1); +INSERT INTO cmdata VALUES(repeat('1234567890', 1000)); +\d+ cmdata + Table "public.cmdata" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | pglz | | +Indexes: + "idx" btree (f1) + +CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4); +ERROR: compression method lz4 not supported +DETAIL: This functionality requires the server to be built with lz4 support. 
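+-- Sketch, not part of the recorded output: this alternate expected file
+-- covers servers built without --with-lz4.  Whether lz4 is available can be
+-- checked from SQL by inspecting the allowed values of the
+-- default_toast_compression GUC, e.g.:
+--   SELECT enumvals FROM pg_settings
+--    WHERE name = 'default_toast_compression';
+-- which lists only {pglz} on builds without lz4 support.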
+INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004)); +ERROR: relation "cmdata1" does not exist +LINE 1: INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004)); + ^ +\d+ cmdata1 +-- verify stored compression method in the data +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz +(1 row) + +SELECT pg_column_compression(f1) FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: SELECT pg_column_compression(f1) FROM cmdata1; + ^ +-- decompress data slice +SELECT SUBSTR(f1, 200, 5) FROM cmdata; + substr +-------- + 01234 +(1 row) + +SELECT SUBSTR(f1, 2000, 50) FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: SELECT SUBSTR(f1, 2000, 50) FROM cmdata1; + ^ +-- copy with table creation +SELECT * INTO cmmove1 FROM cmdata; +\d+ cmmove1 + Table "public.cmmove1" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | | | + +SELECT pg_column_compression(f1) FROM cmmove1; + pg_column_compression +----------------------- + pglz +(1 row) + +-- copy to existing table +CREATE TABLE cmmove3(f1 text COMPRESSION pglz); +INSERT INTO cmmove3 SELECT * FROM cmdata; +INSERT INTO cmmove3 SELECT * FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: INSERT INTO cmmove3 SELECT * FROM cmdata1; + ^ +SELECT pg_column_compression(f1) FROM cmmove3; + pg_column_compression +----------------------- + pglz +(1 row) + +-- test LIKE INCLUDING COMPRESSION +CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION); +ERROR: relation "cmdata1" does not exist +LINE 1: CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION); + ^ +\d+ cmdata2 +DROP TABLE cmdata2; +ERROR: table "cmdata2" does not exist +-- try setting compression for incompressible data type +CREATE TABLE cmdata2 (f1 int COMPRESSION pglz); +ERROR: column data type integer does not support compression +-- update using datum from different table +CREATE TABLE cmmove2(f1 text COMPRESSION pglz); +INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004)); +SELECT pg_column_compression(f1) FROM cmmove2; + pg_column_compression +----------------------- + pglz +(1 row) + +UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1; + ^ +SELECT pg_column_compression(f1) FROM cmmove2; + pg_column_compression +----------------------- + pglz +(1 row) + +-- test externally stored compressed data +CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS +'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g'; +CREATE TABLE cmdata2 (f1 text COMPRESSION pglz); +INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000); +SELECT pg_column_compression(f1) FROM cmdata2; + pg_column_compression +----------------------- + pglz +(1 row) + +INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000); +ERROR: relation "cmdata1" does not exist +LINE 1: INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000); + ^ +SELECT pg_column_compression(f1) FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: SELECT pg_column_compression(f1) FROM cmdata1; + ^ +SELECT SUBSTR(f1, 200, 5) FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: SELECT SUBSTR(f1, 200, 5) FROM cmdata1; + ^ +SELECT SUBSTR(f1, 200, 5) FROM cmdata2; + substr +-------- + 79026 +(1 row) + +DROP TABLE cmdata2; +--test 
column type update varlena/non-varlena +CREATE TABLE cmdata2 (f1 int); +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+---------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | integer | | | | plain | | | + +ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | character varying | | | | extended | | | + +ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+---------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | integer | | | | plain | | | + +--changing column storage should not impact the compression method +--but the data should not be compressed +ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | character varying | | | | extended | pglz | | + +ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | character varying | | | | plain | pglz | | + +INSERT INTO cmdata2 VALUES (repeat('123456789', 800)); +SELECT pg_column_compression(f1) FROM cmdata2; + pg_column_compression +----------------------- + +(1 row) + +-- test compression with materialized view +CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: ...TE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1; + ^ +\d+ compressmv +SELECT pg_column_compression(f1) FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: SELECT pg_column_compression(f1) FROM cmdata1; + ^ +SELECT pg_column_compression(x) FROM compressmv; +ERROR: relation "compressmv" does not exist +LINE 1: SELECT pg_column_compression(x) FROM compressmv; + ^ +-- test compression with partition +CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1); +ERROR: compression method lz4 not supported +DETAIL: This functionality requires the server to be built with lz4 support. 
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0); +ERROR: relation "cmpart" does not exist +CREATE TABLE cmpart2(f1 text COMPRESSION pglz); +ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +ERROR: relation "cmpart" does not exist +INSERT INTO cmpart VALUES (repeat('123456789', 1004)); +ERROR: relation "cmpart" does not exist +LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004)); + ^ +INSERT INTO cmpart VALUES (repeat('123456789', 4004)); +ERROR: relation "cmpart" does not exist +LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004)); + ^ +SELECT pg_column_compression(f1) FROM cmpart1; +ERROR: relation "cmpart1" does not exist +LINE 1: SELECT pg_column_compression(f1) FROM cmpart1; + ^ +SELECT pg_column_compression(f1) FROM cmpart2; + pg_column_compression +----------------------- +(0 rows) + +-- test compression with inheritance, error +CREATE TABLE cminh() INHERITS(cmdata, cmdata1); +ERROR: relation "cmdata1" does not exist +CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); +NOTICE: merging column "f1" with inherited definition +ERROR: column "f1" has a compression method conflict +DETAIL: pglz versus lz4 +-- test default_toast_compression GUC +SET default_toast_compression = ''; +ERROR: invalid value for parameter "default_toast_compression": "" +HINT: Available values: pglz. +SET default_toast_compression = 'I do not exist compression'; +ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression" +HINT: Available values: pglz. +SET default_toast_compression = 'lz4'; +ERROR: invalid value for parameter "default_toast_compression": "lz4" +HINT: Available values: pglz. +SET default_toast_compression = 'pglz'; +-- test alter compression method +ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4; +ERROR: compression method lz4 not supported +DETAIL: This functionality requires the server to be built with lz4 support. +INSERT INTO cmdata VALUES (repeat('123456789', 4004)); +\d+ cmdata + Table "public.cmdata" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+------+-----------+----------+---------+----------+-------------+--------------+------------- + f1 | text | | | | extended | pglz | | +Indexes: + "idx" btree (f1) + +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz + pglz +(2 rows) + +ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default; +\d+ cmdata2 + Table "public.cmdata2" + Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description +--------+-------------------+-----------+----------+---------+---------+-------------+--------------+------------- + f1 | character varying | | | | plain | | | + +-- test alter compression method for materialized views +ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4; +ERROR: relation "compressmv" does not exist +\d+ compressmv +-- test alter compression method for partitioned tables +ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz; +ERROR: relation "cmpart1" does not exist +ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4; +ERROR: compression method lz4 not supported +DETAIL: This functionality requires the server to be built with lz4 support. 
+-- new data should be compressed with the current compression method +INSERT INTO cmpart VALUES (repeat('123456789', 1004)); +ERROR: relation "cmpart" does not exist +LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004)); + ^ +INSERT INTO cmpart VALUES (repeat('123456789', 4004)); +ERROR: relation "cmpart" does not exist +LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004)); + ^ +SELECT pg_column_compression(f1) FROM cmpart1; +ERROR: relation "cmpart1" does not exist +LINE 1: SELECT pg_column_compression(f1) FROM cmpart1; + ^ +SELECT pg_column_compression(f1) FROM cmpart2; + pg_column_compression +----------------------- +(0 rows) + +-- VACUUM FULL does not recompress +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz + pglz +(2 rows) + +VACUUM FULL cmdata; +SELECT pg_column_compression(f1) FROM cmdata; + pg_column_compression +----------------------- + pglz + pglz +(2 rows) + +-- test expression index +DROP TABLE cmdata2; +CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4); +ERROR: compression method lz4 not supported +DETAIL: This functionality requires the server to be built with lz4 support. +CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2)); +ERROR: relation "cmdata2" does not exist +INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM +generate_series(1, 50) g), VERSION()); +ERROR: relation "cmdata2" does not exist +LINE 1: INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEX... + ^ +-- check data is ok +SELECT length(f1) FROM cmdata; + length +-------- + 10000 + 36036 +(2 rows) + +SELECT length(f1) FROM cmdata1; +ERROR: relation "cmdata1" does not exist +LINE 1: SELECT length(f1) FROM cmdata1; + ^ +SELECT length(f1) FROM cmmove1; + length +-------- + 10000 +(1 row) + +SELECT length(f1) FROM cmmove2; + length +-------- + 10040 +(1 row) + +SELECT length(f1) FROM cmmove3; + length +-------- + 10000 +(1 row) + +CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails +ERROR: invalid compression method "i_do_not_exist_compression" +CREATE TABLE badcompresstbl (a text); +ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails +ERROR: invalid compression method "i_do_not_exist_compression" +DROP TABLE badcompresstbl; +\set HIDE_TOAST_COMPRESSION true diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out new file mode 100644 index 0000000..e6f6602 --- /dev/null +++ b/src/test/regress/expected/constraints.out @@ -0,0 +1,789 @@ +-- +-- CONSTRAINTS +-- Constraints can be specified with: +-- - DEFAULT clause +-- - CHECK clauses +-- - PRIMARY KEY clauses +-- - UNIQUE clauses +-- - EXCLUDE clauses +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +-- +-- DEFAULT syntax +-- +CREATE TABLE DEFAULT_TBL (i int DEFAULT 100, + x text DEFAULT 'vadim', f float8 DEFAULT 123.456); +INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613); +INSERT INTO DEFAULT_TBL VALUES (1, 'bruce'); +INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654); +INSERT INTO DEFAULT_TBL (x) VALUES ('marc'); +INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0); +SELECT * FROM DEFAULT_TBL; + i | x | f +-----+--------+--------- + 1 | thomas | 57.0613 + 1 | bruce | 123.456 + 2 | vadim | 987.654 + 100 | marc | 123.456 + 3 | | 1 +(5 rows) + +CREATE SEQUENCE DEFAULT_SEQ; +CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2, + i2 int DEFAULT nextval('default_seq')); 
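+-- Sketch, not part of the recorded output: the default expressions stored
+-- for a table can be inspected through pg_attrdef, e.g.:
+--   SELECT a.attname, pg_get_expr(d.adbin, d.adrelid) AS default_expr
+--     FROM pg_attrdef d
+--     JOIN pg_attribute a ON a.attrelid = d.adrelid AND a.attnum = d.adnum
+--    WHERE d.adrelid = 'defaultexpr_tbl'::regclass
+--    ORDER BY a.attnum;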
+INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); +INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); +SELECT * FROM DEFAULTEXPR_TBL; + i1 | i2 +-----+---- + -1 | -2 + -3 | 1 + 102 | -4 + 102 | +(4 rows) + +-- syntax errors +-- test for extraneous comma +CREATE TABLE error_tbl (i int DEFAULT (100, )); +ERROR: syntax error at or near ")" +LINE 1: CREATE TABLE error_tbl (i int DEFAULT (100, )); + ^ +-- this will fail because gram.y uses b_expr not a_expr for defaults, +-- to avoid a shift/reduce conflict that arises from NOT NULL being +-- part of the column definition syntax: +CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); +ERROR: syntax error at or near "IN" +LINE 1: CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); + ^ +-- this should work, however: +CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2))); +DROP TABLE error_tbl; +-- +-- CHECK syntax +-- +CREATE TABLE CHECK_TBL (x int, + CONSTRAINT CHECK_CON CHECK (x > 3)); +INSERT INTO CHECK_TBL VALUES (5); +INSERT INTO CHECK_TBL VALUES (4); +INSERT INTO CHECK_TBL VALUES (3); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +DETAIL: Failing row contains (3). +INSERT INTO CHECK_TBL VALUES (2); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +DETAIL: Failing row contains (2). +INSERT INTO CHECK_TBL VALUES (6); +INSERT INTO CHECK_TBL VALUES (1); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +DETAIL: Failing row contains (1). +SELECT * FROM CHECK_TBL; + x +--- + 5 + 4 + 6 +(3 rows) + +CREATE SEQUENCE CHECK_SEQ; +CREATE TABLE CHECK2_TBL (x int, y text, z int, + CONSTRAINT SEQUENCE_CON + CHECK (x > 3 and y <> 'check failed' and z < 8)); +INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2); +INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (1, x check failed, -2). +INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (5, z check failed, 10). +INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (0, check failed, -2). +INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (6, check failed, 11). +INSERT INTO CHECK2_TBL VALUES (7, 'check ok', 7); +SELECT * from CHECK2_TBL; + x | y | z +---+----------+---- + 4 | check ok | -2 + 7 | check ok | 7 +(2 rows) + +-- +-- Check constraints on INSERT +-- +CREATE SEQUENCE INSERT_SEQ; +CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'), + y TEXT DEFAULT '-NULL-', + z INT DEFAULT -1 * currval('insert_seq'), + CONSTRAINT INSERT_TBL_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8), + CHECK (x + z = 0)); +INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (2, -NULL-, -2). 
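+-- Sketch, not part of the recorded output: the constraints being exercised
+-- here can be listed with their full definitions via pg_get_constraintdef():
+--   SELECT conname, pg_get_constraintdef(oid) AS definition
+--     FROM pg_constraint
+--    WHERE conrelid = 'insert_tbl'::regclass
+--    ORDER BY conname;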
+SELECT * FROM INSERT_TBL; + x | y | z +---+---+--- +(0 rows) + +SELECT 'one' AS one, nextval('insert_seq'); + one | nextval +-----+--------- + one | 1 +(1 row) + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (2, Y, -2). +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_check" +DETAIL: Failing row contains (1, -NULL-, -2). +INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); +INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (5, check failed, -5). +INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 3 | Y | -3 + 7 | -NULL- | -7 + 7 | !check failed | -7 + 4 | -!NULL- | -4 +(4 rows) + +INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_check" +DETAIL: Failing row contains (5, check failed, 4). +INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (5, check failed, -5). +INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 3 | Y | -3 + 7 | -NULL- | -7 + 7 | !check failed | -7 + 4 | -!NULL- | -4 + 5 | !check failed | -5 + 6 | -!NULL- | -6 +(6 rows) + +SELECT 'seven' AS one, nextval('insert_seq'); + one | nextval +-------+--------- + seven | 7 +(1 row) + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (8, Y, -8). +SELECT 'eight' AS one, currval('insert_seq'); + one | currval +-------+--------- + eight | 8 +(1 row) + +-- According to SQL, it is OK to insert a record that gives rise to NULL +-- constraint-condition results. Postgres used to reject this, but it +-- was wrong: +INSERT INTO INSERT_TBL VALUES (null, null, null); +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 3 | Y | -3 + 7 | -NULL- | -7 + 7 | !check failed | -7 + 4 | -!NULL- | -4 + 5 | !check failed | -5 + 6 | -!NULL- | -6 + | | +(7 rows) + +-- +-- Check constraints on system columns +-- +CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, + altitude int, + CHECK (NOT (is_capital AND tableoid::regclass::text = 'sys_col_check_tbl'))); +INSERT INTO SYS_COL_CHECK_TBL VALUES ('Seattle', 'Washington', false, 100); +INSERT INTO SYS_COL_CHECK_TBL VALUES ('Olympia', 'Washington', true, 100); +ERROR: new row for relation "sys_col_check_tbl" violates check constraint "sys_col_check_tbl_check" +DETAIL: Failing row contains (Olympia, Washington, t, 100). 
+SELECT *, tableoid::regclass::text FROM SYS_COL_CHECK_TBL; + city | state | is_capital | altitude | tableoid +---------+------------+------------+----------+------------------- + Seattle | Washington | f | 100 | sys_col_check_tbl +(1 row) + +DROP TABLE SYS_COL_CHECK_TBL; +-- +-- Check constraints on system columns other then TableOid should return error +-- +CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, + altitude int, + CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl'))); +ERROR: system column "ctid" reference in check constraint is invalid +LINE 3: CHECK (NOT (is_capital AND ctid::text = 'sys_col_check... + ^ +-- +-- Check inheritance of defaults and constraints +-- +CREATE TABLE INSERT_CHILD (cx INT default 42, + cy INT CHECK (cy > x)) + INHERITS (INSERT_TBL); +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); +ERROR: new row for relation "insert_child" violates check constraint "insert_child_check" +DETAIL: Failing row contains (7, -NULL-, -7, 42, 6). +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); +ERROR: new row for relation "insert_child" violates check constraint "insert_tbl_check" +DETAIL: Failing row contains (6, -NULL-, -7, 42, 7). +INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7); +ERROR: new row for relation "insert_child" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (6, check failed, -6, 42, 7). +SELECT * FROM INSERT_CHILD; + x | y | z | cx | cy +---+--------+----+----+---- + 7 | -NULL- | -7 | 42 | 11 +(1 row) + +DROP TABLE INSERT_CHILD; +-- +-- Check NO INHERIT type of constraints and inheritance +-- +CREATE TABLE ATACC1 (TEST INT + CHECK (TEST > 0) NO INHERIT); +CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1); +-- check constraint is not there on child +INSERT INTO ATACC2 (TEST) VALUES (-3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST) VALUES (-3); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_test_check" +DETAIL: Failing row contains (-3). +DROP TABLE ATACC1 CASCADE; +NOTICE: drop cascades to table atacc2 +CREATE TABLE ATACC1 (TEST INT, TEST2 INT + CHECK (TEST > 0), CHECK (TEST2 > 10) NO INHERIT); +CREATE TABLE ATACC2 () INHERITS (ATACC1); +-- check constraint is there on child +INSERT INTO ATACC2 (TEST) VALUES (-3); +ERROR: new row for relation "atacc2" violates check constraint "atacc1_test_check" +DETAIL: Failing row contains (-3, null). +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST) VALUES (-3); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_test_check" +DETAIL: Failing row contains (-3, null). +-- check constraint is not there on child +INSERT INTO ATACC2 (TEST2) VALUES (3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST2) VALUES (3); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_test2_check" +DETAIL: Failing row contains (null, 3). 
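+-- Sketch, not part of the recorded output: whether a CHECK constraint is
+-- marked NO INHERIT is visible in pg_constraint, e.g.:
+--   SELECT conrelid::regclass AS rel, conname, connoinherit
+--     FROM pg_constraint
+--    WHERE conrelid IN ('atacc1'::regclass, 'atacc2'::regclass)
+--    ORDER BY conname;
+-- atacc1_test2_check has connoinherit = true, which is why it was not
+-- copied to the child table above.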
+DROP TABLE ATACC1 CASCADE; +NOTICE: drop cascades to table atacc2 +-- +-- Check constraints on INSERT INTO +-- +DELETE FROM INSERT_TBL; +ALTER SEQUENCE INSERT_SEQ RESTART WITH 4; +CREATE TEMP TABLE tmp (xd INT, yd TEXT, zd INT); +INSERT INTO tmp VALUES (null, 'Y', null); +INSERT INTO tmp VALUES (5, '!check failed', null); +INSERT INTO tmp VALUES (null, 'try again', null); +INSERT INTO INSERT_TBL(y) select yd from tmp; +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 4 | Y | -4 + 5 | !check failed | -5 + 6 | try again | -6 +(3 rows) + +INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again'; +INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again'; +INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again'; +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (8, try again, -8). +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 4 | Y | -4 + 5 | !check failed | -5 + 6 | try again | -6 + | try again | + 7 | try again | -7 +(5 rows) + +DROP TABLE tmp; +-- +-- Check constraints on UPDATE +-- +UPDATE INSERT_TBL SET x = NULL WHERE x = 5; +UPDATE INSERT_TBL SET x = 6 WHERE x = 6; +UPDATE INSERT_TBL SET x = -z, z = -x; +UPDATE INSERT_TBL SET x = z, z = x; +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (-4, Y, 4). +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 4 | Y | -4 + | try again | + 7 | try again | -7 + 5 | !check failed | + 6 | try again | -6 +(5 rows) + +-- DROP TABLE INSERT_TBL; +-- +-- Check constraints on COPY FROM +-- +CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, + CONSTRAINT COPY_CON + CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); +\set filename :abs_srcdir '/data/constro.data' +COPY COPY_TBL FROM :'filename'; +SELECT * FROM COPY_TBL; + x | y | z +---+---------------+--- + 4 | !check failed | 5 + 6 | OK | 4 +(2 rows) + +\set filename :abs_srcdir '/data/constrf.data' +COPY COPY_TBL FROM :'filename'; +ERROR: new row for relation "copy_tbl" violates check constraint "copy_con" +DETAIL: Failing row contains (7, check failed, 6). +CONTEXT: COPY copy_tbl, line 2: "7 check failed 6" +SELECT * FROM COPY_TBL; + x | y | z +---+---------------+--- + 4 | !check failed | 5 + 6 | OK | 4 +(2 rows) + +-- +-- Primary keys +-- +CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text); +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +ERROR: duplicate key value violates unique constraint "primary_tbl_pkey" +DETAIL: Key (i)=(1) already exists. +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); +ERROR: null value in column "i" of relation "primary_tbl" violates not-null constraint +DETAIL: Failing row contains (null, six). +SELECT * FROM PRIMARY_TBL; + i | t +---+------- + 1 | one + 2 | two + 4 | three + 5 | one +(4 rows) + +DROP TABLE PRIMARY_TBL; +CREATE TABLE PRIMARY_TBL (i int, t text, + PRIMARY KEY(i,t)); +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); +ERROR: null value in column "i" of relation "primary_tbl" violates not-null constraint +DETAIL: Failing row contains (null, six). 
+SELECT * FROM PRIMARY_TBL; + i | t +---+------- + 1 | one + 2 | two + 1 | three + 4 | three + 5 | one +(5 rows) + +DROP TABLE PRIMARY_TBL; +-- +-- Unique keys +-- +CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(1) already exists. +INSERT INTO UNIQUE_TBL VALUES (4, 'four'); +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); +INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update'; +INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update'; +-- should fail +INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails'; +ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time +HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values. +SELECT * FROM UNIQUE_TBL; + i | t +---+-------------------- + 1 | one + 2 | two + 4 | four + | six + | seven + 5 | five-upsert-update + 6 | six-upsert-insert +(7 rows) + +DROP TABLE UNIQUE_TBL; +CREATE TABLE UNIQUE_TBL (i int UNIQUE NULLS NOT DISTINCT, t text); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); -- fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(1) already exists. +INSERT INTO UNIQUE_TBL VALUES (4, 'four'); +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); -- fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(null) already exists. +INSERT INTO UNIQUE_TBL (t) VALUES ('eight') ON CONFLICT DO NOTHING; -- no-op +SELECT * FROM UNIQUE_TBL; + i | t +---+------ + 1 | one + 2 | two + 4 | four + 5 | one + | six +(5 rows) + +DROP TABLE UNIQUE_TBL; +CREATE TABLE UNIQUE_TBL (i int, t text, + UNIQUE(i,t)); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +ERROR: duplicate key value violates unique constraint "unique_tbl_i_t_key" +DETAIL: Key (i, t)=(1, one) already exists. +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +SELECT * FROM UNIQUE_TBL; + i | t +---+------- + 1 | one + 2 | two + 1 | three + 5 | one + | six +(5 rows) + +DROP TABLE UNIQUE_TBL; +-- +-- Deferrable unique constraints +-- +CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text); +INSERT INTO unique_tbl VALUES (0, 'one'); +INSERT INTO unique_tbl VALUES (1, 'two'); +INSERT INTO unique_tbl VALUES (2, 'tree'); +INSERT INTO unique_tbl VALUES (3, 'four'); +INSERT INTO unique_tbl VALUES (4, 'five'); +BEGIN; +-- default is immediate so this should fail right away +UPDATE unique_tbl SET i = 1 WHERE i = 0; +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(1) already exists. 
+ROLLBACK; +-- check is done at end of statement, so this should succeed +UPDATE unique_tbl SET i = i+1; +SELECT * FROM unique_tbl; + i | t +---+------ + 1 | one + 2 | two + 3 | tree + 4 | four + 5 | five +(5 rows) + +-- explicitly defer the constraint +BEGIN; +SET CONSTRAINTS unique_tbl_i_key DEFERRED; +INSERT INTO unique_tbl VALUES (3, 'three'); +DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again +COMMIT; -- should succeed +SELECT * FROM unique_tbl; + i | t +---+------- + 1 | one + 2 | two + 4 | four + 5 | five + 3 | three +(5 rows) + +-- try adding an initially deferred constraint +ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; +ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED; +BEGIN; +INSERT INTO unique_tbl VALUES (1, 'five'); +INSERT INTO unique_tbl VALUES (5, 'one'); +UPDATE unique_tbl SET i = 4 WHERE i = 2; +UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four'; +DELETE FROM unique_tbl WHERE i = 1 AND t = 'one'; +DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; +COMMIT; +SELECT * FROM unique_tbl; + i | t +---+------- + 3 | three + 1 | five + 5 | one + 4 | two + 2 | four +(5 rows) + +-- should fail at commit-time +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +COMMIT; -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +-- make constraint check immediate +BEGIN; +SET CONSTRAINTS ALL IMMEDIATE; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +COMMIT; +-- forced check when SET CONSTRAINTS is called +BEGIN; +SET CONSTRAINTS ALL DEFERRED; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +SET CONSTRAINTS ALL IMMEDIATE; -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +COMMIT; +-- test deferrable UNIQUE with a partitioned table +CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); +CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); +CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); +SELECT conname, conrelid::regclass FROM pg_constraint + WHERE conname LIKE 'parted_uniq%' ORDER BY conname; + conname | conrelid +-------------------------+------------------- + parted_uniq_tbl_1_i_key | parted_uniq_tbl_1 + parted_uniq_tbl_2_i_key | parted_uniq_tbl_2 + parted_uniq_tbl_i_key | parted_uniq_tbl +(3 rows) + +BEGIN; +INSERT INTO parted_uniq_tbl VALUES (1); +SAVEPOINT f; +INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation +ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" +DETAIL: Key (i)=(1) already exists. +ROLLBACK TO f; +SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; +INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit +COMMIT; +ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" +DETAIL: Key (i)=(1) already exists. 
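+-- Sketch, not part of the recorded output: the deferral properties of these
+-- constraints are recorded in pg_constraint and can be checked with:
+--   SELECT conname, condeferrable, condeferred
+--     FROM pg_constraint
+--    WHERE conname LIKE 'parted_uniq%'
+--    ORDER BY conname;
+-- condeferrable reports whether SET CONSTRAINTS ... DEFERRED is allowed,
+-- and condeferred reports whether the constraint starts out deferred.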
+DROP TABLE parted_uniq_tbl; +-- test naming a constraint in a partition when a conflict exists +CREATE TABLE parted_fk_naming ( + id bigint NOT NULL default 1, + id_abc bigint, + CONSTRAINT dummy_constr FOREIGN KEY (id_abc) + REFERENCES parted_fk_naming (id), + PRIMARY KEY (id) +) +PARTITION BY LIST (id); +CREATE TABLE parted_fk_naming_1 ( + id bigint NOT NULL default 1, + id_abc bigint, + PRIMARY KEY (id), + CONSTRAINT dummy_constr CHECK (true) +); +ALTER TABLE parted_fk_naming ATTACH PARTITION parted_fk_naming_1 FOR VALUES IN ('1'); +SELECT conname FROM pg_constraint WHERE conrelid = 'parted_fk_naming_1'::regclass AND contype = 'f'; + conname +-------------------------------- + parted_fk_naming_1_id_abc_fkey +(1 row) + +DROP TABLE parted_fk_naming; +-- test a HOT update that invalidates the conflicting tuple. +-- the trigger should still fire and catch the violation +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; +COMMIT; -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +SELECT * FROM unique_tbl; + i | t +---+------- + 3 | three + 1 | five + 5 | one + 4 | two + 2 | four +(5 rows) + +-- test a HOT update that modifies the newly inserted tuple, +-- but should succeed because we then remove the other conflicting tuple. +BEGIN; +INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now +UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; +DELETE FROM unique_tbl WHERE t = 'three'; +SELECT * FROM unique_tbl; + i | t +---+-------- + 1 | five + 5 | one + 4 | two + 2 | four + 3 | threex +(5 rows) + +COMMIT; +SELECT * FROM unique_tbl; + i | t +---+-------- + 1 | five + 5 | one + 4 | two + 2 | four + 3 | threex +(5 rows) + +DROP TABLE unique_tbl; +-- +-- EXCLUDE constraints +-- +CREATE TABLE circles ( + c1 CIRCLE, + c2 TEXT, + EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&) + WHERE (circle_center(c1) <> '(0,0)') +); +-- these should succeed because they don't match the index predicate +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>'); +-- succeed +INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>'); +-- fail, overlaps +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>'); +ERROR: conflicting key value violates exclusion constraint "circles_c1_c2_excl" +DETAIL: Key (c1, (c2::circle))=(<(20,20),10>, <(0,0),4>) conflicts with existing key (c1, (c2::circle))=(<(10,10),10>, <(0,0),5>). +-- succeed, because violation is ignored +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') + ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING; +-- fail, because DO UPDATE variant requires unique index +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') + ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2; +ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints +-- succeed because c1 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>'); +-- succeed because c2 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>'); +-- should fail on existing data without the WHERE clause +ALTER TABLE circles ADD EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&); +ERROR: could not create exclusion constraint "circles_c1_c2_excl1" +DETAIL: Key (c1, (c2::circle))=(<(0,0),5>, <(0,0),5>) conflicts with key (c1, (c2::circle))=(<(0,0),5>, <(0,0),4>). 
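+-- Sketch, not part of the recorded output: every exclusion constraint is
+-- backed by an index, which is what the REINDEX below operates on.  A
+-- minimal self-contained example that needs no extra extensions uses only a
+-- range overlap operator:
+--   CREATE TABLE reservation (during tsrange,
+--     EXCLUDE USING gist (during WITH &&));
+-- Two rows with overlapping "during" ranges would then be rejected.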
+-- try reindexing an existing constraint +REINDEX INDEX circles_c1_c2_excl; +DROP TABLE circles; +-- Check deferred exclusion constraint +CREATE TABLE deferred_excl ( + f1 int, + f2 int, + CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED +); +INSERT INTO deferred_excl VALUES(1); +INSERT INTO deferred_excl VALUES(2); +INSERT INTO deferred_excl VALUES(1); -- fail +ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" +DETAIL: Key (f1)=(1) conflicts with existing key (f1)=(1). +INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail +ERROR: ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters +BEGIN; +INSERT INTO deferred_excl VALUES(2); -- no fail here +COMMIT; -- should fail here +ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" +DETAIL: Key (f1)=(2) conflicts with existing key (f1)=(2). +BEGIN; +INSERT INTO deferred_excl VALUES(3); +INSERT INTO deferred_excl VALUES(3); -- no fail here +COMMIT; -- should fail here +ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" +DETAIL: Key (f1)=(3) conflicts with existing key (f1)=(3). +-- bug #13148: deferred constraint versus HOT update +BEGIN; +INSERT INTO deferred_excl VALUES(2, 1); -- no fail here +DELETE FROM deferred_excl WHERE f1 = 2 AND f2 IS NULL; -- remove old row +UPDATE deferred_excl SET f2 = 2 WHERE f1 = 2; +COMMIT; -- should not fail +SELECT * FROM deferred_excl; + f1 | f2 +----+---- + 1 | + 2 | 2 +(2 rows) + +ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con; +-- This should fail, but worth testing because of HOT updates +UPDATE deferred_excl SET f1 = 3; +ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =); +ERROR: could not create exclusion constraint "deferred_excl_f1_excl" +DETAIL: Key (f1)=(3) conflicts with key (f1)=(3). +DROP TABLE deferred_excl; +-- Comments +-- Setup a low-level role to enforce non-superuser checks. 
+CREATE ROLE regress_constraint_comments; +SET SESSION AUTHORIZATION regress_constraint_comments; +CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); +CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment'; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; +-- no such constraint +COMMENT ON CONSTRAINT no_constraint ON constraint_comments_tbl IS 'yes, the comment'; +ERROR: constraint "no_constraint" for table "constraint_comments_tbl" does not exist +COMMENT ON CONSTRAINT no_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; +ERROR: constraint "no_constraint" for domain constraint_comments_dom does not exist +-- no such table/domain +COMMENT ON CONSTRAINT the_constraint ON no_comments_tbl IS 'bad comment'; +ERROR: relation "no_comments_tbl" does not exist +COMMENT ON CONSTRAINT the_constraint ON DOMAIN no_comments_dom IS 'another bad comment'; +ERROR: type "no_comments_dom" does not exist +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL; +-- unauthorized user +RESET SESSION AUTHORIZATION; +CREATE ROLE regress_constraint_comments_noaccess; +SET SESSION AUTHORIZATION regress_constraint_comments_noaccess; +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; +ERROR: must be owner of relation constraint_comments_tbl +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment'; +ERROR: must be owner of type constraint_comments_dom +RESET SESSION AUTHORIZATION; +DROP TABLE constraint_comments_tbl; +DROP DOMAIN constraint_comments_dom; +DROP ROLE regress_constraint_comments; +DROP ROLE regress_constraint_comments_noaccess; diff --git a/src/test/regress/expected/conversion.out b/src/test/regress/expected/conversion.out new file mode 100644 index 0000000..442e7af --- /dev/null +++ b/src/test/regress/expected/conversion.out @@ -0,0 +1,734 @@ +-- +-- create user defined conversion +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea) + AS :'regresslib', 'test_enc_conversion' + LANGUAGE C STRICT; +CREATE USER regress_conversion_user WITH NOCREATEDB NOCREATEROLE; +SET SESSION AUTHORIZATION regress_conversion_user; +CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +-- +-- cannot make same name conversion in same schema +-- +CREATE CONVERSION myconv FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ERROR: conversion "myconv" already exists +-- +-- create default conversion with qualified name +-- +CREATE DEFAULT CONVERSION public.mydef FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +-- +-- cannot make default conversion with same schema/for_encoding/to_encoding +-- +CREATE DEFAULT CONVERSION public.mydef2 FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +ERROR: default conversion for LATIN1 to UTF8 already exists +-- test comments +COMMENT ON CONVERSION myconv_bad IS 'foo'; +ERROR: conversion "myconv_bad" does not exist +COMMENT ON CONVERSION myconv IS 'bar'; +COMMENT ON CONVERSION myconv IS NULL; +-- +-- drop user defined conversion +-- +DROP CONVERSION myconv; +DROP 
CONVERSION mydef; +-- +-- Note: the built-in conversions are exercised in opr_sanity.sql, +-- so there's no need to do that here. +-- +-- +-- return to the superuser +-- +RESET SESSION AUTHORIZATION; +DROP USER regress_conversion_user; +-- +-- Test built-in conversion functions. +-- +-- Helper function to test a conversion. Uses the test_enc_conversion function +-- that was created in the create_function_0 test. +create or replace function test_conv( + input IN bytea, + src_encoding IN text, + dst_encoding IN text, + result OUT bytea, + errorat OUT bytea, + error OUT text) +language plpgsql as +$$ +declare + validlen int; +begin + -- First try to perform the conversion with noError = false. If that errors out, + -- capture the error message, and try again with noError = true. The second call + -- should succeed and return the position of the error, return that too. + begin + select * into validlen, result from test_enc_conversion(input, src_encoding, dst_encoding, false); + errorat = NULL; + error := NULL; + exception when others then + error := sqlerrm; + select * into validlen, result from test_enc_conversion(input, src_encoding, dst_encoding, true); + errorat = substr(input, validlen + 1); + end; + return; +end; +$$; +-- +-- UTF-8 +-- +-- The description column must be unique. +CREATE TABLE utf8_verification_inputs (inbytes bytea, description text PRIMARY KEY); +insert into utf8_verification_inputs values + ('\x66006f', 'NUL byte'), + ('\xaf', 'bare continuation'), + ('\xc5', 'missing second byte in 2-byte char'), + ('\xc080', 'smallest 2-byte overlong'), + ('\xc1bf', 'largest 2-byte overlong'), + ('\xc280', 'next 2-byte after overlongs'), + ('\xdfbf', 'largest 2-byte'), + ('\xe9af', 'missing third byte in 3-byte char'), + ('\xe08080', 'smallest 3-byte overlong'), + ('\xe09fbf', 'largest 3-byte overlong'), + ('\xe0a080', 'next 3-byte after overlong'), + ('\xed9fbf', 'last before surrogates'), + ('\xeda080', 'smallest surrogate'), + ('\xedbfbf', 'largest surrogate'), + ('\xee8080', 'next after surrogates'), + ('\xefbfbf', 'largest 3-byte'), + ('\xf1afbf', 'missing fourth byte in 4-byte char'), + ('\xf0808080', 'smallest 4-byte overlong'), + ('\xf08fbfbf', 'largest 4-byte overlong'), + ('\xf0908080', 'next 4-byte after overlong'), + ('\xf48fbfbf', 'largest 4-byte'), + ('\xf4908080', 'smallest too large'), + ('\xfa9a9a8a8a', '5-byte'); +-- Test UTF-8 verification slow path +select description, (test_conv(inbytes, 'utf8', 'utf8')).* from utf8_verification_inputs; + description | result | errorat | error +------------------------------------+------------+--------------+---------------------------------------------------------------- + NUL byte | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + bare continuation | \x | \xaf | invalid byte sequence for encoding "UTF8": 0xaf + missing second byte in 2-byte char | \x | \xc5 | invalid byte sequence for encoding "UTF8": 0xc5 + smallest 2-byte overlong | \x | \xc080 | invalid byte sequence for encoding "UTF8": 0xc0 0x80 + largest 2-byte overlong | \x | \xc1bf | invalid byte sequence for encoding "UTF8": 0xc1 0xbf + next 2-byte after overlongs | \xc280 | | + largest 2-byte | \xdfbf | | + missing third byte in 3-byte char | \x | \xe9af | invalid byte sequence for encoding "UTF8": 0xe9 0xaf + smallest 3-byte overlong | \x | \xe08080 | invalid byte sequence for encoding "UTF8": 0xe0 0x80 0x80 + largest 3-byte overlong | \x | \xe09fbf | invalid byte sequence for encoding "UTF8": 0xe0 0x9f 0xbf + next 3-byte after overlong | \xe0a080 | | 
+ last before surrogates | \xed9fbf | | + smallest surrogate | \x | \xeda080 | invalid byte sequence for encoding "UTF8": 0xed 0xa0 0x80 + largest surrogate | \x | \xedbfbf | invalid byte sequence for encoding "UTF8": 0xed 0xbf 0xbf + next after surrogates | \xee8080 | | + largest 3-byte | \xefbfbf | | + missing fourth byte in 4-byte char | \x | \xf1afbf | invalid byte sequence for encoding "UTF8": 0xf1 0xaf 0xbf + smallest 4-byte overlong | \x | \xf0808080 | invalid byte sequence for encoding "UTF8": 0xf0 0x80 0x80 0x80 + largest 4-byte overlong | \x | \xf08fbfbf | invalid byte sequence for encoding "UTF8": 0xf0 0x8f 0xbf 0xbf + next 4-byte after overlong | \xf0908080 | | + largest 4-byte | \xf48fbfbf | | + smallest too large | \x | \xf4908080 | invalid byte sequence for encoding "UTF8": 0xf4 0x90 0x80 0x80 + 5-byte | \x | \xfa9a9a8a8a | invalid byte sequence for encoding "UTF8": 0xfa +(23 rows) + +-- Test UTF-8 verification with ASCII padding appended to provide +-- coverage for algorithms that work on multiple bytes at a time. +-- The error message for a sequence starting with a 4-byte lead +-- will contain all 4 bytes if they are present, so various +-- expressions below add 3 ASCII bytes to the end to ensure +-- consistent error messages. +-- The number 64 below needs to be at least the value of STRIDE_LENGTH in wchar.c. +-- Test multibyte verification in fast path +with test_bytes as ( + select + inbytes, + description, + (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error + from utf8_verification_inputs +), test_padded as ( + select + description, + (test_conv(inbytes || repeat('.', 64)::bytea, 'utf8', 'utf8')).error + from test_bytes +) +select + description, + b.error as orig_error, + p.error as error_after_padding +from test_padded p +join test_bytes b +using (description) +where p.error is distinct from b.error +order by description; + description | orig_error | error_after_padding +-------------+------------+--------------------- +(0 rows) + +-- Test ASCII verification in fast path where incomplete +-- UTF-8 sequences fall at the end of the preceding chunk. +with test_bytes as ( + select + inbytes, + description, + (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error + from utf8_verification_inputs +), test_padded as ( + select + description, + (test_conv(repeat('.', 64 - length(inbytes))::bytea || inbytes || repeat('.', 64)::bytea, 'utf8', 'utf8')).error + from test_bytes +) +select + description, + b.error as orig_error, + p.error as error_after_padding +from test_padded p +join test_bytes b +using (description) +where p.error is distinct from b.error +order by description; + description | orig_error | error_after_padding +-------------+------------+--------------------- +(0 rows) + +-- Test cases where UTF-8 sequences within short text +-- come after the fast path returns. 
+with test_bytes as ( + select + inbytes, + description, + (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error + from utf8_verification_inputs +), test_padded as ( + select + description, + (test_conv(repeat('.', 64)::bytea || inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error + from test_bytes +) +select + description, + b.error as orig_error, + p.error as error_after_padding +from test_padded p +join test_bytes b +using (description) +where p.error is distinct from b.error +order by description; + description | orig_error | error_after_padding +-------------+------------+--------------------- +(0 rows) + +-- Test cases where incomplete UTF-8 sequences fall at the +-- end of the part checked by the fast path. +with test_bytes as ( + select + inbytes, + description, + (test_conv(inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error + from utf8_verification_inputs +), test_padded as ( + select + description, + (test_conv(repeat('.', 64 - length(inbytes))::bytea || inbytes || repeat('.', 3)::bytea, 'utf8', 'utf8')).error + from test_bytes +) +select + description, + b.error as orig_error, + p.error as error_after_padding +from test_padded p +join test_bytes b +using (description) +where p.error is distinct from b.error +order by description; + description | orig_error | error_after_padding +-------------+------------+--------------------- +(0 rows) + +CREATE TABLE utf8_inputs (inbytes bytea, description text); +insert into utf8_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\xc3a4c3b6', 'valid, extra latin chars'), + ('\xd184d0bed0be', 'valid, cyrillic'), + ('\x666f6fe8b1a1', 'valid, kanji/Chinese'), + ('\xe382abe3829a', 'valid, two chars that combine to one in EUC_JIS_2004'), + ('\xe382ab', 'only first half of combined char in EUC_JIS_2004'), + ('\xe382abe382', 'incomplete combination when converted EUC_JIS_2004'), + ('\xecbd94eb81bceba6ac', 'valid, Hangul, Korean'), + ('\x666f6fefa8aa', 'valid, needs mapping function to convert to GB18030'), + ('\x66e8b1ff6f6f', 'invalid byte sequence'), + ('\x66006f', 'invalid, NUL byte'), + ('\x666f6fe8b100', 'invalid, NUL byte'), + ('\x666f6fe8b1', 'incomplete character at end'); +-- Test UTF-8 verification +select description, (test_conv(inbytes, 'utf8', 'utf8')).* from utf8_inputs; + description | result | errorat | error +------------------------------------------------------+----------------------+--------------+----------------------------------------------------------- + valid, pure ASCII | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | | + valid, cyrillic | \xd184d0bed0be | | + valid, kanji/Chinese | \x666f6fe8b1a1 | | + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | | + only first half of combined char in EUC_JIS_2004 | \xe382ab | | + incomplete combination when converted EUC_JIS_2004 | \xe382ab | \xe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82 + valid, Hangul, Korean | \xecbd94eb81bceba6ac | | + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | | + invalid byte sequence | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + invalid, NUL byte | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +-- Test conversions from UTF-8 +select description, inbytes, (test_conv(inbytes, 'utf8', 
'euc_jis_2004')).* from utf8_inputs; + description | inbytes | result | errorat | error +------------------------------------------------------+----------------------+----------------+----------------------+------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | \xa9daa9ec | | + valid, cyrillic | \xd184d0bed0be | \xa7e6a7e0a7e0 | | + valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6fbedd | | + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \xa5f7 | | + only first half of combined char in EUC_JIS_2004 | \xe382ab | \xa5ab | | + incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82 + valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "EUC_JIS_2004" + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "EUC_JIS_2004" + invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +select description, inbytes, (test_conv(inbytes, 'utf8', 'latin1')).* from utf8_inputs; + description | inbytes | result | errorat | error +------------------------------------------------------+----------------------+----------+----------------------+------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | | + valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN1" + valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN1" + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1" + only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1" + incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN1" + valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN1" + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN1" + invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + 
invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +select description, inbytes, (test_conv(inbytes, 'utf8', 'latin2')).* from utf8_inputs; + description | inbytes | result | errorat | error +------------------------------------------------------+----------------------+----------+----------------------+------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | | + valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN2" + valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN2" + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2" + only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2" + incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN2" + valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN2" + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN2" + invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +select description, inbytes, (test_conv(inbytes, 'utf8', 'latin5')).* from utf8_inputs; + description | inbytes | result | errorat | error +------------------------------------------------------+----------------------+----------+----------------------+------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | \xe4f6 | | + valid, cyrillic | \xd184d0bed0be | \x | \xd184d0bed0be | character with byte sequence 0xd1 0x84 in encoding "UTF8" has no equivalent in encoding "LATIN5" + valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "LATIN5" + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5" + only first half of combined 
char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5" + incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "LATIN5" + valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "LATIN5" + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "LATIN5" + invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +select description, inbytes, (test_conv(inbytes, 'utf8', 'koi8r')).* from utf8_inputs; + description | inbytes | result | errorat | error +------------------------------------------------------+----------------------+----------+----------------------+------------------------------------------------------------------------------------------------------ + valid, pure ASCII | \x666f6f | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | \x | \xc3a4c3b6 | character with byte sequence 0xc3 0xa4 in encoding "UTF8" has no equivalent in encoding "KOI8R" + valid, cyrillic | \xd184d0bed0be | \xc6cfcf | | + valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6f | \xe8b1a1 | character with byte sequence 0xe8 0xb1 0xa1 in encoding "UTF8" has no equivalent in encoding "KOI8R" + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \x | \xe382abe3829a | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R" + only first half of combined char in EUC_JIS_2004 | \xe382ab | \x | \xe382ab | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R" + incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \x | \xe382abe382 | character with byte sequence 0xe3 0x82 0xab in encoding "UTF8" has no equivalent in encoding "KOI8R" + valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x | \xecbd94eb81bceba6ac | character with byte sequence 0xec 0xbd 0x94 in encoding "UTF8" has no equivalent in encoding "KOI8R" + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f | \xefa8aa | character with byte sequence 0xef 0xa8 0xaa in encoding "UTF8" has no equivalent in encoding "KOI8R" + invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +select description, inbytes, (test_conv(inbytes, 'utf8', 'gb18030')).* from utf8_inputs; + description | inbytes | result | errorat | error 
+------------------------------------------------------+----------------------+----------------------------+--------------+----------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid, extra latin chars | \xc3a4c3b6 | \x81308a3181308b32 | | + valid, cyrillic | \xd184d0bed0be | \xa7e6a7e0a7e0 | | + valid, kanji/Chinese | \x666f6fe8b1a1 | \x666f6fcff3 | | + valid, two chars that combine to one in EUC_JIS_2004 | \xe382abe3829a | \xa5ab8139a732 | | + only first half of combined char in EUC_JIS_2004 | \xe382ab | \xa5ab | | + incomplete combination when converted EUC_JIS_2004 | \xe382abe382 | \xa5ab | \xe382 | invalid byte sequence for encoding "UTF8": 0xe3 0x82 + valid, Hangul, Korean | \xecbd94eb81bceba6ac | \x8334e5398238c4338330b335 | | + valid, needs mapping function to convert to GB18030 | \x666f6fefa8aa | \x666f6f84309c38 | | + invalid byte sequence | \x66e8b1ff6f6f | \x66 | \xe8b1ff6f6f | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0xff + invalid, NUL byte | \x66006f | \x66 | \x006f | invalid byte sequence for encoding "UTF8": 0x00 + invalid, NUL byte | \x666f6fe8b100 | \x666f6f | \xe8b100 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 0x00 + incomplete character at end | \x666f6fe8b1 | \x666f6f | \xe8b1 | invalid byte sequence for encoding "UTF8": 0xe8 0xb1 +(13 rows) + +-- +-- EUC_JIS_2004 +-- +CREATE TABLE euc_jis_2004_inputs (inbytes bytea, description text); +insert into euc_jis_2004_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\x666f6fbedd', 'valid'), + ('\xa5f7', 'valid, translates to two UTF-8 chars '), + ('\xbeddbe', 'incomplete char '), + ('\x666f6f00bedd', 'invalid, NUL byte'), + ('\x666f6fbe00dd', 'invalid, NUL byte'), + ('\x666f6fbedd00', 'invalid, NUL byte'), + ('\xbe04', 'invalid byte sequence'); +-- Test EUC_JIS_2004 verification +select description, inbytes, (test_conv(inbytes, 'euc_jis_2004', 'euc_jis_2004')).* from euc_jis_2004_inputs; + description | inbytes | result | errorat | error +---------------------------------------+----------------+--------------+----------+-------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fbedd | \x666f6fbedd | | + valid, translates to two UTF-8 chars | \xa5f7 | \xa5f7 | | + incomplete char | \xbeddbe | \xbedd | \xbe | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe + invalid, NUL byte | \x666f6f00bedd | \x666f6f | \x00bedd | invalid byte sequence for encoding "EUC_JIS_2004": 0x00 + invalid, NUL byte | \x666f6fbe00dd | \x666f6f | \xbe00dd | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x00 + invalid, NUL byte | \x666f6fbedd00 | \x666f6fbedd | \x00 | invalid byte sequence for encoding "EUC_JIS_2004": 0x00 + invalid byte sequence | \xbe04 | \x | \xbe04 | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x04 +(8 rows) + +-- Test conversions from EUC_JIS_2004 +select description, inbytes, (test_conv(inbytes, 'euc_jis_2004', 'utf8')).* from euc_jis_2004_inputs; + description | inbytes | result | errorat | error +---------------------------------------+----------------+----------------+----------+-------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fbedd | \x666f6fe8b1a1 | | + valid, translates to two UTF-8 chars | \xa5f7 | \xe382abe3829a | | + incomplete char | \xbeddbe | \xe8b1a1 | \xbe | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe + invalid, NUL byte | \x666f6f00bedd | \x666f6f | 
\x00bedd | invalid byte sequence for encoding "EUC_JIS_2004": 0x00 + invalid, NUL byte | \x666f6fbe00dd | \x666f6f | \xbe00dd | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x00 + invalid, NUL byte | \x666f6fbedd00 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "EUC_JIS_2004": 0x00 + invalid byte sequence | \xbe04 | \x | \xbe04 | invalid byte sequence for encoding "EUC_JIS_2004": 0xbe 0x04 +(8 rows) + +-- +-- SHIFT-JIS-2004 +-- +CREATE TABLE shiftjis2004_inputs (inbytes bytea, description text); +insert into shiftjis2004_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\x666f6f8fdb', 'valid'), + ('\x666f6f81c0', 'valid, no translation to UTF-8'), + ('\x666f6f82f5', 'valid, translates to two UTF-8 chars '), + ('\x666f6f8fdb8f', 'incomplete char '), + ('\x666f6f820a', 'incomplete char, followed by newline '), + ('\x666f6f008fdb', 'invalid, NUL byte'), + ('\x666f6f8f00db', 'invalid, NUL byte'), + ('\x666f6f8fdb00', 'invalid, NUL byte'); +-- Test SHIFT-JIS-2004 verification +select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'shiftjis2004')).* from shiftjis2004_inputs; + description | inbytes | result | errorat | error +---------------------------------------+----------------+--------------+----------+---------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6f8fdb | \x666f6f8fdb | | + valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6f81c0 | | + valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6f82f5 | | + incomplete char | \x666f6f8fdb8f | \x666f6f8fdb | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f + incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a + invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00 + invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00 + invalid, NUL byte | \x666f6f8fdb00 | \x666f6f8fdb | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00 +(9 rows) + +-- Test conversions from SHIFT-JIS-2004 +select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'utf8')).* from shiftjis2004_inputs; + description | inbytes | result | errorat | error +---------------------------------------+----------------+----------------------+----------+---------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6f8fdb | \x666f6fe8b1a1 | | + valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6fe28a84 | | + valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6fe3818be3829a | | + incomplete char | \x666f6f8fdb8f | \x666f6fe8b1a1 | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f + incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a + invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00 + invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00 + invalid, NUL byte | \x666f6f8fdb00 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00 +(9 rows) + +select description, inbytes, (test_conv(inbytes, 'shiftjis2004', 'euc_jis_2004')).* from shiftjis2004_inputs; + description | inbytes | result 
| errorat | error +---------------------------------------+----------------+--------------+----------+---------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6f8fdb | \x666f6fbedd | | + valid, no translation to UTF-8 | \x666f6f81c0 | \x666f6fa2c2 | | + valid, translates to two UTF-8 chars | \x666f6f82f5 | \x666f6fa4f7 | | + incomplete char | \x666f6f8fdb8f | \x666f6fbedd | \x8f | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f + incomplete char, followed by newline | \x666f6f820a | \x666f6f | \x820a | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x82 0x0a + invalid, NUL byte | \x666f6f008fdb | \x666f6f | \x008fdb | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00 + invalid, NUL byte | \x666f6f8f00db | \x666f6f | \x8f00db | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x8f 0x00 + invalid, NUL byte | \x666f6f8fdb00 | \x666f6fbedd | \x00 | invalid byte sequence for encoding "SHIFT_JIS_2004": 0x00 +(9 rows) + +-- +-- GB18030 +-- +CREATE TABLE gb18030_inputs (inbytes bytea, description text); +insert into gb18030_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\x666f6fcff3', 'valid'), + ('\x666f6f8431a530', 'valid, no translation to UTF-8'), + ('\x666f6f84309c38', 'valid, translates to UTF-8 by mapping function'), + ('\x666f6f84309c', 'incomplete char '), + ('\x666f6f84309c0a', 'incomplete char, followed by newline '), + ('\x666f6f84309c3800', 'invalid, NUL byte'), + ('\x666f6f84309c0038', 'invalid, NUL byte'); +-- Test GB18030 verification +select description, inbytes, (test_conv(inbytes, 'gb18030', 'gb18030')).* from gb18030_inputs; + description | inbytes | result | errorat | error +------------------------------------------------+--------------------+------------------+--------------+------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fcff3 | \x666f6fcff3 | | + valid, no translation to UTF-8 | \x666f6f8431a530 | \x666f6f8431a530 | | + valid, translates to UTF-8 by mapping function | \x666f6f84309c38 | \x666f6f84309c38 | | + incomplete char | \x666f6f84309c | \x666f6f | \x84309c | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c + incomplete char, followed by newline | \x666f6f84309c0a | \x666f6f | \x84309c0a | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x0a + invalid, NUL byte | \x666f6f84309c3800 | \x666f6f84309c38 | \x00 | invalid byte sequence for encoding "GB18030": 0x00 + invalid, NUL byte | \x666f6f84309c0038 | \x666f6f | \x84309c0038 | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x00 +(8 rows) + +-- Test conversions from GB18030 +select description, inbytes, (test_conv(inbytes, 'gb18030', 'utf8')).* from gb18030_inputs; + description | inbytes | result | errorat | error +------------------------------------------------+--------------------+----------------+--------------+------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fcff3 | \x666f6fe8b1a1 | | + valid, no translation to UTF-8 | \x666f6f8431a530 | \x666f6f | \x8431a530 | character with byte sequence 0x84 0x31 0xa5 0x30 in encoding "GB18030" has no equivalent in encoding "UTF8" + valid, translates to UTF-8 by mapping function | \x666f6f84309c38 | \x666f6fefa8aa | | + incomplete char | \x666f6f84309c | \x666f6f | \x84309c | invalid byte sequence for encoding "GB18030": 0x84 0x30 
0x9c + incomplete char, followed by newline | \x666f6f84309c0a | \x666f6f | \x84309c0a | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x0a + invalid, NUL byte | \x666f6f84309c3800 | \x666f6fefa8aa | \x00 | invalid byte sequence for encoding "GB18030": 0x00 + invalid, NUL byte | \x666f6f84309c0038 | \x666f6f | \x84309c0038 | invalid byte sequence for encoding "GB18030": 0x84 0x30 0x9c 0x00 +(8 rows) + +-- +-- ISO-8859-5 +-- +CREATE TABLE iso8859_5_inputs (inbytes bytea, description text); +insert into iso8859_5_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\xe4dede', 'valid'), + ('\x00', 'invalid, NUL byte'), + ('\xe400dede', 'invalid, NUL byte'), + ('\xe4dede00', 'invalid, NUL byte'); +-- Test ISO-8859-5 verification +select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'iso8859-5')).* from iso8859_5_inputs; + description | inbytes | result | errorat | error +-------------------+------------+----------+----------+------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \xe4dede | \xe4dede | | + invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe400dede | \xe4 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe4dede00 | \xe4dede | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 +(5 rows) + +-- Test conversions from ISO-8859-5 +select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'utf8')).* from iso8859_5_inputs; + description | inbytes | result | errorat | error +-------------------+------------+----------------+----------+------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \xe4dede | \xd184d0bed0be | | + invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe400dede | \xd184 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe4dede00 | \xd184d0bed0be | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 +(5 rows) + +select description, inbytes, (test_conv(inbytes, 'iso8859-5', 'koi8r')).* from iso8859_5_inputs; + description | inbytes | result | errorat | error +-------------------+------------+----------+----------+------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \xe4dede | \xc6cfcf | | + invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe400dede | \xc6 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe4dede00 | \xc6cfcf | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 +(5 rows) + +select description, inbytes, (test_conv(inbytes, 'iso8859_5', 'mule_internal')).* from iso8859_5_inputs; + description | inbytes | result | errorat | error +-------------------+------------+----------------+----------+------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \xe4dede | \x8bc68bcf8bcf | | + invalid, NUL byte | \x00 | \x | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe400dede | \x8bc6 | \x00dede | invalid byte sequence for encoding "ISO_8859_5": 0x00 + invalid, NUL byte | \xe4dede00 | \x8bc68bcf8bcf | \x00 | invalid byte sequence for encoding "ISO_8859_5": 0x00 +(5 rows) + +-- +-- Big5 +-- +CREATE TABLE big5_inputs (inbytes bytea, 
description text); +insert into big5_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\x666f6fb648', 'valid'), + ('\x666f6fa27f', 'valid, no translation to UTF-8'), + ('\x666f6fb60048', 'invalid, NUL byte'), + ('\x666f6fb64800', 'invalid, NUL byte'); +-- Test Big5 verification +select description, inbytes, (test_conv(inbytes, 'big5', 'big5')).* from big5_inputs; + description | inbytes | result | errorat | error +--------------------------------+----------------+--------------+----------+------------------------------------------------------ + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fb648 | \x666f6fb648 | | + valid, no translation to UTF-8 | \x666f6fa27f | \x666f6fa27f | | + invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00 + invalid, NUL byte | \x666f6fb64800 | \x666f6fb648 | \x00 | invalid byte sequence for encoding "BIG5": 0x00 +(5 rows) + +-- Test conversions from Big5 +select description, inbytes, (test_conv(inbytes, 'big5', 'utf8')).* from big5_inputs; + description | inbytes | result | errorat | error +--------------------------------+----------------+----------------+----------+------------------------------------------------------------------------------------------------ + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fb648 | \x666f6fe8b1a1 | | + valid, no translation to UTF-8 | \x666f6fa27f | \x666f6f | \xa27f | character with byte sequence 0xa2 0x7f in encoding "BIG5" has no equivalent in encoding "UTF8" + invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00 + invalid, NUL byte | \x666f6fb64800 | \x666f6fe8b1a1 | \x00 | invalid byte sequence for encoding "BIG5": 0x00 +(5 rows) + +select description, inbytes, (test_conv(inbytes, 'big5', 'mule_internal')).* from big5_inputs; + description | inbytes | result | errorat | error +--------------------------------+----------------+----------------+----------+------------------------------------------------------ + valid, pure ASCII | \x666f6f | \x666f6f | | + valid | \x666f6fb648 | \x666f6f95e2af | | + valid, no translation to UTF-8 | \x666f6fa27f | \x666f6f95a3c1 | | + invalid, NUL byte | \x666f6fb60048 | \x666f6f | \xb60048 | invalid byte sequence for encoding "BIG5": 0xb6 0x00 + invalid, NUL byte | \x666f6fb64800 | \x666f6f95e2af | \x00 | invalid byte sequence for encoding "BIG5": 0x00 +(5 rows) + +-- +-- MULE_INTERNAL +-- +CREATE TABLE mic_inputs (inbytes bytea, description text); +insert into mic_inputs values + ('\x666f6f', 'valid, pure ASCII'), + ('\x8bc68bcf8bcf', 'valid (in KOI8R)'), + ('\x8bc68bcf8b', 'invalid,incomplete char'), + ('\x92bedd', 'valid (in SHIFT_JIS)'), + ('\x92be', 'invalid, incomplete char)'), + ('\x666f6f95a3c1', 'valid (in Big5)'), + ('\x666f6f95a3', 'invalid, incomplete char'), + ('\x9200bedd', 'invalid, NUL byte'), + ('\x92bedd00', 'invalid, NUL byte'), + ('\x8b00c68bcf8bcf', 'invalid, NUL byte'); +-- Test MULE_INTERNAL verification +select description, inbytes, (test_conv(inbytes, 'mule_internal', 'mule_internal')).* from mic_inputs; + description | inbytes | result | errorat | error +---------------------------+------------------+----------------+------------------+-------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid (in KOI8R) | \x8bc68bcf8bcf | \x8bc68bcf8bcf | | + invalid,incomplete char | \x8bc68bcf8b | \x8bc68bcf | \x8b | invalid byte sequence for encoding 
"MULE_INTERNAL": 0x8b + valid (in SHIFT_JIS) | \x92bedd | \x92bedd | | + invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe + valid (in Big5) | \x666f6f95a3c1 | \x666f6f95a3c1 | | + invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 + invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe + invalid, NUL byte | \x92bedd00 | \x92bedd | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00 + invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 +(10 rows) + +-- Test conversions from MULE_INTERNAL +select description, inbytes, (test_conv(inbytes, 'mule_internal', 'koi8r')).* from mic_inputs; + description | inbytes | result | errorat | error +---------------------------+------------------+----------+------------------+--------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid (in KOI8R) | \x8bc68bcf8bcf | \xc6cfcf | | + invalid,incomplete char | \x8bc68bcf8b | \xc6cf | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b + valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" + invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe + valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" + invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 + invalid, NUL byte | \x9200bedd | \x | \x9200bedd | character with byte sequence 0x92 0x00 0xbe in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" + invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" + invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | character with byte sequence 0x8b 0x00 in encoding "MULE_INTERNAL" has no equivalent in encoding "KOI8R" +(10 rows) + +select description, inbytes, (test_conv(inbytes, 'mule_internal', 'iso8859-5')).* from mic_inputs; + description | inbytes | result | errorat | error +---------------------------+------------------+----------+------------------+-------------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid (in KOI8R) | \x8bc68bcf8bcf | \xe4dede | | + invalid,incomplete char | \x8bc68bcf8b | \xe4de | \x8b | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b + valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" + invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe + valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" + invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 + 
invalid, NUL byte | \x9200bedd | \x | \x9200bedd | character with byte sequence 0x92 0x00 0xbe in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" + invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" + invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | character with byte sequence 0x8b 0x00 in encoding "MULE_INTERNAL" has no equivalent in encoding "ISO_8859_5" +(10 rows) + +select description, inbytes, (test_conv(inbytes, 'mule_internal', 'sjis')).* from mic_inputs; + description | inbytes | result | errorat | error +---------------------------+------------------+----------+------------------+-------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS" + invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS" + valid (in SHIFT_JIS) | \x92bedd | \x8fdb | | + invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe + valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "SJIS" + invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 + invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe + invalid, NUL byte | \x92bedd00 | \x8fdb | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00 + invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 +(10 rows) + +select description, inbytes, (test_conv(inbytes, 'mule_internal', 'big5')).* from mic_inputs; + description | inbytes | result | errorat | error +---------------------------+------------------+--------------+------------------+-------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" + invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" + valid (in SHIFT_JIS) | \x92bedd | \x | \x92bedd | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" + invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe + valid (in Big5) | \x666f6f95a3c1 | \x666f6fa2a1 | | + invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 + invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe + invalid, NUL byte | \x92bedd00 | \x | \x92bedd00 | character with byte sequence 0x92 0xbe 0xdd in encoding "MULE_INTERNAL" has no equivalent in encoding "BIG5" + invalid, NUL byte | \x8b00c68bcf8bcf | 
\x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 +(10 rows) + +select description, inbytes, (test_conv(inbytes, 'mule_internal', 'euc_jp')).* from mic_inputs; + description | inbytes | result | errorat | error +---------------------------+------------------+----------+------------------+---------------------------------------------------------------------------------------------------------------- + valid, pure ASCII | \x666f6f | \x666f6f | | + valid (in KOI8R) | \x8bc68bcf8bcf | \x | \x8bc68bcf8bcf | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP" + invalid,incomplete char | \x8bc68bcf8b | \x | \x8bc68bcf8b | character with byte sequence 0x8b 0xc6 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP" + valid (in SHIFT_JIS) | \x92bedd | \xbedd | | + invalid, incomplete char) | \x92be | \x | \x92be | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0xbe + valid (in Big5) | \x666f6f95a3c1 | \x666f6f | \x95a3c1 | character with byte sequence 0x95 0xa3 0xc1 in encoding "MULE_INTERNAL" has no equivalent in encoding "EUC_JP" + invalid, incomplete char | \x666f6f95a3 | \x666f6f | \x95a3 | invalid byte sequence for encoding "MULE_INTERNAL": 0x95 0xa3 + invalid, NUL byte | \x9200bedd | \x | \x9200bedd | invalid byte sequence for encoding "MULE_INTERNAL": 0x92 0x00 0xbe + invalid, NUL byte | \x92bedd00 | \xbedd | \x00 | invalid byte sequence for encoding "MULE_INTERNAL": 0x00 + invalid, NUL byte | \x8b00c68bcf8bcf | \x | \x8b00c68bcf8bcf | invalid byte sequence for encoding "MULE_INTERNAL": 0x8b 0x00 +(10 rows) + diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out new file mode 100644 index 0000000..b48365e --- /dev/null +++ b/src/test/regress/expected/copy.out @@ -0,0 +1,296 @@ +-- +-- COPY +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +--- test copying in CSV mode with various styles +--- of embedded line ending characters +create temp table copytest ( + style text, + test text, + filler int); +insert into copytest values('DOS',E'abc\r\ndef',1); +insert into copytest values('Unix',E'abc\ndef',2); +insert into copytest values('Mac',E'abc\rdef',3); +insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); +\set filename :abs_builddir '/results/copytest.csv' +copy copytest to :'filename' csv; +create temp table copytest2 (like copytest); +copy copytest2 from :'filename' csv; +select * from copytest except select * from copytest2; + style | test | filler +-------+------+-------- +(0 rows) + +truncate copytest2; +--- same test but with an escape char different from quote char +copy copytest to :'filename' csv quote '''' escape E'\\'; +copy copytest2 from :'filename' csv quote '''' escape E'\\'; +select * from copytest except select * from copytest2; + style | test | filler +-------+------+-------- +(0 rows) + +-- test header line feature +create temp table copytest3 ( + c1 int, + "col with , comma" text, + "col with "" quote" int); +copy copytest3 from stdin csv header; +copy copytest3 to stdout csv header; +c1,"col with , comma","col with "" quote" +1,a,1 +2,b,2 +create temp table copytest4 ( + c1 int, + "colname with tab: " text); +copy copytest4 from stdin (header); +copy copytest4 to stdout (header); +c1 colname with tab: \t +1 a +2 b +-- test copy from with a partitioned table +create table parted_copytest ( + a int, + b int, + c text +) partition by 
list (b); +create table parted_copytest_a1 (c text, b int, a int); +create table parted_copytest_a2 (a int, c text, b int); +alter table parted_copytest attach partition parted_copytest_a1 for values in(1); +alter table parted_copytest attach partition parted_copytest_a2 for values in(2); +-- We must insert enough rows to trigger multi-inserts. These are only +-- enabled adaptively when there are few enough partition changes. +insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; +insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; +insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; +\set filename :abs_builddir '/results/parted_copytest.csv' +copy (select * from parted_copytest order by a) to :'filename'; +truncate parted_copytest; +copy parted_copytest from :'filename'; +-- Ensure COPY FREEZE errors for partitioned tables. +begin; +truncate parted_copytest; +copy parted_copytest from :'filename' (freeze); +ERROR: cannot perform COPY FREEZE on a partitioned table +rollback; +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +--------------------+-------+-------- + parted_copytest_a1 | 1010 | 510655 + parted_copytest_a2 | 10 | 10055 +(2 rows) + +truncate parted_copytest; +-- create before insert row trigger on parted_copytest_a2 +create function part_ins_func() returns trigger language plpgsql as $$ +begin + return new; +end; +$$; +create trigger part_ins_trig + before insert on parted_copytest_a2 + for each row + execute procedure part_ins_func(); +copy parted_copytest from :'filename'; +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +--------------------+-------+-------- + parted_copytest_a1 | 1010 | 510655 + parted_copytest_a2 | 10 | 10055 +(2 rows) + +truncate table parted_copytest; +create index on parted_copytest (b); +drop trigger part_ins_trig on parted_copytest_a2; +copy parted_copytest from stdin; +-- Ensure index entries were properly added during the copy. +select * from parted_copytest where b = 1; + a | b | c +---+---+------ + 1 | 1 | str1 +(1 row) + +select * from parted_copytest where b = 2; + a | b | c +---+---+------ + 2 | 2 | str2 +(1 row) + +drop table parted_copytest; +-- +-- Progress reporting for COPY +-- +create table tab_progress_reporting ( + name text, + age int4, + location point, + salary int4, + manager name +); +-- Add a trigger to catch and print the contents of the catalog view +-- pg_stat_progress_copy during data insertion. This allows to test +-- the validation of some progress reports for COPY FROM where the trigger +-- would fire. +create function notice_after_tab_progress_reporting() returns trigger AS +$$ +declare report record; +begin + -- The fields ignored here are the ones that may not remain + -- consistent across multiple runs. The sizes reported may differ + -- across platforms, so just check if these are strictly positive. 
+ with progress_data as ( + select + relid::regclass::text as relname, + command, + type, + bytes_processed > 0 as has_bytes_processed, + bytes_total > 0 as has_bytes_total, + tuples_processed, + tuples_excluded + from pg_stat_progress_copy + where pid = pg_backend_pid()) + select into report (to_jsonb(r)) as value + from progress_data r; + + raise info 'progress: %', report.value::text; + return new; +end; +$$ language plpgsql; +create trigger check_after_tab_progress_reporting + after insert on tab_progress_reporting + for each statement + execute function notice_after_tab_progress_reporting(); +-- Generate COPY FROM report with PIPE. +copy tab_progress_reporting from stdin; +INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": false, "tuples_excluded": 0, "tuples_processed": 3, "has_bytes_processed": true} +-- Generate COPY FROM report with FILE, with some excluded tuples. +truncate tab_progress_reporting; +\set filename :abs_srcdir '/data/emp.data' +copy tab_progress_reporting from :'filename' + where (salary < 2000); +INFO: progress: {"type": "FILE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": true, "tuples_excluded": 1, "tuples_processed": 2, "has_bytes_processed": true} +drop trigger check_after_tab_progress_reporting on tab_progress_reporting; +drop function notice_after_tab_progress_reporting(); +drop table tab_progress_reporting; +-- Test header matching feature +create table header_copytest ( + a int, + b int, + c text +); +-- Make sure it works with dropped columns +alter table header_copytest drop column c; +alter table header_copytest add column c text; +copy header_copytest to stdout with (header match); +ERROR: cannot use "match" with HEADER in COPY TO +copy header_copytest from stdin with (header wrong_choice); +ERROR: header requires a Boolean value or "match" +-- works +copy header_copytest from stdin with (header match); +copy header_copytest (c, a, b) from stdin with (header match); +copy header_copytest from stdin with (header match, format csv); +-- errors +copy header_copytest (c, b, a) from stdin with (header match); +ERROR: column name mismatch in header line field 1: got "a", expected "c" +CONTEXT: COPY header_copytest, line 1: "a b c" +copy header_copytest from stdin with (header match); +ERROR: column name mismatch in header line field 3: got null value ("\N"), expected "c" +CONTEXT: COPY header_copytest, line 1: "a b \N" +copy header_copytest from stdin with (header match); +ERROR: wrong number of fields in header line: got 2, expected 3 +CONTEXT: COPY header_copytest, line 1: "a b" +copy header_copytest from stdin with (header match); +ERROR: wrong number of fields in header line: got 4, expected 3 +CONTEXT: COPY header_copytest, line 1: "a b c d" +copy header_copytest from stdin with (header match); +ERROR: column name mismatch in header line field 3: got "d", expected "c" +CONTEXT: COPY header_copytest, line 1: "a b d" +SELECT * FROM header_copytest ORDER BY a; + a | b | c +---+---+----- + 1 | 2 | foo + 3 | 4 | bar + 5 | 6 | baz +(3 rows) + +-- Drop an extra column, in the middle of the existing set. 
+alter table header_copytest drop column b; +-- works +copy header_copytest (c, a) from stdin with (header match); +copy header_copytest (a, c) from stdin with (header match); +-- errors +copy header_copytest from stdin with (header match); +ERROR: wrong number of fields in header line: got 3, expected 2 +CONTEXT: COPY header_copytest, line 1: "a ........pg.dropped.2........ c" +copy header_copytest (a, c) from stdin with (header match); +ERROR: wrong number of fields in header line: got 3, expected 2 +CONTEXT: COPY header_copytest, line 1: "a c b" +SELECT * FROM header_copytest ORDER BY a; + a | c +---+----- + 1 | foo + 3 | bar + 5 | baz + 7 | foo + 8 | foo +(5 rows) + +drop table header_copytest; +-- test COPY with overlong column defaults +create temp table oversized_column_default ( + col1 varchar(5) DEFAULT 'more than 5 chars', + col2 varchar(5)); +-- normal COPY should work +copy oversized_column_default from stdin; +-- error if the column is excluded +copy oversized_column_default (col2) from stdin; +ERROR: value too long for type character varying(5) +\. +invalid command \. +-- error if the DEFAULT option is given +copy oversized_column_default from stdin (default ''); +ERROR: value too long for type character varying(5) +\. +invalid command \. +drop table oversized_column_default; +-- +-- Create partitioned table that does not allow bulk insertions, to test bugs +-- related to the reuse of BulkInsertState across partitions (only done when +-- not using bulk insert). Switching between partitions often makes it more +-- likely to encounter these bugs, so we just switch on roughly every insert +-- by having an even/odd number partition and inserting evenly distributed +-- data. +-- +CREATE TABLE parted_si ( + id int not null, + data text not null, + -- prevent use of bulk insert by having a volatile function + rand float8 not null default random() +) +PARTITION BY LIST((id % 2)); +CREATE TABLE parted_si_p_even PARTITION OF parted_si FOR VALUES IN (0); +CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1); +-- Test that bulk relation extension handles reusing a single BulkInsertState +-- across partitions. Without the fix applied, this reliably reproduces +-- #18130 unless shared_buffers is extremely small (preventing any use use of +-- bulk relation extension). See +-- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org +-- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us +\set filename :abs_srcdir '/data/desc.data' +COPY parted_si(id, data) FROM :'filename'; +-- An earlier bug (see commit b1ecb9b3fcf) could end up using a buffer from +-- the wrong partition. This test is *not* guaranteed to trigger that bug, but +-- does so when shared_buffers is small enough. To test if we encountered the +-- bug, check that the partition condition isn't violated. 
+SELECT tableoid::regclass, id % 2 = 0 is_even, count(*) from parted_si GROUP BY 1, 2 ORDER BY 1; + tableoid | is_even | count +------------------+---------+------- + parted_si_p_even | t | 5000 + parted_si_p_odd | f | 5000 +(2 rows) + +DROP TABLE parted_si; diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out new file mode 100644 index 0000000..faf1a4d --- /dev/null +++ b/src/test/regress/expected/copy2.out @@ -0,0 +1,780 @@ +CREATE TEMP TABLE x ( + a serial, + b int, + c text not null default 'stuff', + d text, + e text +) ; +CREATE FUNCTION fn_x_before () RETURNS TRIGGER AS ' + BEGIN + NEW.e := ''before trigger fired''::text; + return NEW; + END; +' LANGUAGE plpgsql; +CREATE FUNCTION fn_x_after () RETURNS TRIGGER AS ' + BEGIN + UPDATE x set e=''after trigger fired'' where c=''stuff''; + return NULL; + END; +' LANGUAGE plpgsql; +CREATE TRIGGER trg_x_after AFTER INSERT ON x +FOR EACH ROW EXECUTE PROCEDURE fn_x_after(); +CREATE TRIGGER trg_x_before BEFORE INSERT ON x +FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); +COPY x (a, b, c, d, e) from stdin; +COPY x (b, d) from stdin; +COPY x (b, d) from stdin; +COPY x (a, b, c, d, e) from stdin; +-- non-existent column in column list: should fail +COPY x (xyz) from stdin; +ERROR: column "xyz" of relation "x" does not exist +-- redundant options +COPY x from stdin (format CSV, FORMAT CSV); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (format CSV, FORMAT CSV); + ^ +COPY x from stdin (freeze off, freeze on); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (freeze off, freeze on); + ^ +COPY x from stdin (delimiter ',', delimiter ','); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (delimiter ',', delimiter ','); + ^ +COPY x from stdin (null ' ', null ' '); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (null ' ', null ' '); + ^ +COPY x from stdin (header off, header on); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (header off, header on); + ^ +COPY x from stdin (quote ':', quote ':'); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (quote ':', quote ':'); + ^ +COPY x from stdin (escape ':', escape ':'); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (escape ':', escape ':'); + ^ +COPY x from stdin (force_quote (a), force_quote *); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (force_quote (a), force_quote *); + ^ +COPY x from stdin (force_not_null (a), force_not_null (b)); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (force_not_null (a), force_not_null (b)); + ^ +COPY x from stdin (force_null (a), force_null (b)); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (force_null (a), force_null (b)); + ^ +COPY x from stdin (convert_selectively (a), convert_selectively (b)); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (convert_selectively (a), convert_selectiv... + ^ +COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii'); +ERROR: conflicting or redundant options +LINE 1: COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii... 
+ ^ +-- incorrect options +COPY x to stdin (format BINARY, delimiter ','); +ERROR: cannot specify DELIMITER in BINARY mode +COPY x to stdin (format BINARY, null 'x'); +ERROR: cannot specify NULL in BINARY mode +COPY x to stdin (format TEXT, force_quote(a)); +ERROR: COPY force quote available only in CSV mode +COPY x from stdin (format CSV, force_quote(a)); +ERROR: COPY force quote only available using COPY TO +COPY x to stdout (format TEXT, force_not_null(a)); +ERROR: COPY force not null available only in CSV mode +COPY x to stdin (format CSV, force_not_null(a)); +ERROR: COPY force not null only available using COPY FROM +COPY x to stdout (format TEXT, force_null(a)); +ERROR: COPY force null available only in CSV mode +COPY x to stdin (format CSV, force_null(a)); +ERROR: COPY force null only available using COPY FROM +-- too many columns in column list: should fail +COPY x (a, b, c, d, e, d, c) from stdin; +ERROR: column "d" specified more than once +-- missing data: should fail +COPY x from stdin; +ERROR: invalid input syntax for type integer: "" +CONTEXT: COPY x, line 1, column a: "" +COPY x from stdin; +ERROR: missing data for column "e" +CONTEXT: COPY x, line 1: "2000 230 23 23" +COPY x from stdin; +ERROR: missing data for column "e" +CONTEXT: COPY x, line 1: "2001 231 \N \N" +-- extra data: should fail +COPY x from stdin; +ERROR: extra data after last expected column +CONTEXT: COPY x, line 1: "2002 232 40 50 60 70 80" +-- various COPY options: delimiters, oids, NULL string, encoding +COPY x (b, c, d, e) from stdin delimiter ',' null 'x'; +COPY x from stdin WITH DELIMITER AS ';' NULL AS ''; +COPY x from stdin WITH DELIMITER AS ':' NULL AS E'\\X' ENCODING 'sql_ascii'; +COPY x TO stdout WHERE a = 1; +ERROR: WHERE clause not allowed with COPY TO +LINE 1: COPY x TO stdout WHERE a = 1; + ^ +COPY x from stdin WHERE a = 50004; +COPY x from stdin WHERE a > 60003; +COPY x from stdin WHERE f > 60003; +ERROR: column "f" does not exist +LINE 1: COPY x from stdin WHERE f > 60003; + ^ +COPY x from stdin WHERE a = max(x.b); +ERROR: aggregate functions are not allowed in COPY FROM WHERE conditions +LINE 1: COPY x from stdin WHERE a = max(x.b); + ^ +COPY x from stdin WHERE a IN (SELECT 1 FROM x); +ERROR: cannot use subquery in COPY FROM WHERE condition +LINE 1: COPY x from stdin WHERE a IN (SELECT 1 FROM x); + ^ +COPY x from stdin WHERE a IN (generate_series(1,5)); +ERROR: set-returning functions are not allowed in COPY FROM WHERE conditions +LINE 1: COPY x from stdin WHERE a IN (generate_series(1,5)); + ^ +COPY x from stdin WHERE a = row_number() over(b); +ERROR: window functions are not allowed in COPY FROM WHERE conditions +LINE 1: COPY x from stdin WHERE a = row_number() over(b); + ^ +-- check results of copy in +SELECT * FROM x; + a | b | c | d | e +-------+----+------------+--------+---------------------- + 9999 | | \N | NN | before trigger fired + 10000 | 21 | 31 | 41 | before trigger fired + 10001 | 22 | 32 | 42 | before trigger fired + 10002 | 23 | 33 | 43 | before trigger fired + 10003 | 24 | 34 | 44 | before trigger fired + 10004 | 25 | 35 | 45 | before trigger fired + 10005 | 26 | 36 | 46 | before trigger fired + 6 | | 45 | 80 | before trigger fired + 7 | | x | \x | before trigger fired + 8 | | , | \, | before trigger fired + 3000 | | c | | before trigger fired + 4000 | | C | | before trigger fired + 4001 | 1 | empty | | before trigger fired + 4002 | 2 | null | | before trigger fired + 4003 | 3 | Backslash | \ | before trigger fired + 4004 | 4 | BackslashX | \X | before trigger fired + 4005 
| 5 | N | N | before trigger fired + 4006 | 6 | BackslashN | \N | before trigger fired + 4007 | 7 | XX | XX | before trigger fired + 4008 | 8 | Delimiter | : | before trigger fired + 50004 | 25 | 35 | 45 | before trigger fired + 60004 | 25 | 35 | 45 | before trigger fired + 60005 | 26 | 36 | 46 | before trigger fired + 1 | 1 | stuff | test_1 | after trigger fired + 2 | 2 | stuff | test_2 | after trigger fired + 3 | 3 | stuff | test_3 | after trigger fired + 4 | 4 | stuff | test_4 | after trigger fired + 5 | 5 | stuff | test_5 | after trigger fired +(28 rows) + +-- check copy out +COPY x TO stdout; +9999 \N \\N NN before trigger fired +10000 21 31 41 before trigger fired +10001 22 32 42 before trigger fired +10002 23 33 43 before trigger fired +10003 24 34 44 before trigger fired +10004 25 35 45 before trigger fired +10005 26 36 46 before trigger fired +6 \N 45 80 before trigger fired +7 \N x \\x before trigger fired +8 \N , \\, before trigger fired +3000 \N c \N before trigger fired +4000 \N C \N before trigger fired +4001 1 empty before trigger fired +4002 2 null \N before trigger fired +4003 3 Backslash \\ before trigger fired +4004 4 BackslashX \\X before trigger fired +4005 5 N N before trigger fired +4006 6 BackslashN \\N before trigger fired +4007 7 XX XX before trigger fired +4008 8 Delimiter : before trigger fired +50004 25 35 45 before trigger fired +60004 25 35 45 before trigger fired +60005 26 36 46 before trigger fired +1 1 stuff test_1 after trigger fired +2 2 stuff test_2 after trigger fired +3 3 stuff test_3 after trigger fired +4 4 stuff test_4 after trigger fired +5 5 stuff test_5 after trigger fired +COPY x (c, e) TO stdout; +\\N before trigger fired +31 before trigger fired +32 before trigger fired +33 before trigger fired +34 before trigger fired +35 before trigger fired +36 before trigger fired +45 before trigger fired +x before trigger fired +, before trigger fired +c before trigger fired +C before trigger fired +empty before trigger fired +null before trigger fired +Backslash before trigger fired +BackslashX before trigger fired +N before trigger fired +BackslashN before trigger fired +XX before trigger fired +Delimiter before trigger fired +35 before trigger fired +35 before trigger fired +36 before trigger fired +stuff after trigger fired +stuff after trigger fired +stuff after trigger fired +stuff after trigger fired +stuff after trigger fired +COPY x (b, e) TO stdout WITH NULL 'I''m null'; +I'm null before trigger fired +21 before trigger fired +22 before trigger fired +23 before trigger fired +24 before trigger fired +25 before trigger fired +26 before trigger fired +I'm null before trigger fired +I'm null before trigger fired +I'm null before trigger fired +I'm null before trigger fired +I'm null before trigger fired +1 before trigger fired +2 before trigger fired +3 before trigger fired +4 before trigger fired +5 before trigger fired +6 before trigger fired +7 before trigger fired +8 before trigger fired +25 before trigger fired +25 before trigger fired +26 before trigger fired +1 after trigger fired +2 after trigger fired +3 after trigger fired +4 after trigger fired +5 after trigger fired +CREATE TEMP TABLE y ( + col1 text, + col2 text +); +INSERT INTO y VALUES ('Jackson, Sam', E'\\h'); +INSERT INTO y VALUES ('It is "perfect".',E'\t'); +INSERT INTO y VALUES ('', NULL); +COPY y TO stdout WITH CSV; +"Jackson, Sam",\h +"It is ""perfect"".", +"", +COPY y TO stdout WITH CSV QUOTE '''' DELIMITER '|'; +Jackson, Sam|\h +It is "perfect".| +''| +COPY y TO stdout WITH 
CSV FORCE QUOTE col2 ESCAPE E'\\' ENCODING 'sql_ascii'; +"Jackson, Sam","\\h" +"It is \"perfect\"."," " +"", +COPY y TO stdout WITH CSV FORCE QUOTE *; +"Jackson, Sam","\h" +"It is ""perfect""."," " +"", +-- Repeat above tests with new 9.0 option syntax +COPY y TO stdout (FORMAT CSV); +"Jackson, Sam",\h +"It is ""perfect"".", +"", +COPY y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|'); +Jackson, Sam|\h +It is "perfect".| +''| +COPY y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\'); +"Jackson, Sam","\\h" +"It is \"perfect\"."," " +"", +COPY y TO stdout (FORMAT CSV, FORCE_QUOTE *); +"Jackson, Sam","\h" +"It is ""perfect""."," " +"", +\copy y TO stdout (FORMAT CSV) +"Jackson, Sam",\h +"It is ""perfect"".", +"", +\copy y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|') +Jackson, Sam|\h +It is "perfect".| +''| +\copy y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\') +"Jackson, Sam","\\h" +"It is \"perfect\"."," " +"", +\copy y TO stdout (FORMAT CSV, FORCE_QUOTE *) +"Jackson, Sam","\h" +"It is ""perfect""."," " +"", +--test that we read consecutive LFs properly +CREATE TEMP TABLE testnl (a int, b text, c int); +COPY testnl FROM stdin CSV; +-- test end of copy marker +CREATE TEMP TABLE testeoc (a text); +COPY testeoc FROM stdin CSV; +COPY testeoc TO stdout CSV; +a\. +\.b +c\.d +"\." +-- test handling of nonstandard null marker that violates escaping rules +CREATE TEMP TABLE testnull(a int, b text); +INSERT INTO testnull VALUES (1, E'\\0'), (NULL, NULL); +COPY testnull TO stdout WITH NULL AS E'\\0'; +1 \\0 +\0 \0 +COPY testnull FROM stdin WITH NULL AS E'\\0'; +SELECT * FROM testnull; + a | b +----+---- + 1 | \0 + | + 42 | \0 + | +(4 rows) + +BEGIN; +CREATE TABLE vistest (LIKE testeoc); +COPY vistest FROM stdin CSV; +COMMIT; +SELECT * FROM vistest; + a +---- + a0 + b +(2 rows) + +BEGIN; +TRUNCATE vistest; +COPY vistest FROM stdin CSV; +SELECT * FROM vistest; + a +---- + a1 + b +(2 rows) + +SAVEPOINT s1; +TRUNCATE vistest; +COPY vistest FROM stdin CSV; +SELECT * FROM vistest; + a +---- + d1 + e +(2 rows) + +COMMIT; +SELECT * FROM vistest; + a +---- + d1 + e +(2 rows) + +BEGIN; +TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +SELECT * FROM vistest; + a +---- + a2 + b +(2 rows) + +SAVEPOINT s1; +TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +SELECT * FROM vistest; + a +---- + d2 + e +(2 rows) + +COMMIT; +SELECT * FROM vistest; + a +---- + d2 + e +(2 rows) + +BEGIN; +TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +SELECT * FROM vistest; + a +--- + x + y +(2 rows) + +COMMIT; +TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction +BEGIN; +TRUNCATE vistest; +SAVEPOINT s1; +COPY vistest FROM stdin CSV FREEZE; +ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction +COMMIT; +BEGIN; +INSERT INTO vistest VALUES ('z'); +SAVEPOINT s1; +TRUNCATE vistest; +ROLLBACK TO SAVEPOINT s1; +COPY vistest FROM stdin CSV FREEZE; +ERROR: cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction +COMMIT; +CREATE FUNCTION truncate_in_subxact() RETURNS VOID AS +$$ +BEGIN + TRUNCATE vistest; +EXCEPTION + WHEN OTHERS THEN + INSERT INTO vistest VALUES ('subxact failure'); +END; +$$ language plpgsql; +BEGIN; +INSERT INTO vistest VALUES ('z'); +SELECT truncate_in_subxact(); + truncate_in_subxact +--------------------- + +(1 row) + +COPY vistest FROM stdin CSV FREEZE; +SELECT * 
FROM vistest; + a +---- + d4 + e +(2 rows) + +COMMIT; +SELECT * FROM vistest; + a +---- + d4 + e +(2 rows) + +-- Test FORCE_NOT_NULL and FORCE_NULL options +CREATE TEMP TABLE forcetest ( + a INT NOT NULL, + b TEXT NOT NULL, + c TEXT, + d TEXT, + e TEXT +); +\pset null NULL +-- should succeed with no effect ("b" remains an empty string, "c" remains NULL) +BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b), FORCE_NULL(c)); +COMMIT; +SELECT b, c FROM forcetest WHERE a = 1; + b | c +---+------ + | NULL +(1 row) + +-- should succeed, FORCE_NULL and FORCE_NOT_NULL can be both specified +BEGIN; +COPY forcetest (a, b, c, d) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(c,d), FORCE_NULL(c,d)); +COMMIT; +SELECT c, d FROM forcetest WHERE a = 2; + c | d +---+------ + | NULL +(1 row) + +-- should fail with not-null constraint violation +BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b), FORCE_NOT_NULL(c)); +ERROR: null value in column "b" of relation "forcetest" violates not-null constraint +DETAIL: Failing row contains (3, null, , null, null). +CONTEXT: COPY forcetest, line 1: "3,,""" +ROLLBACK; +-- should fail with "not referenced by COPY" error +BEGIN; +COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b)); +ERROR: FORCE_NOT_NULL column "b" not referenced by COPY +ROLLBACK; +-- should fail with "not referenced by COPY" error +BEGIN; +COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b)); +ERROR: FORCE_NULL column "b" not referenced by COPY +ROLLBACK; +\pset null '' +-- test case with whole-row Var in a check constraint +create table check_con_tbl (f1 int); +create function check_con_function(check_con_tbl) returns bool as $$ +begin + raise notice 'input = %', row_to_json($1); + return $1.f1 > 0; +end $$ language plpgsql immutable; +alter table check_con_tbl add check (check_con_function(check_con_tbl.*)); +\d+ check_con_tbl + Table "public.check_con_tbl" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + f1 | integer | | | | plain | | +Check constraints: + "check_con_tbl_check" CHECK (check_con_function(check_con_tbl.*)) + +copy check_con_tbl from stdin; +NOTICE: input = {"f1":1} +NOTICE: input = {"f1":null} +copy check_con_tbl from stdin; +NOTICE: input = {"f1":0} +ERROR: new row for relation "check_con_tbl" violates check constraint "check_con_tbl_check" +DETAIL: Failing row contains (0). +CONTEXT: COPY check_con_tbl, line 1: "0" +select * from check_con_tbl; + f1 +---- + 1 + +(2 rows) + +-- test with RLS enabled. 
+CREATE ROLE regress_rls_copy_user; +CREATE ROLE regress_rls_copy_user_colperms; +CREATE TABLE rls_t1 (a int, b int, c int); +COPY rls_t1 (a, b, c) from stdin; +CREATE POLICY p1 ON rls_t1 FOR SELECT USING (a % 2 = 0); +ALTER TABLE rls_t1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_t1 FORCE ROW LEVEL SECURITY; +GRANT SELECT ON TABLE rls_t1 TO regress_rls_copy_user; +GRANT SELECT (a, b) ON TABLE rls_t1 TO regress_rls_copy_user_colperms; +-- all columns +COPY rls_t1 TO stdout; +1 4 1 +2 3 2 +3 2 3 +4 1 4 +COPY rls_t1 (a, b, c) TO stdout; +1 4 1 +2 3 2 +3 2 3 +4 1 4 +-- subset of columns +COPY rls_t1 (a) TO stdout; +1 +2 +3 +4 +COPY rls_t1 (a, b) TO stdout; +1 4 +2 3 +3 2 +4 1 +-- column reordering +COPY rls_t1 (b, a) TO stdout; +4 1 +3 2 +2 3 +1 4 +SET SESSION AUTHORIZATION regress_rls_copy_user; +-- all columns +COPY rls_t1 TO stdout; +2 3 2 +4 1 4 +COPY rls_t1 (a, b, c) TO stdout; +2 3 2 +4 1 4 +-- subset of columns +COPY rls_t1 (a) TO stdout; +2 +4 +COPY rls_t1 (a, b) TO stdout; +2 3 +4 1 +-- column reordering +COPY rls_t1 (b, a) TO stdout; +3 2 +1 4 +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_rls_copy_user_colperms; +-- attempt all columns (should fail) +COPY rls_t1 TO stdout; +ERROR: permission denied for table rls_t1 +COPY rls_t1 (a, b, c) TO stdout; +ERROR: permission denied for table rls_t1 +-- try to copy column with no privileges (should fail) +COPY rls_t1 (c) TO stdout; +ERROR: permission denied for table rls_t1 +-- subset of columns (should succeed) +COPY rls_t1 (a) TO stdout; +2 +4 +COPY rls_t1 (a, b) TO stdout; +2 3 +4 1 +RESET SESSION AUTHORIZATION; +-- test with INSTEAD OF INSERT trigger on a view +CREATE TABLE instead_of_insert_tbl(id serial, name text); +CREATE VIEW instead_of_insert_tbl_view AS SELECT ''::text AS str; +COPY instead_of_insert_tbl_view FROM stdin; -- fail +ERROR: cannot copy to view "instead_of_insert_tbl_view" +HINT: To enable copying to a view, provide an INSTEAD OF INSERT trigger. +CREATE FUNCTION fun_instead_of_insert_tbl() RETURNS trigger AS $$ +BEGIN + INSERT INTO instead_of_insert_tbl (name) VALUES (NEW.str); + RETURN NULL; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER trig_instead_of_insert_tbl_view + INSTEAD OF INSERT ON instead_of_insert_tbl_view + FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl(); +COPY instead_of_insert_tbl_view FROM stdin; +SELECT * FROM instead_of_insert_tbl; + id | name +----+------- + 1 | test1 +(1 row) + +-- Test of COPY optimization with view using INSTEAD OF INSERT +-- trigger when relation is created in the same transaction as +-- when COPY is executed. +BEGIN; +CREATE VIEW instead_of_insert_tbl_view_2 as select ''::text as str; +CREATE TRIGGER trig_instead_of_insert_tbl_view_2 + INSTEAD OF INSERT ON instead_of_insert_tbl_view_2 + FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl(); +COPY instead_of_insert_tbl_view_2 FROM stdin; +SELECT * FROM instead_of_insert_tbl; + id | name +----+------- + 1 | test1 + 2 | test1 +(2 rows) + +COMMIT; +-- clean up +DROP TABLE forcetest; +DROP TABLE vistest; +DROP FUNCTION truncate_in_subxact(); +DROP TABLE x, y; +DROP TABLE rls_t1 CASCADE; +DROP ROLE regress_rls_copy_user; +DROP ROLE regress_rls_copy_user_colperms; +DROP FUNCTION fn_x_before(); +DROP FUNCTION fn_x_after(); +DROP TABLE instead_of_insert_tbl; +DROP VIEW instead_of_insert_tbl_view; +DROP VIEW instead_of_insert_tbl_view_2; +DROP FUNCTION fun_instead_of_insert_tbl(); +-- +-- COPY FROM ... 
DEFAULT +-- +create temp table copy_default ( + id integer primary key, + text_value text not null default 'test', + ts_value timestamp without time zone not null default '2022-07-05' +); +-- if DEFAULT is not specified, then the marker will be regular data +copy copy_default from stdin; +select id, text_value, ts_value from copy_default; + id | text_value | ts_value +----+------------+-------------------------- + 1 | value | Mon Jul 04 00:00:00 2022 + 2 | D | Tue Jul 05 00:00:00 2022 +(2 rows) + +truncate copy_default; +copy copy_default from stdin with (format csv); +select id, text_value, ts_value from copy_default; + id | text_value | ts_value +----+------------+-------------------------- + 1 | value | Mon Jul 04 00:00:00 2022 + 2 | \D | Tue Jul 05 00:00:00 2022 +(2 rows) + +truncate copy_default; +-- DEFAULT cannot be used in binary mode +copy copy_default from stdin with (format binary, default '\D'); +ERROR: cannot specify DEFAULT in BINARY mode +-- DEFAULT cannot be new line nor carriage return +copy copy_default from stdin with (default E'\n'); +ERROR: COPY default representation cannot use newline or carriage return +copy copy_default from stdin with (default E'\r'); +ERROR: COPY default representation cannot use newline or carriage return +-- DELIMITER cannot appear in DEFAULT spec +copy copy_default from stdin with (delimiter ';', default 'test;test'); +ERROR: COPY delimiter must not appear in the DEFAULT specification +-- CSV quote cannot appear in DEFAULT spec +copy copy_default from stdin with (format csv, quote '"', default 'test"test'); +ERROR: CSV quote character must not appear in the DEFAULT specification +-- NULL and DEFAULT spec must be different +copy copy_default from stdin with (default '\N'); +ERROR: NULL specification and DEFAULT specification cannot be the same +-- cannot use DEFAULT marker in column that has no DEFAULT value +copy copy_default from stdin with (default '\D'); +ERROR: unexpected default marker in COPY data +DETAIL: Column "id" has no default value. +CONTEXT: COPY copy_default, line 1: "\D value '2022-07-04'" +copy copy_default from stdin with (format csv, default '\D'); +ERROR: unexpected default marker in COPY data +DETAIL: Column "id" has no default value. 
+CONTEXT: COPY copy_default, line 1: "\D,value,2022-07-04" +-- The DEFAULT marker must be unquoted and unescaped or it's not recognized +copy copy_default from stdin with (default '\D'); +select id, text_value, ts_value from copy_default; + id | text_value | ts_value +----+------------+-------------------------- + 1 | test | Mon Jul 04 00:00:00 2022 + 2 | \D | Mon Jul 04 00:00:00 2022 + 3 | "D" | Mon Jul 04 00:00:00 2022 +(3 rows) + +truncate copy_default; +copy copy_default from stdin with (format csv, default '\D'); +select id, text_value, ts_value from copy_default; + id | text_value | ts_value +----+------------+-------------------------- + 1 | test | Mon Jul 04 00:00:00 2022 + 2 | \\D | Mon Jul 04 00:00:00 2022 + 3 | \D | Mon Jul 04 00:00:00 2022 +(3 rows) + +truncate copy_default; +-- successful usage of DEFAULT option in COPY +copy copy_default from stdin with (default '\D'); +select id, text_value, ts_value from copy_default; + id | text_value | ts_value +----+------------+-------------------------- + 1 | value | Mon Jul 04 00:00:00 2022 + 2 | test | Sun Jul 03 00:00:00 2022 + 3 | test | Tue Jul 05 00:00:00 2022 +(3 rows) + +truncate copy_default; +copy copy_default from stdin with (format csv, default '\D'); +select id, text_value, ts_value from copy_default; + id | text_value | ts_value +----+------------+-------------------------- + 1 | value | Mon Jul 04 00:00:00 2022 + 2 | test | Sun Jul 03 00:00:00 2022 + 3 | test | Tue Jul 05 00:00:00 2022 +(3 rows) + +truncate copy_default; +-- DEFAULT cannot be used in COPY TO +copy (select 1 as test) TO stdout with (default '\D'); +ERROR: COPY DEFAULT only available using COPY FROM diff --git a/src/test/regress/expected/copydml.out b/src/test/regress/expected/copydml.out new file mode 100644 index 0000000..b5a2256 --- /dev/null +++ b/src/test/regress/expected/copydml.out @@ -0,0 +1,112 @@ +-- +-- Test cases for COPY (INSERT/UPDATE/DELETE) TO +-- +create table copydml_test (id serial, t text); +insert into copydml_test (t) values ('a'); +insert into copydml_test (t) values ('b'); +insert into copydml_test (t) values ('c'); +insert into copydml_test (t) values ('d'); +insert into copydml_test (t) values ('e'); +-- +-- Test COPY (insert/update/delete ...) +-- +copy (insert into copydml_test (t) values ('f') returning id) to stdout; +6 +copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout; +6 +copy (delete from copydml_test where t = 'g' returning id) to stdout; +6 +-- +-- Test \copy (insert/update/delete ...) 
+-- +\copy (insert into copydml_test (t) values ('f') returning id) to stdout; +7 +\copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout; +7 +\copy (delete from copydml_test where t = 'g' returning id) to stdout; +7 +-- Error cases +copy (insert into copydml_test default values) to stdout; +ERROR: COPY query must have a RETURNING clause +copy (update copydml_test set t = 'g') to stdout; +ERROR: COPY query must have a RETURNING clause +copy (delete from copydml_test) to stdout; +ERROR: COPY query must have a RETURNING clause +create rule qqq as on insert to copydml_test do instead nothing; +copy (insert into copydml_test default values) to stdout; +ERROR: DO INSTEAD NOTHING rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on insert to copydml_test do also delete from copydml_test; +copy (insert into copydml_test default values) to stdout; +ERROR: DO ALSO rules are not supported for the COPY +drop rule qqq on copydml_test; +create rule qqq as on insert to copydml_test do instead (delete from copydml_test; delete from copydml_test); +copy (insert into copydml_test default values) to stdout; +ERROR: multi-statement DO INSTEAD rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on insert to copydml_test where new.t <> 'f' do instead delete from copydml_test; +copy (insert into copydml_test default values) to stdout; +ERROR: conditional DO INSTEAD rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on update to copydml_test do instead nothing; +copy (update copydml_test set t = 'f') to stdout; +ERROR: DO INSTEAD NOTHING rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on update to copydml_test do also delete from copydml_test; +copy (update copydml_test set t = 'f') to stdout; +ERROR: DO ALSO rules are not supported for the COPY +drop rule qqq on copydml_test; +create rule qqq as on update to copydml_test do instead (delete from copydml_test; delete from copydml_test); +copy (update copydml_test set t = 'f') to stdout; +ERROR: multi-statement DO INSTEAD rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on update to copydml_test where new.t <> 'f' do instead delete from copydml_test; +copy (update copydml_test set t = 'f') to stdout; +ERROR: conditional DO INSTEAD rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on delete to copydml_test do instead nothing; +copy (delete from copydml_test) to stdout; +ERROR: DO INSTEAD NOTHING rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on delete to copydml_test do also insert into copydml_test default values; +copy (delete from copydml_test) to stdout; +ERROR: DO ALSO rules are not supported for the COPY +drop rule qqq on copydml_test; +create rule qqq as on delete to copydml_test do instead (insert into copydml_test default values; insert into copydml_test default values); +copy (delete from copydml_test) to stdout; +ERROR: multi-statement DO INSTEAD rules are not supported for COPY +drop rule qqq on copydml_test; +create rule qqq as on delete to copydml_test where old.t <> 'f' do instead insert into copydml_test default values; +copy (delete from copydml_test) to stdout; +ERROR: conditional DO INSTEAD rules are not supported for COPY +drop rule qqq on copydml_test; +-- triggers +create function qqq_trig() returns trigger as $$ +begin +if tg_op in ('INSERT', 'UPDATE') then + raise notice '% % 
%', tg_when, tg_op, new.id; + return new; +else + raise notice '% % %', tg_when, tg_op, old.id; + return old; +end if; +end +$$ language plpgsql; +create trigger qqqbef before insert or update or delete on copydml_test + for each row execute procedure qqq_trig(); +create trigger qqqaf after insert or update or delete on copydml_test + for each row execute procedure qqq_trig(); +copy (insert into copydml_test (t) values ('f') returning id) to stdout; +NOTICE: BEFORE INSERT 8 +8 +NOTICE: AFTER INSERT 8 +copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout; +NOTICE: BEFORE UPDATE 8 +8 +NOTICE: AFTER UPDATE 8 +copy (delete from copydml_test where t = 'g' returning id) to stdout; +NOTICE: BEFORE DELETE 8 +8 +NOTICE: AFTER DELETE 8 +drop table copydml_test; +drop function qqq_trig(); diff --git a/src/test/regress/expected/copyselect.out b/src/test/regress/expected/copyselect.out new file mode 100644 index 0000000..bb9e026 --- /dev/null +++ b/src/test/regress/expected/copyselect.out @@ -0,0 +1,161 @@ +-- +-- Test cases for COPY (select) TO +-- +create table test1 (id serial, t text); +insert into test1 (t) values ('a'); +insert into test1 (t) values ('b'); +insert into test1 (t) values ('c'); +insert into test1 (t) values ('d'); +insert into test1 (t) values ('e'); +create table test2 (id serial, t text); +insert into test2 (t) values ('A'); +insert into test2 (t) values ('B'); +insert into test2 (t) values ('C'); +insert into test2 (t) values ('D'); +insert into test2 (t) values ('E'); +create view v_test1 +as select 'v_'||t from test1; +-- +-- Test COPY table TO +-- +copy test1 to stdout; +1 a +2 b +3 c +4 d +5 e +-- +-- This should fail +-- +copy v_test1 to stdout; +ERROR: cannot copy from view "v_test1" +HINT: Try the COPY (SELECT ...) TO variant. +-- +-- Test COPY (select) TO +-- +copy (select t from test1 where id=1) to stdout; +a +-- +-- Test COPY (select for update) TO +-- +copy (select t from test1 where id=3 for update) to stdout; +c +-- +-- This should fail +-- +copy (select t into temp test3 from test1 where id=3) to stdout; +ERROR: COPY (SELECT INTO) is not supported +-- +-- This should fail +-- +copy (select * from test1) from stdin; +ERROR: syntax error at or near "from" +LINE 1: copy (select * from test1) from stdin; + ^ +-- +-- This should fail +-- +copy (select * from test1) (t,id) to stdout; +ERROR: syntax error at or near "(" +LINE 1: copy (select * from test1) (t,id) to stdout; + ^ +-- +-- Test JOIN +-- +copy (select * from test1 join test2 using (id)) to stdout; +1 a A +2 b B +3 c C +4 d D +5 e E +-- +-- Test UNION SELECT +-- +copy (select t from test1 where id = 1 UNION select * from v_test1 ORDER BY 1) to stdout; +a +v_a +v_b +v_c +v_d +v_e +-- +-- Test subselect +-- +copy (select * from (select t from test1 where id = 1 UNION select * from v_test1 ORDER BY 1) t1) to stdout; +a +v_a +v_b +v_c +v_d +v_e +-- +-- Test headers, CSV and quotes +-- +copy (select t from test1 where id = 1) to stdout csv header force quote t; +t +"a" +-- +-- Test psql builtins, plain table +-- +\copy test1 to stdout +1 a +2 b +3 c +4 d +5 e +-- +-- This should fail +-- +\copy v_test1 to stdout +ERROR: cannot copy from view "v_test1" +HINT: Try the COPY (SELECT ...) TO variant. +-- +-- Test \copy (select ...) 
+-- +\copy (select "id",'id','id""'||t,(id + 1)*id,t,"test1"."t" from test1 where id=3) to stdout +3 id id""c 12 c c +-- +-- Drop everything +-- +drop table test2; +drop view v_test1; +drop table test1; +-- psql handling of COPY in multi-command strings +copy (select 1) to stdout\; select 1/0; -- row, then error +1 +ERROR: division by zero +select 1/0\; copy (select 1) to stdout; -- error only +ERROR: division by zero +copy (select 1) to stdout\; copy (select 2) to stdout\; select 3\; select 4; -- 1 2 3 4 +1 +2 + ?column? +---------- + 3 +(1 row) + + ?column? +---------- + 4 +(1 row) + +create table test3 (c int); +select 0\; copy test3 from stdin\; copy test3 from stdin\; select 1; -- 0 1 + ?column? +---------- + 0 +(1 row) + + ?column? +---------- + 1 +(1 row) + +select * from test3; + c +--- + 1 + 2 +(2 rows) + +drop table test3; diff --git a/src/test/regress/expected/create_aggregate.out b/src/test/regress/expected/create_aggregate.out new file mode 100644 index 0000000..dcf6909 --- /dev/null +++ b/src/test/regress/expected/create_aggregate.out @@ -0,0 +1,324 @@ +-- +-- CREATE_AGGREGATE +-- +-- all functions CREATEd +CREATE AGGREGATE newavg ( + sfunc = int4_avg_accum, basetype = int4, stype = _int8, + finalfunc = int8_avg, + initcond1 = '{0,0}' +); +-- test comments +COMMENT ON AGGREGATE newavg_wrong (int4) IS 'an agg comment'; +ERROR: aggregate newavg_wrong(integer) does not exist +COMMENT ON AGGREGATE newavg (int4) IS 'an agg comment'; +COMMENT ON AGGREGATE newavg (int4) IS NULL; +-- without finalfunc; test obsolete spellings 'sfunc1' etc +CREATE AGGREGATE newsum ( + sfunc1 = int4pl, basetype = int4, stype1 = int4, + initcond1 = '0' +); +-- zero-argument aggregate +CREATE AGGREGATE newcnt (*) ( + sfunc = int8inc, stype = int8, + initcond = '0', parallel = safe +); +-- old-style spelling of same (except without parallel-safe; that's too new) +CREATE AGGREGATE oldcnt ( + sfunc = int8inc, basetype = 'ANY', stype = int8, + initcond = '0' +); +-- aggregate that only cares about null/nonnull input +CREATE AGGREGATE newcnt ("any") ( + sfunc = int8inc_any, stype = int8, + initcond = '0' +); +COMMENT ON AGGREGATE nosuchagg (*) IS 'should fail'; +ERROR: aggregate nosuchagg(*) does not exist +COMMENT ON AGGREGATE newcnt (*) IS 'an agg(*) comment'; +COMMENT ON AGGREGATE newcnt ("any") IS 'an agg(any) comment'; +-- multi-argument aggregate +create function sum3(int8,int8,int8) returns int8 as +'select $1 + $2 + $3' language sql strict immutable; +create aggregate sum2(int8,int8) ( + sfunc = sum3, stype = int8, + initcond = '0' +); +-- multi-argument aggregates sensitive to distinct/order, strict/nonstrict +create type aggtype as (a integer, b integer, c text); +create function aggf_trans(aggtype[],integer,integer,text) returns aggtype[] +as 'select array_append($1,ROW($2,$3,$4)::aggtype)' +language sql strict immutable; +create function aggfns_trans(aggtype[],integer,integer,text) returns aggtype[] +as 'select array_append($1,ROW($2,$3,$4)::aggtype)' +language sql immutable; +create aggregate aggfstr(integer,integer,text) ( + sfunc = aggf_trans, stype = aggtype[], + initcond = '{}' +); +create aggregate aggfns(integer,integer,text) ( + sfunc = aggfns_trans, stype = aggtype[], sspace = 10000, + initcond = '{}' +); +-- check error cases that would require run-time type coercion +create function least_accum(int8, int8) returns int8 language sql as + 'select least($1, $2)'; +create aggregate least_agg(int4) ( + stype = int8, sfunc = least_accum +); -- fails +ERROR: function least_accum(bigint, 
bigint) requires run-time type coercion +drop function least_accum(int8, int8); +create function least_accum(anycompatible, anycompatible) +returns anycompatible language sql as + 'select least($1, $2)'; +create aggregate least_agg(int4) ( + stype = int8, sfunc = least_accum +); -- fails +ERROR: function least_accum(bigint, bigint) requires run-time type coercion +create aggregate least_agg(int8) ( + stype = int8, sfunc = least_accum +); +drop function least_accum(anycompatible, anycompatible) cascade; +NOTICE: drop cascades to function least_agg(bigint) +-- variadic aggregates +create function least_accum(anyelement, variadic anyarray) +returns anyelement language sql as + 'select least($1, min($2[i])) from generate_subscripts($2,1) g(i)'; +create aggregate least_agg(variadic items anyarray) ( + stype = anyelement, sfunc = least_accum +); +create function cleast_accum(anycompatible, variadic anycompatiblearray) +returns anycompatible language sql as + 'select least($1, min($2[i])) from generate_subscripts($2,1) g(i)'; +create aggregate cleast_agg(variadic items anycompatiblearray) ( + stype = anycompatible, sfunc = cleast_accum +); +-- test ordered-set aggs using built-in support functions +create aggregate my_percentile_disc(float8 ORDER BY anyelement) ( + stype = internal, + sfunc = ordered_set_transition, + finalfunc = percentile_disc_final, + finalfunc_extra = true, + finalfunc_modify = read_write +); +create aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") ( + stype = internal, + sfunc = ordered_set_transition_multi, + finalfunc = rank_final, + finalfunc_extra = true, + hypothetical +); +alter aggregate my_percentile_disc(float8 ORDER BY anyelement) + rename to test_percentile_disc; +alter aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") + rename to test_rank; +\da test_* + List of aggregate functions + Schema | Name | Result data type | Argument data types | Description +--------+----------------------+------------------+----------------------------------------+------------- + public | test_percentile_disc | anyelement | double precision ORDER BY anyelement | + public | test_rank | bigint | VARIADIC "any" ORDER BY VARIADIC "any" | +(2 rows) + +-- moving-aggregate options +CREATE AGGREGATE sumdouble (float8) +( + stype = float8, + sfunc = float8pl, + mstype = float8, + msfunc = float8pl, + minvfunc = float8mi +); +-- aggregate combine and serialization functions +-- can't specify just one of serialfunc and deserialfunc +CREATE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + serialfunc = numeric_avg_serialize +); +ERROR: must specify both or neither of serialization and deserialization functions +-- serialfunc must have correct parameters +CREATE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + serialfunc = numeric_avg_deserialize, + deserialfunc = numeric_avg_deserialize +); +ERROR: function numeric_avg_deserialize(internal) does not exist +-- deserialfunc must have correct parameters +CREATE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + serialfunc = numeric_avg_serialize, + deserialfunc = numeric_avg_serialize +); +ERROR: function numeric_avg_serialize(bytea, internal) does not exist +-- ensure combine function parameters are checked +CREATE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + serialfunc = numeric_avg_serialize, + deserialfunc = numeric_avg_deserialize, + combinefunc = int4larger +); +ERROR: function int4larger(internal, 
internal) does not exist +-- ensure create aggregate works. +CREATE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + finalfunc = numeric_avg, + serialfunc = numeric_avg_serialize, + deserialfunc = numeric_avg_deserialize, + combinefunc = numeric_avg_combine, + finalfunc_modify = shareable -- just to test a non-default setting +); +-- Ensure all these functions made it into the catalog +SELECT aggfnoid, aggtransfn, aggcombinefn, aggtranstype::regtype, + aggserialfn, aggdeserialfn, aggfinalmodify +FROM pg_aggregate +WHERE aggfnoid = 'myavg'::REGPROC; + aggfnoid | aggtransfn | aggcombinefn | aggtranstype | aggserialfn | aggdeserialfn | aggfinalmodify +----------+-------------------+---------------------+--------------+-----------------------+-------------------------+---------------- + myavg | numeric_avg_accum | numeric_avg_combine | internal | numeric_avg_serialize | numeric_avg_deserialize | s +(1 row) + +DROP AGGREGATE myavg (numeric); +-- create or replace aggregate +CREATE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + finalfunc = numeric_avg +); +CREATE OR REPLACE AGGREGATE myavg (numeric) +( + stype = internal, + sfunc = numeric_avg_accum, + finalfunc = numeric_avg, + serialfunc = numeric_avg_serialize, + deserialfunc = numeric_avg_deserialize, + combinefunc = numeric_avg_combine, + finalfunc_modify = shareable -- just to test a non-default setting +); +-- Ensure all these functions made it into the catalog again +SELECT aggfnoid, aggtransfn, aggcombinefn, aggtranstype::regtype, + aggserialfn, aggdeserialfn, aggfinalmodify +FROM pg_aggregate +WHERE aggfnoid = 'myavg'::REGPROC; + aggfnoid | aggtransfn | aggcombinefn | aggtranstype | aggserialfn | aggdeserialfn | aggfinalmodify +----------+-------------------+---------------------+--------------+-----------------------+-------------------------+---------------- + myavg | numeric_avg_accum | numeric_avg_combine | internal | numeric_avg_serialize | numeric_avg_deserialize | s +(1 row) + +-- can change stype: +CREATE OR REPLACE AGGREGATE myavg (numeric) +( + stype = numeric, + sfunc = numeric_add +); +SELECT aggfnoid, aggtransfn, aggcombinefn, aggtranstype::regtype, + aggserialfn, aggdeserialfn, aggfinalmodify +FROM pg_aggregate +WHERE aggfnoid = 'myavg'::REGPROC; + aggfnoid | aggtransfn | aggcombinefn | aggtranstype | aggserialfn | aggdeserialfn | aggfinalmodify +----------+-------------+--------------+--------------+-------------+---------------+---------------- + myavg | numeric_add | - | numeric | - | - | r +(1 row) + +-- can't change return type: +CREATE OR REPLACE AGGREGATE myavg (numeric) +( + stype = numeric, + sfunc = numeric_add, + finalfunc = numeric_out +); +ERROR: cannot change return type of existing function +HINT: Use DROP AGGREGATE myavg(numeric) first. +-- can't change to a different kind: +CREATE OR REPLACE AGGREGATE myavg (order by numeric) +( + stype = numeric, + sfunc = numeric_add +); +ERROR: cannot change routine kind +DETAIL: "myavg" is an ordinary aggregate function. +-- can't change plain function to aggregate: +create function sum4(int8,int8,int8,int8) returns int8 as +'select $1 + $2 + $3 + $4' language sql strict immutable; +CREATE OR REPLACE AGGREGATE sum3 (int8,int8,int8) +( + stype = int8, + sfunc = sum4 +); +ERROR: cannot change routine kind +DETAIL: "sum3" is a function. 
+drop function sum4(int8,int8,int8,int8); +DROP AGGREGATE myavg (numeric); +-- invalid: bad parallel-safety marking +CREATE AGGREGATE mysum (int) +( + stype = int, + sfunc = int4pl, + parallel = pear +); +ERROR: parameter "parallel" must be SAFE, RESTRICTED, or UNSAFE +-- invalid: nonstrict inverse with strict forward function +CREATE FUNCTION float8mi_n(float8, float8) RETURNS float8 AS +$$ SELECT $1 - $2; $$ +LANGUAGE SQL; +CREATE AGGREGATE invalidsumdouble (float8) +( + stype = float8, + sfunc = float8pl, + mstype = float8, + msfunc = float8pl, + minvfunc = float8mi_n +); +ERROR: strictness of aggregate's forward and inverse transition functions must match +-- invalid: non-matching result types +CREATE FUNCTION float8mi_int(float8, float8) RETURNS int AS +$$ SELECT CAST($1 - $2 AS INT); $$ +LANGUAGE SQL; +CREATE AGGREGATE wrongreturntype (float8) +( + stype = float8, + sfunc = float8pl, + mstype = float8, + msfunc = float8pl, + minvfunc = float8mi_int +); +ERROR: return type of inverse transition function float8mi_int is not double precision +-- invalid: non-lowercase quoted identifiers +CREATE AGGREGATE case_agg ( -- old syntax + "Sfunc1" = int4pl, + "Basetype" = int4, + "Stype1" = int4, + "Initcond1" = '0', + "Parallel" = safe +); +WARNING: aggregate attribute "Sfunc1" not recognized +WARNING: aggregate attribute "Basetype" not recognized +WARNING: aggregate attribute "Stype1" not recognized +WARNING: aggregate attribute "Initcond1" not recognized +WARNING: aggregate attribute "Parallel" not recognized +ERROR: aggregate stype must be specified +CREATE AGGREGATE case_agg(float8) +( + "Stype" = internal, + "Sfunc" = ordered_set_transition, + "Finalfunc" = percentile_disc_final, + "Finalfunc_extra" = true, + "Finalfunc_modify" = read_write, + "Parallel" = safe +); +WARNING: aggregate attribute "Stype" not recognized +WARNING: aggregate attribute "Sfunc" not recognized +WARNING: aggregate attribute "Finalfunc" not recognized +WARNING: aggregate attribute "Finalfunc_extra" not recognized +WARNING: aggregate attribute "Finalfunc_modify" not recognized +WARNING: aggregate attribute "Parallel" not recognized +ERROR: aggregate stype must be specified diff --git a/src/test/regress/expected/create_am.out b/src/test/regress/expected/create_am.out new file mode 100644 index 0000000..b50293d --- /dev/null +++ b/src/test/regress/expected/create_am.out @@ -0,0 +1,390 @@ +-- +-- Create access method tests +-- +-- Make gist2 over gisthandler. In fact, it would be a synonym to gist. +CREATE ACCESS METHOD gist2 TYPE INDEX HANDLER gisthandler; +-- Verify return type checks for handlers +CREATE ACCESS METHOD bogus TYPE INDEX HANDLER int4in; +ERROR: function int4in(internal) does not exist +CREATE ACCESS METHOD bogus TYPE INDEX HANDLER heap_tableam_handler; +ERROR: function heap_tableam_handler must return type index_am_handler +-- Try to create gist2 index on fast_emp4000: fail because opclass doesn't exist +CREATE INDEX grect2ind2 ON fast_emp4000 USING gist2 (home_base); +ERROR: data type box has no default operator class for access method "gist2" +HINT: You must specify an operator class for the index or define a default operator class for the data type. 
+-- Make operator class for boxes using gist2 +CREATE OPERATOR CLASS box_ops DEFAULT + FOR TYPE box USING gist2 AS + OPERATOR 1 <<, + OPERATOR 2 &<, + OPERATOR 3 &&, + OPERATOR 4 &>, + OPERATOR 5 >>, + OPERATOR 6 ~=, + OPERATOR 7 @>, + OPERATOR 8 <@, + OPERATOR 9 &<|, + OPERATOR 10 <<|, + OPERATOR 11 |>>, + OPERATOR 12 |&>, + FUNCTION 1 gist_box_consistent(internal, box, smallint, oid, internal), + FUNCTION 2 gist_box_union(internal, internal), + -- don't need compress, decompress, or fetch functions + FUNCTION 5 gist_box_penalty(internal, internal, internal), + FUNCTION 6 gist_box_picksplit(internal, internal), + FUNCTION 7 gist_box_same(box, box, internal); +-- Create gist2 index on fast_emp4000 +CREATE INDEX grect2ind2 ON fast_emp4000 USING gist2 (home_base); +-- Now check the results from plain indexscan; temporarily drop existing +-- index grect2ind to ensure it doesn't capture the plan +BEGIN; +DROP INDEX grect2ind; +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM fast_emp4000 + WHERE home_base <@ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + QUERY PLAN +----------------------------------------------------------------- + Sort + Sort Key: ((home_base[0])[0]) + -> Index Only Scan using grect2ind2 on fast_emp4000 + Index Cond: (home_base <@ '(2000,1000),(200,200)'::box) +(4 rows) + +SELECT * FROM fast_emp4000 + WHERE home_base <@ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + home_base +----------------------- + (337,455),(240,359) + (1444,403),(1346,344) +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + QUERY PLAN +------------------------------------------------------------- + Aggregate + -> Index Only Scan using grect2ind2 on fast_emp4000 + Index Cond: (home_base && '(1000,1000),(0,0)'::box) +(3 rows) + +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Index Only Scan using grect2ind2 on fast_emp4000 + Index Cond: (home_base IS NULL) +(3 rows) + +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + count +------- + 278 +(1 row) + +ROLLBACK; +-- Try to drop access method: fail because of dependent objects +DROP ACCESS METHOD gist2; +ERROR: cannot drop access method gist2 because other objects depend on it +DETAIL: index grect2ind2 depends on operator class box_ops for access method gist2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- Drop access method cascade +-- To prevent a (rare) deadlock against autovacuum, +-- we must lock the table that owns the index that will be dropped +BEGIN; +LOCK TABLE fast_emp4000; +DROP ACCESS METHOD gist2 CASCADE; +NOTICE: drop cascades to index grect2ind2 +COMMIT; +-- +-- Test table access methods +-- +-- prevent empty values +SET default_table_access_method = ''; +ERROR: invalid value for parameter "default_table_access_method": "" +DETAIL: default_table_access_method cannot be empty. +-- prevent nonexistent values +SET default_table_access_method = 'I do not exist AM'; +ERROR: invalid value for parameter "default_table_access_method": "I do not exist AM" +DETAIL: Table access method "I do not exist AM" does not exist. 
+-- prevent setting it to an index AM +SET default_table_access_method = 'btree'; +ERROR: access method "btree" is not of type TABLE +-- Create a heap2 table am handler with heapam handler +CREATE ACCESS METHOD heap2 TYPE TABLE HANDLER heap_tableam_handler; +-- Verify return type checks for handlers +CREATE ACCESS METHOD bogus TYPE TABLE HANDLER int4in; +ERROR: function int4in(internal) does not exist +CREATE ACCESS METHOD bogus TYPE TABLE HANDLER bthandler; +ERROR: function bthandler must return type table_am_handler +SELECT amname, amhandler, amtype FROM pg_am where amtype = 't' ORDER BY 1, 2; + amname | amhandler | amtype +--------+----------------------+-------- + heap | heap_tableam_handler | t + heap2 | heap_tableam_handler | t +(2 rows) + +-- First create tables employing the new AM using USING +-- plain CREATE TABLE +CREATE TABLE tableam_tbl_heap2(f1 int) USING heap2; +INSERT INTO tableam_tbl_heap2 VALUES(1); +SELECT f1 FROM tableam_tbl_heap2 ORDER BY f1; + f1 +---- + 1 +(1 row) + +-- CREATE TABLE AS +CREATE TABLE tableam_tblas_heap2 USING heap2 AS SELECT * FROM tableam_tbl_heap2; +SELECT f1 FROM tableam_tbl_heap2 ORDER BY f1; + f1 +---- + 1 +(1 row) + +-- SELECT INTO doesn't support USING +SELECT INTO tableam_tblselectinto_heap2 USING heap2 FROM tableam_tbl_heap2; +ERROR: syntax error at or near "USING" +LINE 1: SELECT INTO tableam_tblselectinto_heap2 USING heap2 FROM tab... + ^ +-- CREATE VIEW doesn't support USING +CREATE VIEW tableam_view_heap2 USING heap2 AS SELECT * FROM tableam_tbl_heap2; +ERROR: syntax error at or near "USING" +LINE 1: CREATE VIEW tableam_view_heap2 USING heap2 AS SELECT * FROM ... + ^ +-- CREATE SEQUENCE doesn't support USING +CREATE SEQUENCE tableam_seq_heap2 USING heap2; +ERROR: syntax error at or near "USING" +LINE 1: CREATE SEQUENCE tableam_seq_heap2 USING heap2; + ^ +-- CREATE MATERIALIZED VIEW does support USING +CREATE MATERIALIZED VIEW tableam_tblmv_heap2 USING heap2 AS SELECT * FROM tableam_tbl_heap2; +SELECT f1 FROM tableam_tblmv_heap2 ORDER BY f1; + f1 +---- + 1 +(1 row) + +-- CREATE TABLE .. 
PARTITION BY doesn't not support USING +CREATE TABLE tableam_parted_heap2 (a text, b int) PARTITION BY list (a) USING heap2; +ERROR: specifying a table access method is not supported on a partitioned table +CREATE TABLE tableam_parted_heap2 (a text, b int) PARTITION BY list (a); +-- new partitions will inherit from the current default, rather the partition root +SET default_table_access_method = 'heap'; +CREATE TABLE tableam_parted_a_heap2 PARTITION OF tableam_parted_heap2 FOR VALUES IN ('a'); +SET default_table_access_method = 'heap2'; +CREATE TABLE tableam_parted_b_heap2 PARTITION OF tableam_parted_heap2 FOR VALUES IN ('b'); +RESET default_table_access_method; +-- but the method can be explicitly specified +CREATE TABLE tableam_parted_c_heap2 PARTITION OF tableam_parted_heap2 FOR VALUES IN ('c') USING heap; +CREATE TABLE tableam_parted_d_heap2 PARTITION OF tableam_parted_heap2 FOR VALUES IN ('d') USING heap2; +-- List all objects in AM +SELECT + pc.relkind, + pa.amname, + CASE WHEN relkind = 't' THEN + (SELECT 'toast for ' || relname::regclass FROM pg_class pcm WHERE pcm.reltoastrelid = pc.oid) + ELSE + relname::regclass::text + END COLLATE "C" AS relname +FROM pg_class AS pc, + pg_am AS pa +WHERE pa.oid = pc.relam + AND pa.amname = 'heap2' +ORDER BY 3, 1, 2; + relkind | amname | relname +---------+--------+---------------------------------- + r | heap2 | tableam_parted_b_heap2 + r | heap2 | tableam_parted_d_heap2 + r | heap2 | tableam_tbl_heap2 + r | heap2 | tableam_tblas_heap2 + m | heap2 | tableam_tblmv_heap2 + t | heap2 | toast for tableam_parted_b_heap2 + t | heap2 | toast for tableam_parted_d_heap2 +(7 rows) + +-- Show dependencies onto AM - there shouldn't be any for toast +SELECT pg_describe_object(classid,objid,objsubid) AS obj +FROM pg_depend, pg_am +WHERE pg_depend.refclassid = 'pg_am'::regclass + AND pg_am.oid = pg_depend.refobjid + AND pg_am.amname = 'heap2' +ORDER BY classid, objid, objsubid; + obj +--------------------------------------- + table tableam_tbl_heap2 + table tableam_tblas_heap2 + materialized view tableam_tblmv_heap2 + table tableam_parted_b_heap2 + table tableam_parted_d_heap2 +(5 rows) + +-- ALTER TABLE SET ACCESS METHOD +CREATE TABLE heaptable USING heap AS + SELECT a, repeat(a::text, 100) FROM generate_series(1,9) AS a; +SELECT amname FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND c.oid = 'heaptable'::regclass; + amname +-------- + heap +(1 row) + +-- Switching to heap2 adds new dependency entry to the AM. +ALTER TABLE heaptable SET ACCESS METHOD heap2; +SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as objref, + deptype + FROM pg_depend + WHERE classid = 'pg_class'::regclass AND + objid = 'heaptable'::regclass + ORDER BY 1, 2; + obj | objref | deptype +-----------------+---------------------+--------- + table heaptable | access method heap2 | n + table heaptable | schema public | n +(2 rows) + +-- Switching to heap should not have a dependency entry to the AM. 
+ALTER TABLE heaptable SET ACCESS METHOD heap; +SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as objref, + deptype + FROM pg_depend + WHERE classid = 'pg_class'::regclass AND + objid = 'heaptable'::regclass + ORDER BY 1, 2; + obj | objref | deptype +-----------------+---------------+--------- + table heaptable | schema public | n +(1 row) + +ALTER TABLE heaptable SET ACCESS METHOD heap2; +SELECT amname FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND c.oid = 'heaptable'::regclass; + amname +-------- + heap2 +(1 row) + +SELECT COUNT(a), COUNT(1) FILTER(WHERE a=1) FROM heaptable; + count | count +-------+------- + 9 | 1 +(1 row) + +-- ALTER MATERIALIZED VIEW SET ACCESS METHOD +CREATE MATERIALIZED VIEW heapmv USING heap AS SELECT * FROM heaptable; +SELECT amname FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND c.oid = 'heapmv'::regclass; + amname +-------- + heap +(1 row) + +ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap2; +SELECT amname FROM pg_class c, pg_am am + WHERE c.relam = am.oid AND c.oid = 'heapmv'::regclass; + amname +-------- + heap2 +(1 row) + +SELECT COUNT(a), COUNT(1) FILTER(WHERE a=1) FROM heapmv; + count | count +-------+------- + 9 | 1 +(1 row) + +-- No support for multiple subcommands +ALTER TABLE heaptable SET ACCESS METHOD heap, SET ACCESS METHOD heap2; +ERROR: cannot have multiple SET ACCESS METHOD subcommands +ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap, SET ACCESS METHOD heap2; +ERROR: cannot have multiple SET ACCESS METHOD subcommands +DROP MATERIALIZED VIEW heapmv; +DROP TABLE heaptable; +-- No support for partitioned tables. +CREATE TABLE am_partitioned(x INT, y INT) + PARTITION BY hash (x); +ALTER TABLE am_partitioned SET ACCESS METHOD heap2; +ERROR: cannot change access method of a partitioned table +DROP TABLE am_partitioned; +-- Second, create objects in the new AM by changing the default AM +BEGIN; +SET LOCAL default_table_access_method = 'heap2'; +-- following tests should all respect the default AM +CREATE TABLE tableam_tbl_heapx(f1 int); +CREATE TABLE tableam_tblas_heapx AS SELECT * FROM tableam_tbl_heapx; +SELECT INTO tableam_tblselectinto_heapx FROM tableam_tbl_heapx; +CREATE MATERIALIZED VIEW tableam_tblmv_heapx USING heap2 AS SELECT * FROM tableam_tbl_heapx; +CREATE TABLE tableam_parted_heapx (a text, b int) PARTITION BY list (a); +CREATE TABLE tableam_parted_1_heapx PARTITION OF tableam_parted_heapx FOR VALUES IN ('a', 'b'); +-- but an explicitly set AM overrides it +CREATE TABLE tableam_parted_2_heapx PARTITION OF tableam_parted_heapx FOR VALUES IN ('c', 'd') USING heap; +-- sequences, views and foreign servers shouldn't have an AM +CREATE VIEW tableam_view_heapx AS SELECT * FROM tableam_tbl_heapx; +CREATE SEQUENCE tableam_seq_heapx; +CREATE FOREIGN DATA WRAPPER fdw_heap2 VALIDATOR postgresql_fdw_validator; +CREATE SERVER fs_heap2 FOREIGN DATA WRAPPER fdw_heap2 ; +CREATE FOREIGN table tableam_fdw_heapx () SERVER fs_heap2; +-- Verify that new AM was used for tables, matviews, but not for sequences, views and fdws +SELECT + pc.relkind, + pa.amname, + CASE WHEN relkind = 't' THEN + (SELECT 'toast for ' || relname::regclass FROM pg_class pcm WHERE pcm.reltoastrelid = pc.oid) + ELSE + relname::regclass::text + END COLLATE "C" AS relname +FROM pg_class AS pc + LEFT JOIN pg_am AS pa ON (pa.oid = pc.relam) +WHERE pc.relname LIKE 'tableam_%_heapx' +ORDER BY 3, 1, 2; + relkind | amname | relname +---------+--------+----------------------------- + f | | 
tableam_fdw_heapx + r | heap2 | tableam_parted_1_heapx + r | heap | tableam_parted_2_heapx + p | | tableam_parted_heapx + S | | tableam_seq_heapx + r | heap2 | tableam_tbl_heapx + r | heap2 | tableam_tblas_heapx + m | heap2 | tableam_tblmv_heapx + r | heap2 | tableam_tblselectinto_heapx + v | | tableam_view_heapx +(10 rows) + +-- don't want to keep those tables, nor the default +ROLLBACK; +-- Third, check that we can neither create a table using a nonexistent +-- AM, nor using an index AM +CREATE TABLE i_am_a_failure() USING ""; +ERROR: zero-length delimited identifier at or near """" +LINE 1: CREATE TABLE i_am_a_failure() USING ""; + ^ +CREATE TABLE i_am_a_failure() USING i_do_not_exist_am; +ERROR: access method "i_do_not_exist_am" does not exist +CREATE TABLE i_am_a_failure() USING "I do not exist AM"; +ERROR: access method "I do not exist AM" does not exist +CREATE TABLE i_am_a_failure() USING "btree"; +ERROR: access method "btree" is not of type TABLE +-- Drop table access method, which fails as objects depend on it +DROP ACCESS METHOD heap2; +ERROR: cannot drop access method heap2 because other objects depend on it +DETAIL: table tableam_tbl_heap2 depends on access method heap2 +table tableam_tblas_heap2 depends on access method heap2 +materialized view tableam_tblmv_heap2 depends on access method heap2 +table tableam_parted_b_heap2 depends on access method heap2 +table tableam_parted_d_heap2 depends on access method heap2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- we intentionally leave the objects created above alive, to verify pg_dump support diff --git a/src/test/regress/expected/create_cast.out b/src/test/regress/expected/create_cast.out new file mode 100644 index 0000000..9a56fe3 --- /dev/null +++ b/src/test/regress/expected/create_cast.out @@ -0,0 +1,103 @@ +-- +-- CREATE_CAST +-- +-- Create some types to test with +CREATE TYPE casttesttype; +CREATE FUNCTION casttesttype_in(cstring) + RETURNS casttesttype + AS 'textin' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: return type casttesttype is only a shell +CREATE FUNCTION casttesttype_out(casttesttype) + RETURNS cstring + AS 'textout' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: argument type casttesttype is only a shell +CREATE TYPE casttesttype ( + internallength = variable, + input = casttesttype_in, + output = casttesttype_out, + alignment = int4 +); +-- a dummy function to test with +CREATE FUNCTION casttestfunc(casttesttype) RETURNS int4 LANGUAGE SQL AS +$$ SELECT 1; $$; +SELECT casttestfunc('foo'::text); -- fails, as there's no cast +ERROR: function casttestfunc(text) does not exist +LINE 1: SELECT casttestfunc('foo'::text); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- Try binary coercion cast +CREATE CAST (text AS casttesttype) WITHOUT FUNCTION; +SELECT casttestfunc('foo'::text); -- doesn't work, as the cast is explicit +ERROR: function casttestfunc(text) does not exist +LINE 1: SELECT casttestfunc('foo'::text); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT casttestfunc('foo'::text::casttesttype); -- should work + casttestfunc +-------------- + 1 +(1 row) + +DROP CAST (text AS casttesttype); -- cleanup +-- Try IMPLICIT binary coercion cast +CREATE CAST (text AS casttesttype) WITHOUT FUNCTION AS IMPLICIT; +SELECT casttestfunc('foo'::text); -- Should work now + casttestfunc +-------------- + 1 +(1 row) + +-- Try I/O conversion cast.
+SELECT 1234::int4::casttesttype; -- No cast yet, should fail +ERROR: cannot cast type integer to casttesttype +LINE 1: SELECT 1234::int4::casttesttype; + ^ +CREATE CAST (int4 AS casttesttype) WITH INOUT; +SELECT 1234::int4::casttesttype; -- Should work now + casttesttype +-------------- + 1234 +(1 row) + +DROP CAST (int4 AS casttesttype); +-- Try cast with a function +CREATE FUNCTION int4_casttesttype(int4) RETURNS casttesttype LANGUAGE SQL AS +$$ SELECT ('foo'::text || $1::text)::casttesttype; $$; +CREATE CAST (int4 AS casttesttype) WITH FUNCTION int4_casttesttype(int4) AS IMPLICIT; +SELECT 1234::int4::casttesttype; -- Should work now + casttesttype +-------------- + foo1234 +(1 row) + +DROP FUNCTION int4_casttesttype(int4) CASCADE; +NOTICE: drop cascades to cast from integer to casttesttype +-- Try it with a function that requires an implicit cast +CREATE FUNCTION bar_int4_text(int4) RETURNS text LANGUAGE SQL AS +$$ SELECT ('bar'::text || $1::text); $$; +CREATE CAST (int4 AS casttesttype) WITH FUNCTION bar_int4_text(int4) AS IMPLICIT; +SELECT 1234::int4::casttesttype; -- Should work now + casttesttype +-------------- + bar1234 +(1 row) + +-- check dependencies generated for that +SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as objref, + deptype +FROM pg_depend +WHERE classid = 'pg_cast'::regclass AND + objid = (SELECT oid FROM pg_cast + WHERE castsource = 'int4'::regtype + AND casttarget = 'casttesttype'::regtype) +ORDER BY refclassid; + obj | objref | deptype +-----------------------------------+---------------------------------+--------- + cast from integer to casttesttype | type casttesttype | n + cast from integer to casttesttype | function bar_int4_text(integer) | n + cast from integer to casttesttype | cast from text to casttesttype | n +(3 rows) + diff --git a/src/test/regress/expected/create_function_c.out b/src/test/regress/expected/create_function_c.out new file mode 100644 index 0000000..2dba9d7 --- /dev/null +++ b/src/test/regress/expected/create_function_c.out @@ -0,0 +1,36 @@ +-- +-- CREATE_FUNCTION_C +-- +-- This script used to create C functions for other scripts to use. +-- But to get rid of the ordering dependencies that caused, such +-- functions are now made either in test_setup.sql or in the specific +-- test script that needs them. All that remains here is error cases. +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +-- +-- Check LOAD command. (The alternative of implicitly loading the library +-- is checked in many other test scripts.) +-- +LOAD :'regresslib'; +-- Things that shouldn't work: +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C + AS 'nosuchfile'; +ERROR: could not access file "nosuchfile": No such file or directory +-- To produce stable regression test output, we have to filter the name +-- of the regresslib file out of the error message in this test. +\set VERBOSITY sqlstate +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C + AS :'regresslib', 'nosuchsymbol'; +ERROR: 42883 +\set VERBOSITY default +SELECT regexp_replace(:'LAST_ERROR_MESSAGE', 'file ".*"', 'file "..."'); + regexp_replace +------------------------------------------------------ + could not find function "nosuchsymbol" in file "..." 
+(1 row) + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal + AS 'nosuch'; +ERROR: there is no built-in function named "nosuch" diff --git a/src/test/regress/expected/create_function_sql.out b/src/test/regress/expected/create_function_sql.out new file mode 100644 index 0000000..50aca59 --- /dev/null +++ b/src/test/regress/expected/create_function_sql.out @@ -0,0 +1,743 @@ +-- +-- CREATE_FUNCTION_SQL +-- +-- Assorted tests using SQL-language functions +-- +-- All objects made in this test are in temp_func_test schema +CREATE USER regress_unpriv_user; +CREATE SCHEMA temp_func_test; +GRANT ALL ON SCHEMA temp_func_test TO public; +SET search_path TO temp_func_test, public; +-- +-- Make sanity checks on the pg_proc entries created by CREATE FUNCTION +-- +-- +-- ARGUMENT and RETURN TYPES +-- +CREATE FUNCTION functest_A_1(text, date) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 = ''abcd'' AND $2 > ''2001-01-01'''; +CREATE FUNCTION functest_A_2(text[]) RETURNS int LANGUAGE 'sql' + AS 'SELECT $1[1]::int'; +CREATE FUNCTION functest_A_3() RETURNS bool LANGUAGE 'sql' + AS 'SELECT false'; +SELECT proname, prorettype::regtype, proargtypes::regtype[] FROM pg_proc + WHERE oid in ('functest_A_1'::regproc, + 'functest_A_2'::regproc, + 'functest_A_3'::regproc) ORDER BY proname; + proname | prorettype | proargtypes +--------------+------------+------------------- + functest_a_1 | boolean | [0:1]={text,date} + functest_a_2 | integer | [0:0]={text[]} + functest_a_3 | boolean | {} +(3 rows) + +SELECT functest_A_1('abcd', '2020-01-01'); + functest_a_1 +-------------- + t +(1 row) + +SELECT functest_A_2(ARRAY['1', '2', '3']); + functest_a_2 +-------------- + 1 +(1 row) + +SELECT functest_A_3(); + functest_a_3 +-------------- + f +(1 row) + +-- +-- IMMUTABLE | STABLE | VOLATILE +-- +CREATE FUNCTION functest_B_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_B_2(int) RETURNS bool LANGUAGE 'sql' + IMMUTABLE AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_B_3(int) RETURNS bool LANGUAGE 'sql' + STABLE AS 'SELECT $1 = 0'; +CREATE FUNCTION functest_B_4(int) RETURNS bool LANGUAGE 'sql' + VOLATILE AS 'SELECT $1 < 0'; +SELECT proname, provolatile FROM pg_proc + WHERE oid in ('functest_B_1'::regproc, + 'functest_B_2'::regproc, + 'functest_B_3'::regproc, + 'functest_B_4'::regproc) ORDER BY proname; + proname | provolatile +--------------+------------- + functest_b_1 | v + functest_b_2 | i + functest_b_3 | s + functest_b_4 | v +(4 rows) + +ALTER FUNCTION functest_B_2(int) VOLATILE; +ALTER FUNCTION functest_B_3(int) COST 100; -- unrelated change, no effect +SELECT proname, provolatile FROM pg_proc + WHERE oid in ('functest_B_1'::regproc, + 'functest_B_2'::regproc, + 'functest_B_3'::regproc, + 'functest_B_4'::regproc) ORDER BY proname; + proname | provolatile +--------------+------------- + functest_b_1 | v + functest_b_2 | v + functest_b_3 | s + functest_b_4 | v +(4 rows) + +-- +-- SECURITY DEFINER | INVOKER +-- +CREATE FUNCTION functest_C_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 0'; +CREATE FUNCTION functest_C_2(int) RETURNS bool LANGUAGE 'sql' + SECURITY DEFINER AS 'SELECT $1 = 0'; +CREATE FUNCTION functest_C_3(int) RETURNS bool LANGUAGE 'sql' + SECURITY INVOKER AS 'SELECT $1 < 0'; +SELECT proname, prosecdef FROM pg_proc + WHERE oid in ('functest_C_1'::regproc, + 'functest_C_2'::regproc, + 'functest_C_3'::regproc) ORDER BY proname; + proname | prosecdef +--------------+----------- + functest_c_1 | f + functest_c_2 | t + functest_c_3 | f +(3 rows) + +ALTER FUNCTION 
functest_C_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_C_2(int) SECURITY INVOKER; +ALTER FUNCTION functest_C_3(int) SECURITY DEFINER; +SELECT proname, prosecdef FROM pg_proc + WHERE oid in ('functest_C_1'::regproc, + 'functest_C_2'::regproc, + 'functest_C_3'::regproc) ORDER BY proname; + proname | prosecdef +--------------+----------- + functest_c_1 | f + functest_c_2 | f + functest_c_3 | t +(3 rows) + +-- +-- LEAKPROOF +-- +CREATE FUNCTION functest_E_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 100'; +CREATE FUNCTION functest_E_2(int) RETURNS bool LANGUAGE 'sql' + LEAKPROOF AS 'SELECT $1 > 100'; +SELECT proname, proleakproof FROM pg_proc + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; + proname | proleakproof +--------------+-------------- + functest_e_1 | f + functest_e_2 | t +(2 rows) + +ALTER FUNCTION functest_E_1(int) LEAKPROOF; +ALTER FUNCTION functest_E_2(int) STABLE; -- unrelated change, no effect +SELECT proname, proleakproof FROM pg_proc + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; + proname | proleakproof +--------------+-------------- + functest_e_1 | t + functest_e_2 | t +(2 rows) + +ALTER FUNCTION functest_E_2(int) NOT LEAKPROOF; -- remove leakproof attribute +SELECT proname, proleakproof FROM pg_proc + WHERE oid in ('functest_E_1'::regproc, + 'functest_E_2'::regproc) ORDER BY proname; + proname | proleakproof +--------------+-------------- + functest_e_1 | t + functest_e_2 | f +(2 rows) + +-- it takes superuser privilege to turn on leakproof, but not to turn off +ALTER FUNCTION functest_E_1(int) OWNER TO regress_unpriv_user; +ALTER FUNCTION functest_E_2(int) OWNER TO regress_unpriv_user; +SET SESSION AUTHORIZATION regress_unpriv_user; +SET search_path TO temp_func_test, public; +ALTER FUNCTION functest_E_1(int) NOT LEAKPROOF; +ALTER FUNCTION functest_E_2(int) LEAKPROOF; +ERROR: only superuser can define a leakproof function +CREATE FUNCTION functest_E_3(int) RETURNS bool LANGUAGE 'sql' + LEAKPROOF AS 'SELECT $1 < 200'; -- fail +ERROR: only superuser can define a leakproof function +RESET SESSION AUTHORIZATION; +-- +-- CALLED ON NULL INPUT | RETURNS NULL ON NULL INPUT | STRICT +-- +CREATE FUNCTION functest_F_1(int) RETURNS bool LANGUAGE 'sql' + AS 'SELECT $1 > 50'; +CREATE FUNCTION functest_F_2(int) RETURNS bool LANGUAGE 'sql' + CALLED ON NULL INPUT AS 'SELECT $1 = 50'; +CREATE FUNCTION functest_F_3(int) RETURNS bool LANGUAGE 'sql' + RETURNS NULL ON NULL INPUT AS 'SELECT $1 < 50'; +CREATE FUNCTION functest_F_4(int) RETURNS bool LANGUAGE 'sql' + STRICT AS 'SELECT $1 = 50'; +SELECT proname, proisstrict FROM pg_proc + WHERE oid in ('functest_F_1'::regproc, + 'functest_F_2'::regproc, + 'functest_F_3'::regproc, + 'functest_F_4'::regproc) ORDER BY proname; + proname | proisstrict +--------------+------------- + functest_f_1 | f + functest_f_2 | f + functest_f_3 | t + functest_f_4 | t +(4 rows) + +ALTER FUNCTION functest_F_1(int) IMMUTABLE; -- unrelated change, no effect +ALTER FUNCTION functest_F_2(int) STRICT; +ALTER FUNCTION functest_F_3(int) CALLED ON NULL INPUT; +SELECT proname, proisstrict FROM pg_proc + WHERE oid in ('functest_F_1'::regproc, + 'functest_F_2'::regproc, + 'functest_F_3'::regproc, + 'functest_F_4'::regproc) ORDER BY proname; + proname | proisstrict +--------------+------------- + functest_f_1 | f + functest_f_2 | t + functest_f_3 | f + functest_f_4 | t +(4 rows) + +-- pg_get_functiondef tests +SELECT pg_get_functiondef('functest_A_1'::regproc); + 
pg_get_functiondef +-------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_a_1(text, date)+ + RETURNS boolean + + LANGUAGE sql + + AS $function$SELECT $1 = 'abcd' AND $2 > '2001-01-01'$function$ + + +(1 row) + +SELECT pg_get_functiondef('functest_B_3'::regproc); + pg_get_functiondef +----------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_b_3(integer)+ + RETURNS boolean + + LANGUAGE sql + + STABLE + + AS $function$SELECT $1 = 0$function$ + + +(1 row) + +SELECT pg_get_functiondef('functest_C_3'::regproc); + pg_get_functiondef +----------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_c_3(integer)+ + RETURNS boolean + + LANGUAGE sql + + SECURITY DEFINER + + AS $function$SELECT $1 < 0$function$ + + +(1 row) + +SELECT pg_get_functiondef('functest_F_2'::regproc); + pg_get_functiondef +----------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_f_2(integer)+ + RETURNS boolean + + LANGUAGE sql + + STRICT + + AS $function$SELECT $1 = 50$function$ + + +(1 row) + +-- +-- SQL-standard body +-- +CREATE FUNCTION functest_S_1(a text, b date) RETURNS boolean + LANGUAGE SQL + RETURN a = 'abcd' AND b > '2001-01-01'; +CREATE FUNCTION functest_S_2(a text[]) RETURNS int + RETURN a[1]::int; +CREATE FUNCTION functest_S_3() RETURNS boolean + RETURN false; +CREATE FUNCTION functest_S_3a() RETURNS boolean + BEGIN ATOMIC + ;;RETURN false;; + END; +CREATE FUNCTION functest_S_10(a text, b date) RETURNS boolean + LANGUAGE SQL + BEGIN ATOMIC + SELECT a = 'abcd' AND b > '2001-01-01'; + END; +CREATE FUNCTION functest_S_13() RETURNS boolean + BEGIN ATOMIC + SELECT 1; + SELECT false; + END; +-- check display of function arguments in sub-SELECT +CREATE TABLE functest1 (i int); +CREATE FUNCTION functest_S_16(a int, b int) RETURNS void + LANGUAGE SQL + BEGIN ATOMIC + INSERT INTO functest1 SELECT a + $2; + END; +-- error: duplicate function body +CREATE FUNCTION functest_S_xxx(x int) RETURNS int + LANGUAGE SQL + AS $$ SELECT x * 2 $$ + RETURN x * 3; +ERROR: duplicate function body specified +-- polymorphic arguments not allowed in this form +CREATE FUNCTION functest_S_xx(x anyarray) RETURNS anyelement + LANGUAGE SQL + RETURN x[1]; +ERROR: SQL function with unquoted function body cannot have polymorphic arguments +-- check reporting of parse-analysis errors +CREATE FUNCTION functest_S_xx(x date) RETURNS boolean + LANGUAGE SQL + RETURN x > 1; +ERROR: operator does not exist: date > integer +LINE 3: RETURN x > 1; + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
+-- tricky parsing +CREATE FUNCTION functest_S_15(x int) RETURNS boolean +LANGUAGE SQL +BEGIN ATOMIC + select case when x % 2 = 0 then true else false end; +END; +SELECT functest_S_1('abcd', '2020-01-01'); + functest_s_1 +-------------- + t +(1 row) + +SELECT functest_S_2(ARRAY['1', '2', '3']); + functest_s_2 +-------------- + 1 +(1 row) + +SELECT functest_S_3(); + functest_s_3 +-------------- + f +(1 row) + +SELECT functest_S_10('abcd', '2020-01-01'); + functest_s_10 +--------------- + t +(1 row) + +SELECT functest_S_13(); + functest_s_13 +--------------- + f +(1 row) + +SELECT pg_get_functiondef('functest_S_1'::regproc); + pg_get_functiondef +------------------------------------------------------------------------ + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_1(a text, b date)+ + RETURNS boolean + + LANGUAGE sql + + RETURN ((a = 'abcd'::text) AND (b > '01-01-2001'::date)) + + +(1 row) + +SELECT pg_get_functiondef('functest_S_2'::regproc); + pg_get_functiondef +------------------------------------------------------------------ + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_2(a text[])+ + RETURNS integer + + LANGUAGE sql + + RETURN ((a)[1])::integer + + +(1 row) + +SELECT pg_get_functiondef('functest_S_3'::regproc); + pg_get_functiondef +---------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_3()+ + RETURNS boolean + + LANGUAGE sql + + RETURN false + + +(1 row) + +SELECT pg_get_functiondef('functest_S_3a'::regproc); + pg_get_functiondef +----------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_3a()+ + RETURNS boolean + + LANGUAGE sql + + BEGIN ATOMIC + + RETURN false; + + END + + +(1 row) + +SELECT pg_get_functiondef('functest_S_10'::regproc); + pg_get_functiondef +------------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_10(a text, b date)+ + RETURNS boolean + + LANGUAGE sql + + BEGIN ATOMIC + + SELECT ((a = 'abcd'::text) AND (b > '01-01-2001'::date)); + + END + + +(1 row) + +SELECT pg_get_functiondef('functest_S_13'::regproc); + pg_get_functiondef +----------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_13()+ + RETURNS boolean + + LANGUAGE sql + + BEGIN ATOMIC + + SELECT 1; + + SELECT false; + + END + + +(1 row) + +SELECT pg_get_functiondef('functest_S_15'::regproc); + pg_get_functiondef +-------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_15(x integer)+ + RETURNS boolean + + LANGUAGE sql + + BEGIN ATOMIC + + SELECT + + CASE + + WHEN ((x % 2) = 0) THEN true + + ELSE false + + END AS "case"; + + END + + +(1 row) + +SELECT pg_get_functiondef('functest_S_16'::regproc); + pg_get_functiondef +------------------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION temp_func_test.functest_s_16(a integer, b integer)+ + RETURNS void + + LANGUAGE sql + + BEGIN ATOMIC + + INSERT INTO functest1 (i) SELECT (functest_s_16.a + functest_s_16.b); + + END + + +(1 row) + +DROP TABLE functest1 CASCADE; +NOTICE: drop cascades to function functest_s_16(integer,integer) +-- test with views +CREATE TABLE functest3 (a int); +INSERT INTO functest3 VALUES (1), (2); +CREATE VIEW functestv3 AS SELECT * FROM functest3; +CREATE FUNCTION functest_S_14() RETURNS bigint + RETURN (SELECT count(*) FROM functestv3); +SELECT functest_S_14(); + functest_s_14 
+--------------- + 2 +(1 row) + +DROP TABLE functest3 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view functestv3 +drop cascades to function functest_s_14() +-- information_schema tests +CREATE FUNCTION functest_IS_1(a int, b int default 1, c text default 'foo') + RETURNS int + LANGUAGE SQL + AS 'SELECT $1 + $2'; +CREATE FUNCTION functest_IS_2(out a int, b int default 1) + RETURNS int + LANGUAGE SQL + AS 'SELECT $1'; +CREATE FUNCTION functest_IS_3(a int default 1, out b int) + RETURNS int + LANGUAGE SQL + AS 'SELECT $1'; +SELECT routine_name, ordinal_position, parameter_name, parameter_default + FROM information_schema.parameters JOIN information_schema.routines USING (specific_schema, specific_name) + WHERE routine_schema = 'temp_func_test' AND routine_name ~ '^functest_is_' + ORDER BY 1, 2; + routine_name | ordinal_position | parameter_name | parameter_default +---------------+------------------+----------------+------------------- + functest_is_1 | 1 | a | + functest_is_1 | 2 | b | 1 + functest_is_1 | 3 | c | 'foo'::text + functest_is_2 | 1 | a | + functest_is_2 | 2 | b | 1 + functest_is_3 | 1 | a | 1 + functest_is_3 | 2 | b | +(7 rows) + +DROP FUNCTION functest_IS_1(int, int, text), functest_IS_2(int), functest_IS_3(int); +-- routine usage views +CREATE FUNCTION functest_IS_4a() RETURNS int LANGUAGE SQL AS 'SELECT 1'; +CREATE FUNCTION functest_IS_4b(x int DEFAULT functest_IS_4a()) RETURNS int LANGUAGE SQL AS 'SELECT x'; +CREATE SEQUENCE functest1; +CREATE FUNCTION functest_IS_5(x int DEFAULT nextval('functest1')) + RETURNS int + LANGUAGE SQL + AS 'SELECT x'; +CREATE FUNCTION functest_IS_6() + RETURNS int + LANGUAGE SQL + RETURN nextval('functest1'); +CREATE TABLE functest2 (a int, b int); +CREATE FUNCTION functest_IS_7() + RETURNS int + LANGUAGE SQL + RETURN (SELECT count(a) FROM functest2); +SELECT r0.routine_name, r1.routine_name + FROM information_schema.routine_routine_usage rru + JOIN information_schema.routines r0 ON r0.specific_name = rru.specific_name + JOIN information_schema.routines r1 ON r1.specific_name = rru.routine_name + WHERE r0.routine_schema = 'temp_func_test' AND + r1.routine_schema = 'temp_func_test' + ORDER BY 1, 2; + routine_name | routine_name +----------------+---------------- + functest_is_4b | functest_is_4a +(1 row) + +SELECT routine_name, sequence_name FROM information_schema.routine_sequence_usage + WHERE routine_schema = 'temp_func_test' + ORDER BY 1, 2; + routine_name | sequence_name +---------------+--------------- + functest_is_5 | functest1 + functest_is_6 | functest1 +(2 rows) + +SELECT routine_name, table_name, column_name FROM information_schema.routine_column_usage + WHERE routine_schema = 'temp_func_test' + ORDER BY 1, 2; + routine_name | table_name | column_name +---------------+------------+------------- + functest_is_7 | functest2 | a +(1 row) + +SELECT routine_name, table_name FROM information_schema.routine_table_usage + WHERE routine_schema = 'temp_func_test' + ORDER BY 1, 2; + routine_name | table_name +---------------+------------ + functest_is_7 | functest2 +(1 row) + +DROP FUNCTION functest_IS_4a CASCADE; +NOTICE: drop cascades to function functest_is_4b(integer) +DROP SEQUENCE functest1 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function functest_is_5(integer) +drop cascades to function functest_is_6() +DROP TABLE functest2 CASCADE; +NOTICE: drop cascades to function functest_is_7() +-- overload +CREATE FUNCTION functest_B_2(bigint) RETURNS bool LANGUAGE 'sql' + 
IMMUTABLE AS 'SELECT $1 > 0'; +DROP FUNCTION functest_b_1; +DROP FUNCTION functest_b_1; -- error, not found +ERROR: could not find a function named "functest_b_1" +DROP FUNCTION functest_b_2; -- error, ambiguous +ERROR: function name "functest_b_2" is not unique +HINT: Specify the argument list to select the function unambiguously. +-- CREATE OR REPLACE tests +CREATE FUNCTION functest1(a int) RETURNS int LANGUAGE SQL AS 'SELECT $1'; +CREATE OR REPLACE FUNCTION functest1(a int) RETURNS int LANGUAGE SQL WINDOW AS 'SELECT $1'; +ERROR: cannot change routine kind +DETAIL: "functest1" is a function. +CREATE OR REPLACE PROCEDURE functest1(a int) LANGUAGE SQL AS 'SELECT $1'; +ERROR: cannot change routine kind +DETAIL: "functest1" is a function. +DROP FUNCTION functest1(a int); +-- inlining of set-returning functions +CREATE TABLE functest3 (a int); +INSERT INTO functest3 VALUES (1), (2), (3); +CREATE FUNCTION functest_sri1() RETURNS SETOF int +LANGUAGE SQL +STABLE +AS ' + SELECT * FROM functest3; +'; +SELECT * FROM functest_sri1(); + functest_sri1 +--------------- + 1 + 2 + 3 +(3 rows) + +EXPLAIN (verbose, costs off) SELECT * FROM functest_sri1(); + QUERY PLAN +-------------------------------------- + Seq Scan on temp_func_test.functest3 + Output: functest3.a +(2 rows) + +CREATE FUNCTION functest_sri2() RETURNS SETOF int +LANGUAGE SQL +STABLE +BEGIN ATOMIC + SELECT * FROM functest3; +END; +SELECT * FROM functest_sri2(); + functest_sri2 +--------------- + 1 + 2 + 3 +(3 rows) + +EXPLAIN (verbose, costs off) SELECT * FROM functest_sri2(); + QUERY PLAN +-------------------------------------- + Seq Scan on temp_func_test.functest3 + Output: functest3.a +(2 rows) + +DROP TABLE functest3 CASCADE; +NOTICE: drop cascades to function functest_sri2() +-- Check behavior of VOID-returning SQL functions +CREATE FUNCTION voidtest1(a int) RETURNS VOID LANGUAGE SQL AS +$$ SELECT a + 1 $$; +SELECT voidtest1(42); + voidtest1 +----------- + +(1 row) + +CREATE FUNCTION voidtest2(a int, b int) RETURNS VOID LANGUAGE SQL AS +$$ SELECT voidtest1(a + b) $$; +SELECT voidtest2(11,22); + voidtest2 +----------- + +(1 row) + +-- currently, we can inline voidtest2 but not voidtest1 +EXPLAIN (verbose, costs off) SELECT voidtest2(11,22); + QUERY PLAN +------------------------- + Result + Output: voidtest1(33) +(2 rows) + +CREATE TEMP TABLE sometable(f1 int); +CREATE FUNCTION voidtest3(a int) RETURNS VOID LANGUAGE SQL AS +$$ INSERT INTO sometable VALUES(a + 1) $$; +SELECT voidtest3(17); + voidtest3 +----------- + +(1 row) + +CREATE FUNCTION voidtest4(a int) RETURNS VOID LANGUAGE SQL AS +$$ INSERT INTO sometable VALUES(a - 1) RETURNING f1 $$; +SELECT voidtest4(39); + voidtest4 +----------- + +(1 row) + +TABLE sometable; + f1 +---- + 18 + 38 +(2 rows) + +CREATE FUNCTION voidtest5(a int) RETURNS SETOF VOID LANGUAGE SQL AS +$$ SELECT generate_series(1, a) $$ STABLE; +SELECT * FROM voidtest5(3); + voidtest5 +----------- +(0 rows) + +-- Regression tests for bugs: +-- Check that arguments that are R/W expanded datums aren't corrupted by +-- multiple uses. This test knows that array_append() returns a R/W datum +-- and will modify a R/W array input in-place. We use SETOF to prevent +-- inlining of the SQL function. 
+CREATE FUNCTION double_append(anyarray, anyelement) RETURNS SETOF anyarray +LANGUAGE SQL IMMUTABLE AS +$$ SELECT array_append($1, $2) || array_append($1, $2) $$; +SELECT double_append(array_append(ARRAY[q1], q2), q3) + FROM (VALUES(1,2,3), (4,5,6)) v(q1,q2,q3); + double_append +--------------- + {1,2,3,1,2,3} + {4,5,6,4,5,6} +(2 rows) + +-- Things that shouldn't work: +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT ''not an integer'';'; +ERROR: return type mismatch in function declared to return integer +DETAIL: Actual return type is text. +CONTEXT: SQL function "test1" +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'not even SQL'; +ERROR: syntax error at or near "not" +LINE 2: AS 'not even SQL'; + ^ +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT 1, 2, 3;'; +ERROR: return type mismatch in function declared to return integer +DETAIL: Final statement must return exactly one column. +CONTEXT: SQL function "test1" +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT $2;'; +ERROR: there is no parameter $2 +LINE 2: AS 'SELECT $2;'; + ^ +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'a', 'b'; +ERROR: only one AS item needed for language "sql" +-- Cleanup +DROP SCHEMA temp_func_test CASCADE; +NOTICE: drop cascades to 30 other objects +DETAIL: drop cascades to function functest_a_1(text,date) +drop cascades to function functest_a_2(text[]) +drop cascades to function functest_a_3() +drop cascades to function functest_b_2(integer) +drop cascades to function functest_b_3(integer) +drop cascades to function functest_b_4(integer) +drop cascades to function functest_c_1(integer) +drop cascades to function functest_c_2(integer) +drop cascades to function functest_c_3(integer) +drop cascades to function functest_e_1(integer) +drop cascades to function functest_e_2(integer) +drop cascades to function functest_f_1(integer) +drop cascades to function functest_f_2(integer) +drop cascades to function functest_f_3(integer) +drop cascades to function functest_f_4(integer) +drop cascades to function functest_s_1(text,date) +drop cascades to function functest_s_2(text[]) +drop cascades to function functest_s_3() +drop cascades to function functest_s_3a() +drop cascades to function functest_s_10(text,date) +drop cascades to function functest_s_13() +drop cascades to function functest_s_15(integer) +drop cascades to function functest_b_2(bigint) +drop cascades to function functest_sri1() +drop cascades to function voidtest1(integer) +drop cascades to function voidtest2(integer,integer) +drop cascades to function voidtest3(integer) +drop cascades to function voidtest4(integer) +drop cascades to function voidtest5(integer) +drop cascades to function double_append(anyarray,anyelement) +DROP USER regress_unpriv_user; +RESET search_path; diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out new file mode 100644 index 0000000..acfd9d1 --- /dev/null +++ b/src/test/regress/expected/create_index.out @@ -0,0 +1,2848 @@ +-- +-- CREATE_INDEX +-- Create ancillary data structures (i.e. 
indices) +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +-- +-- BTREE +-- +CREATE INDEX onek_unique1 ON onek USING btree(unique1 int4_ops); +CREATE INDEX IF NOT EXISTS onek_unique1 ON onek USING btree(unique1 int4_ops); +NOTICE: relation "onek_unique1" already exists, skipping +CREATE INDEX IF NOT EXISTS ON onek USING btree(unique1 int4_ops); +ERROR: syntax error at or near "ON" +LINE 1: CREATE INDEX IF NOT EXISTS ON onek USING btree(unique1 int4_... + ^ +CREATE INDEX onek_unique2 ON onek USING btree(unique2 int4_ops); +CREATE INDEX onek_hundred ON onek USING btree(hundred int4_ops); +CREATE INDEX onek_stringu1 ON onek USING btree(stringu1 name_ops); +CREATE INDEX tenk1_unique1 ON tenk1 USING btree(unique1 int4_ops); +CREATE INDEX tenk1_unique2 ON tenk1 USING btree(unique2 int4_ops); +CREATE INDEX tenk1_hundred ON tenk1 USING btree(hundred int4_ops); +CREATE INDEX tenk1_thous_tenthous ON tenk1 (thousand, tenthous); +CREATE INDEX tenk2_unique1 ON tenk2 USING btree(unique1 int4_ops); +CREATE INDEX tenk2_unique2 ON tenk2 USING btree(unique2 int4_ops); +CREATE INDEX tenk2_hundred ON tenk2 USING btree(hundred int4_ops); +CREATE INDEX rix ON road USING btree (name text_ops); +CREATE INDEX iix ON ihighway USING btree (name text_ops); +CREATE INDEX six ON shighway USING btree (name text_ops); +-- test comments +COMMENT ON INDEX six_wrong IS 'bad index'; +ERROR: relation "six_wrong" does not exist +COMMENT ON INDEX six IS 'good index'; +COMMENT ON INDEX six IS NULL; +-- +-- BTREE partial indices +-- +CREATE INDEX onek2_u1_prtl ON onek2 USING btree(unique1 int4_ops) + where unique1 < 20 or unique1 > 980; +CREATE INDEX onek2_u2_prtl ON onek2 USING btree(unique2 int4_ops) + where stringu1 < 'B'; +CREATE INDEX onek2_stu1_prtl ON onek2 USING btree(stringu1 name_ops) + where onek2.stringu1 >= 'J' and onek2.stringu1 < 'K'; +-- +-- GiST (rtree-equivalent opclasses only) +-- +CREATE TABLE slow_emp4000 ( + home_base box +); +CREATE TABLE fast_emp4000 ( + home_base box +); +\set filename :abs_srcdir '/data/rect.data' +COPY slow_emp4000 FROM :'filename'; +INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000; +ANALYZE slow_emp4000; +ANALYZE fast_emp4000; +CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base); +-- we want to work with a point_tbl that includes a null +CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl; +INSERT INTO POINT_TBL(f1) VALUES (NULL); +CREATE INDEX gpointind ON point_tbl USING gist (f1); +CREATE TEMP TABLE gpolygon_tbl AS + SELECT polygon(home_base) AS f1 FROM slow_emp4000; +INSERT INTO gpolygon_tbl VALUES ( '(1000,0,0,1000)' ); +INSERT INTO gpolygon_tbl VALUES ( '(0,1000,1000,1000)' ); +CREATE TEMP TABLE gcircle_tbl AS + SELECT circle(home_base) AS f1 FROM slow_emp4000; +CREATE INDEX ggpolygonind ON gpolygon_tbl USING gist (f1); +CREATE INDEX ggcircleind ON gcircle_tbl USING gist (f1); +-- +-- Test GiST indexes +-- +-- get non-indexed results for comparison purposes +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SELECT * FROM fast_emp4000 + WHERE home_base <@ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + home_base +----------------------- + (337,455),(240,359) + (1444,403),(1346,344) +(2 rows) + +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + count +------- + 2 +(1 row) + +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + count +------- + 278 +(1 row) + +SELECT count(*) FROM gpolygon_tbl WHERE f1 && 
'(1000,1000,0,0)'::polygon; + count +------- + 2 +(1 row) + +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; + count +------- + 2 +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; + count +------- + 5 +(1 row) + +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; + count +------- + 4 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 <<| '(0.0, 0.0)'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 |>> '(0.0, 0.0)'; + count +------- + 5 +(1 row) + +SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; + count +------- + 1 +(1 row) + +SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; + f1 +------------------- + (0,0) + (1e-300,-1e-300) + (-3,4) + (-10,0) + (10,10) + (-5,-12) + (5.1,34.5) + (Infinity,1e+300) + (1e+300,Infinity) + (NaN,NaN) + +(11 rows) + +SELECT * FROM point_tbl WHERE f1 IS NULL; + f1 +---- + +(1 row) + +SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; + f1 +------------------- + (0,0) + (1e-300,-1e-300) + (-3,4) + (-10,0) + (10,10) + (-5,-12) + (5.1,34.5) + (1e+300,Infinity) + (Infinity,1e+300) + (NaN,NaN) +(10 rows) + +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; + f1 +------------------ + (0,0) + (1e-300,-1e-300) + (-3,4) + (-10,0) + (10,10) +(5 rows) + +SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; + f1 +------------------------------------------------- + ((240,359),(240,455),(337,455),(337,359)) + ((662,163),(662,187),(759,187),(759,163)) + ((1000,0),(0,1000)) + ((0,1000),(1000,1000)) + ((1346,344),(1346,403),(1444,403),(1444,344)) + ((278,1409),(278,1457),(369,1457),(369,1409)) + ((907,1156),(907,1201),(948,1201),(948,1156)) + ((1517,971),(1517,1043),(1594,1043),(1594,971)) + ((175,1820),(175,1850),(259,1850),(259,1820)) + ((2424,81),(2424,160),(2424,160),(2424,81)) +(10 rows) + +SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10; + circle_center | radius +----------------+-------- + (288.5,407) | 68 + (710.5,175) | 50 + (323.5,1433) | 51 + (927.5,1178.5) | 30 + (1395,373.5) | 57 + (1555.5,1007) | 53 + (217,1835) | 45 + (489,2421.5) | 22 + (2424,120.5) | 40 + (751.5,2655) | 20 +(10 rows) + +-- Now check the results from plain indexscan +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +EXPLAIN (COSTS OFF) +SELECT * FROM fast_emp4000 + WHERE home_base <@ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + QUERY PLAN +----------------------------------------------------------------- + Sort + Sort Key: ((home_base[0])[0]) + -> Index Only Scan using grect2ind on fast_emp4000 + Index Cond: (home_base <@ '(2000,1000),(200,200)'::box) +(4 rows) + +SELECT * FROM fast_emp4000 + WHERE home_base <@ '(200,200),(2000,1000)'::box + ORDER BY (home_base[0])[0]; + home_base +----------------------- + (337,455),(240,359) + (1444,403),(1346,344) +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + QUERY PLAN 
+------------------------------------------------------------- + Aggregate + -> Index Only Scan using grect2ind on fast_emp4000 + Index Cond: (home_base && '(1000,1000),(0,0)'::box) +(3 rows) + +SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Only Scan using grect2ind on fast_emp4000 + Index Cond: (home_base IS NULL) +(3 rows) + +SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL; + count +------- + 278 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Scan using ggpolygonind on gpolygon_tbl + Index Cond: (f1 && '((1000,1000),(0,0))'::polygon) +(3 rows) + +SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Scan using ggcircleind on gcircle_tbl + Index Cond: (f1 && '<(500,500),500>'::circle) +(3 rows) + +SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; + QUERY PLAN +---------------------------------------------------- + Aggregate + -> Index Only Scan using gpointind on point_tbl + Index Cond: (f1 <@ '(100,100),(0,0)'::box) +(3 rows) + +SELECT count(*) FROM point_tbl WHERE f1 <@ box '(0,0,100,100)'; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; + QUERY PLAN +---------------------------------------------------- + Aggregate + -> Index Only Scan using gpointind on point_tbl + Index Cond: (f1 <@ '(100,100),(0,0)'::box) +(3 rows) + +SELECT count(*) FROM point_tbl WHERE box '(0,0,100,100)' @> f1; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using gpointind on point_tbl + Index Cond: (f1 <@ '((0,0),(0,100),(100,100),(50,50),(100,0),(0,0))'::polygon) +(3 rows) + +SELECT count(*) FROM point_tbl WHERE f1 <@ polygon '(0,0),(0,100),(100,100),(50,50),(100,0),(0,0)'; + count +------- + 4 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; + QUERY PLAN +---------------------------------------------------- + Aggregate + -> Index Only Scan using gpointind on point_tbl + Index Cond: (f1 <@ '<(50,50),50>'::circle) +(3 rows) + +SELECT count(*) FROM point_tbl WHERE f1 <@ circle '<(50,50),50>'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Index Only Scan using gpointind on point_tbl p + Index Cond: (f1 << '(0,0)'::point) +(3 rows) + +SELECT count(*) FROM point_tbl p WHERE p.f1 << '(0.0, 0.0)'; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; + QUERY PLAN 
+------------------------------------------------------ + Aggregate + -> Index Only Scan using gpointind on point_tbl p + Index Cond: (f1 >> '(0,0)'::point) +(3 rows) + +SELECT count(*) FROM point_tbl p WHERE p.f1 >> '(0.0, 0.0)'; + count +------- + 4 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 <<| '(0.0, 0.0)'; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Index Only Scan using gpointind on point_tbl p + Index Cond: (f1 <<| '(0,0)'::point) +(3 rows) + +SELECT count(*) FROM point_tbl p WHERE p.f1 <<| '(0.0, 0.0)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 |>> '(0.0, 0.0)'; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Index Only Scan using gpointind on point_tbl p + Index Cond: (f1 |>> '(0,0)'::point) +(3 rows) + +SELECT count(*) FROM point_tbl p WHERE p.f1 |>> '(0.0, 0.0)'; + count +------- + 5 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Index Only Scan using gpointind on point_tbl p + Index Cond: (f1 ~= '(-5,-12)'::point) +(3 rows) + +SELECT count(*) FROM point_tbl p WHERE p.f1 ~= '(-5, -12)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using gpointind on point_tbl + Order By: (f1 <-> '(0,1)'::point) +(2 rows) + +SELECT * FROM point_tbl ORDER BY f1 <-> '0,1'; + f1 +------------------- + (1e-300,-1e-300) + (0,0) + (-3,4) + (-10,0) + (10,10) + (-5,-12) + (5.1,34.5) + (Infinity,1e+300) + (1e+300,Infinity) + (NaN,NaN) + +(11 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM point_tbl WHERE f1 IS NULL; + QUERY PLAN +---------------------------------------------- + Index Only Scan using gpointind on point_tbl + Index Cond: (f1 IS NULL) +(2 rows) + +SELECT * FROM point_tbl WHERE f1 IS NULL; + f1 +---- + +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using gpointind on point_tbl + Index Cond: (f1 IS NOT NULL) + Order By: (f1 <-> '(0,1)'::point) +(3 rows) + +SELECT * FROM point_tbl WHERE f1 IS NOT NULL ORDER BY f1 <-> '0,1'; + f1 +------------------- + (1e-300,-1e-300) + (0,0) + (-3,4) + (-10,0) + (10,10) + (-5,-12) + (5.1,34.5) + (Infinity,1e+300) + (1e+300,Infinity) + (NaN,NaN) +(10 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; + QUERY PLAN +------------------------------------------------ + Index Only Scan using gpointind on point_tbl + Index Cond: (f1 <@ '(10,10),(-10,-10)'::box) + Order By: (f1 <-> '(0,1)'::point) +(3 rows) + +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; + f1 +------------------ + (1e-300,-1e-300) + (0,0) + (-3,4) + (-10,0) + (10,10) +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; + QUERY PLAN +----------------------------------------------------- + Limit + -> Index Scan using ggpolygonind on gpolygon_tbl + Order By: (f1 <-> '(0,0)'::point) +(3 rows) + +SELECT * FROM gpolygon_tbl ORDER BY f1 <-> '(0,0)'::point LIMIT 10; + f1 +------------------------------------------------- + ((240,359),(240,455),(337,455),(337,359)) + 
((662,163),(662,187),(759,187),(759,163)) + ((1000,0),(0,1000)) + ((0,1000),(1000,1000)) + ((1346,344),(1346,403),(1444,403),(1444,344)) + ((278,1409),(278,1457),(369,1457),(369,1409)) + ((907,1156),(907,1201),(948,1201),(948,1156)) + ((1517,971),(1517,1043),(1594,1043),(1594,971)) + ((175,1820),(175,1850),(259,1850),(259,1820)) + ((2424,81),(2424,160),(2424,160),(2424,81)) +(10 rows) + +EXPLAIN (COSTS OFF) +SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10; + QUERY PLAN +--------------------------------------------------- + Limit + -> Index Scan using ggcircleind on gcircle_tbl + Order By: (f1 <-> '(200,300)'::point) +(3 rows) + +SELECT circle_center(f1), round(radius(f1)) as radius FROM gcircle_tbl ORDER BY f1 <-> '(200,300)'::point LIMIT 10; + circle_center | radius +----------------+-------- + (288.5,407) | 68 + (710.5,175) | 50 + (323.5,1433) | 51 + (927.5,1178.5) | 30 + (1395,373.5) | 57 + (1555.5,1007) | 53 + (217,1835) | 45 + (489,2421.5) | 22 + (2424,120.5) | 40 + (751.5,2655) | 20 +(10 rows) + +EXPLAIN (COSTS OFF) +SELECT point(x,x), (SELECT f1 FROM gpolygon_tbl ORDER BY f1 <-> point(x,x) LIMIT 1) as c FROM generate_series(0,10,1) x; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Function Scan on generate_series x + SubPlan 1 + -> Limit + -> Index Scan using ggpolygonind on gpolygon_tbl + Order By: (f1 <-> point((x.x)::double precision, (x.x)::double precision)) +(5 rows) + +SELECT point(x,x), (SELECT f1 FROM gpolygon_tbl ORDER BY f1 <-> point(x,x) LIMIT 1) as c FROM generate_series(0,10,1) x; + point | c +---------+------------------------------------------- + (0,0) | ((240,359),(240,455),(337,455),(337,359)) + (1,1) | ((240,359),(240,455),(337,455),(337,359)) + (2,2) | ((240,359),(240,455),(337,455),(337,359)) + (3,3) | ((240,359),(240,455),(337,455),(337,359)) + (4,4) | ((240,359),(240,455),(337,455),(337,359)) + (5,5) | ((240,359),(240,455),(337,455),(337,359)) + (6,6) | ((240,359),(240,455),(337,455),(337,359)) + (7,7) | ((240,359),(240,455),(337,455),(337,359)) + (8,8) | ((240,359),(240,455),(337,455),(337,359)) + (9,9) | ((240,359),(240,455),(337,455),(337,359)) + (10,10) | ((240,359),(240,455),(337,455),(337,359)) +(11 rows) + +-- Now check the results from bitmap indexscan +SET enable_seqscan = OFF; +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +EXPLAIN (COSTS OFF) +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; + QUERY PLAN +------------------------------------------------------------ + Sort + Sort Key: ((f1 <-> '(0,1)'::point)) + -> Bitmap Heap Scan on point_tbl + Recheck Cond: (f1 <@ '(10,10),(-10,-10)'::box) + -> Bitmap Index Scan on gpointind + Index Cond: (f1 <@ '(10,10),(-10,-10)'::box) +(6 rows) + +SELECT * FROM point_tbl WHERE f1 <@ '(-10,-10),(10,10)':: box ORDER BY f1 <-> '0,1'; + f1 +------------------ + (0,0) + (1e-300,-1e-300) + (-3,4) + (-10,0) + (10,10) +(5 rows) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- +-- GIN over int[] and text[] +-- +-- Note: GIN currently supports only bitmap scans, not plain indexscans +-- +CREATE TABLE array_index_op_test ( + seqno int4, + i int4[], + t text[] +); +\set filename :abs_srcdir '/data/array.data' +COPY array_index_op_test FROM :'filename'; +ANALYZE array_index_op_test; +SELECT * FROM array_index_op_test WHERE i = '{NULL}' ORDER BY seqno; + seqno | i | t +-------+--------+-------- + 102 | {NULL} | {NULL} +(1 
row) + +SELECT * FROM array_index_op_test WHERE i @> '{NULL}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_index_op_test WHERE i && '{NULL}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_index_op_test WHERE i <@ '{NULL}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SET enable_seqscan = OFF; +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +CREATE INDEX intarrayidx ON array_index_op_test USING gin (i); +explain (costs off) +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + QUERY PLAN +---------------------------------------------------- + Sort + Sort Key: seqno + -> Bitmap Heap Scan on array_index_op_test + Recheck Cond: (i @> '{32}'::integer[]) + -> Bitmap Index Scan on intarrayidx + Index Cond: (i @> '{32}'::integer[]) +(6 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | 
{AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_index_op_test WHERE i && '{17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(8 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} +(3 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32,17}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 53 | {38,17} | {AAAAAAAAAAA21658} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | 
{AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(11 rows) + +SELECT * FROM array_index_op_test WHERE i <@ '{38,34,32,89}' ORDER BY seqno; + seqno | i | t +-------+---------------+---------------------------------------------------------------------------------------------------------------------------- + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 101 | {} | {} +(4 rows) + +SELECT * FROM array_index_op_test WHERE i = '{47,77}' ORDER BY seqno; + seqno | i | t +-------+---------+----------------------------------------------------------------------------------------------------------------- + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} +(1 row) + +SELECT * FROM array_index_op_test WHERE i = '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SELECT * FROM array_index_op_test WHERE i @> '{}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1 | {92,75,71,52,64,83} | {AAAAAAAA44066,AAAAAA1059,AAAAAAAAAAA176,AAAAAAA48038} + 2 | {3,6} | {AAAAAA98232,AAAAAAAA79710,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAAAAAAA55798,AAAAAAAAA12793} + 3 | {37,64,95,43,3,41,13,30,11,43} | {AAAAAAAAAA48845,AAAAA75968,AAAAA95309,AAA54451,AAAAAAAAAA22292,AAAAAAA99836,A96617,AA17009,AAAAAAAAAAAAAA95246} + 4 | {71,39,99,55,33,75,45} | {AAAAAAAAA53663,AAAAAAAAAAAAAAA67062,AAAAAAAAAA64777,AAA99043,AAAAAAAAAAAAAAAAAAA91804,39557} + 5 | {50,42,77,50,4} | {AAAAAAAAAAAAAAAAA26540,AAAAAAA79710,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA176,AAAAA95309,AAAAAAAAAAA46154,AAAAAA66777,AAAAAAAAA27249,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA70104} + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 7 | {12,51,88,64,8} | {AAAAAAAAAAAAAAAAAA12591,AAAAAAAAAAAAAAAAA50407,AAAAAAAAAAAA67946} + 8 | {60,84} | {AAAAAAA81898,AAAAAA1059,AAAAAAAAAAAA81511,AAAAA961,AAAAAAAAAAAAAAAA31334,AAAAA64741,AA6416,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAA50407} + 9 | {56,52,35,27,80,44,81,22} | {AAAAAAAAAAAAAAA73034,AAAAAAAAAAAAA7929,AAAAAAA66161,AA88409,39557,A27153,AAAAAAAA9523,AAAAAAAAAAA99000} + 10 | {71,5,45} | {AAAAAAAAAAA21658,AAAAAAAAAAAA21089,AAA54451,AAAAAAAAAAAAAAAAAA54141,AAAAAAAAAAAAAA28620,AAAAAAAAAAA21658,AAAAAAAAAAA74076,AAAAAAAAA27249} + 11 | {41,86,74,48,22,74,47,50} | {AAAAAAAA9523,AAAAAAAAAAAA37562,AAAAAAAAAAAAAAAA14047,AAAAAAAAAAA46154,AAAA41702,AAAAAAAAAAAAAAAAA764,AAAAA62737,39557} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 13 | {3,52,34,23} | {AAAAAA98232,AAAA49534,AAAAAAAAAAA21658} + 14 | {78,57,19} | {AAAA8857,AAAAAAAAAAAAAAA73034,AAAAAAAA81587,AAAAAAAAAAAAAAA68526,AAAAA75968,AAAAAAAAAAAAAA65909,AAAAAAAAA10012,AAAAAAAAAAAAAA65909} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 16 | {14,63,85,11} | 
{AAAAAA66777} + 17 | {7,10,81,85} | {AAAAAA43678,AAAAAAA12144,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAAAAA15356} + 18 | {1} | {AAAAAAAAAAA33576,AAAAA95309,64261,AAA59323,AAAAAAAAAAAAAA95246,55847,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAAAA64374} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 20 | {72,89,70,51,54,37,8,49,79} | {AAAAAA58494} + 21 | {2,8,65,10,5,79,43} | {AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAAAAA91804,AAAAA64669,AAAAAAAAAAAAAAAA1443,AAAAAAAAAAAAAAAA23657,AAAAA12179,AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAA31334,AAAAAAAAAAAAAAAA41303,AAAAAAAAAAAAAAAAAAA85420} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 23 | {40,90,5,38,72,40,30,10,43,55} | {A6053,AAAAAAAAAAA6119,AA44673,AAAAAAAAAAAAAAAAA764,AA17009,AAAAA17383,AAAAA70514,AAAAA33250,AAAAA95309,AAAAAAAAAAAA37562} + 24 | {94,61,99,35,48} | {AAAAAAAAAAA50956,AAAAAAAAAAA15165,AAAA85070,AAAAAAAAAAAAAAA36627,AAAAA961,AAAAAAAAAA55219} + 25 | {31,1,10,11,27,79,38} | {AAAAAAAAAAAAAAAAAA59334,45449} + 26 | {71,10,9,69,75} | {47735,AAAAAAA21462,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA91804,AAAAAAAAA72121,AAAAAAAAAAAAAAAAAAA1205,AAAAA41597,AAAA8857,AAAAAAAAAAAAAAAAAAA15356,AA17009} + 27 | {94} | {AA6416,A6053,AAAAAAA21462,AAAAAAA57334,AAAAAAAAAAAAAAAAAA12591,AA88409,AAAAAAAAAAAAA70254} + 28 | {14,33,6,34,14} | {AAAAAAAAAAAAAAA13198,AAAAAAAA69452,AAAAAAAAAAA82945,AAAAAAA12144,AAAAAAAAA72121,AAAAAAAAAA18601} + 29 | {39,21} | {AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA38885,AAAA85070,AAAAAAAAAAAAAAAAAAA70104,AAAAA66674,AAAAAAAAAAAAA62007,AAAAAAAA69452,AAAAAAA1242,AAAAAAAAAAAAAAAA1729,AAAA35194} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 31 | {80,24,18,21,54} | {AAAAAAAAAAAAAAA13198,AAAAAAAAAAAAAAAAAAA70415,A27153,AAAAAAAAA53663,AAAAAAAAAAAAAAAAA50407,A68938} + 32 | {58,79,82,80,67,75,98,10,41} | {AAAAAAAAAAAAAAAAAA61286,AAA54451,AAAAAAAAAAAAAAAAAAA87527,A96617,51533} + 33 | {74,73} | {A85417,AAAAAAA56483,AAAAA17383,AAAAAAAAAAAAA62159,AAAAAAAAAAAA52814,AAAAAAAAAAAAA85723,AAAAAAAAAAAAAAAAAA55796} + 34 | {70,45} | {AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAA28620,AAAAAAAAAA55219,AAAAAAAA23648,AAAAAAAAAA22292,AAAAAAA1242} + 35 | {23,40} | {AAAAAAAAAAAA52814,AAAA48949,AAAAAAAAA34727,AAAA8857,AAAAAAAAAAAAAAAAAAA62179,AAAAAAAAAAAAAAA68526,AAAAAAA99836,AAAAAAAA50094,AAAA91194,AAAAAAAAAAAAA73084} + 36 | {79,82,14,52,30,5,79} | {AAAAAAAAA53663,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA89194,AA88409,AAAAAAAAAAAAAAA81326,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAA33598} + 37 | {53,11,81,39,3,78,58,64,74} | {AAAAAAAAAAAAAAAAAAA17075,AAAAAAA66161,AAAAAAAA23648,AAAAAAAAAAAAAA10611} + 38 | {59,5,4,95,28} | {AAAAAAAAAAA82945,A96617,47735,AAAAA12179,AAAAA64669,AAAAAA99807,AA74433,AAAAAAAAAAAAAAAAA59387} + 39 | {82,43,99,16,74} | {AAAAAAAAAAAAAAA67062,AAAAAAA57334,AAAAAAAAAAAAAA65909,A27153,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAA64777,AAAAAAAAAAAA81511,AAAAAAAAAAAAAA65909,AAAAAAAAAAAAAA28620} + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 41 | {19,26,63,12,93,73,27,94} | {AAAAAAA79710,AAAAAAAAAA55219,AAAA41702,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAAAAA63050,AAAAAAA99836,AAAAAAAAAAAAAA8666} + 42 | {15,76,82,75,8,91} | {AAAAAAAAAAA176,AAAAAA38063,45449,AAAAAA54032,AAAAAAA81898,AA6416,AAAAAAAAAAAAAAAAAAA62179,45449,AAAAA60038,AAAAAAAA81587} + 43 | {39,87,91,97,79,28} | 
{AAAAAAAAAAA74076,A96617,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAAAAA55796,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAA67946} + 44 | {40,58,68,29,54} | {AAAAAAA81898,AAAAAA66777,AAAAAA98232} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 46 | {53,24} | {AAAAAAAAAAA53908,AAAAAA54032,AAAAA17383,AAAA48949,AAAAAAAAAA18601,AAAAA64669,45449,AAAAAAAAAAA98051,AAAAAAAAAAAAAAAAAA71621} + 47 | {98,23,64,12,75,61} | {AAA59323,AAAAA95309,AAAAAAAAAAAAAAAA31334,AAAAAAAAA27249,AAAAA17383,AAAAAAAAAAAA37562,AAAAAA1059,A84822,55847,AAAAA70466} + 48 | {76,14} | {AAAAAAAAAAAAA59671,AAAAAAAAAAAAAAAAAAA91804,AAAAAA66777,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAA73084,AAAAAAA79710,AAAAAAAAAAAAAAA40402,AAAAAAAAAAAAAAAAAAA65037} + 49 | {56,5,54,37,49} | {AA21643,AAAAAAAAAAA92631,AAAAAAAA81587} + 50 | {20,12,37,64,93} | {AAAAAAAAAA5483,AAAAAAAAAAAAAAAAAAA1205,AA6416,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAAAA47955} + 51 | {47} | {AAAAAAAAAAAAAA96505,AAAAAAAAAAAAAAAAAA36842,AAAAA95309,AAAAAAAA81587,AA6416,AAAA91194,AAAAAA58494,AAAAAA1059,AAAAAAAA69452} + 52 | {89,0} | {AAAAAAAAAAAAAAAAAA47955,AAAAAAA48038,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAA73084,AAAAA70466,AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA46154,AA66862} + 53 | {38,17} | {AAAAAAAAAAA21658} + 54 | {70,47} | {AAAAAAAAAAAAAAAAAA54141,AAAAA40681,AAAAAAA48038,AAAAAAAAAAAAAAAA29150,AAAAA41597,AAAAAAAAAAAAAAAAAA59334,AA15322} + 55 | {47,79,47,64,72,25,71,24,93} | {AAAAAAAAAAAAAAAAAA55796,AAAAA62737} + 56 | {33,7,60,54,93,90,77,85,39} | {AAAAAAAAAAAAAAAAAA32918,AA42406} + 57 | {23,45,10,42,36,21,9,96} | {AAAAAAAAAAAAAAAAAAA70415} + 58 | {92} | {AAAAAAAAAAAAAAAA98414,AAAAAAAA23648,AAAAAAAAAAAAAAAAAA55796,AA25381,AAAAAAAAAAA6119} + 59 | {9,69,46,77} | {39557,AAAAAAA89932,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAAAAAA26540,AAA20874,AA6416,AAAAAAAAAAAAAAAAAA47955} + 60 | {62,2,59,38,89} | {AAAAAAA89932,AAAAAAAAAAAAAAAAAAA15356,AA99927,AA17009,AAAAAAAAAAAAAAA35875} + 61 | {72,2,44,95,54,54,13} | {AAAAAAAAAAAAAAAAAAA91804} + 62 | {83,72,29,73} | {AAAAAAAAAAAAA15097,AAAA8857,AAAAAAAAAAAA35809,AAAAAAAAAAAA52814,AAAAAAAAAAAAAAAAAAA38885,AAAAAAAAAAAAAAAAAA24183,AAAAAA43678,A96617} + 63 | {11,4,61,87} | {AAAAAAAAA27249,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAA13198,AAA20874,39557,51533,AAAAAAAAAAA53908,AAAAAAAAAAAAAA96505,AAAAAAAA78938} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 66 | {31,23,70,52,4,33,48,25} | {AAAAAAAAAAAAAAAAA69675,AAAAAAAA50094,AAAAAAAAAAA92631,AAAA35194,39557,AAAAAAA99836} + 67 | {31,94,7,10} | {AAAAAA38063,A96617,AAAA35194,AAAAAAAAAAAA67946} + 68 | {90,43,38} | {AA75092,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAA92631,AAAAAAAAA10012,AAAAAAAAAAAAA7929,AA21643} + 69 | {67,35,99,85,72,86,44} | {AAAAAAAAAAAAAAAAAAA1205,AAAAAAAA50094,AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAAAAAAA47955} + 70 | {56,70,83} | {AAAA41702,AAAAAAAAAAA82945,AA21643,AAAAAAAAAAA99000,A27153,AA25381,AAAAAAAAAAAAAA96505,AAAAAAA1242} + 71 | {74,26} | {AAAAAAAAAAA50956,AA74433,AAAAAAA21462,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAA70254,AAAAAAAAAA43419,39557} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 73 | {88,25,96,78,65,15,29,19} | 
{AAA54451,AAAAAAAAA27249,AAAAAAA9228,AAAAAAAAAAAAAAA67062,AAAAAAAAAAAAAAAAAAA70415,AAAAA17383,AAAAAAAAAAAAAAAA33598} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 75 | {12,96,83,24,71,89,55} | {AAAA48949,AAAAAAAA29716,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAA29150,AAA28075,AAAAAAAAAAAAAAAAA43052} + 76 | {92,55,10,7} | {AAAAAAAAAAAAAAA67062} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 78 | {55,89,44,84,34} | {AAAAAAAAAAA6119,AAAAAAAAAAAAAA8666,AA99927,AA42406,AAAAAAA81898,AAAAAAA9228,AAAAAAAAAAA92631,AA21643,AAAAAAAAAAAAAA28620} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 80 | {74,89,44,80,0} | {AAAA35194,AAAAAAAA79710,AAA20874,AAAAAAAAAAAAAAAAAAA70104,AAAAAAAAAAAAA73084,AAAAAAA57334,AAAAAAA9228,AAAAAAAAAAAAA62007} + 81 | {63,77,54,48,61,53,97} | {AAAAAAAAAAAAAAA81326,AAAAAAAAAA22292,AA25381,AAAAAAAAAAA74076,AAAAAAA81898,AAAAAAAAA72121} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 83 | {14,10} | {AAAAAAAAAA22292,AAAAAAAAAAAAA70254,AAAAAAAAAAA6119} + 84 | {11,83,35,13,96,94} | {AAAAA95309,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAAA24183} + 85 | {39,60} | {AAAAAAAAAAAAAAAA55798,AAAAAAAAAA22292,AAAAAAA66161,AAAAAAA21462,AAAAAAAAAAAAAAAAAA12591,55847,AAAAAA98232,AAAAAAAAAAA46154} + 86 | {33,81,72,74,45,36,82} | {AAAAAAAA81587,AAAAAAAAAAAAAA96505,45449,AAAA80176} + 87 | {57,27,50,12,97,68} | {AAAAAAAAAAAAAAAAA26540,AAAAAAAAA10012,AAAAAAAAAAAA35809,AAAAAAAAAAAAAAAA29150,AAAAAAAAAAA82945,AAAAAA66777,31228,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAA96505} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 90 | {88,75} | {AAAAA60038,AAAAAAAA23648,AAAAAAAAAAA99000,AAAA41702,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAA68526} + 91 | {78} | {AAAAAAAAAAAAA62007,AAA99043} + 92 | {85,63,49,45} | {AAAAAAA89932,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA21089} + 93 | {11} | {AAAAAAAAAAA176,AAAAAAAAAAAAAA8666,AAAAAAAAAAAAAAA453,AAAAAAAAAAAAA85723,A68938,AAAAAAAAAAAAA9821,AAAAAAA48038,AAAAAAAAAAAAAAAAA59387,AA99927,AAAAA17383} + 94 | {98,9,85,62,88,91,60,61,38,86} | {AAAAAAAA81587,AAAAA17383,AAAAAAAA81587} + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 99 | {37,86} | {AAAAAAAAAAAAAAAAAA32918,AAAAA70514,AAAAAAAAA10012,AAAAAAAAAAAAAAAAA59387,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA15356} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} + 101 | {} | {} + 102 | {NULL} | {NULL} +(102 rows) + +SELECT * FROM array_index_op_test WHERE i && '{}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 
rows) + +SELECT * FROM array_index_op_test WHERE i <@ '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +CREATE INDEX textarrayidx ON array_index_op_test USING gin (t); +explain (costs off) +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno; + QUERY PLAN +------------------------------------------------------------ + Sort + Sort Key: seqno + -> Bitmap Heap Scan on array_index_op_test + Recheck Cond: (t @> '{AAAAAAAA72908}'::text[]) + -> Bitmap Index Scan on textarrayidx + Index Cond: (t @> '{AAAAAAAA72908}'::text[]) +(6 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(4 rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAAA72908}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(4 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------------------+-------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(3 rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------------------+-------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(3 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+------+-------------------------------------------------------------------- + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} +(1 row) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAAA72908,AAAAAAAAAA646}' ORDER BY seqno; + seqno | i | t +-------+-----------------------+-------------------------------------------------------------------------------------------------------------------------------------------- + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | 
{AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(6 rows) + +SELECT * FROM array_index_op_test WHERE t <@ '{AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611}' ORDER BY seqno; + seqno | i | t +-------+--------------------+----------------------------------------------------------------------------------------------------------- + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 45 | {99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 101 | {} | {} +(3 rows) + +SELECT * FROM array_index_op_test WHERE t = '{AAAAAAAAAA646,A87088}' ORDER BY seqno; + seqno | i | t +-------+------------+------------------------ + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} +(1 row) + +SELECT * FROM array_index_op_test WHERE t = '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +SELECT * FROM array_index_op_test WHERE t @> '{}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1 | {92,75,71,52,64,83} | {AAAAAAAA44066,AAAAAA1059,AAAAAAAAAAA176,AAAAAAA48038} + 2 | {3,6} | {AAAAAA98232,AAAAAAAA79710,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAAAAAAA55798,AAAAAAAAA12793} + 3 | {37,64,95,43,3,41,13,30,11,43} | {AAAAAAAAAA48845,AAAAA75968,AAAAA95309,AAA54451,AAAAAAAAAA22292,AAAAAAA99836,A96617,AA17009,AAAAAAAAAAAAAA95246} + 4 | {71,39,99,55,33,75,45} | {AAAAAAAAA53663,AAAAAAAAAAAAAAA67062,AAAAAAAAAA64777,AAA99043,AAAAAAAAAAAAAAAAAAA91804,39557} + 5 | {50,42,77,50,4} | {AAAAAAAAAAAAAAAAA26540,AAAAAAA79710,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA176,AAAAA95309,AAAAAAAAAAA46154,AAAAAA66777,AAAAAAAAA27249,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA70104} + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 7 | {12,51,88,64,8} | {AAAAAAAAAAAAAAAAAA12591,AAAAAAAAAAAAAAAAA50407,AAAAAAAAAAAA67946} + 8 | {60,84} | {AAAAAAA81898,AAAAAA1059,AAAAAAAAAAAA81511,AAAAA961,AAAAAAAAAAAAAAAA31334,AAAAA64741,AA6416,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAA50407} + 9 | {56,52,35,27,80,44,81,22} | {AAAAAAAAAAAAAAA73034,AAAAAAAAAAAAA7929,AAAAAAA66161,AA88409,39557,A27153,AAAAAAAA9523,AAAAAAAAAAA99000} + 10 | {71,5,45} | {AAAAAAAAAAA21658,AAAAAAAAAAAA21089,AAA54451,AAAAAAAAAAAAAAAAAA54141,AAAAAAAAAAAAAA28620,AAAAAAAAAAA21658,AAAAAAAAAAA74076,AAAAAAAAA27249} + 11 | {41,86,74,48,22,74,47,50} | {AAAAAAAA9523,AAAAAAAAAAAA37562,AAAAAAAAAAAAAAAA14047,AAAAAAAAAAA46154,AAAA41702,AAAAAAAAAAAAAAAAA764,AAAAA62737,39557} + 12 | {17,99,18,52,91,72,0,43,96,23} | {AAAAA33250,AAAAAAAAAAAAAAAAAAA85420,AAAAAAAAAAA33576} + 13 | {3,52,34,23} | {AAAAAA98232,AAAA49534,AAAAAAAAAAA21658} + 14 | {78,57,19} | {AAAA8857,AAAAAAAAAAAAAAA73034,AAAAAAAA81587,AAAAAAAAAAAAAAA68526,AAAAA75968,AAAAAAAAAAAAAA65909,AAAAAAAAA10012,AAAAAAAAAAAAAA65909} + 15 | {17,14,16,63,67} | {AA6416,AAAAAAAAAA646,AAAAA95309} + 16 | {14,63,85,11} | {AAAAAA66777} + 17 | {7,10,81,85} | {AAAAAA43678,AAAAAAA12144,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAAAAA15356} + 18 | {1} | 
{AAAAAAAAAAA33576,AAAAA95309,64261,AAA59323,AAAAAAAAAAAAAA95246,55847,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAAAA64374} + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 20 | {72,89,70,51,54,37,8,49,79} | {AAAAAA58494} + 21 | {2,8,65,10,5,79,43} | {AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAAAAA91804,AAAAA64669,AAAAAAAAAAAAAAAA1443,AAAAAAAAAAAAAAAA23657,AAAAA12179,AAAAAAAAAAAAAAAAA88852,AAAAAAAAAAAAAAAA31334,AAAAAAAAAAAAAAAA41303,AAAAAAAAAAAAAAAAAAA85420} + 22 | {11,6,56,62,53,30} | {AAAAAAAA72908} + 23 | {40,90,5,38,72,40,30,10,43,55} | {A6053,AAAAAAAAAAA6119,AA44673,AAAAAAAAAAAAAAAAA764,AA17009,AAAAA17383,AAAAA70514,AAAAA33250,AAAAA95309,AAAAAAAAAAAA37562} + 24 | {94,61,99,35,48} | {AAAAAAAAAAA50956,AAAAAAAAAAA15165,AAAA85070,AAAAAAAAAAAAAAA36627,AAAAA961,AAAAAAAAAA55219} + 25 | {31,1,10,11,27,79,38} | {AAAAAAAAAAAAAAAAAA59334,45449} + 26 | {71,10,9,69,75} | {47735,AAAAAAA21462,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA91804,AAAAAAAAA72121,AAAAAAAAAAAAAAAAAAA1205,AAAAA41597,AAAA8857,AAAAAAAAAAAAAAAAAAA15356,AA17009} + 27 | {94} | {AA6416,A6053,AAAAAAA21462,AAAAAAA57334,AAAAAAAAAAAAAAAAAA12591,AA88409,AAAAAAAAAAAAA70254} + 28 | {14,33,6,34,14} | {AAAAAAAAAAAAAAA13198,AAAAAAAA69452,AAAAAAAAAAA82945,AAAAAAA12144,AAAAAAAAA72121,AAAAAAAAAA18601} + 29 | {39,21} | {AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAAAAA38885,AAAA85070,AAAAAAAAAAAAAAAAAAA70104,AAAAA66674,AAAAAAAAAAAAA62007,AAAAAAAA69452,AAAAAAA1242,AAAAAAAAAAAAAAAA1729,AAAA35194} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 31 | {80,24,18,21,54} | {AAAAAAAAAAAAAAA13198,AAAAAAAAAAAAAAAAAAA70415,A27153,AAAAAAAAA53663,AAAAAAAAAAAAAAAAA50407,A68938} + 32 | {58,79,82,80,67,75,98,10,41} | {AAAAAAAAAAAAAAAAAA61286,AAA54451,AAAAAAAAAAAAAAAAAAA87527,A96617,51533} + 33 | {74,73} | {A85417,AAAAAAA56483,AAAAA17383,AAAAAAAAAAAAA62159,AAAAAAAAAAAA52814,AAAAAAAAAAAAA85723,AAAAAAAAAAAAAAAAAA55796} + 34 | {70,45} | {AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAA28620,AAAAAAAAAA55219,AAAAAAAA23648,AAAAAAAAAA22292,AAAAAAA1242} + 35 | {23,40} | {AAAAAAAAAAAA52814,AAAA48949,AAAAAAAAA34727,AAAA8857,AAAAAAAAAAAAAAAAAAA62179,AAAAAAAAAAAAAAA68526,AAAAAAA99836,AAAAAAAA50094,AAAA91194,AAAAAAAAAAAAA73084} + 36 | {79,82,14,52,30,5,79} | {AAAAAAAAA53663,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA89194,AA88409,AAAAAAAAAAAAAAA81326,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAA33598} + 37 | {53,11,81,39,3,78,58,64,74} | {AAAAAAAAAAAAAAAAAAA17075,AAAAAAA66161,AAAAAAAA23648,AAAAAAAAAAAAAA10611} + 38 | {59,5,4,95,28} | {AAAAAAAAAAA82945,A96617,47735,AAAAA12179,AAAAA64669,AAAAAA99807,AA74433,AAAAAAAAAAAAAAAAA59387} + 39 | {82,43,99,16,74} | {AAAAAAAAAAAAAAA67062,AAAAAAA57334,AAAAAAAAAAAAAA65909,A27153,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAA64777,AAAAAAAAAAAA81511,AAAAAAAAAAAAAA65909,AAAAAAAAAAAAAA28620} + 40 | {34} | {AAAAAAAAAAAAAA10611,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAA50956,AAAAAAAAAAAAAAAA31334,AAAAA70466,AAAAAAAA81587,AAAAAAA74623} + 41 | {19,26,63,12,93,73,27,94} | {AAAAAAA79710,AAAAAAAAAA55219,AAAA41702,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA71621,AAAAAAAAAAAAAAAAA63050,AAAAAAA99836,AAAAAAAAAAAAAA8666} + 42 | {15,76,82,75,8,91} | {AAAAAAAAAAA176,AAAAAA38063,45449,AAAAAA54032,AAAAAAA81898,AA6416,AAAAAAAAAAAAAAAAAAA62179,45449,AAAAA60038,AAAAAAAA81587} + 43 | {39,87,91,97,79,28} | {AAAAAAAAAAA74076,A96617,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAAAAA55796,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAA67946} + 44 | {40,58,68,29,54} | {AAAAAAA81898,AAAAAA66777,AAAAAA98232} + 45 | 
{99,45} | {AAAAAAAA72908,AAAAAAAAAAAAAAAAAAA17075,AA88409,AAAAAAAAAAAAAAAAAA36842,AAAAAAA48038,AAAAAAAAAAAAAA10611} + 46 | {53,24} | {AAAAAAAAAAA53908,AAAAAA54032,AAAAA17383,AAAA48949,AAAAAAAAAA18601,AAAAA64669,45449,AAAAAAAAAAA98051,AAAAAAAAAAAAAAAAAA71621} + 47 | {98,23,64,12,75,61} | {AAA59323,AAAAA95309,AAAAAAAAAAAAAAAA31334,AAAAAAAAA27249,AAAAA17383,AAAAAAAAAAAA37562,AAAAAA1059,A84822,55847,AAAAA70466} + 48 | {76,14} | {AAAAAAAAAAAAA59671,AAAAAAAAAAAAAAAAAAA91804,AAAAAA66777,AAAAAAAAAAAAAAAAAAA89194,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAA73084,AAAAAAA79710,AAAAAAAAAAAAAAA40402,AAAAAAAAAAAAAAAAAAA65037} + 49 | {56,5,54,37,49} | {AA21643,AAAAAAAAAAA92631,AAAAAAAA81587} + 50 | {20,12,37,64,93} | {AAAAAAAAAA5483,AAAAAAAAAAAAAAAAAAA1205,AA6416,AAAAAAAAAAAAAAAAA63050,AAAAAAAAAAAAAAAAAA47955} + 51 | {47} | {AAAAAAAAAAAAAA96505,AAAAAAAAAAAAAAAAAA36842,AAAAA95309,AAAAAAAA81587,AA6416,AAAA91194,AAAAAA58494,AAAAAA1059,AAAAAAAA69452} + 52 | {89,0} | {AAAAAAAAAAAAAAAAAA47955,AAAAAAA48038,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAA73084,AAAAA70466,AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA46154,AA66862} + 53 | {38,17} | {AAAAAAAAAAA21658} + 54 | {70,47} | {AAAAAAAAAAAAAAAAAA54141,AAAAA40681,AAAAAAA48038,AAAAAAAAAAAAAAAA29150,AAAAA41597,AAAAAAAAAAAAAAAAAA59334,AA15322} + 55 | {47,79,47,64,72,25,71,24,93} | {AAAAAAAAAAAAAAAAAA55796,AAAAA62737} + 56 | {33,7,60,54,93,90,77,85,39} | {AAAAAAAAAAAAAAAAAA32918,AA42406} + 57 | {23,45,10,42,36,21,9,96} | {AAAAAAAAAAAAAAAAAAA70415} + 58 | {92} | {AAAAAAAAAAAAAAAA98414,AAAAAAAA23648,AAAAAAAAAAAAAAAAAA55796,AA25381,AAAAAAAAAAA6119} + 59 | {9,69,46,77} | {39557,AAAAAAA89932,AAAAAAAAAAAAAAAAA43052,AAAAAAAAAAAAAAAAA26540,AAA20874,AA6416,AAAAAAAAAAAAAAAAAA47955} + 60 | {62,2,59,38,89} | {AAAAAAA89932,AAAAAAAAAAAAAAAAAAA15356,AA99927,AA17009,AAAAAAAAAAAAAAA35875} + 61 | {72,2,44,95,54,54,13} | {AAAAAAAAAAAAAAAAAAA91804} + 62 | {83,72,29,73} | {AAAAAAAAAAAAA15097,AAAA8857,AAAAAAAAAAAA35809,AAAAAAAAAAAA52814,AAAAAAAAAAAAAAAAAAA38885,AAAAAAAAAAAAAAAAAA24183,AAAAAA43678,A96617} + 63 | {11,4,61,87} | {AAAAAAAAA27249,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAA13198,AAA20874,39557,51533,AAAAAAAAAAA53908,AAAAAAAAAAAAAA96505,AAAAAAAA78938} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 65 | {61,5,76,59,17} | {AAAAAA99807,AAAAA64741,AAAAAAAAAAA53908,AA21643,AAAAAAAAA10012} + 66 | {31,23,70,52,4,33,48,25} | {AAAAAAAAAAAAAAAAA69675,AAAAAAAA50094,AAAAAAAAAAA92631,AAAA35194,39557,AAAAAAA99836} + 67 | {31,94,7,10} | {AAAAAA38063,A96617,AAAA35194,AAAAAAAAAAAA67946} + 68 | {90,43,38} | {AA75092,AAAAAAAAAAAAAAAAA69675,AAAAAAAAAAA92631,AAAAAAAAA10012,AAAAAAAAAAAAA7929,AA21643} + 69 | {67,35,99,85,72,86,44} | {AAAAAAAAAAAAAAAAAAA1205,AAAAAAAA50094,AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAAAAAAA47955} + 70 | {56,70,83} | {AAAA41702,AAAAAAAAAAA82945,AA21643,AAAAAAAAAAA99000,A27153,AA25381,AAAAAAAAAAAAAA96505,AAAAAAA1242} + 71 | {74,26} | {AAAAAAAAAAA50956,AA74433,AAAAAAA21462,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAA36627,AAAAAAAAAAAAA70254,AAAAAAAAAA43419,39557} + 72 | {22,1,16,78,20,91,83} | {47735,AAAAAAA56483,AAAAAAAAAAAAA93788,AA42406,AAAAAAAAAAAAA73084,AAAAAAAA72908,AAAAAAAAAAAAAAAAAA61286,AAAAA66674,AAAAAAAAAAAAAAAAA50407} + 73 | {88,25,96,78,65,15,29,19} | {AAA54451,AAAAAAAAA27249,AAAAAAA9228,AAAAAAAAAAAAAAA67062,AAAAAAAAAAAAAAAAAAA70415,AAAAA17383,AAAAAAAAAAAAAAAA33598} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 75 | 
{12,96,83,24,71,89,55} | {AAAA48949,AAAAAAAA29716,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA67946,AAAAAAAAAAAAAAAA29150,AAA28075,AAAAAAAAAAAAAAAAA43052} + 76 | {92,55,10,7} | {AAAAAAAAAAAAAAA67062} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 78 | {55,89,44,84,34} | {AAAAAAAAAAA6119,AAAAAAAAAAAAAA8666,AA99927,AA42406,AAAAAAA81898,AAAAAAA9228,AAAAAAAAAAA92631,AA21643,AAAAAAAAAAAAAA28620} + 79 | {45} | {AAAAAAAAAA646,AAAAAAAAAAAAAAAAAAA70415,AAAAAA43678,AAAAAAAA72908} + 80 | {74,89,44,80,0} | {AAAA35194,AAAAAAAA79710,AAA20874,AAAAAAAAAAAAAAAAAAA70104,AAAAAAAAAAAAA73084,AAAAAAA57334,AAAAAAA9228,AAAAAAAAAAAAA62007} + 81 | {63,77,54,48,61,53,97} | {AAAAAAAAAAAAAAA81326,AAAAAAAAAA22292,AA25381,AAAAAAAAAAA74076,AAAAAAA81898,AAAAAAAAA72121} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 83 | {14,10} | {AAAAAAAAAA22292,AAAAAAAAAAAAA70254,AAAAAAAAAAA6119} + 84 | {11,83,35,13,96,94} | {AAAAA95309,AAAAAAAAAAAAAAAAAA32918,AAAAAAAAAAAAAAAAAA24183} + 85 | {39,60} | {AAAAAAAAAAAAAAAA55798,AAAAAAAAAA22292,AAAAAAA66161,AAAAAAA21462,AAAAAAAAAAAAAAAAAA12591,55847,AAAAAA98232,AAAAAAAAAAA46154} + 86 | {33,81,72,74,45,36,82} | {AAAAAAAA81587,AAAAAAAAAAAAAA96505,45449,AAAA80176} + 87 | {57,27,50,12,97,68} | {AAAAAAAAAAAAAAAAA26540,AAAAAAAAA10012,AAAAAAAAAAAA35809,AAAAAAAAAAAAAAAA29150,AAAAAAAAAAA82945,AAAAAA66777,31228,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAA96505} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 90 | {88,75} | {AAAAA60038,AAAAAAAA23648,AAAAAAAAAAA99000,AAAA41702,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAA68526} + 91 | {78} | {AAAAAAAAAAAAA62007,AAA99043} + 92 | {85,63,49,45} | {AAAAAAA89932,AAAAAAAAAAAAA22860,AAAAAAAAAAAAAAAAAAA1205,AAAAAAAAAAAA21089} + 93 | {11} | {AAAAAAAAAAA176,AAAAAAAAAAAAAA8666,AAAAAAAAAAAAAAA453,AAAAAAAAAAAAA85723,A68938,AAAAAAAAAAAAA9821,AAAAAAA48038,AAAAAAAAAAAAAAAAA59387,AA99927,AAAAA17383} + 94 | {98,9,85,62,88,91,60,61,38,86} | {AAAAAAAA81587,AAAAA17383,AAAAAAAA81587} + 95 | {47,77} | {AAAAAAAAAAAAAAAAA764,AAAAAAAAAAA74076,AAAAAAAAAA18107,AAAAA40681,AAAAAAAAAAAAAAA35875,AAAAA60038,AAAAAAA56483} + 96 | {23,97,43} | {AAAAAAAAAA646,A87088} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 99 | {37,86} | {AAAAAAAAAAAAAAAAAA32918,AAAAA70514,AAAAAAAAA10012,AAAAAAAAAAAAAAAAA59387,AAAAAAAAAA64777,AAAAAAAAAAAAAAAAAAA15356} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} + 101 | {} | {} + 102 | {NULL} | {NULL} +(102 rows) + +SELECT * FROM array_index_op_test WHERE t && '{}' ORDER BY seqno; + seqno | i | t +-------+---+--- +(0 rows) + +SELECT * FROM array_index_op_test WHERE t <@ '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +-- And try it with a multicolumn GIN index +DROP INDEX intarrayidx, textarrayidx; +CREATE INDEX botharrayidx ON 
array_index_op_test USING gin (i, t); +SELECT * FROM array_index_op_test WHERE i @> '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE i && '{32}' ORDER BY seqno; + seqno | i | t +-------+---------------------------------+------------------------------------------------------------------------------------------------------------------------------------ + 6 | {39,35,5,94,17,92,60,32} | {AAAAAAAAAAAAAAA35875,AAAAAAAAAAAAAAAA23657} + 74 | {32} | {AAAAAAAAAAAAAAAA1729,AAAAAAAAAAAAA22860,AAAAAA99807,AAAAA17383,AAAAAAAAAAAAAAA67062,AAAAAAAAAAA15165,AAAAAAAAAAA50956} + 77 | {97,15,32,17,55,59,18,37,50,39} | {AAAAAAAAAAAA67946,AAAAAA54032,AAAAAAAA81587,55847,AAAAAAAAAAAAAA28620,AAAAAAAAAAAAAAAAA43052,AAAAAA75463,AAAA49534,AAAAAAAA44066} + 89 | {40,32,17,6,30,88} | {AA44673,AAAAAAAAAAA6119,AAAAAAAAAAAAAAAA23657,AAAAAAAAAAAAAAAAAA47955,AAAAAAAAAAAAAAAA33598,AAAAAAAAAAA33576,AA44673} + 98 | {38,34,32,89} | {AAAAAAAAAAAAAAAAAA71621,AAAA8857,AAAAAAAAAAAAAAAAAAA65037,AAAAAAAAAAAAAAAA31334,AAAAAAAAAA48845} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(6 rows) + +SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------- + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(7 rows) + +SELECT * FROM array_index_op_test WHERE t && '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t 
+-------+--------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------- + 19 | {52,82,17,74,23,46,69,51,75} | {AAAAAAAAAAAAA73084,AAAAA75968,AAAAAAAAAAAAAAAA14047,AAAAAAA80240,AAAAAAAAAAAAAAAAAAA1205,A68938} + 30 | {26,81,47,91,34} | {AAAAAAAAAAAAAAAAAAA70104,AAAAAAA80240} + 64 | {26,19,34,24,81,78} | {A96617,AAAAAAAAAAAAAAAAAAA70104,A68938,AAAAAAAAAAA53908,AAAAAAAAAAAAAAA453,AA17009,AAAAAAA80240} + 82 | {34,60,4,79,78,16,86,89,42,50} | {AAAAA40681,AAAAAAAAAAAAAAAAAA12591,AAAAAAA80240,AAAAAAAAAAAAAAAA55798,AAAAAAAAAAAAAAAAAAA70104} + 88 | {41,90,77,24,6,24} | {AAAA35194,AAAA35194,AAAAAAA80240,AAAAAAAAAAA46154,AAAAAA58494,AAAAAAAAAAAAAAAAAAA17075,AAAAAAAAAAAAAAAAAA59334,AAAAAAAAAAAAAAAAAAA91804,AA74433} + 97 | {54,2,86,65} | {47735,AAAAAAA99836,AAAAAAAAAAAAAAAAA6897,AAAAAAAAAAAAAAAA29150,AAAAAAA80240,AAAAAAAAAAAAAAAA98414,AAAAAAA56483,AAAAAAAAAAAAAAAA29150,AAAAAAA39692,AA21643} + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(7 rows) + +SELECT * FROM array_index_op_test WHERE i @> '{32}' AND t && '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+-----------------------------+------------------------------------------------------------------------------ + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(1 row) + +SELECT * FROM array_index_op_test WHERE i && '{32}' AND t @> '{AAAAAAA80240}' ORDER BY seqno; + seqno | i | t +-------+-----------------------------+------------------------------------------------------------------------------ + 100 | {85,32,57,39,49,84,32,3,30} | {AAAAAAA80240,AAAAAAAAAAAAAAAA1729,AAAAA60038,AAAAAAAAAAA92631,AAAAAAAA9523} +(1 row) + +SELECT * FROM array_index_op_test WHERE t = '{}' ORDER BY seqno; + seqno | i | t +-------+----+---- + 101 | {} | {} +(1 row) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- +-- Try a GIN index with a lot of items with same key. (GIN creates a posting +-- tree when there are enough duplicates) +-- +CREATE TABLE array_gin_test (a int[]); +INSERT INTO array_gin_test SELECT ARRAY[1, g%5, g] FROM generate_series(1, 10000) g; +CREATE INDEX array_gin_test_idx ON array_gin_test USING gin (a); +SELECT COUNT(*) FROM array_gin_test WHERE a @> '{2}'; + count +------- + 2000 +(1 row) + +DROP TABLE array_gin_test; +-- +-- Test GIN index's reloptions +-- +CREATE INDEX gin_relopts_test ON array_index_op_test USING gin (i) + WITH (FASTUPDATE=on, GIN_PENDING_LIST_LIMIT=128); +\d+ gin_relopts_test + Index "public.gin_relopts_test" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+---------+-------------- + i | integer | yes | i | plain | +gin, for table "public.array_index_op_test" +Options: fastupdate=on, gin_pending_list_limit=128 + +-- +-- HASH +-- +CREATE UNLOGGED TABLE unlogged_hash_table (id int4); +CREATE INDEX unlogged_hash_index ON unlogged_hash_table USING hash (id int4_ops); +DROP TABLE unlogged_hash_table; +-- CREATE INDEX hash_ovfl_index ON hash_ovfl_heap USING hash (x int4_ops); +-- Test hash index build tuplesorting. 
Force hash tuplesort using low +-- maintenance_work_mem setting and fillfactor: +SET maintenance_work_mem = '1MB'; +CREATE INDEX hash_tuplesort_idx ON tenk1 USING hash (stringu1 name_ops) WITH (fillfactor = 10); +EXPLAIN (COSTS OFF) +SELECT count(*) FROM tenk1 WHERE stringu1 = 'TVAAAA'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on tenk1 + Recheck Cond: (stringu1 = 'TVAAAA'::name) + -> Bitmap Index Scan on hash_tuplesort_idx + Index Cond: (stringu1 = 'TVAAAA'::name) +(5 rows) + +SELECT count(*) FROM tenk1 WHERE stringu1 = 'TVAAAA'; + count +------- + 14 +(1 row) + +DROP INDEX hash_tuplesort_idx; +RESET maintenance_work_mem; +-- +-- Test unique null behavior +-- +CREATE TABLE unique_tbl (i int, t text); +CREATE UNIQUE INDEX unique_idx1 ON unique_tbl (i) NULLS DISTINCT; +CREATE UNIQUE INDEX unique_idx2 ON unique_tbl (i) NULLS NOT DISTINCT; +INSERT INTO unique_tbl VALUES (1, 'one'); +INSERT INTO unique_tbl VALUES (2, 'two'); +INSERT INTO unique_tbl VALUES (3, 'three'); +INSERT INTO unique_tbl VALUES (4, 'four'); +INSERT INTO unique_tbl VALUES (5, 'one'); +INSERT INTO unique_tbl (t) VALUES ('six'); +INSERT INTO unique_tbl (t) VALUES ('seven'); -- error from unique_idx2 +ERROR: duplicate key value violates unique constraint "unique_idx2" +DETAIL: Key (i)=(null) already exists. +DROP INDEX unique_idx1, unique_idx2; +INSERT INTO unique_tbl (t) VALUES ('seven'); +-- build indexes on filled table +CREATE UNIQUE INDEX unique_idx3 ON unique_tbl (i) NULLS DISTINCT; -- ok +CREATE UNIQUE INDEX unique_idx4 ON unique_tbl (i) NULLS NOT DISTINCT; -- error +ERROR: could not create unique index "unique_idx4" +DETAIL: Key (i)=(null) is duplicated. +DELETE FROM unique_tbl WHERE t = 'seven'; +CREATE UNIQUE INDEX unique_idx4 ON unique_tbl (i) NULLS NOT DISTINCT; -- ok now +\d unique_tbl + Table "public.unique_tbl" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + i | integer | | | + t | text | | | +Indexes: + "unique_idx3" UNIQUE, btree (i) + "unique_idx4" UNIQUE, btree (i) NULLS NOT DISTINCT + +\d unique_idx3 + Index "public.unique_idx3" + Column | Type | Key? | Definition +--------+---------+------+------------ + i | integer | yes | i +unique, btree, for table "public.unique_tbl" + +\d unique_idx4 + Index "public.unique_idx4" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + i | integer | yes | i +unique nulls not distinct, btree, for table "public.unique_tbl" + +SELECT pg_get_indexdef('unique_idx3'::regclass); + pg_get_indexdef +---------------------------------------------------------------------- + CREATE UNIQUE INDEX unique_idx3 ON public.unique_tbl USING btree (i) +(1 row) + +SELECT pg_get_indexdef('unique_idx4'::regclass); + pg_get_indexdef +----------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX unique_idx4 ON public.unique_tbl USING btree (i) NULLS NOT DISTINCT +(1 row) + +DROP TABLE unique_tbl; +-- +-- Test functional index +-- +CREATE TABLE func_index_heap (f1 text, f2 text); +CREATE UNIQUE INDEX func_index_index on func_index_heap (textcat(f1,f2)); +INSERT INTO func_index_heap VALUES('ABC','DEF'); +INSERT INTO func_index_heap VALUES('AB','CDEFG'); +INSERT INTO func_index_heap VALUES('QWE','RTY'); +-- this should fail because of unique index: +INSERT INTO func_index_heap VALUES('ABCD', 'EF'); +ERROR: duplicate key value violates unique constraint "func_index_index" +DETAIL: Key (textcat(f1, f2))=(ABCDEF) already exists. +-- but this shouldn't: +INSERT INTO func_index_heap VALUES('QWERTY'); +-- while we're here, see that the metadata looks sane +\d func_index_heap + Table "public.func_index_heap" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + f1 | text | | | + f2 | text | | | +Indexes: + "func_index_index" UNIQUE, btree (textcat(f1, f2)) + +\d func_index_index + Index "public.func_index_index" + Column | Type | Key? | Definition +---------+------+------+----------------- + textcat | text | yes | textcat(f1, f2) +unique, btree, for table "public.func_index_heap" + +-- +-- Same test, expressional index +-- +DROP TABLE func_index_heap; +CREATE TABLE func_index_heap (f1 text, f2 text); +CREATE UNIQUE INDEX func_index_index on func_index_heap ((f1 || f2) text_ops); +INSERT INTO func_index_heap VALUES('ABC','DEF'); +INSERT INTO func_index_heap VALUES('AB','CDEFG'); +INSERT INTO func_index_heap VALUES('QWE','RTY'); +-- this should fail because of unique index: +INSERT INTO func_index_heap VALUES('ABCD', 'EF'); +ERROR: duplicate key value violates unique constraint "func_index_index" +DETAIL: Key ((f1 || f2))=(ABCDEF) already exists. +-- but this shouldn't: +INSERT INTO func_index_heap VALUES('QWERTY'); +-- while we're here, see that the metadata looks sane +\d func_index_heap + Table "public.func_index_heap" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + f1 | text | | | + f2 | text | | | +Indexes: + "func_index_index" UNIQUE, btree ((f1 || f2)) + +\d func_index_index + Index "public.func_index_index" + Column | Type | Key? 
| Definition +--------+------+------+------------ + expr | text | yes | (f1 || f2) +unique, btree, for table "public.func_index_heap" + +-- this should fail because of unsafe column type (anonymous record) +create index on func_index_heap ((f1 || f2), (row(f1, f2))); +ERROR: column "row" has pseudo-type record +-- +-- Test unique index with included columns +-- +CREATE TABLE covering_index_heap (f1 int, f2 int, f3 text); +CREATE UNIQUE INDEX covering_index_index on covering_index_heap (f1,f2) INCLUDE(f3); +INSERT INTO covering_index_heap VALUES(1,1,'AAA'); +INSERT INTO covering_index_heap VALUES(1,2,'AAA'); +-- this should fail because of unique index on f1,f2: +INSERT INTO covering_index_heap VALUES(1,2,'BBB'); +ERROR: duplicate key value violates unique constraint "covering_index_index" +DETAIL: Key (f1, f2)=(1, 2) already exists. +-- and this shouldn't: +INSERT INTO covering_index_heap VALUES(1,4,'AAA'); +-- Try to build index on table that already contains data +CREATE UNIQUE INDEX covering_pkey on covering_index_heap (f1,f2) INCLUDE(f3); +-- Try to use existing covering index as primary key +ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING INDEX +covering_pkey; +DROP TABLE covering_index_heap; +-- +-- Try some concurrent index builds +-- +-- Unfortunately this only tests about half the code paths because there are +-- no concurrent updates happening to the table at the same time. +CREATE TABLE concur_heap (f1 text, f2 text); +-- empty table +CREATE INDEX CONCURRENTLY concur_index1 ON concur_heap(f2,f1); +CREATE INDEX CONCURRENTLY IF NOT EXISTS concur_index1 ON concur_heap(f2,f1); +NOTICE: relation "concur_index1" already exists, skipping +INSERT INTO concur_heap VALUES ('a','b'); +INSERT INTO concur_heap VALUES ('b','b'); +-- unique index +CREATE UNIQUE INDEX CONCURRENTLY concur_index2 ON concur_heap(f1); +CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS concur_index2 ON concur_heap(f1); +NOTICE: relation "concur_index2" already exists, skipping +-- check if constraint is set up properly to be enforced +INSERT INTO concur_heap VALUES ('b','x'); +ERROR: duplicate key value violates unique constraint "concur_index2" +DETAIL: Key (f1)=(b) already exists. +-- check if constraint is enforced properly at build time +CREATE UNIQUE INDEX CONCURRENTLY concur_index3 ON concur_heap(f2); +ERROR: could not create unique index "concur_index3" +DETAIL: Key (f2)=(b) is duplicated. +-- test that expression indexes and partial indexes work concurrently +CREATE INDEX CONCURRENTLY concur_index4 on concur_heap(f2) WHERE f1='a'; +CREATE INDEX CONCURRENTLY concur_index5 on concur_heap(f2) WHERE f1='x'; +-- here we also check that you can default the index name +CREATE INDEX CONCURRENTLY on concur_heap((f2||f1)); +-- You can't do a concurrent index build in a transaction +BEGIN; +CREATE INDEX CONCURRENTLY concur_index7 ON concur_heap(f1); +ERROR: CREATE INDEX CONCURRENTLY cannot run inside a transaction block +COMMIT; +-- test where predicate is able to do a transactional update during +-- a concurrent build before switching pg_index state flags. 
+CREATE FUNCTION predicate_stable() RETURNS bool IMMUTABLE +LANGUAGE plpgsql AS $$ +BEGIN + EXECUTE 'SELECT txid_current()'; + RETURN true; +END; $$; +CREATE INDEX CONCURRENTLY concur_index8 ON concur_heap (f1) + WHERE predicate_stable(); +DROP INDEX concur_index8; +DROP FUNCTION predicate_stable(); +-- But you can do a regular index build in a transaction +BEGIN; +CREATE INDEX std_index on concur_heap(f2); +COMMIT; +-- Failed builds are left invalid by VACUUM FULL, fixed by REINDEX +VACUUM FULL concur_heap; +REINDEX TABLE concur_heap; +ERROR: could not create unique index "concur_index3" +DETAIL: Key (f2)=(b) is duplicated. +DELETE FROM concur_heap WHERE f1 = 'b'; +VACUUM FULL concur_heap; +\d concur_heap + Table "public.concur_heap" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + f1 | text | | | + f2 | text | | | +Indexes: + "concur_heap_expr_idx" btree ((f2 || f1)) + "concur_index1" btree (f2, f1) + "concur_index2" UNIQUE, btree (f1) + "concur_index3" UNIQUE, btree (f2) INVALID + "concur_index4" btree (f2) WHERE f1 = 'a'::text + "concur_index5" btree (f2) WHERE f1 = 'x'::text + "std_index" btree (f2) + +REINDEX TABLE concur_heap; +\d concur_heap + Table "public.concur_heap" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + f1 | text | | | + f2 | text | | | +Indexes: + "concur_heap_expr_idx" btree ((f2 || f1)) + "concur_index1" btree (f2, f1) + "concur_index2" UNIQUE, btree (f1) + "concur_index3" UNIQUE, btree (f2) + "concur_index4" btree (f2) WHERE f1 = 'a'::text + "concur_index5" btree (f2) WHERE f1 = 'x'::text + "std_index" btree (f2) + +-- Temporary tables with concurrent builds and on-commit actions +-- CONCURRENTLY used with CREATE INDEX and DROP INDEX is ignored. +-- PRESERVE ROWS, the default. +CREATE TEMP TABLE concur_temp (f1 int, f2 text) + ON COMMIT PRESERVE ROWS; +INSERT INTO concur_temp VALUES (1, 'foo'), (2, 'bar'); +CREATE INDEX CONCURRENTLY concur_temp_ind ON concur_temp(f1); +DROP INDEX CONCURRENTLY concur_temp_ind; +DROP TABLE concur_temp; +-- ON COMMIT DROP +BEGIN; +CREATE TEMP TABLE concur_temp (f1 int, f2 text) + ON COMMIT DROP; +INSERT INTO concur_temp VALUES (1, 'foo'), (2, 'bar'); +-- Fails when running in a transaction. 
+CREATE INDEX CONCURRENTLY concur_temp_ind ON concur_temp(f1); +ERROR: CREATE INDEX CONCURRENTLY cannot run inside a transaction block +COMMIT; +-- ON COMMIT DELETE ROWS +CREATE TEMP TABLE concur_temp (f1 int, f2 text) + ON COMMIT DELETE ROWS; +INSERT INTO concur_temp VALUES (1, 'foo'), (2, 'bar'); +CREATE INDEX CONCURRENTLY concur_temp_ind ON concur_temp(f1); +DROP INDEX CONCURRENTLY concur_temp_ind; +DROP TABLE concur_temp; +-- +-- Try some concurrent index drops +-- +DROP INDEX CONCURRENTLY "concur_index2"; -- works +DROP INDEX CONCURRENTLY IF EXISTS "concur_index2"; -- notice +NOTICE: index "concur_index2" does not exist, skipping +-- failures +DROP INDEX CONCURRENTLY "concur_index2", "concur_index3"; +ERROR: DROP INDEX CONCURRENTLY does not support dropping multiple objects +BEGIN; +DROP INDEX CONCURRENTLY "concur_index5"; +ERROR: DROP INDEX CONCURRENTLY cannot run inside a transaction block +ROLLBACK; +-- successes +DROP INDEX CONCURRENTLY IF EXISTS "concur_index3"; +DROP INDEX CONCURRENTLY "concur_index4"; +DROP INDEX CONCURRENTLY "concur_index5"; +DROP INDEX CONCURRENTLY "concur_index1"; +DROP INDEX CONCURRENTLY "concur_heap_expr_idx"; +\d concur_heap + Table "public.concur_heap" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + f1 | text | | | + f2 | text | | | +Indexes: + "std_index" btree (f2) + +DROP TABLE concur_heap; +-- +-- Test ADD CONSTRAINT USING INDEX +-- +CREATE TABLE cwi_test( a int , b varchar(10), c char); +-- add some data so that all tests have something to work with. +INSERT INTO cwi_test VALUES(1, 2), (3, 4), (5, 6); +CREATE UNIQUE INDEX cwi_uniq_idx ON cwi_test(a , b); +ALTER TABLE cwi_test ADD primary key USING INDEX cwi_uniq_idx; +\d cwi_test + Table "public.cwi_test" + Column | Type | Collation | Nullable | Default +--------+-----------------------+-----------+----------+--------- + a | integer | | not null | + b | character varying(10) | | not null | + c | character(1) | | | +Indexes: + "cwi_uniq_idx" PRIMARY KEY, btree (a, b) + +\d cwi_uniq_idx + Index "public.cwi_uniq_idx" + Column | Type | Key? | Definition +--------+-----------------------+------+------------ + a | integer | yes | a + b | character varying(10) | yes | b +primary key, btree, for table "public.cwi_test" + +CREATE UNIQUE INDEX cwi_uniq2_idx ON cwi_test(b , a); +ALTER TABLE cwi_test DROP CONSTRAINT cwi_uniq_idx, + ADD CONSTRAINT cwi_replaced_pkey PRIMARY KEY + USING INDEX cwi_uniq2_idx; +NOTICE: ALTER TABLE / ADD CONSTRAINT USING INDEX will rename index "cwi_uniq2_idx" to "cwi_replaced_pkey" +\d cwi_test + Table "public.cwi_test" + Column | Type | Collation | Nullable | Default +--------+-----------------------+-----------+----------+--------- + a | integer | | not null | + b | character varying(10) | | not null | + c | character(1) | | | +Indexes: + "cwi_replaced_pkey" PRIMARY KEY, btree (b, a) + +\d cwi_replaced_pkey + Index "public.cwi_replaced_pkey" + Column | Type | Key? | Definition +--------+-----------------------+------+------------ + b | character varying(10) | yes | b + a | integer | yes | a +primary key, btree, for table "public.cwi_test" + +DROP INDEX cwi_replaced_pkey; -- Should fail; a constraint depends on it +ERROR: cannot drop index cwi_replaced_pkey because constraint cwi_replaced_pkey on table cwi_test requires it +HINT: You can drop constraint cwi_replaced_pkey on table cwi_test instead. 
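The ADD CONSTRAINT ... USING INDEX sequence exercised just above is the mechanism commonly used to promote a separately built (for example, concurrently built) unique index into a table constraint. A minimal sketch of that pattern, with hypothetical table and index names, is shown below; as the NOTICE and DROP INDEX error above illustrate, the adopted index is renamed to the constraint name and can afterwards only be removed by dropping the constraint:

CREATE TABLE invoices (id bigint NOT NULL, total numeric);
-- build the unique index first, without blocking concurrent writes
CREATE UNIQUE INDEX CONCURRENTLY invoices_id_uidx ON invoices (id);
-- adopt the existing index as the primary key; the index is renamed
-- to the constraint name (here invoices_pkey) with a NOTICE
ALTER TABLE invoices
  ADD CONSTRAINT invoices_pkey PRIMARY KEY USING INDEX invoices_id_uidx;
-- DROP INDEX invoices_pkey would now fail, because the constraint requires it;
-- drop the constraint instead:
-- ALTER TABLE invoices DROP CONSTRAINT invoices_pkey;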
+-- Check that non-default index options are rejected +CREATE UNIQUE INDEX cwi_uniq3_idx ON cwi_test(a desc); +ALTER TABLE cwi_test ADD UNIQUE USING INDEX cwi_uniq3_idx; -- fail +ERROR: index "cwi_uniq3_idx" column number 1 does not have default sorting behavior +LINE 1: ALTER TABLE cwi_test ADD UNIQUE USING INDEX cwi_uniq3_idx; + ^ +DETAIL: Cannot create a primary key or unique constraint using such an index. +CREATE UNIQUE INDEX cwi_uniq4_idx ON cwi_test(b collate "POSIX"); +ALTER TABLE cwi_test ADD UNIQUE USING INDEX cwi_uniq4_idx; -- fail +ERROR: index "cwi_uniq4_idx" column number 1 does not have default sorting behavior +LINE 1: ALTER TABLE cwi_test ADD UNIQUE USING INDEX cwi_uniq4_idx; + ^ +DETAIL: Cannot create a primary key or unique constraint using such an index. +DROP TABLE cwi_test; +-- ADD CONSTRAINT USING INDEX is forbidden on partitioned tables +CREATE TABLE cwi_test(a int) PARTITION BY hash (a); +create unique index on cwi_test (a); +alter table cwi_test add primary key using index cwi_test_a_idx ; +ERROR: ALTER TABLE / ADD CONSTRAINT USING INDEX is not supported on partitioned tables +DROP TABLE cwi_test; +-- PRIMARY KEY constraint cannot be backed by a NULLS NOT DISTINCT index +CREATE TABLE cwi_test(a int, b int); +CREATE UNIQUE INDEX cwi_a_nnd ON cwi_test (a) NULLS NOT DISTINCT; +ALTER TABLE cwi_test ADD PRIMARY KEY USING INDEX cwi_a_nnd; +ERROR: primary keys cannot use NULLS NOT DISTINCT indexes +DROP TABLE cwi_test; +-- +-- Check handling of indexes on system columns +-- +CREATE TABLE syscol_table (a INT); +-- System columns cannot be indexed +CREATE INDEX ON syscolcol_table (ctid); +ERROR: relation "syscolcol_table" does not exist +-- nor used in expressions +CREATE INDEX ON syscol_table ((ctid >= '(1000,0)')); +ERROR: index creation on system columns is not supported +-- nor used in predicates +CREATE INDEX ON syscol_table (a) WHERE ctid >= '(1000,0)'; +ERROR: index creation on system columns is not supported +DROP TABLE syscol_table; +-- +-- Tests for IS NULL/IS NOT NULL with b-tree indexes +-- +CREATE TABLE onek_with_null AS SELECT unique1, unique2 FROM onek; +INSERT INTO onek_with_null (unique1,unique2) VALUES (NULL, -1), (NULL, NULL); +CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2,unique1); +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = ON; +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; + count +------- + 2 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL; + count +------- + 1000 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL AND unique1 > 500; + count +------- + 499 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique1 > 500; + count +------- + 0 +(1 row) + +DROP INDEX onek_nulltest; +CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 desc,unique1); +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; + count +------- + 2 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL; + count +------- + 1000 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM 
onek_with_null WHERE unique1 IS NOT NULL AND unique1 > 500; + count +------- + 499 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique1 > 500; + count +------- + 0 +(1 row) + +DROP INDEX onek_nulltest; +CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 desc nulls last,unique1); +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; + count +------- + 2 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL; + count +------- + 1000 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL AND unique1 > 500; + count +------- + 499 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique1 > 500; + count +------- + 0 +(1 row) + +DROP INDEX onek_nulltest; +CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2 nulls first,unique1); +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL; + count +------- + 2 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL; + count +------- + 1000 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique2 IS NOT NULL; + count +------- + 1 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NOT NULL AND unique1 > 500; + count +------- + 499 +(1 row) + +SELECT count(*) FROM onek_with_null WHERE unique1 IS NULL AND unique1 > 500; + count +------- + 0 +(1 row) + +DROP INDEX onek_nulltest; +-- Check initial-positioning logic too +CREATE UNIQUE INDEX onek_nulltest ON onek_with_null (unique2); +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +SELECT unique1, unique2 FROM onek_with_null + ORDER BY unique2 LIMIT 2; + unique1 | unique2 +---------+--------- + | -1 + 147 | 0 +(2 rows) + +SELECT unique1, unique2 FROM onek_with_null WHERE unique2 >= -1 + ORDER BY unique2 LIMIT 2; + unique1 | unique2 +---------+--------- + | -1 + 147 | 0 +(2 rows) + +SELECT unique1, unique2 FROM onek_with_null WHERE unique2 >= 0 + ORDER BY unique2 LIMIT 2; + unique1 | unique2 +---------+--------- + 147 | 0 + 931 | 1 +(2 rows) + +SELECT unique1, unique2 FROM onek_with_null + ORDER BY unique2 DESC LIMIT 2; + unique1 | unique2 +---------+--------- + | + 278 | 999 +(2 rows) + +SELECT unique1, unique2 FROM onek_with_null WHERE unique2 >= -1 + ORDER BY unique2 DESC LIMIT 2; + unique1 | unique2 +---------+--------- + 278 | 999 + 0 | 998 +(2 rows) + +SELECT unique1, unique2 FROM onek_with_null WHERE unique2 < 999 + ORDER BY unique2 DESC LIMIT 2; + unique1 | unique2 +---------+--------- + 0 | 998 + 744 | 997 +(2 rows) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +DROP TABLE onek_with_null; +-- +-- Check bitmap index path planning +-- +EXPLAIN (COSTS OFF) +SELECT * FROM tenk1 + WHERE thousand = 42 AND (tenthous = 1 OR tenthous = 3 OR tenthous = 42); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Bitmap Heap Scan on tenk1 + Recheck Cond: (((thousand = 42) AND (tenthous = 1)) OR ((thousand = 42) AND (tenthous = 3)) OR ((thousand = 42) AND (tenthous = 42))) + -> BitmapOr + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: 
((thousand = 42) AND (tenthous = 1)) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: ((thousand = 42) AND (tenthous = 3)) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: ((thousand = 42) AND (tenthous = 42)) +(9 rows) + +SELECT * FROM tenk1 + WHERE thousand = 42 AND (tenthous = 1 OR tenthous = 3 OR tenthous = 42); + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 42 | 5530 | 0 | 2 | 2 | 2 | 42 | 42 | 42 | 42 | 42 | 84 | 85 | QBAAAA | SEIAAA | OOOOxx +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM tenk1 + WHERE hundred = 42 AND (thousand = 42 OR thousand = 99); + QUERY PLAN +--------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on tenk1 + Recheck Cond: ((hundred = 42) AND ((thousand = 42) OR (thousand = 99))) + -> BitmapAnd + -> Bitmap Index Scan on tenk1_hundred + Index Cond: (hundred = 42) + -> BitmapOr + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = 42) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = 99) +(11 rows) + +SELECT count(*) FROM tenk1 + WHERE hundred = 42 AND (thousand = 42 OR thousand = 99); + count +------- + 10 +(1 row) + +-- +-- Check behavior with duplicate index column contents +-- +CREATE TABLE dupindexcols AS + SELECT unique1 as id, stringu2::text as f1 FROM tenk1; +CREATE INDEX dupindexcols_i ON dupindexcols (f1, id, f1 text_pattern_ops); +ANALYZE dupindexcols; +EXPLAIN (COSTS OFF) + SELECT count(*) FROM dupindexcols + WHERE f1 BETWEEN 'WA' AND 'ZZZ' and id < 1000 and f1 ~<~ 'YX'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on dupindexcols + Recheck Cond: ((f1 >= 'WA'::text) AND (f1 <= 'ZZZ'::text) AND (id < 1000) AND (f1 ~<~ 'YX'::text)) + -> Bitmap Index Scan on dupindexcols_i + Index Cond: ((f1 >= 'WA'::text) AND (f1 <= 'ZZZ'::text) AND (id < 1000) AND (f1 ~<~ 'YX'::text)) +(5 rows) + +SELECT count(*) FROM dupindexcols + WHERE f1 BETWEEN 'WA' AND 'ZZZ' and id < 1000 and f1 ~<~ 'YX'; + count +------- + 97 +(1 row) + +-- +-- Check ordering of =ANY indexqual results (bug in 9.2.0) +-- +explain (costs off) +SELECT unique1 FROM tenk1 +WHERE unique1 IN (1,42,7) +ORDER BY unique1; + QUERY PLAN +------------------------------------------------------- + Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = ANY ('{1,42,7}'::integer[])) +(2 rows) + +SELECT unique1 FROM tenk1 +WHERE unique1 IN (1,42,7) +ORDER BY unique1; + unique1 +--------- + 1 + 7 + 42 +(3 rows) + +explain (costs off) +SELECT thousand, tenthous FROM tenk1 +WHERE thousand < 2 AND tenthous IN (1001,3000) +ORDER BY thousand; + QUERY PLAN +------------------------------------------------------- + Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: (thousand < 2) + Filter: (tenthous = ANY ('{1001,3000}'::integer[])) +(3 rows) + +SELECT thousand, tenthous FROM tenk1 +WHERE thousand < 2 AND tenthous IN (1001,3000) +ORDER BY thousand; + thousand | tenthous +----------+---------- + 0 | 3000 + 1 | 1001 +(2 rows) + +SET enable_indexonlyscan = OFF; +explain (costs off) +SELECT thousand, tenthous FROM tenk1 +WHERE thousand < 2 AND tenthous IN (1001,3000) +ORDER BY thousand; + QUERY PLAN 
+-------------------------------------------------------------------------------------- + Sort + Sort Key: thousand + -> Index Scan using tenk1_thous_tenthous on tenk1 + Index Cond: ((thousand < 2) AND (tenthous = ANY ('{1001,3000}'::integer[]))) +(4 rows) + +SELECT thousand, tenthous FROM tenk1 +WHERE thousand < 2 AND tenthous IN (1001,3000) +ORDER BY thousand; + thousand | tenthous +----------+---------- + 0 | 3000 + 1 | 1001 +(2 rows) + +RESET enable_indexonlyscan; +-- +-- Check elimination of constant-NULL subexpressions +-- +explain (costs off) + select * from tenk1 where (thousand, tenthous) in ((1,1001), (null,null)); + QUERY PLAN +------------------------------------------------------ + Index Scan using tenk1_thous_tenthous on tenk1 + Index Cond: ((thousand = 1) AND (tenthous = 1001)) +(2 rows) + +-- +-- Check matching of boolean index columns to WHERE conditions and sort keys +-- +create temp table boolindex (b bool, i int, unique(b, i), junk float); +explain (costs off) + select * from boolindex order by b, i limit 10; + QUERY PLAN +------------------------------------------------------- + Limit + -> Index Scan using boolindex_b_i_key on boolindex +(2 rows) + +explain (costs off) + select * from boolindex where b order by i limit 10; + QUERY PLAN +------------------------------------------------------- + Limit + -> Index Scan using boolindex_b_i_key on boolindex + Index Cond: (b = true) +(3 rows) + +explain (costs off) + select * from boolindex where b = true order by i desc limit 10; + QUERY PLAN +---------------------------------------------------------------- + Limit + -> Index Scan Backward using boolindex_b_i_key on boolindex + Index Cond: (b = true) +(3 rows) + +explain (costs off) + select * from boolindex where not b order by i limit 10; + QUERY PLAN +------------------------------------------------------- + Limit + -> Index Scan using boolindex_b_i_key on boolindex + Index Cond: (b = false) +(3 rows) + +explain (costs off) + select * from boolindex where b is true order by i desc limit 10; + QUERY PLAN +---------------------------------------------------------------- + Limit + -> Index Scan Backward using boolindex_b_i_key on boolindex + Index Cond: (b = true) +(3 rows) + +explain (costs off) + select * from boolindex where b is false order by i desc limit 10; + QUERY PLAN +---------------------------------------------------------------- + Limit + -> Index Scan Backward using boolindex_b_i_key on boolindex + Index Cond: (b = false) +(3 rows) + +-- +-- REINDEX (VERBOSE) +-- +CREATE TABLE reindex_verbose(id integer primary key); +\set VERBOSITY terse \\ -- suppress machine-dependent details +REINDEX (VERBOSE) TABLE reindex_verbose; +INFO: index "reindex_verbose_pkey" was reindexed +\set VERBOSITY default +DROP TABLE reindex_verbose; +-- +-- REINDEX CONCURRENTLY +-- +CREATE TABLE concur_reindex_tab (c1 int); +-- REINDEX +REINDEX TABLE concur_reindex_tab; -- notice +NOTICE: table "concur_reindex_tab" has no indexes to reindex +REINDEX (CONCURRENTLY) TABLE concur_reindex_tab; -- notice +NOTICE: table "concur_reindex_tab" has no indexes that can be reindexed concurrently +ALTER TABLE concur_reindex_tab ADD COLUMN c2 text; -- add toast index +-- Normal index with integer column +CREATE UNIQUE INDEX concur_reindex_ind1 ON concur_reindex_tab(c1); +-- Normal index with text column +CREATE INDEX concur_reindex_ind2 ON concur_reindex_tab(c2); +-- UNIQUE index with expression +CREATE UNIQUE INDEX concur_reindex_ind3 ON concur_reindex_tab(abs(c1)); +-- Duplicate column names +CREATE 
INDEX concur_reindex_ind4 ON concur_reindex_tab(c1, c1, c2); +-- Create table for check on foreign key dependence switch with indexes swapped +ALTER TABLE concur_reindex_tab ADD PRIMARY KEY USING INDEX concur_reindex_ind1; +CREATE TABLE concur_reindex_tab2 (c1 int REFERENCES concur_reindex_tab); +INSERT INTO concur_reindex_tab VALUES (1, 'a'); +INSERT INTO concur_reindex_tab VALUES (2, 'a'); +-- Reindex concurrently of exclusion constraint currently not supported +CREATE TABLE concur_reindex_tab3 (c1 int, c2 int4range, EXCLUDE USING gist (c2 WITH &&)); +INSERT INTO concur_reindex_tab3 VALUES (3, '[1,2]'); +REINDEX INDEX CONCURRENTLY concur_reindex_tab3_c2_excl; -- error +ERROR: concurrent index creation for exclusion constraints is not supported +REINDEX TABLE CONCURRENTLY concur_reindex_tab3; -- succeeds with warning +WARNING: cannot reindex exclusion constraint index "public.concur_reindex_tab3_c2_excl" concurrently, skipping +INSERT INTO concur_reindex_tab3 VALUES (4, '[2,4]'); +ERROR: conflicting key value violates exclusion constraint "concur_reindex_tab3_c2_excl" +DETAIL: Key (c2)=([2,5)) conflicts with existing key (c2)=([1,3)). +-- Check materialized views +CREATE MATERIALIZED VIEW concur_reindex_matview AS SELECT * FROM concur_reindex_tab; +-- Dependency lookup before and after the follow-up REINDEX commands. +-- These should remain consistent. +SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid,refobjid,refobjsubid) as objref, + deptype +FROM pg_depend +WHERE classid = 'pg_class'::regclass AND + objid in ('concur_reindex_tab'::regclass, + 'concur_reindex_ind1'::regclass, + 'concur_reindex_ind2'::regclass, + 'concur_reindex_ind3'::regclass, + 'concur_reindex_ind4'::regclass, + 'concur_reindex_matview'::regclass) + ORDER BY 1, 2; + obj | objref | deptype +------------------------------------------+------------------------------------------------------------+--------- + index concur_reindex_ind1 | constraint concur_reindex_ind1 on table concur_reindex_tab | i + index concur_reindex_ind2 | column c2 of table concur_reindex_tab | a + index concur_reindex_ind3 | column c1 of table concur_reindex_tab | a + index concur_reindex_ind3 | table concur_reindex_tab | a + index concur_reindex_ind4 | column c1 of table concur_reindex_tab | a + index concur_reindex_ind4 | column c2 of table concur_reindex_tab | a + materialized view concur_reindex_matview | schema public | n + table concur_reindex_tab | schema public | n +(8 rows) + +REINDEX INDEX CONCURRENTLY concur_reindex_ind1; +REINDEX TABLE CONCURRENTLY concur_reindex_tab; +REINDEX TABLE CONCURRENTLY concur_reindex_matview; +SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid,refobjid,refobjsubid) as objref, + deptype +FROM pg_depend +WHERE classid = 'pg_class'::regclass AND + objid in ('concur_reindex_tab'::regclass, + 'concur_reindex_ind1'::regclass, + 'concur_reindex_ind2'::regclass, + 'concur_reindex_ind3'::regclass, + 'concur_reindex_ind4'::regclass, + 'concur_reindex_matview'::regclass) + ORDER BY 1, 2; + obj | objref | deptype +------------------------------------------+------------------------------------------------------------+--------- + index concur_reindex_ind1 | constraint concur_reindex_ind1 on table concur_reindex_tab | i + index concur_reindex_ind2 | column c2 of table concur_reindex_tab | a + index concur_reindex_ind3 | column c1 of table concur_reindex_tab | a + index concur_reindex_ind3 | table concur_reindex_tab | a + index 
concur_reindex_ind4 | column c1 of table concur_reindex_tab | a + index concur_reindex_ind4 | column c2 of table concur_reindex_tab | a + materialized view concur_reindex_matview | schema public | n + table concur_reindex_tab | schema public | n +(8 rows) + +-- Check that comments are preserved +CREATE TABLE testcomment (i int); +CREATE INDEX testcomment_idx1 ON testcomment (i); +COMMENT ON INDEX testcomment_idx1 IS 'test comment'; +SELECT obj_description('testcomment_idx1'::regclass, 'pg_class'); + obj_description +----------------- + test comment +(1 row) + +REINDEX TABLE testcomment; +SELECT obj_description('testcomment_idx1'::regclass, 'pg_class'); + obj_description +----------------- + test comment +(1 row) + +REINDEX TABLE CONCURRENTLY testcomment ; +SELECT obj_description('testcomment_idx1'::regclass, 'pg_class'); + obj_description +----------------- + test comment +(1 row) + +DROP TABLE testcomment; +-- Check that indisclustered updates are preserved +CREATE TABLE concur_clustered(i int); +CREATE INDEX concur_clustered_i_idx ON concur_clustered(i); +ALTER TABLE concur_clustered CLUSTER ON concur_clustered_i_idx; +REINDEX TABLE CONCURRENTLY concur_clustered; +SELECT indexrelid::regclass, indisclustered FROM pg_index + WHERE indrelid = 'concur_clustered'::regclass; + indexrelid | indisclustered +------------------------+---------------- + concur_clustered_i_idx | t +(1 row) + +DROP TABLE concur_clustered; +-- Check that indisreplident updates are preserved. +CREATE TABLE concur_replident(i int NOT NULL); +CREATE UNIQUE INDEX concur_replident_i_idx ON concur_replident(i); +ALTER TABLE concur_replident REPLICA IDENTITY + USING INDEX concur_replident_i_idx; +SELECT indexrelid::regclass, indisreplident FROM pg_index + WHERE indrelid = 'concur_replident'::regclass; + indexrelid | indisreplident +------------------------+---------------- + concur_replident_i_idx | t +(1 row) + +REINDEX TABLE CONCURRENTLY concur_replident; +SELECT indexrelid::regclass, indisreplident FROM pg_index + WHERE indrelid = 'concur_replident'::regclass; + indexrelid | indisreplident +------------------------+---------------- + concur_replident_i_idx | t +(1 row) + +DROP TABLE concur_replident; +-- Check that opclass parameters are preserved +CREATE TABLE concur_appclass_tab(i tsvector, j tsvector, k tsvector); +CREATE INDEX concur_appclass_ind on concur_appclass_tab + USING gist (i tsvector_ops (siglen='1000'), j tsvector_ops (siglen='500')); +CREATE INDEX concur_appclass_ind_2 on concur_appclass_tab + USING gist (k tsvector_ops (siglen='300'), j tsvector_ops); +REINDEX TABLE CONCURRENTLY concur_appclass_tab; +\d concur_appclass_tab + Table "public.concur_appclass_tab" + Column | Type | Collation | Nullable | Default +--------+----------+-----------+----------+--------- + i | tsvector | | | + j | tsvector | | | + k | tsvector | | | +Indexes: + "concur_appclass_ind" gist (i tsvector_ops (siglen='1000'), j tsvector_ops (siglen='500')) + "concur_appclass_ind_2" gist (k tsvector_ops (siglen='300'), j) + +DROP TABLE concur_appclass_tab; +-- Partitions +-- Create some partitioned tables +CREATE TABLE concur_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); +CREATE TABLE concur_reindex_part_0 PARTITION OF concur_reindex_part + FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); +CREATE TABLE concur_reindex_part_0_1 PARTITION OF concur_reindex_part_0 + FOR VALUES IN (1); +CREATE TABLE concur_reindex_part_0_2 PARTITION OF concur_reindex_part_0 + FOR VALUES IN (2); +-- This partitioned table will have no partitions. 
+CREATE TABLE concur_reindex_part_10 PARTITION OF concur_reindex_part + FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); +-- Create some partitioned indexes +CREATE INDEX concur_reindex_part_index ON ONLY concur_reindex_part (c1); +CREATE INDEX concur_reindex_part_index_0 ON ONLY concur_reindex_part_0 (c1); +ALTER INDEX concur_reindex_part_index ATTACH PARTITION concur_reindex_part_index_0; +-- This partitioned index will have no partitions. +CREATE INDEX concur_reindex_part_index_10 ON ONLY concur_reindex_part_10 (c1); +ALTER INDEX concur_reindex_part_index ATTACH PARTITION concur_reindex_part_index_10; +CREATE INDEX concur_reindex_part_index_0_1 ON ONLY concur_reindex_part_0_1 (c1); +ALTER INDEX concur_reindex_part_index_0 ATTACH PARTITION concur_reindex_part_index_0_1; +CREATE INDEX concur_reindex_part_index_0_2 ON ONLY concur_reindex_part_0_2 (c1); +ALTER INDEX concur_reindex_part_index_0 ATTACH PARTITION concur_reindex_part_index_0_2; +SELECT relid, parentrelid, level FROM pg_partition_tree('concur_reindex_part_index') + ORDER BY relid, level; + relid | parentrelid | level +-------------------------------+-----------------------------+------- + concur_reindex_part_index | | 0 + concur_reindex_part_index_0 | concur_reindex_part_index | 1 + concur_reindex_part_index_10 | concur_reindex_part_index | 1 + concur_reindex_part_index_0_1 | concur_reindex_part_index_0 | 2 + concur_reindex_part_index_0_2 | concur_reindex_part_index_0 | 2 +(5 rows) + +SELECT relid, parentrelid, level FROM pg_partition_tree('concur_reindex_part_index') + ORDER BY relid, level; + relid | parentrelid | level +-------------------------------+-----------------------------+------- + concur_reindex_part_index | | 0 + concur_reindex_part_index_0 | concur_reindex_part_index | 1 + concur_reindex_part_index_10 | concur_reindex_part_index | 1 + concur_reindex_part_index_0_1 | concur_reindex_part_index_0 | 2 + concur_reindex_part_index_0_2 | concur_reindex_part_index_0 | 2 +(5 rows) + +-- REINDEX should preserve dependencies of partition tree. 
+SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid,refobjid,refobjsubid) as objref, + deptype +FROM pg_depend +WHERE classid = 'pg_class'::regclass AND + objid in ('concur_reindex_part'::regclass, + 'concur_reindex_part_0'::regclass, + 'concur_reindex_part_0_1'::regclass, + 'concur_reindex_part_0_2'::regclass, + 'concur_reindex_part_index'::regclass, + 'concur_reindex_part_index_0'::regclass, + 'concur_reindex_part_index_0_1'::regclass, + 'concur_reindex_part_index_0_2'::regclass) + ORDER BY 1, 2; + obj | objref | deptype +------------------------------------------+--------------------------------------------+--------- + column c1 of table concur_reindex_part | table concur_reindex_part | i + column c2 of table concur_reindex_part_0 | table concur_reindex_part_0 | i + index concur_reindex_part_index | column c1 of table concur_reindex_part | a + index concur_reindex_part_index_0 | column c1 of table concur_reindex_part_0 | a + index concur_reindex_part_index_0 | index concur_reindex_part_index | P + index concur_reindex_part_index_0 | table concur_reindex_part_0 | S + index concur_reindex_part_index_0_1 | column c1 of table concur_reindex_part_0_1 | a + index concur_reindex_part_index_0_1 | index concur_reindex_part_index_0 | P + index concur_reindex_part_index_0_1 | table concur_reindex_part_0_1 | S + index concur_reindex_part_index_0_2 | column c1 of table concur_reindex_part_0_2 | a + index concur_reindex_part_index_0_2 | index concur_reindex_part_index_0 | P + index concur_reindex_part_index_0_2 | table concur_reindex_part_0_2 | S + table concur_reindex_part | schema public | n + table concur_reindex_part_0 | schema public | n + table concur_reindex_part_0 | table concur_reindex_part | a + table concur_reindex_part_0_1 | schema public | n + table concur_reindex_part_0_1 | table concur_reindex_part_0 | a + table concur_reindex_part_0_2 | schema public | n + table concur_reindex_part_0_2 | table concur_reindex_part_0 | a +(19 rows) + +REINDEX INDEX CONCURRENTLY concur_reindex_part_index_0_1; +REINDEX INDEX CONCURRENTLY concur_reindex_part_index_0_2; +SELECT relid, parentrelid, level FROM pg_partition_tree('concur_reindex_part_index') + ORDER BY relid, level; + relid | parentrelid | level +-------------------------------+-----------------------------+------- + concur_reindex_part_index | | 0 + concur_reindex_part_index_0 | concur_reindex_part_index | 1 + concur_reindex_part_index_10 | concur_reindex_part_index | 1 + concur_reindex_part_index_0_1 | concur_reindex_part_index_0 | 2 + concur_reindex_part_index_0_2 | concur_reindex_part_index_0 | 2 +(5 rows) + +REINDEX TABLE CONCURRENTLY concur_reindex_part_0_1; +REINDEX TABLE CONCURRENTLY concur_reindex_part_0_2; +SELECT pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid,refobjid,refobjsubid) as objref, + deptype +FROM pg_depend +WHERE classid = 'pg_class'::regclass AND + objid in ('concur_reindex_part'::regclass, + 'concur_reindex_part_0'::regclass, + 'concur_reindex_part_0_1'::regclass, + 'concur_reindex_part_0_2'::regclass, + 'concur_reindex_part_index'::regclass, + 'concur_reindex_part_index_0'::regclass, + 'concur_reindex_part_index_0_1'::regclass, + 'concur_reindex_part_index_0_2'::regclass) + ORDER BY 1, 2; + obj | objref | deptype +------------------------------------------+--------------------------------------------+--------- + column c1 of table concur_reindex_part | table concur_reindex_part | i + column c2 of table concur_reindex_part_0 | table 
concur_reindex_part_0 | i + index concur_reindex_part_index | column c1 of table concur_reindex_part | a + index concur_reindex_part_index_0 | column c1 of table concur_reindex_part_0 | a + index concur_reindex_part_index_0 | index concur_reindex_part_index | P + index concur_reindex_part_index_0 | table concur_reindex_part_0 | S + index concur_reindex_part_index_0_1 | column c1 of table concur_reindex_part_0_1 | a + index concur_reindex_part_index_0_1 | index concur_reindex_part_index_0 | P + index concur_reindex_part_index_0_1 | table concur_reindex_part_0_1 | S + index concur_reindex_part_index_0_2 | column c1 of table concur_reindex_part_0_2 | a + index concur_reindex_part_index_0_2 | index concur_reindex_part_index_0 | P + index concur_reindex_part_index_0_2 | table concur_reindex_part_0_2 | S + table concur_reindex_part | schema public | n + table concur_reindex_part_0 | schema public | n + table concur_reindex_part_0 | table concur_reindex_part | a + table concur_reindex_part_0_1 | schema public | n + table concur_reindex_part_0_1 | table concur_reindex_part_0 | a + table concur_reindex_part_0_2 | schema public | n + table concur_reindex_part_0_2 | table concur_reindex_part_0 | a +(19 rows) + +SELECT relid, parentrelid, level FROM pg_partition_tree('concur_reindex_part_index') + ORDER BY relid, level; + relid | parentrelid | level +-------------------------------+-----------------------------+------- + concur_reindex_part_index | | 0 + concur_reindex_part_index_0 | concur_reindex_part_index | 1 + concur_reindex_part_index_10 | concur_reindex_part_index | 1 + concur_reindex_part_index_0_1 | concur_reindex_part_index_0 | 2 + concur_reindex_part_index_0_2 | concur_reindex_part_index_0 | 2 +(5 rows) + +-- REINDEX for partitioned indexes +-- REINDEX TABLE fails for partitioned indexes +-- Top-most parent index +REINDEX TABLE concur_reindex_part_index; -- error +ERROR: "concur_reindex_part_index" is not a table or materialized view +REINDEX TABLE CONCURRENTLY concur_reindex_part_index; -- error +ERROR: "concur_reindex_part_index" is not a table or materialized view +-- Partitioned index with no leaves +REINDEX TABLE concur_reindex_part_index_10; -- error +ERROR: "concur_reindex_part_index_10" is not a table or materialized view +REINDEX TABLE CONCURRENTLY concur_reindex_part_index_10; -- error +ERROR: "concur_reindex_part_index_10" is not a table or materialized view +-- Cannot run in a transaction block +BEGIN; +REINDEX INDEX concur_reindex_part_index; +ERROR: REINDEX INDEX cannot run inside a transaction block +CONTEXT: while reindexing partitioned index "public.concur_reindex_part_index" +ROLLBACK; +-- Helper functions to track changes of relfilenodes in a partition tree. +-- Create a table tracking the relfilenode state. +CREATE OR REPLACE FUNCTION create_relfilenode_part(relname text, indname text) + RETURNS VOID AS + $func$ + BEGIN + EXECUTE format(' + CREATE TABLE %I AS + SELECT oid, relname, relfilenode, relkind, reltoastrelid + FROM pg_class + WHERE oid IN + (SELECT relid FROM pg_partition_tree(''%I''));', + relname, indname); + END + $func$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION compare_relfilenode_part(tabname text) + RETURNS TABLE (relname name, relkind "char", state text) AS + $func$ + BEGIN + RETURN QUERY EXECUTE + format( + 'SELECT b.relname, + b.relkind, + CASE WHEN a.relfilenode = b.relfilenode THEN ''relfilenode is unchanged'' + ELSE ''relfilenode has changed'' END + -- Do not join with OID here as CONCURRENTLY changes it. 
+ FROM %I b JOIN pg_class a ON b.relname = a.relname + ORDER BY 1;', tabname); + END + $func$ LANGUAGE plpgsql; +-- Check that expected relfilenodes are changed, non-concurrent case. +SELECT create_relfilenode_part('reindex_index_status', 'concur_reindex_part_index'); + create_relfilenode_part +------------------------- + +(1 row) + +REINDEX INDEX concur_reindex_part_index; +SELECT * FROM compare_relfilenode_part('reindex_index_status'); + relname | relkind | state +-------------------------------+---------+-------------------------- + concur_reindex_part_index | I | relfilenode is unchanged + concur_reindex_part_index_0 | I | relfilenode is unchanged + concur_reindex_part_index_0_1 | i | relfilenode has changed + concur_reindex_part_index_0_2 | i | relfilenode has changed + concur_reindex_part_index_10 | I | relfilenode is unchanged +(5 rows) + +DROP TABLE reindex_index_status; +-- concurrent case. +SELECT create_relfilenode_part('reindex_index_status', 'concur_reindex_part_index'); + create_relfilenode_part +------------------------- + +(1 row) + +REINDEX INDEX CONCURRENTLY concur_reindex_part_index; +SELECT * FROM compare_relfilenode_part('reindex_index_status'); + relname | relkind | state +-------------------------------+---------+-------------------------- + concur_reindex_part_index | I | relfilenode is unchanged + concur_reindex_part_index_0 | I | relfilenode is unchanged + concur_reindex_part_index_0_1 | i | relfilenode has changed + concur_reindex_part_index_0_2 | i | relfilenode has changed + concur_reindex_part_index_10 | I | relfilenode is unchanged +(5 rows) + +DROP TABLE reindex_index_status; +-- REINDEX for partitioned tables +-- REINDEX INDEX fails for partitioned tables +-- Top-most parent +REINDEX INDEX concur_reindex_part; -- error +ERROR: "concur_reindex_part" is not an index +REINDEX INDEX CONCURRENTLY concur_reindex_part; -- error +ERROR: "concur_reindex_part" is not an index +-- Partitioned with no leaves +REINDEX INDEX concur_reindex_part_10; -- error +ERROR: "concur_reindex_part_10" is not an index +REINDEX INDEX CONCURRENTLY concur_reindex_part_10; -- error +ERROR: "concur_reindex_part_10" is not an index +-- Cannot run in a transaction block +BEGIN; +REINDEX TABLE concur_reindex_part; +ERROR: REINDEX TABLE cannot run inside a transaction block +CONTEXT: while reindexing partitioned table "public.concur_reindex_part" +ROLLBACK; +-- Check that expected relfilenodes are changed, non-concurrent case. +-- Note that the partition tree changes of the *indexes* need to be checked. +SELECT create_relfilenode_part('reindex_index_status', 'concur_reindex_part_index'); + create_relfilenode_part +------------------------- + +(1 row) + +REINDEX TABLE concur_reindex_part; +SELECT * FROM compare_relfilenode_part('reindex_index_status'); + relname | relkind | state +-------------------------------+---------+-------------------------- + concur_reindex_part_index | I | relfilenode is unchanged + concur_reindex_part_index_0 | I | relfilenode is unchanged + concur_reindex_part_index_0_1 | i | relfilenode has changed + concur_reindex_part_index_0_2 | i | relfilenode has changed + concur_reindex_part_index_10 | I | relfilenode is unchanged +(5 rows) + +DROP TABLE reindex_index_status; +-- concurrent case. 
+SELECT create_relfilenode_part('reindex_index_status', 'concur_reindex_part_index'); + create_relfilenode_part +------------------------- + +(1 row) + +REINDEX TABLE CONCURRENTLY concur_reindex_part; +SELECT * FROM compare_relfilenode_part('reindex_index_status'); + relname | relkind | state +-------------------------------+---------+-------------------------- + concur_reindex_part_index | I | relfilenode is unchanged + concur_reindex_part_index_0 | I | relfilenode is unchanged + concur_reindex_part_index_0_1 | i | relfilenode has changed + concur_reindex_part_index_0_2 | i | relfilenode has changed + concur_reindex_part_index_10 | I | relfilenode is unchanged +(5 rows) + +DROP TABLE reindex_index_status; +DROP FUNCTION create_relfilenode_part; +DROP FUNCTION compare_relfilenode_part; +-- Cleanup of partition tree used for REINDEX test. +DROP TABLE concur_reindex_part; +-- Check errors +-- Cannot run inside a transaction block +BEGIN; +REINDEX TABLE CONCURRENTLY concur_reindex_tab; +ERROR: REINDEX CONCURRENTLY cannot run inside a transaction block +COMMIT; +REINDEX TABLE CONCURRENTLY pg_class; -- no catalog relation +ERROR: cannot reindex system catalogs concurrently +REINDEX INDEX CONCURRENTLY pg_class_oid_index; -- no catalog index +ERROR: cannot reindex system catalogs concurrently +-- These are the toast table and index of pg_authid. +REINDEX TABLE CONCURRENTLY pg_toast.pg_toast_1260; -- no catalog toast table +ERROR: cannot reindex system catalogs concurrently +REINDEX INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; -- no catalog toast index +ERROR: cannot reindex system catalogs concurrently +REINDEX SYSTEM CONCURRENTLY postgres; -- not allowed for SYSTEM +ERROR: cannot reindex system catalogs concurrently +REINDEX (CONCURRENTLY) SYSTEM postgres; -- ditto +ERROR: cannot reindex system catalogs concurrently +REINDEX (CONCURRENTLY) SYSTEM; -- ditto +ERROR: cannot reindex system catalogs concurrently +-- Warns about catalog relations +REINDEX SCHEMA CONCURRENTLY pg_catalog; +WARNING: cannot reindex system catalogs concurrently, skipping all +-- Not the current database +REINDEX DATABASE not_current_database; +ERROR: can only reindex the currently open database +-- Check the relation status, there should not be invalid indexes +\d concur_reindex_tab + Table "public.concur_reindex_tab" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | not null | + c2 | text | | | +Indexes: + "concur_reindex_ind1" PRIMARY KEY, btree (c1) + "concur_reindex_ind2" btree (c2) + "concur_reindex_ind3" UNIQUE, btree (abs(c1)) + "concur_reindex_ind4" btree (c1, c1, c2) +Referenced by: + TABLE "concur_reindex_tab2" CONSTRAINT "concur_reindex_tab2_c1_fkey" FOREIGN KEY (c1) REFERENCES concur_reindex_tab(c1) + +DROP MATERIALIZED VIEW concur_reindex_matview; +DROP TABLE concur_reindex_tab, concur_reindex_tab2, concur_reindex_tab3; +-- Check handling of invalid indexes +CREATE TABLE concur_reindex_tab4 (c1 int); +INSERT INTO concur_reindex_tab4 VALUES (1), (1), (2); +-- This trick creates an invalid index. +CREATE UNIQUE INDEX CONCURRENTLY concur_reindex_ind5 ON concur_reindex_tab4 (c1); +ERROR: could not create unique index "concur_reindex_ind5" +DETAIL: Key (c1)=(1) is duplicated. +-- Reindexing concurrently this index fails with the same failure. +-- The extra index created is itself invalid, and can be dropped. 
+REINDEX INDEX CONCURRENTLY concur_reindex_ind5; +ERROR: could not create unique index "concur_reindex_ind5_ccnew" +DETAIL: Key (c1)=(1) is duplicated. +\d concur_reindex_tab4 + Table "public.concur_reindex_tab4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | +Indexes: + "concur_reindex_ind5" UNIQUE, btree (c1) INVALID + "concur_reindex_ind5_ccnew" UNIQUE, btree (c1) INVALID + +DROP INDEX concur_reindex_ind5_ccnew; +-- This makes the previous failure go away, so the index can become valid. +DELETE FROM concur_reindex_tab4 WHERE c1 = 1; +-- The invalid index is not processed when running REINDEX TABLE. +REINDEX TABLE CONCURRENTLY concur_reindex_tab4; +WARNING: cannot reindex invalid index "public.concur_reindex_ind5" concurrently, skipping +NOTICE: table "concur_reindex_tab4" has no indexes that can be reindexed concurrently +\d concur_reindex_tab4 + Table "public.concur_reindex_tab4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | +Indexes: + "concur_reindex_ind5" UNIQUE, btree (c1) INVALID + +-- But it is fixed with REINDEX INDEX. +REINDEX INDEX CONCURRENTLY concur_reindex_ind5; +\d concur_reindex_tab4 + Table "public.concur_reindex_tab4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | +Indexes: + "concur_reindex_ind5" UNIQUE, btree (c1) + +DROP TABLE concur_reindex_tab4; +-- Check handling of indexes with expressions and predicates. The +-- definitions of the rebuilt indexes should match the original +-- definitions. +CREATE TABLE concur_exprs_tab (c1 int , c2 boolean); +INSERT INTO concur_exprs_tab (c1, c2) VALUES (1369652450, FALSE), + (414515746, TRUE), + (897778963, FALSE); +CREATE UNIQUE INDEX concur_exprs_index_expr + ON concur_exprs_tab ((c1::text COLLATE "C")); +CREATE UNIQUE INDEX concur_exprs_index_pred ON concur_exprs_tab (c1) + WHERE (c1::text > 500000000::text COLLATE "C"); +CREATE UNIQUE INDEX concur_exprs_index_pred_2 + ON concur_exprs_tab ((1 / c1)) + WHERE ('-H') >= (c2::TEXT) COLLATE "C"; +ALTER INDEX concur_exprs_index_expr ALTER COLUMN 1 SET STATISTICS 100; +ANALYZE concur_exprs_tab; +SELECT starelid::regclass, count(*) FROM pg_statistic WHERE starelid IN ( + 'concur_exprs_index_expr'::regclass, + 'concur_exprs_index_pred'::regclass, + 'concur_exprs_index_pred_2'::regclass) + GROUP BY starelid ORDER BY starelid::regclass::text; + starelid | count +-------------------------+------- + concur_exprs_index_expr | 1 +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_expr'::regclass); + pg_get_indexdef +--------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_expr ON public.concur_exprs_tab USING btree (((c1)::text) COLLATE "C") +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_pred'::regclass); + pg_get_indexdef +---------------------------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_pred ON public.concur_exprs_tab USING btree (c1) WHERE ((c1)::text > ((500000000)::text COLLATE "C")) +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_pred_2'::regclass); + pg_get_indexdef +-------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE 
INDEX concur_exprs_index_pred_2 ON public.concur_exprs_tab USING btree (((1 / c1))) WHERE ('-H'::text >= ((c2)::text COLLATE "C")) +(1 row) + +REINDEX TABLE CONCURRENTLY concur_exprs_tab; +SELECT pg_get_indexdef('concur_exprs_index_expr'::regclass); + pg_get_indexdef +--------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_expr ON public.concur_exprs_tab USING btree (((c1)::text) COLLATE "C") +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_pred'::regclass); + pg_get_indexdef +---------------------------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_pred ON public.concur_exprs_tab USING btree (c1) WHERE ((c1)::text > ((500000000)::text COLLATE "C")) +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_pred_2'::regclass); + pg_get_indexdef +-------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON public.concur_exprs_tab USING btree (((1 / c1))) WHERE ('-H'::text >= ((c2)::text COLLATE "C")) +(1 row) + +-- ALTER TABLE recreates the indexes, which should keep their collations. +ALTER TABLE concur_exprs_tab ALTER c2 TYPE TEXT; +SELECT pg_get_indexdef('concur_exprs_index_expr'::regclass); + pg_get_indexdef +--------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_expr ON public.concur_exprs_tab USING btree (((c1)::text) COLLATE "C") +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_pred'::regclass); + pg_get_indexdef +---------------------------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX concur_exprs_index_pred ON public.concur_exprs_tab USING btree (c1) WHERE ((c1)::text > ((500000000)::text COLLATE "C")) +(1 row) + +SELECT pg_get_indexdef('concur_exprs_index_pred_2'::regclass); + pg_get_indexdef +------------------------------------------------------------------------------------------------------------------------------------------ + CREATE UNIQUE INDEX concur_exprs_index_pred_2 ON public.concur_exprs_tab USING btree (((1 / c1))) WHERE ('-H'::text >= (c2 COLLATE "C")) +(1 row) + +-- Statistics should remain intact. +SELECT starelid::regclass, count(*) FROM pg_statistic WHERE starelid IN ( + 'concur_exprs_index_expr'::regclass, + 'concur_exprs_index_pred'::regclass, + 'concur_exprs_index_pred_2'::regclass) + GROUP BY starelid ORDER BY starelid::regclass::text; + starelid | count +-------------------------+------- + concur_exprs_index_expr | 1 +(1 row) + +-- attstattarget should remain intact +SELECT attrelid::regclass, attnum, attstattarget + FROM pg_attribute WHERE attrelid IN ( + 'concur_exprs_index_expr'::regclass, + 'concur_exprs_index_pred'::regclass, + 'concur_exprs_index_pred_2'::regclass) + ORDER BY attrelid::regclass::text, attnum; + attrelid | attnum | attstattarget +---------------------------+--------+--------------- + concur_exprs_index_expr | 1 | 100 + concur_exprs_index_pred | 1 | -1 + concur_exprs_index_pred_2 | 1 | -1 +(3 rows) + +DROP TABLE concur_exprs_tab; +-- Temporary tables and on-commit actions, where CONCURRENTLY is ignored. +-- ON COMMIT PRESERVE ROWS, the default. 
+CREATE TEMP TABLE concur_temp_tab_1 (c1 int, c2 text) + ON COMMIT PRESERVE ROWS; +INSERT INTO concur_temp_tab_1 VALUES (1, 'foo'), (2, 'bar'); +CREATE INDEX concur_temp_ind_1 ON concur_temp_tab_1(c2); +REINDEX TABLE CONCURRENTLY concur_temp_tab_1; +REINDEX INDEX CONCURRENTLY concur_temp_ind_1; +-- Still fails in transaction blocks +BEGIN; +REINDEX INDEX CONCURRENTLY concur_temp_ind_1; +ERROR: REINDEX CONCURRENTLY cannot run inside a transaction block +COMMIT; +-- ON COMMIT DELETE ROWS +CREATE TEMP TABLE concur_temp_tab_2 (c1 int, c2 text) + ON COMMIT DELETE ROWS; +CREATE INDEX concur_temp_ind_2 ON concur_temp_tab_2(c2); +REINDEX TABLE CONCURRENTLY concur_temp_tab_2; +REINDEX INDEX CONCURRENTLY concur_temp_ind_2; +-- ON COMMIT DROP +BEGIN; +CREATE TEMP TABLE concur_temp_tab_3 (c1 int, c2 text) + ON COMMIT PRESERVE ROWS; +INSERT INTO concur_temp_tab_3 VALUES (1, 'foo'), (2, 'bar'); +CREATE INDEX concur_temp_ind_3 ON concur_temp_tab_3(c2); +-- Fails when running in a transaction +REINDEX INDEX CONCURRENTLY concur_temp_ind_3; +ERROR: REINDEX CONCURRENTLY cannot run inside a transaction block +COMMIT; +-- REINDEX SCHEMA processes all temporary relations +CREATE TABLE reindex_temp_before AS +SELECT oid, relname, relfilenode, relkind, reltoastrelid + FROM pg_class + WHERE relname IN ('concur_temp_ind_1', 'concur_temp_ind_2'); +SELECT pg_my_temp_schema()::regnamespace as temp_schema_name \gset +REINDEX SCHEMA CONCURRENTLY :temp_schema_name; +SELECT b.relname, + b.relkind, + CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' + ELSE 'relfilenode has changed' END + FROM reindex_temp_before b JOIN pg_class a ON b.oid = a.oid + ORDER BY 1; + relname | relkind | case +-------------------+---------+------------------------- + concur_temp_ind_1 | i | relfilenode has changed + concur_temp_ind_2 | i | relfilenode has changed +(2 rows) + +DROP TABLE concur_temp_tab_1, concur_temp_tab_2, reindex_temp_before; +-- +-- REINDEX SCHEMA +-- +REINDEX SCHEMA schema_to_reindex; -- failure, schema does not exist +ERROR: schema "schema_to_reindex" does not exist +CREATE SCHEMA schema_to_reindex; +SET search_path = 'schema_to_reindex'; +CREATE TABLE table1(col1 SERIAL PRIMARY KEY); +INSERT INTO table1 SELECT generate_series(1,400); +CREATE TABLE table2(col1 SERIAL PRIMARY KEY, col2 TEXT NOT NULL); +INSERT INTO table2 SELECT generate_series(1,400), 'abc'; +CREATE INDEX ON table2(col2); +CREATE MATERIALIZED VIEW matview AS SELECT col1 FROM table2; +CREATE INDEX ON matview(col1); +CREATE VIEW view AS SELECT col2 FROM table2; +CREATE TABLE reindex_before AS +SELECT oid, relname, relfilenode, relkind, reltoastrelid + FROM pg_class + where relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'schema_to_reindex'); +INSERT INTO reindex_before +SELECT oid, 'pg_toast_TABLE', relfilenode, relkind, reltoastrelid +FROM pg_class WHERE oid IN + (SELECT reltoastrelid FROM reindex_before WHERE reltoastrelid > 0); +INSERT INTO reindex_before +SELECT oid, 'pg_toast_TABLE_index', relfilenode, relkind, reltoastrelid +FROM pg_class where oid in + (select indexrelid from pg_index where indrelid in + (select reltoastrelid from reindex_before where reltoastrelid > 0)); +REINDEX SCHEMA schema_to_reindex; +CREATE TABLE reindex_after AS SELECT oid, relname, relfilenode, relkind + FROM pg_class + where relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'schema_to_reindex'); +SELECT b.relname, + b.relkind, + CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' + ELSE 'relfilenode has changed' 
END + FROM reindex_before b JOIN pg_class a ON b.oid = a.oid + ORDER BY 1; + relname | relkind | case +----------------------+---------+-------------------------- + matview | m | relfilenode is unchanged + matview_col1_idx | i | relfilenode has changed + pg_toast_TABLE | t | relfilenode is unchanged + pg_toast_TABLE_index | i | relfilenode has changed + table1 | r | relfilenode is unchanged + table1_col1_seq | S | relfilenode is unchanged + table1_pkey | i | relfilenode has changed + table2 | r | relfilenode is unchanged + table2_col1_seq | S | relfilenode is unchanged + table2_col2_idx | i | relfilenode has changed + table2_pkey | i | relfilenode has changed + view | v | relfilenode is unchanged +(12 rows) + +REINDEX SCHEMA schema_to_reindex; +BEGIN; +REINDEX SCHEMA schema_to_reindex; -- failure, cannot run in a transaction +ERROR: REINDEX SCHEMA cannot run inside a transaction block +END; +-- concurrently +REINDEX SCHEMA CONCURRENTLY schema_to_reindex; +-- Failure for unauthorized user +CREATE ROLE regress_reindexuser NOLOGIN; +SET SESSION ROLE regress_reindexuser; +REINDEX SCHEMA schema_to_reindex; +ERROR: must be owner of schema schema_to_reindex +-- Permission failures with toast tables and indexes (pg_authid here) +RESET ROLE; +GRANT USAGE ON SCHEMA pg_toast TO regress_reindexuser; +SET SESSION ROLE regress_reindexuser; +REINDEX TABLE pg_toast.pg_toast_1260; +ERROR: must be owner of table pg_toast_1260 +REINDEX INDEX pg_toast.pg_toast_1260_index; +ERROR: must be owner of index pg_toast_1260_index +-- Clean up +RESET ROLE; +REVOKE USAGE ON SCHEMA pg_toast FROM regress_reindexuser; +DROP ROLE regress_reindexuser; +DROP SCHEMA schema_to_reindex CASCADE; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to table table1 +drop cascades to table table2 +drop cascades to materialized view matview +drop cascades to view view +drop cascades to table reindex_before +drop cascades to table reindex_after diff --git a/src/test/regress/expected/create_index_spgist.out b/src/test/regress/expected/create_index_spgist.out new file mode 100644 index 0000000..5c04df9 --- /dev/null +++ b/src/test/regress/expected/create_index_spgist.out @@ -0,0 +1,1371 @@ +-- +-- SP-GiST index tests +-- +CREATE TABLE quad_point_tbl AS + SELECT point(unique1,unique2) AS p FROM tenk1; +INSERT INTO quad_point_tbl + SELECT '(333.0,400.0)'::point FROM generate_series(1,1000); +INSERT INTO quad_point_tbl VALUES (NULL), (NULL), (NULL); +CREATE INDEX sp_quad_ind ON quad_point_tbl USING spgist (p); +CREATE TABLE kd_point_tbl AS SELECT * FROM quad_point_tbl; +CREATE INDEX sp_kd_ind ON kd_point_tbl USING spgist (p kd_point_ops); +CREATE TABLE radix_text_tbl AS + SELECT name AS t FROM road WHERE name !~ '^[0-9]'; +INSERT INTO radix_text_tbl + SELECT 'P0123456789abcdef' FROM generate_series(1,1000); +INSERT INTO radix_text_tbl VALUES ('P0123456789abcde'); +INSERT INTO radix_text_tbl VALUES ('P0123456789abcdefF'); +CREATE INDEX sp_radix_ind ON radix_text_tbl USING spgist (t); +-- get non-indexed results for comparison purposes +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; + count +------- + 3 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p IS NOT NULL; + count +------- + 11000 +(1 row) + +SELECT count(*) FROM quad_point_tbl; + count +------- + 11003 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + count +------- + 1057 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE box 
'(200,200,1000,1000)' @> p; + count +------- + 1057 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; + count +------- + 6000 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p >> '(5000, 4000)'; + count +------- + 4999 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p <<| '(5000, 4000)'; + count +------- + 5000 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p |>> '(5000, 4000)'; + count +------- + 5999 +(1 row) + +SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; + count +------- + 1 +(1 row) + +CREATE TEMP TABLE quad_point_tbl_ord_seq1 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; +CREATE TEMP TABLE quad_point_tbl_ord_seq2 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +CREATE TEMP TABLE quad_point_tbl_ord_seq3 AS +SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; + count +------- + 1000 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcde'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdefF'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t < 'Aztec Ct '; + count +------- + 272 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t ~<~ 'Aztec Ct '; + count +------- + 272 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t <= 'Aztec Ct '; + count +------- + 273 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t ~<=~ 'Aztec Ct '; + count +------- + 273 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'Aztec Ct '; + count +------- + 1 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'Worth St '; + count +------- + 2 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t >= 'Worth St '; + count +------- + 50 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t ~>=~ 'Worth St '; + count +------- + 50 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t > 'Worth St '; + count +------- + 48 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; + count +------- + 48 +(1 row) + +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + count +------- + 2 +(1 row) + +-- Now check the results from plain indexscan +SET enable_seqscan = OFF; +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p IS NULL) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p IS NOT NULL; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p IS NOT NULL) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p IS NOT NULL; + count +------- + 11000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl +(2 rows) + +SELECT count(*) FROM quad_point_tbl; + count +------- + 11003 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl 
WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p << '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; + count +------- + 6000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p >> '(5000, 4000)'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p >> '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p >> '(5000, 4000)'; + count +------- + 4999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p <<| '(5000, 4000)'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p <<| '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p <<| '(5000, 4000)'; + count +------- + 5000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p |>> '(5000, 4000)'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p |>> '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p |>> '(5000, 4000)'; + count +------- + 5999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p ~= '(4585,365)'::point) +(3 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; + QUERY PLAN +----------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Order By: (p <-> '(0,0)'::point) +(3 rows) + +CREATE TEMP TABLE quad_point_tbl_ord_idx1 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl; +SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN quad_point_tbl_ord_idx1 idx +ON seq.n = idx.n +WHERE seq.dist IS DISTINCT FROM idx.dist; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +----------------------------------------------------------- 
+ WindowAgg + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) + Order By: (p <-> '(0,0)'::point) +(4 rows) + +CREATE TEMP TABLE quad_point_tbl_ord_idx2 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN quad_point_tbl_ord_idx2 idx +ON seq.n = idx.n +WHERE seq.dist IS DISTINCT FROM idx.dist; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; + QUERY PLAN +----------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_quad_ind on quad_point_tbl + Index Cond: (p IS NOT NULL) + Order By: (p <-> '(333,400)'::point) +(4 rows) + +CREATE TEMP TABLE quad_point_tbl_ord_idx3 AS +SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM quad_point_tbl WHERE p IS NOT NULL; +SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN quad_point_tbl_ord_idx3 idx +ON seq.n = idx.n +WHERE seq.dist IS DISTINCT FROM idx.dist; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +--------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p; + QUERY PLAN +--------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p << '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p << '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p << '(5000, 4000)'; + count +------- + 6000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p >> '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p >> '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p >> '(5000, 4000)'; + count +------- + 4999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p <<| '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p <<| '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p <<| '(5000, 4000)'; + count +------- + 5000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p |>> '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p |>> '(5000,4000)'::point) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p |>> '(5000, 
4000)'; + count +------- + 5999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p ~= '(4585,365)'::point) +(3 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl; + QUERY PLAN +------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Order By: (p <-> '(0,0)'::point) +(3 rows) + +CREATE TEMP TABLE kd_point_tbl_ord_idx1 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl; +SELECT * FROM quad_point_tbl_ord_seq1 seq FULL JOIN kd_point_tbl_ord_idx1 idx +ON seq.n = idx.n +WHERE seq.dist IS DISTINCT FROM idx.dist; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +--------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p <@ '(1000,1000),(200,200)'::box) + Order By: (p <-> '(0,0)'::point) +(4 rows) + +CREATE TEMP TABLE kd_point_tbl_ord_idx2 AS +SELECT row_number() OVER (ORDER BY p <-> '0,0') n, p <-> '0,0' dist, p +FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; +SELECT * FROM quad_point_tbl_ord_seq2 seq FULL JOIN kd_point_tbl_ord_idx2 idx +ON seq.n = idx.n +WHERE seq.dist IS DISTINCT FROM idx.dist; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM kd_point_tbl WHERE p IS NOT NULL; + QUERY PLAN +------------------------------------------------------- + WindowAgg + -> Index Only Scan using sp_kd_ind on kd_point_tbl + Index Cond: (p IS NOT NULL) + Order By: (p <-> '(333,400)'::point) +(4 rows) + +CREATE TEMP TABLE kd_point_tbl_ord_idx3 AS +SELECT row_number() OVER (ORDER BY p <-> '333,400') n, p <-> '333,400' dist, p +FROM kd_point_tbl WHERE p IS NOT NULL; +SELECT * FROM quad_point_tbl_ord_seq3 seq FULL JOIN kd_point_tbl_ord_idx3 idx +ON seq.n = idx.n +WHERE seq.dist IS DISTINCT FROM idx.dist; + n | dist | p | n | dist | p +---+------+---+---+------+--- +(0 rows) + +-- test KNN scan with included columns +-- the distance numbers are not exactly the same across platforms +SET extra_float_digits = 0; +CREATE INDEX ON quad_point_tbl_ord_seq1 USING spgist(p) INCLUDE(dist); +EXPLAIN (COSTS OFF) +SELECT p, dist FROM quad_point_tbl_ord_seq1 ORDER BY p <-> '0,0' LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------- + Limit + -> Index Only Scan using quad_point_tbl_ord_seq1_p_dist_idx on quad_point_tbl_ord_seq1 + Order By: (p <-> '(0,0)'::point) +(3 rows) + +SELECT p, dist FROM quad_point_tbl_ord_seq1 ORDER BY p <-> '0,0' LIMIT 10; + p | dist +-----------+------------------ + (59,21) | 62.6258732474047 + (88,104) | 136.235090927411 + (39,143) | 148.222805262888 + (139,160) | 211.945747775227 + (209,38) | 212.42645786248 + (157,156) | 221.325552072055 + (175,150) | 230.488611432322 + (236,34) | 238.436574375661 + (263,28) | 264.486294540946 + (322,53) | 326.33265236565 +(10 rows) + +RESET 
extra_float_digits; +-- check ORDER BY distance to NULL +SELECT (SELECT p FROM kd_point_tbl ORDER BY p <-> pt, p <-> '0,0' LIMIT 1) +FROM (VALUES (point '1,2'), (NULL), ('1234,5678')) pts(pt); + p +------------- + (59,21) + (59,21) + (1239,5647) +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t = 'P0123456789abcdef'::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; + count +------- + 1000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcde'; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t = 'P0123456789abcde'::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcde'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdefF'; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t = 'P0123456789abcdefF'::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdefF'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t < 'Aztec Ct '; + QUERY PLAN +---------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t < 'Aztec Ct '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t < 'Aztec Ct '; + count +------- + 272 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~<~ 'Aztec Ct '; + QUERY PLAN +------------------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ~<~ 'Aztec Ct '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~<~ 'Aztec Ct '; + count +------- + 272 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t <= 'Aztec Ct '; + QUERY PLAN +----------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t <= 'Aztec Ct '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t <= 'Aztec Ct '; + count +------- + 273 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~<=~ 'Aztec Ct '; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ~<=~ 'Aztec Ct '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~<=~ 'Aztec Ct '; + count +------- + 273 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'Aztec Ct '; + QUERY PLAN +---------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t = 'Aztec Ct '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'Aztec Ct '; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'Worth St '; + QUERY PLAN +---------------------------------------------------------------------- + Aggregate + -> Index Only 
Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t = 'Worth St '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'Worth St '; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t >= 'Worth St '; + QUERY PLAN +----------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t >= 'Worth St '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t >= 'Worth St '; + count +------- + 50 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~>=~ 'Worth St '; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ~>=~ 'Worth St '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~>=~ 'Worth St '; + count +------- + 50 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t > 'Worth St '; + QUERY PLAN +---------------------------------------------------------------------- + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t > 'Worth St '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t > 'Worth St '; + count +------- + 48 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; + QUERY PLAN +------------------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ~>~ 'Worth St '::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; + count +------- + 48 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ^@ 'Worth'::text) +(3 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE starts_with(t, 'Worth'); + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Index Only Scan using sp_radix_ind on radix_text_tbl + Index Cond: (t ^@ 'Worth'::text) + Filter: starts_with(t, 'Worth'::text) +(4 rows) + +SELECT count(*) FROM radix_text_tbl WHERE starts_with(t, 'Worth'); + count +------- + 2 +(1 row) + +-- Now check the results from bitmap indexscan +SET enable_seqscan = OFF; +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; + QUERY PLAN +---------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p IS NULL) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p IS NULL) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p IS NULL; + count +------- + 3 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p IS NOT NULL; + QUERY PLAN +---------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p IS NOT NULL) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p IS NOT NULL) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p IS NOT NULL; + count +------- + 11000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl; + QUERY PLAN +---------------------------------------------- + Aggregate + -> 
Bitmap Heap Scan on quad_point_tbl + -> Bitmap Index Scan on sp_quad_ind +(3 rows) + +SELECT count(*) FROM quad_point_tbl; + count +------- + 11003 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +--------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p <@ '(1000,1000),(200,200)'::box) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; + QUERY PLAN +--------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: ('(1000,1000),(200,200)'::box @> p) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE box '(200,200,1000,1000)' @> p; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p << '(5000,4000)'::point) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p << '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p << '(5000, 4000)'; + count +------- + 6000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p >> '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p >> '(5000,4000)'::point) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p >> '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p >> '(5000, 4000)'; + count +------- + 4999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p <<| '(5000, 4000)'; + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p <<| '(5000,4000)'::point) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p <<| '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p <<| '(5000, 4000)'; + count +------- + 5000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p |>> '(5000, 4000)'; + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p |>> '(5000,4000)'::point) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p |>> '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p |>> '(5000, 4000)'; + count +------- + 5999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on quad_point_tbl + Recheck Cond: (p ~= '(4585,365)'::point) + -> Bitmap Index Scan on sp_quad_ind + Index Cond: (p ~= '(4585,365)'::point) +(5 rows) + +SELECT count(*) FROM quad_point_tbl WHERE p ~= '(4585, 365)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + QUERY PLAN +--------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan 
on kd_point_tbl + Recheck Cond: (p <@ '(1000,1000),(200,200)'::box) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p <@ box '(200,200,1000,1000)'; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p; + QUERY PLAN +--------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on kd_point_tbl + Recheck Cond: ('(1000,1000),(200,200)'::box @> p) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p <@ '(1000,1000),(200,200)'::box) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE box '(200,200,1000,1000)' @> p; + count +------- + 1057 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p << '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on kd_point_tbl + Recheck Cond: (p << '(5000,4000)'::point) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p << '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p << '(5000, 4000)'; + count +------- + 6000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p >> '(5000, 4000)'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on kd_point_tbl + Recheck Cond: (p >> '(5000,4000)'::point) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p >> '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p >> '(5000, 4000)'; + count +------- + 4999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p <<| '(5000, 4000)'; + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on kd_point_tbl + Recheck Cond: (p <<| '(5000,4000)'::point) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p <<| '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p <<| '(5000, 4000)'; + count +------- + 5000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p |>> '(5000, 4000)'; + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on kd_point_tbl + Recheck Cond: (p |>> '(5000,4000)'::point) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p |>> '(5000,4000)'::point) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p |>> '(5000, 4000)'; + count +------- + 5999 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on kd_point_tbl + Recheck Cond: (p ~= '(4585,365)'::point) + -> Bitmap Index Scan on sp_kd_ind + Index Cond: (p ~= '(4585,365)'::point) +(5 rows) + +SELECT count(*) FROM kd_point_tbl WHERE p ~= '(4585, 365)'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; + QUERY PLAN +----------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t = 'P0123456789abcdef'::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t = 'P0123456789abcdef'::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdef'; + count +------- + 1000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcde'; + QUERY PLAN +---------------------------------------------------------- + 
Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t = 'P0123456789abcde'::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t = 'P0123456789abcde'::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcde'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdefF'; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t = 'P0123456789abcdefF'::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t = 'P0123456789abcdefF'::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'P0123456789abcdefF'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t < 'Aztec Ct '; + QUERY PLAN +---------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t < 'Aztec Ct '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t < 'Aztec Ct '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t < 'Aztec Ct '; + count +------- + 272 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~<~ 'Aztec Ct '; + QUERY PLAN +------------------------------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t ~<~ 'Aztec Ct '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ~<~ 'Aztec Ct '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~<~ 'Aztec Ct '; + count +------- + 272 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t <= 'Aztec Ct '; + QUERY PLAN +----------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t <= 'Aztec Ct '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t <= 'Aztec Ct '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t <= 'Aztec Ct '; + count +------- + 273 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~<=~ 'Aztec Ct '; + QUERY PLAN +------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t ~<=~ 'Aztec Ct '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ~<=~ 'Aztec Ct '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~<=~ 'Aztec Ct '; + count +------- + 273 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'Aztec Ct '; + QUERY PLAN +---------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t = 'Aztec Ct '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t = 'Aztec Ct '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'Aztec Ct '; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t = 'Worth St '; + QUERY PLAN +---------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t = 'Worth St '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t = 'Worth St '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t = 'Worth St '; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t >= 'Worth St '; + QUERY 
PLAN +----------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t >= 'Worth St '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t >= 'Worth St '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t >= 'Worth St '; + count +------- + 50 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~>=~ 'Worth St '; + QUERY PLAN +------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t ~>=~ 'Worth St '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ~>=~ 'Worth St '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~>=~ 'Worth St '; + count +------- + 50 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t > 'Worth St '; + QUERY PLAN +---------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t > 'Worth St '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t > 'Worth St '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t > 'Worth St '; + count +------- + 48 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; + QUERY PLAN +------------------------------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t ~>~ 'Worth St '::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ~>~ 'Worth St '::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ~>~ 'Worth St '; + count +------- + 48 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + QUERY PLAN +------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Recheck Cond: (t ^@ 'Worth'::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ^@ 'Worth'::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE t ^@ 'Worth'; + count +------- + 2 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM radix_text_tbl WHERE starts_with(t, 'Worth'); + QUERY PLAN +------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on radix_text_tbl + Filter: starts_with(t, 'Worth'::text) + -> Bitmap Index Scan on sp_radix_ind + Index Cond: (t ^@ 'Worth'::text) +(5 rows) + +SELECT count(*) FROM radix_text_tbl WHERE starts_with(t, 'Worth'); + count +------- + 2 +(1 row) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; diff --git a/src/test/regress/expected/create_misc.out b/src/test/regress/expected/create_misc.out new file mode 100644 index 0000000..5b46ee5 --- /dev/null +++ b/src/test/regress/expected/create_misc.out @@ -0,0 +1,487 @@ +-- +-- CREATE_MISC +-- +-- +-- a is the type root +-- b and c inherit from a (one-level single inheritance) +-- d inherits from b and c (two-level multiple inheritance) +-- e inherits from c (two-level single inheritance) +-- f inherits from e (three-level single inheritance) +-- +CREATE TABLE a_star ( + class char, + a int4 +); +CREATE TABLE b_star ( + b text +) INHERITS (a_star); +CREATE TABLE c_star ( + c name +) INHERITS (a_star); +CREATE TABLE d_star ( + d float8 +) INHERITS (b_star, c_star); +NOTICE: merging multiple inherited definitions of column "class" +NOTICE: merging multiple inherited definitions of column "a" +CREATE TABLE e_star ( + e int2 +) INHERITS (c_star); +CREATE 
TABLE f_star ( + f polygon +) INHERITS (e_star); +INSERT INTO a_star (class, a) VALUES ('a', 1); +INSERT INTO a_star (class, a) VALUES ('a', 2); +INSERT INTO a_star (class) VALUES ('a'); +INSERT INTO b_star (class, a, b) VALUES ('b', 3, 'mumble'::text); +INSERT INTO b_star (class, a) VALUES ('b', 4); +INSERT INTO b_star (class, b) VALUES ('b', 'bumble'::text); +INSERT INTO b_star (class) VALUES ('b'); +INSERT INTO c_star (class, a, c) VALUES ('c', 5, 'hi mom'::name); +INSERT INTO c_star (class, a) VALUES ('c', 6); +INSERT INTO c_star (class, c) VALUES ('c', 'hi paul'::name); +INSERT INTO c_star (class) VALUES ('c'); +INSERT INTO d_star (class, a, b, c, d) + VALUES ('d', 7, 'grumble'::text, 'hi sunita'::name, '0.0'::float8); +INSERT INTO d_star (class, a, b, c) + VALUES ('d', 8, 'stumble'::text, 'hi koko'::name); +INSERT INTO d_star (class, a, b, d) + VALUES ('d', 9, 'rumble'::text, '1.1'::float8); +INSERT INTO d_star (class, a, c, d) + VALUES ('d', 10, 'hi kristin'::name, '10.01'::float8); +INSERT INTO d_star (class, b, c, d) + VALUES ('d', 'crumble'::text, 'hi boris'::name, '100.001'::float8); +INSERT INTO d_star (class, a, b) + VALUES ('d', 11, 'fumble'::text); +INSERT INTO d_star (class, a, c) + VALUES ('d', 12, 'hi avi'::name); +INSERT INTO d_star (class, a, d) + VALUES ('d', 13, '1000.0001'::float8); +INSERT INTO d_star (class, b, c) + VALUES ('d', 'tumble'::text, 'hi andrew'::name); +INSERT INTO d_star (class, b, d) + VALUES ('d', 'humble'::text, '10000.00001'::float8); +INSERT INTO d_star (class, c, d) + VALUES ('d', 'hi ginger'::name, '100000.000001'::float8); +INSERT INTO d_star (class, a) VALUES ('d', 14); +INSERT INTO d_star (class, b) VALUES ('d', 'jumble'::text); +INSERT INTO d_star (class, c) VALUES ('d', 'hi jolly'::name); +INSERT INTO d_star (class, d) VALUES ('d', '1000000.0000001'::float8); +INSERT INTO d_star (class) VALUES ('d'); +INSERT INTO e_star (class, a, c, e) + VALUES ('e', 15, 'hi carol'::name, '-1'::int2); +INSERT INTO e_star (class, a, c) + VALUES ('e', 16, 'hi bob'::name); +INSERT INTO e_star (class, a, e) + VALUES ('e', 17, '-2'::int2); +INSERT INTO e_star (class, c, e) + VALUES ('e', 'hi michelle'::name, '-3'::int2); +INSERT INTO e_star (class, a) + VALUES ('e', 18); +INSERT INTO e_star (class, c) + VALUES ('e', 'hi elisa'::name); +INSERT INTO e_star (class, e) + VALUES ('e', '-4'::int2); +INSERT INTO f_star (class, a, c, e, f) + VALUES ('f', 19, 'hi claire'::name, '-5'::int2, '(1,3),(2,4)'::polygon); +INSERT INTO f_star (class, a, c, e) + VALUES ('f', 20, 'hi mike'::name, '-6'::int2); +INSERT INTO f_star (class, a, c, f) + VALUES ('f', 21, 'hi marcel'::name, '(11,44),(22,55),(33,66)'::polygon); +INSERT INTO f_star (class, a, e, f) + VALUES ('f', 22, '-7'::int2, '(111,555),(222,666),(333,777),(444,888)'::polygon); +INSERT INTO f_star (class, c, e, f) + VALUES ('f', 'hi keith'::name, '-8'::int2, + '(1111,3333),(2222,4444)'::polygon); +INSERT INTO f_star (class, a, c) + VALUES ('f', 24, 'hi marc'::name); +INSERT INTO f_star (class, a, e) + VALUES ('f', 25, '-9'::int2); +INSERT INTO f_star (class, a, f) + VALUES ('f', 26, '(11111,33333),(22222,44444)'::polygon); +INSERT INTO f_star (class, c, e) + VALUES ('f', 'hi allison'::name, '-10'::int2); +INSERT INTO f_star (class, c, f) + VALUES ('f', 'hi jeff'::name, + '(111111,333333),(222222,444444)'::polygon); +INSERT INTO f_star (class, e, f) + VALUES ('f', '-11'::int2, '(1111111,3333333),(2222222,4444444)'::polygon); +INSERT INTO f_star (class, a) VALUES ('f', 27); +INSERT INTO f_star (class, c) VALUES ('f', 'hi 
carl'::name); +INSERT INTO f_star (class, e) VALUES ('f', '-12'::int2); +INSERT INTO f_star (class, f) + VALUES ('f', '(11111111,33333333),(22222222,44444444)'::polygon); +INSERT INTO f_star (class) VALUES ('f'); +-- Analyze the X_star tables for better plan stability in later tests +ANALYZE a_star; +ANALYZE b_star; +ANALYZE c_star; +ANALYZE d_star; +ANALYZE e_star; +ANALYZE f_star; +-- +-- inheritance stress test +-- +SELECT * FROM a_star*; + class | a +-------+---- + a | 1 + a | 2 + a | + b | 3 + b | 4 + b | + b | + c | 5 + c | 6 + c | + c | + d | 7 + d | 8 + d | 9 + d | 10 + d | + d | 11 + d | 12 + d | 13 + d | + d | + d | + d | 14 + d | + d | + d | + d | + e | 15 + e | 16 + e | 17 + e | + e | 18 + e | + e | + f | 19 + f | 20 + f | 21 + f | 22 + f | + f | 24 + f | 25 + f | 26 + f | + f | + f | + f | 27 + f | + f | + f | + f | +(50 rows) + +SELECT * + FROM b_star* x + WHERE x.b = text 'bumble' or x.a < 3; + class | a | b +-------+---+-------- + b | | bumble +(1 row) + +SELECT class, a + FROM c_star* x + WHERE x.c ~ text 'hi'; + class | a +-------+---- + c | 5 + c | + d | 7 + d | 8 + d | 10 + d | + d | 12 + d | + d | + d | + e | 15 + e | 16 + e | + e | + f | 19 + f | 20 + f | 21 + f | + f | 24 + f | + f | + f | +(22 rows) + +SELECT class, b, c + FROM d_star* x + WHERE x.a < 100; + class | b | c +-------+---------+------------ + d | grumble | hi sunita + d | stumble | hi koko + d | rumble | + d | | hi kristin + d | fumble | + d | | hi avi + d | | + d | | +(8 rows) + +SELECT class, c FROM e_star* x WHERE x.c NOTNULL; + class | c +-------+------------- + e | hi carol + e | hi bob + e | hi michelle + e | hi elisa + f | hi claire + f | hi mike + f | hi marcel + f | hi keith + f | hi marc + f | hi allison + f | hi jeff + f | hi carl +(12 rows) + +SELECT * FROM f_star* x WHERE x.c ISNULL; + class | a | c | e | f +-------+----+---+-----+------------------------------------------- + f | 22 | | -7 | ((111,555),(222,666),(333,777),(444,888)) + f | 25 | | -9 | + f | 26 | | | ((11111,33333),(22222,44444)) + f | | | -11 | ((1111111,3333333),(2222222,4444444)) + f | 27 | | | + f | | | -12 | + f | | | | ((11111111,33333333),(22222222,44444444)) + f | | | | +(8 rows) + +-- grouping and aggregation on inherited sets have been busted in the past... 
+SELECT sum(a) FROM a_star*; + sum +----- + 355 +(1 row) + +SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class; + class | sum +-------+----- + a | 3 + b | 7 + c | 11 + d | 84 + e | 66 + f | 184 +(6 rows) + +ALTER TABLE f_star RENAME COLUMN f TO ff; +ALTER TABLE e_star* RENAME COLUMN e TO ee; +ALTER TABLE d_star* RENAME COLUMN d TO dd; +ALTER TABLE c_star* RENAME COLUMN c TO cc; +ALTER TABLE b_star* RENAME COLUMN b TO bb; +ALTER TABLE a_star* RENAME COLUMN a TO aa; +SELECT class, aa + FROM a_star* x + WHERE aa ISNULL; + class | aa +-------+---- + a | + b | + b | + c | + c | + d | + d | + d | + d | + d | + d | + d | + d | + e | + e | + e | + f | + f | + f | + f | + f | + f | + f | + f | +(24 rows) + +-- As of Postgres 7.1, ALTER implicitly recurses, +-- so this should be same as ALTER a_star* +ALTER TABLE a_star RENAME COLUMN aa TO foo; +SELECT class, foo + FROM a_star* x + WHERE x.foo >= 2; + class | foo +-------+----- + a | 2 + b | 3 + b | 4 + c | 5 + c | 6 + d | 7 + d | 8 + d | 9 + d | 10 + d | 11 + d | 12 + d | 13 + d | 14 + e | 15 + e | 16 + e | 17 + e | 18 + f | 19 + f | 20 + f | 21 + f | 22 + f | 24 + f | 25 + f | 26 + f | 27 +(25 rows) + +ALTER TABLE a_star RENAME COLUMN foo TO aa; +SELECT * + from a_star* + WHERE aa < 1000; + class | aa +-------+---- + a | 1 + a | 2 + b | 3 + b | 4 + c | 5 + c | 6 + d | 7 + d | 8 + d | 9 + d | 10 + d | 11 + d | 12 + d | 13 + d | 14 + e | 15 + e | 16 + e | 17 + e | 18 + f | 19 + f | 20 + f | 21 + f | 22 + f | 24 + f | 25 + f | 26 + f | 27 +(26 rows) + +ALTER TABLE f_star ADD COLUMN f int4; +UPDATE f_star SET f = 10; +ALTER TABLE e_star* ADD COLUMN e int4; +--UPDATE e_star* SET e = 42; +SELECT * FROM e_star*; + class | aa | cc | ee | e +-------+----+-------------+-----+--- + e | 15 | hi carol | -1 | + e | 16 | hi bob | | + e | 17 | | -2 | + e | | hi michelle | -3 | + e | 18 | | | + e | | hi elisa | | + e | | | -4 | + f | 19 | hi claire | -5 | + f | 20 | hi mike | -6 | + f | 21 | hi marcel | | + f | 22 | | -7 | + f | | hi keith | -8 | + f | 24 | hi marc | | + f | 25 | | -9 | + f | 26 | | | + f | | hi allison | -10 | + f | | hi jeff | | + f | | | -11 | + f | 27 | | | + f | | hi carl | | + f | | | -12 | + f | | | | + f | | | | +(23 rows) + +ALTER TABLE a_star* ADD COLUMN a text; +NOTICE: merging definition of column "a" for child "d_star" +-- That ALTER TABLE should have added TOAST tables. 
+SELECT relname, reltoastrelid <> 0 AS has_toast_table + FROM pg_class + WHERE oid::regclass IN ('a_star', 'c_star') + ORDER BY 1; + relname | has_toast_table +---------+----------------- + a_star | t + c_star | t +(2 rows) + +--UPDATE b_star* +-- SET a = text 'gazpacho' +-- WHERE aa > 4; +SELECT class, aa, a FROM a_star*; + class | aa | a +-------+----+--- + a | 1 | + a | 2 | + a | | + b | 3 | + b | 4 | + b | | + b | | + c | 5 | + c | 6 | + c | | + c | | + d | 7 | + d | 8 | + d | 9 | + d | 10 | + d | | + d | 11 | + d | 12 | + d | 13 | + d | | + d | | + d | | + d | 14 | + d | | + d | | + d | | + d | | + e | 15 | + e | 16 | + e | 17 | + e | | + e | 18 | + e | | + e | | + f | 19 | + f | 20 | + f | 21 | + f | 22 | + f | | + f | 24 | + f | 25 | + f | 26 | + f | | + f | | + f | | + f | 27 | + f | | + f | | + f | | + f | | +(50 rows) + diff --git a/src/test/regress/expected/create_operator.out b/src/test/regress/expected/create_operator.out new file mode 100644 index 0000000..f71b601 --- /dev/null +++ b/src/test/regress/expected/create_operator.out @@ -0,0 +1,285 @@ +-- +-- CREATE_OPERATOR +-- +CREATE OPERATOR ## ( + leftarg = path, + rightarg = path, + function = path_inter, + commutator = ## +); +CREATE OPERATOR @#@ ( + rightarg = int8, -- prefix + procedure = factorial +); +CREATE OPERATOR #%# ( + leftarg = int8, -- fail, postfix is no longer supported + procedure = factorial +); +ERROR: operator right argument type must be specified +DETAIL: Postfix operators are not supported. +-- Test operator created above +SELECT @#@ 24; + ?column? +-------------------------- + 620448401733239439360000 +(1 row) + +-- Test comments +COMMENT ON OPERATOR ###### (NONE, int4) IS 'bad prefix'; +ERROR: operator does not exist: ###### integer +COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad postfix'; +ERROR: postfix operators are not supported +COMMENT ON OPERATOR ###### (int4, int8) IS 'bad infix'; +ERROR: operator does not exist: integer ###### bigint +-- Check that DROP on a nonexistent op behaves sanely, too +DROP OPERATOR ###### (NONE, int4); +ERROR: operator does not exist: ###### integer +DROP OPERATOR ###### (int4, NONE); +ERROR: postfix operators are not supported +DROP OPERATOR ###### (int4, int8); +ERROR: operator does not exist: integer ###### bigint +-- => is disallowed as an operator name now +CREATE OPERATOR => ( + rightarg = int8, + procedure = factorial +); +ERROR: syntax error at or near "=>" +LINE 1: CREATE OPERATOR => ( + ^ +-- lexing of <=, >=, <>, != has a number of edge cases +-- (=> is tested elsewhere) +-- this is legal because ! is not allowed in sql ops +CREATE OPERATOR !=- ( + rightarg = int8, + procedure = factorial +); +SELECT !=- 10; + ?column? +---------- + 3628800 +(1 row) + +-- postfix operators don't work anymore +SELECT 10 !=-; +ERROR: syntax error at or near ";" +LINE 1: SELECT 10 !=-; + ^ +-- make sure lexer returns != as <> even in edge cases +SELECT 2 !=/**/ 1, 2 !=/**/ 2; + ?column? | ?column? +----------+---------- + t | f +(1 row) + +SELECT 2 !=-- comment to be removed by psql + 1; + ?column? +---------- + t +(1 row) + +DO $$ -- use DO to protect -- from psql + declare r boolean; + begin + execute $e$ select 2 !=-- comment + 1 $e$ into r; + raise info 'r = %', r; + end; +$$; +INFO: r = t +-- check that <= etc. followed by more operator characters are returned +-- as the correct token with correct precedence +SELECT true<>-1 BETWEEN 1 AND 1; -- BETWEEN has prec. above <> but below Op + ?column? +---------- + t +(1 row) + +SELECT false<>/**/1 BETWEEN 1 AND 1; + ?column? 
+---------- + t +(1 row) + +SELECT false<=-1 BETWEEN 1 AND 1; + ?column? +---------- + t +(1 row) + +SELECT false>=-1 BETWEEN 1 AND 1; + ?column? +---------- + t +(1 row) + +SELECT 2<=/**/3, 3>=/**/2, 2<>/**/3; + ?column? | ?column? | ?column? +----------+----------+---------- + t | t | t +(1 row) + +SELECT 3<=/**/2, 2>=/**/3, 2<>/**/2; + ?column? | ?column? | ?column? +----------+----------+---------- + f | f | f +(1 row) + +-- Should fail. CREATE OPERATOR requires USAGE on SCHEMA +BEGIN TRANSACTION; +CREATE ROLE regress_rol_op1; +CREATE SCHEMA schema_op1; +GRANT USAGE ON SCHEMA schema_op1 TO PUBLIC; +REVOKE USAGE ON SCHEMA schema_op1 FROM regress_rol_op1; +SET ROLE regress_rol_op1; +CREATE OPERATOR schema_op1.#*# ( + rightarg = int8, + procedure = factorial +); +ERROR: permission denied for schema schema_op1 +ROLLBACK; +-- Should fail. SETOF type functions not allowed as argument (testing leftarg) +BEGIN TRANSACTION; +CREATE OPERATOR #*# ( + leftarg = SETOF int8, + procedure = factorial +); +ERROR: SETOF type not allowed for operator argument +ROLLBACK; +-- Should fail. SETOF type functions not allowed as argument (testing rightarg) +BEGIN TRANSACTION; +CREATE OPERATOR #*# ( + rightarg = SETOF int8, + procedure = factorial +); +ERROR: SETOF type not allowed for operator argument +ROLLBACK; +-- Should work. Sample text-book case +BEGIN TRANSACTION; +CREATE OR REPLACE FUNCTION fn_op2(boolean, boolean) +RETURNS boolean AS $$ + SELECT NULL::BOOLEAN; +$$ LANGUAGE sql IMMUTABLE; +CREATE OPERATOR === ( + LEFTARG = boolean, + RIGHTARG = boolean, + PROCEDURE = fn_op2, + COMMUTATOR = ===, + NEGATOR = !==, + RESTRICT = contsel, + JOIN = contjoinsel, + SORT1, SORT2, LTCMP, GTCMP, HASHES, MERGES +); +ROLLBACK; +-- Should fail. Invalid attribute +CREATE OPERATOR #@%# ( + rightarg = int8, + procedure = factorial, + invalid_att = int8 +); +WARNING: operator attribute "invalid_att" not recognized +-- Should fail. At least rightarg should be mandatorily specified +CREATE OPERATOR #@%# ( + procedure = factorial +); +ERROR: operator argument types must be specified +-- Should fail. Procedure should be mandatorily specified +CREATE OPERATOR #@%# ( + rightarg = int8 +); +ERROR: operator function must be specified +-- Should fail. CREATE OPERATOR requires USAGE on TYPE +BEGIN TRANSACTION; +CREATE ROLE regress_rol_op3; +CREATE TYPE type_op3 AS ENUM ('new', 'open', 'closed'); +CREATE FUNCTION fn_op3(type_op3, int8) +RETURNS int8 AS $$ + SELECT NULL::int8; +$$ LANGUAGE sql IMMUTABLE; +REVOKE USAGE ON TYPE type_op3 FROM regress_rol_op3; +REVOKE USAGE ON TYPE type_op3 FROM PUBLIC; -- Need to do this so that regress_rol_op3 is not allowed USAGE via PUBLIC +SET ROLE regress_rol_op3; +CREATE OPERATOR #*# ( + leftarg = type_op3, + rightarg = int8, + procedure = fn_op3 +); +ERROR: permission denied for type type_op3 +ROLLBACK; +-- Should fail. CREATE OPERATOR requires USAGE on TYPE (need to check separately for rightarg) +BEGIN TRANSACTION; +CREATE ROLE regress_rol_op4; +CREATE TYPE type_op4 AS ENUM ('new', 'open', 'closed'); +CREATE FUNCTION fn_op4(int8, type_op4) +RETURNS int8 AS $$ + SELECT NULL::int8; +$$ LANGUAGE sql IMMUTABLE; +REVOKE USAGE ON TYPE type_op4 FROM regress_rol_op4; +REVOKE USAGE ON TYPE type_op4 FROM PUBLIC; -- Need to do this so that regress_rol_op3 is not allowed USAGE via PUBLIC +SET ROLE regress_rol_op4; +CREATE OPERATOR #*# ( + leftarg = int8, + rightarg = type_op4, + procedure = fn_op4 +); +ERROR: permission denied for type type_op4 +ROLLBACK; +-- Should fail. 
CREATE OPERATOR requires EXECUTE on function +BEGIN TRANSACTION; +CREATE ROLE regress_rol_op5; +CREATE TYPE type_op5 AS ENUM ('new', 'open', 'closed'); +CREATE FUNCTION fn_op5(int8, int8) +RETURNS int8 AS $$ + SELECT NULL::int8; +$$ LANGUAGE sql IMMUTABLE; +REVOKE EXECUTE ON FUNCTION fn_op5(int8, int8) FROM regress_rol_op5; +REVOKE EXECUTE ON FUNCTION fn_op5(int8, int8) FROM PUBLIC;-- Need to do this so that regress_rol_op3 is not allowed EXECUTE via PUBLIC +SET ROLE regress_rol_op5; +CREATE OPERATOR #*# ( + leftarg = int8, + rightarg = int8, + procedure = fn_op5 +); +ERROR: permission denied for function fn_op5 +ROLLBACK; +-- Should fail. CREATE OPERATOR requires USAGE on return TYPE +BEGIN TRANSACTION; +CREATE ROLE regress_rol_op6; +CREATE TYPE type_op6 AS ENUM ('new', 'open', 'closed'); +CREATE FUNCTION fn_op6(int8, int8) +RETURNS type_op6 AS $$ + SELECT NULL::type_op6; +$$ LANGUAGE sql IMMUTABLE; +REVOKE USAGE ON TYPE type_op6 FROM regress_rol_op6; +REVOKE USAGE ON TYPE type_op6 FROM PUBLIC; -- Need to do this so that regress_rol_op3 is not allowed USAGE via PUBLIC +SET ROLE regress_rol_op6; +CREATE OPERATOR #*# ( + leftarg = int8, + rightarg = int8, + procedure = fn_op6 +); +ERROR: permission denied for type type_op6 +ROLLBACK; +-- invalid: non-lowercase quoted identifiers +CREATE OPERATOR === +( + "Leftarg" = box, + "Rightarg" = box, + "Procedure" = area_equal_function, + "Commutator" = ===, + "Negator" = !==, + "Restrict" = area_restriction_function, + "Join" = area_join_function, + "Hashes", + "Merges" +); +WARNING: operator attribute "Leftarg" not recognized +WARNING: operator attribute "Rightarg" not recognized +WARNING: operator attribute "Procedure" not recognized +WARNING: operator attribute "Commutator" not recognized +WARNING: operator attribute "Negator" not recognized +WARNING: operator attribute "Restrict" not recognized +WARNING: operator attribute "Join" not recognized +WARNING: operator attribute "Hashes" not recognized +WARNING: operator attribute "Merges" not recognized +ERROR: operator function must be specified diff --git a/src/test/regress/expected/create_procedure.out b/src/test/regress/expected/create_procedure.out new file mode 100644 index 0000000..f2a677f --- /dev/null +++ b/src/test/regress/expected/create_procedure.out @@ -0,0 +1,383 @@ +CALL nonexistent(); -- error +ERROR: procedure nonexistent() does not exist +LINE 1: CALL nonexistent(); + ^ +HINT: No procedure matches the given name and argument types. You might need to add explicit type casts. +CALL random(); -- error +ERROR: random() is not a procedure +LINE 1: CALL random(); + ^ +HINT: To call a function, use SELECT. 
+CREATE FUNCTION cp_testfunc1(a int) RETURNS int LANGUAGE SQL AS $$ SELECT a $$; +CREATE TABLE cp_test (a int, b text); +CREATE PROCEDURE ptest1(x text) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES (1, x); +$$; +\df ptest1 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------+------------------+---------------------+------ + public | ptest1 | | IN x text | proc +(1 row) + +SELECT pg_get_functiondef('ptest1'::regproc); + pg_get_functiondef +------------------------------------------------------ + CREATE OR REPLACE PROCEDURE public.ptest1(IN x text)+ + LANGUAGE sql + + AS $procedure$ + + INSERT INTO cp_test VALUES (1, x); + + $procedure$ + + +(1 row) + +-- show only normal functions +\dfn public.*test*1 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------------+------------------+---------------------+------ + public | cp_testfunc1 | integer | a integer | func +(1 row) + +-- show only procedures +\dfp public.*test*1 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------+------------------+---------------------+------ + public | ptest1 | | IN x text | proc +(1 row) + +SELECT ptest1('x'); -- error +ERROR: ptest1(unknown) is a procedure +LINE 1: SELECT ptest1('x'); + ^ +HINT: To call a procedure, use CALL. +CALL ptest1('a'); -- ok +CALL ptest1('xy' || 'zzy'); -- ok, constant-folded arg +CALL ptest1(substring(random()::numeric(20,15)::text, 1, 1)); -- ok, volatile arg +SELECT * FROM cp_test ORDER BY b COLLATE "C"; + a | b +---+------- + 1 | 0 + 1 | a + 1 | xyzzy +(3 rows) + +-- SQL-standard body +CREATE PROCEDURE ptest1s(x text) +LANGUAGE SQL +BEGIN ATOMIC + INSERT INTO cp_test VALUES (1, x); +END; +\df ptest1s + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+---------+------------------+---------------------+------ + public | ptest1s | | IN x text | proc +(1 row) + +SELECT pg_get_functiondef('ptest1s'::regproc); + pg_get_functiondef +------------------------------------------------------- + CREATE OR REPLACE PROCEDURE public.ptest1s(IN x text)+ + LANGUAGE sql + + BEGIN ATOMIC + + INSERT INTO cp_test (a, b) + + VALUES (1, ptest1s.x); + + END + + +(1 row) + +CALL ptest1s('b'); +SELECT * FROM cp_test ORDER BY b COLLATE "C"; + a | b +---+------- + 1 | 0 + 1 | a + 1 | b + 1 | xyzzy +(4 rows) + +-- utility functions currently not supported here +CREATE PROCEDURE ptestx() +LANGUAGE SQL +BEGIN ATOMIC + CREATE TABLE x (a int); +END; +ERROR: CREATE TABLE is not yet supported in unquoted SQL function body +CREATE PROCEDURE ptest2() +LANGUAGE SQL +AS $$ +SELECT 5; +$$; +CALL ptest2(); +-- nested CALL +TRUNCATE cp_test; +CREATE PROCEDURE ptest3(y text) +LANGUAGE SQL +AS $$ +CALL ptest1(y); +CALL ptest1($1); +$$; +CALL ptest3('b'); +SELECT * FROM cp_test; + a | b +---+--- + 1 | b + 1 | b +(2 rows) + +-- output arguments +CREATE PROCEDURE ptest4a(INOUT a int, INOUT b int) +LANGUAGE SQL +AS $$ +SELECT 1, 2; +$$; +CALL ptest4a(NULL, NULL); + a | b +---+--- + 1 | 2 +(1 row) + +CREATE PROCEDURE ptest4b(INOUT b int, INOUT a int) +LANGUAGE SQL +AS $$ +CALL ptest4a(a, b); -- error, not supported +$$; +ERROR: calling procedures with output arguments is not supported in SQL functions +CONTEXT: SQL function "ptest4b" +DROP PROCEDURE ptest4a; +-- named and default parameters +CREATE OR REPLACE PROCEDURE ptest5(a int, b text, c int default 100) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES(a, b); +INSERT INTO cp_test 
VALUES(c, b); +$$; +TRUNCATE cp_test; +CALL ptest5(10, 'Hello', 20); +CALL ptest5(10, 'Hello'); +CALL ptest5(10, b => 'Hello'); +CALL ptest5(b => 'Hello', a => 10); +SELECT * FROM cp_test; + a | b +-----+------- + 10 | Hello + 20 | Hello + 10 | Hello + 100 | Hello + 10 | Hello + 100 | Hello + 10 | Hello + 100 | Hello +(8 rows) + +-- polymorphic types +CREATE PROCEDURE ptest6(a int, b anyelement) +LANGUAGE SQL +AS $$ +SELECT NULL::int; +$$; +CALL ptest6(1, 2); +-- collation assignment +CREATE PROCEDURE ptest7(a text, b text) +LANGUAGE SQL +AS $$ +SELECT a = b; +$$; +CALL ptest7(least('a', 'b'), 'a'); +-- empty body +CREATE PROCEDURE ptest8(x text) +BEGIN ATOMIC +END; +\df ptest8 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+--------+------------------+---------------------+------ + public | ptest8 | | IN x text | proc +(1 row) + +SELECT pg_get_functiondef('ptest8'::regproc); + pg_get_functiondef +------------------------------------------------------ + CREATE OR REPLACE PROCEDURE public.ptest8(IN x text)+ + LANGUAGE sql + + BEGIN ATOMIC + + END + + +(1 row) + +CALL ptest8(''); +-- OUT parameters +CREATE PROCEDURE ptest9(OUT a int) +LANGUAGE SQL +AS $$ +INSERT INTO cp_test VALUES (1, 'a'); +SELECT 1; +$$; +-- standard way to do a call: +CALL ptest9(NULL); + a +--- + 1 +(1 row) + +-- you can write an expression, but it's not evaluated +CALL ptest9(1/0); -- no error + a +--- + 1 +(1 row) + +-- ... and it had better match the type of the parameter +CALL ptest9(1./0.); -- error +ERROR: procedure ptest9(numeric) does not exist +LINE 1: CALL ptest9(1./0.); + ^ +HINT: No procedure matches the given name and argument types. You might need to add explicit type casts. +-- check named-parameter matching +CREATE PROCEDURE ptest10(OUT a int, IN b int, IN c int) +LANGUAGE SQL AS $$ SELECT b - c $$; +CALL ptest10(null, 7, 4); + a +--- + 3 +(1 row) + +CALL ptest10(a => null, b => 8, c => 2); + a +--- + 6 +(1 row) + +CALL ptest10(null, 7, c => 2); + a +--- + 5 +(1 row) + +CALL ptest10(null, c => 4, b => 11); + a +--- + 7 +(1 row) + +CALL ptest10(b => 8, c => 2, a => 0); + a +--- + 6 +(1 row) + +CREATE PROCEDURE ptest11(a OUT int, VARIADIC b int[]) LANGUAGE SQL + AS $$ SELECT b[1] + b[2] $$; +CALL ptest11(null, 11, 12, 13); + a +---- + 23 +(1 row) + +-- check resolution of ambiguous DROP commands +CREATE PROCEDURE ptest10(IN a int, IN b int, IN c int) +LANGUAGE SQL AS $$ SELECT a + b - c $$; +\df ptest10 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+---------+------------------+-------------------------------------------+------ + public | ptest10 | | IN a integer, IN b integer, IN c integer | proc + public | ptest10 | | OUT a integer, IN b integer, IN c integer | proc +(2 rows) + +drop procedure ptest10; -- fail +ERROR: procedure name "ptest10" is not unique +HINT: Specify the argument list to select the procedure unambiguously. 
+drop procedure ptest10(int, int, int); -- fail +ERROR: procedure name "ptest10" is not unique +begin; +drop procedure ptest10(out int, int, int); +\df ptest10 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+---------+------------------+------------------------------------------+------ + public | ptest10 | | IN a integer, IN b integer, IN c integer | proc +(1 row) + +drop procedure ptest10(int, int, int); -- now this would work +rollback; +begin; +drop procedure ptest10(in int, int, int); +\df ptest10 + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+---------+------------------+-------------------------------------------+------ + public | ptest10 | | OUT a integer, IN b integer, IN c integer | proc +(1 row) + +drop procedure ptest10(int, int, int); -- now this would work +rollback; +-- various error cases +CALL version(); -- error: not a procedure +ERROR: version() is not a procedure +LINE 1: CALL version(); + ^ +HINT: To call a function, use SELECT. +CALL sum(1); -- error: not a procedure +ERROR: sum(integer) is not a procedure +LINE 1: CALL sum(1); + ^ +HINT: To call a function, use SELECT. +CREATE PROCEDURE ptestx() LANGUAGE SQL WINDOW AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; +ERROR: invalid attribute in procedure definition +LINE 1: CREATE PROCEDURE ptestx() LANGUAGE SQL WINDOW AS $$ INSERT I... + ^ +CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT INTO cp_test VALUES (1, 'a') $$; +ERROR: invalid attribute in procedure definition +LINE 1: CREATE PROCEDURE ptestx() LANGUAGE SQL STRICT AS $$ INSERT I... + ^ +CREATE PROCEDURE ptestx(a VARIADIC int[], b OUT int) LANGUAGE SQL + AS $$ SELECT a[1] $$; +ERROR: VARIADIC parameter must be the last parameter +CREATE PROCEDURE ptestx(a int DEFAULT 42, b OUT int) LANGUAGE SQL + AS $$ SELECT a $$; +ERROR: procedure OUT parameters cannot appear after one with a default value +ALTER PROCEDURE ptest1(text) STRICT; +ERROR: invalid attribute in procedure definition +LINE 1: ALTER PROCEDURE ptest1(text) STRICT; + ^ +ALTER FUNCTION ptest1(text) VOLATILE; -- error: not a function +ERROR: ptest1(text) is not a function +ALTER PROCEDURE cp_testfunc1(int) VOLATILE; -- error: not a procedure +ERROR: cp_testfunc1(integer) is not a procedure +ALTER PROCEDURE nonexistent() VOLATILE; +ERROR: procedure nonexistent() does not exist +DROP FUNCTION ptest1(text); -- error: not a function +ERROR: ptest1(text) is not a function +DROP PROCEDURE cp_testfunc1(int); -- error: not a procedure +ERROR: cp_testfunc1(integer) is not a procedure +DROP PROCEDURE nonexistent(); +ERROR: procedure nonexistent() does not exist +-- privileges +CREATE USER regress_cp_user1; +GRANT INSERT ON cp_test TO regress_cp_user1; +REVOKE EXECUTE ON PROCEDURE ptest1(text) FROM PUBLIC; +SET ROLE regress_cp_user1; +CALL ptest1('a'); -- error +ERROR: permission denied for procedure ptest1 +RESET ROLE; +GRANT EXECUTE ON PROCEDURE ptest1(text) TO regress_cp_user1; +SET ROLE regress_cp_user1; +CALL ptest1('a'); -- ok +RESET ROLE; +-- ROUTINE syntax +ALTER ROUTINE cp_testfunc1(int) RENAME TO cp_testfunc1a; +ALTER ROUTINE cp_testfunc1a RENAME TO cp_testfunc1; +ALTER ROUTINE ptest1(text) RENAME TO ptest1a; +ALTER ROUTINE ptest1a RENAME TO ptest1; +DROP ROUTINE cp_testfunc1(int); +-- cleanup +DROP PROCEDURE ptest1; +DROP PROCEDURE ptest1s; +DROP PROCEDURE ptest2; +DROP TABLE cp_test; +DROP USER regress_cp_user1; diff --git a/src/test/regress/expected/create_role.out 
b/src/test/regress/expected/create_role.out new file mode 100644 index 0000000..46d4f9e --- /dev/null +++ b/src/test/regress/expected/create_role.out @@ -0,0 +1,261 @@ +-- ok, superuser can create users with any set of privileges +CREATE ROLE regress_role_super SUPERUSER; +CREATE ROLE regress_role_admin CREATEDB CREATEROLE REPLICATION BYPASSRLS; +GRANT CREATE ON DATABASE regression TO regress_role_admin WITH GRANT OPTION; +CREATE ROLE regress_role_limited_admin CREATEROLE; +CREATE ROLE regress_role_normal; +-- fail, CREATEROLE user can't give away role attributes without having them +SET SESSION AUTHORIZATION regress_role_limited_admin; +CREATE ROLE regress_nosuch_superuser SUPERUSER; +ERROR: permission denied to create role +DETAIL: Only roles with the SUPERUSER attribute may create roles with the SUPERUSER attribute. +CREATE ROLE regress_nosuch_replication_bypassrls REPLICATION BYPASSRLS; +ERROR: permission denied to create role +DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. +CREATE ROLE regress_nosuch_replication REPLICATION; +ERROR: permission denied to create role +DETAIL: Only roles with the REPLICATION attribute may create roles with the REPLICATION attribute. +CREATE ROLE regress_nosuch_bypassrls BYPASSRLS; +ERROR: permission denied to create role +DETAIL: Only roles with the BYPASSRLS attribute may create roles with the BYPASSRLS attribute. +CREATE ROLE regress_nosuch_createdb CREATEDB; +ERROR: permission denied to create role +DETAIL: Only roles with the CREATEDB attribute may create roles with the CREATEDB attribute. +-- ok, can create a role without any special attributes +CREATE ROLE regress_role_limited; +-- fail, can't give it in any of the restricted attributes +ALTER ROLE regress_role_limited SUPERUSER; +ERROR: permission denied to alter role +DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. +ALTER ROLE regress_role_limited REPLICATION; +ERROR: permission denied to alter role +DETAIL: Only roles with the REPLICATION attribute may change the REPLICATION attribute. +ALTER ROLE regress_role_limited CREATEDB; +ERROR: permission denied to alter role +DETAIL: Only roles with the CREATEDB attribute may change the CREATEDB attribute. +ALTER ROLE regress_role_limited BYPASSRLS; +ERROR: permission denied to alter role +DETAIL: Only roles with the BYPASSRLS attribute may change the BYPASSRLS attribute. +DROP ROLE regress_role_limited; +-- ok, can give away these role attributes if you have them +SET SESSION AUTHORIZATION regress_role_admin; +CREATE ROLE regress_replication_bypassrls REPLICATION BYPASSRLS; +CREATE ROLE regress_replication REPLICATION; +CREATE ROLE regress_bypassrls BYPASSRLS; +CREATE ROLE regress_createdb CREATEDB; +-- ok, can toggle these role attributes off and on if you have them +ALTER ROLE regress_replication NOREPLICATION; +ALTER ROLE regress_replication REPLICATION; +ALTER ROLE regress_bypassrls NOBYPASSRLS; +ALTER ROLE regress_bypassrls BYPASSRLS; +ALTER ROLE regress_createdb NOCREATEDB; +ALTER ROLE regress_createdb CREATEDB; +-- fail, can't toggle SUPERUSER +ALTER ROLE regress_createdb SUPERUSER; +ERROR: permission denied to alter role +DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. +ALTER ROLE regress_createdb NOSUPERUSER; +ERROR: permission denied to alter role +DETAIL: Only roles with the SUPERUSER attribute may change the SUPERUSER attribute. 
+-- ok, having CREATEROLE is enough to create users with these privileges +CREATE ROLE regress_createrole CREATEROLE NOINHERIT; +GRANT CREATE ON DATABASE regression TO regress_createrole WITH GRANT OPTION; +CREATE ROLE regress_login LOGIN; +CREATE ROLE regress_inherit INHERIT; +CREATE ROLE regress_connection_limit CONNECTION LIMIT 5; +CREATE ROLE regress_encrypted_password ENCRYPTED PASSWORD 'foo'; +CREATE ROLE regress_password_null PASSWORD NULL; +-- ok, backwards compatible noise words should be ignored +CREATE ROLE regress_noiseword SYSID 12345; +NOTICE: SYSID can no longer be specified +-- fail, cannot grant membership in superuser role +CREATE ROLE regress_nosuch_super IN ROLE regress_role_super; +ERROR: permission denied to grant role "regress_role_super" +DETAIL: Only roles with the SUPERUSER attribute may grant roles with the SUPERUSER attribute. +-- fail, database owner cannot have members +CREATE ROLE regress_nosuch_dbowner IN ROLE pg_database_owner; +ERROR: role "pg_database_owner" cannot have explicit members +-- ok, can grant other users into a role +CREATE ROLE regress_inroles ROLE + regress_role_super, regress_createdb, regress_createrole, regress_login, + regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; +-- fail, cannot grant a role into itself +CREATE ROLE regress_nosuch_recursive ROLE regress_nosuch_recursive; +ERROR: role "regress_nosuch_recursive" is a member of role "regress_nosuch_recursive" +-- ok, can grant other users into a role with admin option +CREATE ROLE regress_adminroles ADMIN + regress_role_super, regress_createdb, regress_createrole, regress_login, + regress_inherit, regress_connection_limit, regress_encrypted_password, regress_password_null; +-- fail, cannot grant a role into itself with admin option +CREATE ROLE regress_nosuch_admin_recursive ADMIN regress_nosuch_admin_recursive; +ERROR: role "regress_nosuch_admin_recursive" is a member of role "regress_nosuch_admin_recursive" +-- fail, regress_createrole does not have CREATEDB privilege +SET SESSION AUTHORIZATION regress_createrole; +CREATE DATABASE regress_nosuch_db; +ERROR: permission denied to create database +-- ok, regress_createrole can create new roles +CREATE ROLE regress_plainrole; +-- ok, roles with CREATEROLE can create new roles with it +CREATE ROLE regress_rolecreator CREATEROLE; +-- ok, roles with CREATEROLE can create new roles with different role +-- attributes, including CREATEROLE +CREATE ROLE regress_hasprivs CREATEROLE LOGIN INHERIT CONNECTION LIMIT 5; +-- ok, we should be able to modify a role we created +COMMENT ON ROLE regress_hasprivs IS 'some comment'; +ALTER ROLE regress_hasprivs RENAME TO regress_tenant; +ALTER ROLE regress_tenant NOINHERIT NOLOGIN CONNECTION LIMIT 7; +-- fail, we should be unable to modify a role we did not create +COMMENT ON ROLE regress_role_normal IS 'some comment'; +ERROR: permission denied +DETAIL: The current user must have the ADMIN option on role "regress_role_normal". +ALTER ROLE regress_role_normal RENAME TO regress_role_abnormal; +ERROR: permission denied to rename role +DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may rename this role. +ALTER ROLE regress_role_normal NOINHERIT NOLOGIN CONNECTION LIMIT 7; +ERROR: permission denied to alter role +DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_role_normal" may alter this role. 
+-- ok, regress_tenant can create objects within the database +SET SESSION AUTHORIZATION regress_tenant; +CREATE TABLE tenant_table (i integer); +CREATE INDEX tenant_idx ON tenant_table(i); +CREATE VIEW tenant_view AS SELECT * FROM pg_catalog.pg_class; +REVOKE ALL PRIVILEGES ON tenant_table FROM PUBLIC; +-- fail, these objects belonging to regress_tenant +SET SESSION AUTHORIZATION regress_createrole; +DROP INDEX tenant_idx; +ERROR: must be owner of index tenant_idx +ALTER TABLE tenant_table ADD COLUMN t text; +ERROR: must be owner of table tenant_table +DROP TABLE tenant_table; +ERROR: must be owner of table tenant_table +ALTER VIEW tenant_view OWNER TO regress_role_admin; +ERROR: must be owner of view tenant_view +DROP VIEW tenant_view; +ERROR: must be owner of view tenant_view +-- fail, can't create objects owned as regress_tenant +CREATE SCHEMA regress_tenant_schema AUTHORIZATION regress_tenant; +ERROR: must be able to SET ROLE "regress_tenant" +-- fail, we don't inherit permissions from regress_tenant +REASSIGN OWNED BY regress_tenant TO regress_createrole; +ERROR: permission denied to reassign objects +DETAIL: Only roles with privileges of role "regress_tenant" may reassign objects owned by it. +-- ok, create a role with a value for createrole_self_grant +SET createrole_self_grant = 'set, inherit'; +CREATE ROLE regress_tenant2; +GRANT CREATE ON DATABASE regression TO regress_tenant2; +-- ok, regress_tenant2 can create objects within the database +SET SESSION AUTHORIZATION regress_tenant2; +CREATE TABLE tenant2_table (i integer); +REVOKE ALL PRIVILEGES ON tenant2_table FROM PUBLIC; +-- ok, because we have SET and INHERIT on regress_tenant2 +SET SESSION AUTHORIZATION regress_createrole; +CREATE SCHEMA regress_tenant2_schema AUTHORIZATION regress_tenant2; +ALTER SCHEMA regress_tenant2_schema OWNER TO regress_createrole; +ALTER TABLE tenant2_table OWNER TO regress_createrole; +ALTER TABLE tenant2_table OWNER TO regress_tenant2; +-- with SET but not INHERIT, we can give away objects but not take them +REVOKE INHERIT OPTION FOR regress_tenant2 FROM regress_createrole; +ALTER SCHEMA regress_tenant2_schema OWNER TO regress_tenant2; +ALTER TABLE tenant2_table OWNER TO regress_createrole; +ERROR: must be owner of table tenant2_table +-- with INHERIT but not SET, we can take objects but not give them away +GRANT regress_tenant2 TO regress_createrole WITH INHERIT TRUE, SET FALSE; +ALTER TABLE tenant2_table OWNER TO regress_createrole; +ALTER TABLE tenant2_table OWNER TO regress_tenant2; +ERROR: must be able to SET ROLE "regress_tenant2" +DROP TABLE tenant2_table; +-- fail, CREATEROLE is not enough to create roles in privileged roles +CREATE ROLE regress_read_all_data IN ROLE pg_read_all_data; +ERROR: permission denied to grant role "pg_read_all_data" +DETAIL: Only roles with the ADMIN option on role "pg_read_all_data" may grant this role. +CREATE ROLE regress_write_all_data IN ROLE pg_write_all_data; +ERROR: permission denied to grant role "pg_write_all_data" +DETAIL: Only roles with the ADMIN option on role "pg_write_all_data" may grant this role. +CREATE ROLE regress_monitor IN ROLE pg_monitor; +ERROR: permission denied to grant role "pg_monitor" +DETAIL: Only roles with the ADMIN option on role "pg_monitor" may grant this role. +CREATE ROLE regress_read_all_settings IN ROLE pg_read_all_settings; +ERROR: permission denied to grant role "pg_read_all_settings" +DETAIL: Only roles with the ADMIN option on role "pg_read_all_settings" may grant this role. 
+CREATE ROLE regress_read_all_stats IN ROLE pg_read_all_stats; +ERROR: permission denied to grant role "pg_read_all_stats" +DETAIL: Only roles with the ADMIN option on role "pg_read_all_stats" may grant this role. +CREATE ROLE regress_stat_scan_tables IN ROLE pg_stat_scan_tables; +ERROR: permission denied to grant role "pg_stat_scan_tables" +DETAIL: Only roles with the ADMIN option on role "pg_stat_scan_tables" may grant this role. +CREATE ROLE regress_read_server_files IN ROLE pg_read_server_files; +ERROR: permission denied to grant role "pg_read_server_files" +DETAIL: Only roles with the ADMIN option on role "pg_read_server_files" may grant this role. +CREATE ROLE regress_write_server_files IN ROLE pg_write_server_files; +ERROR: permission denied to grant role "pg_write_server_files" +DETAIL: Only roles with the ADMIN option on role "pg_write_server_files" may grant this role. +CREATE ROLE regress_execute_server_program IN ROLE pg_execute_server_program; +ERROR: permission denied to grant role "pg_execute_server_program" +DETAIL: Only roles with the ADMIN option on role "pg_execute_server_program" may grant this role. +CREATE ROLE regress_signal_backend IN ROLE pg_signal_backend; +ERROR: permission denied to grant role "pg_signal_backend" +DETAIL: Only roles with the ADMIN option on role "pg_signal_backend" may grant this role. +-- fail, role still owns database objects +DROP ROLE regress_tenant; +ERROR: role "regress_tenant" cannot be dropped because some objects depend on it +DETAIL: owner of table tenant_table +owner of view tenant_view +-- fail, creation of these roles failed above so they do not now exist +SET SESSION AUTHORIZATION regress_role_admin; +DROP ROLE regress_nosuch_superuser; +ERROR: role "regress_nosuch_superuser" does not exist +DROP ROLE regress_nosuch_replication_bypassrls; +ERROR: role "regress_nosuch_replication_bypassrls" does not exist +DROP ROLE regress_nosuch_replication; +ERROR: role "regress_nosuch_replication" does not exist +DROP ROLE regress_nosuch_bypassrls; +ERROR: role "regress_nosuch_bypassrls" does not exist +DROP ROLE regress_nosuch_super; +ERROR: role "regress_nosuch_super" does not exist +DROP ROLE regress_nosuch_dbowner; +ERROR: role "regress_nosuch_dbowner" does not exist +DROP ROLE regress_nosuch_recursive; +ERROR: role "regress_nosuch_recursive" does not exist +DROP ROLE regress_nosuch_admin_recursive; +ERROR: role "regress_nosuch_admin_recursive" does not exist +DROP ROLE regress_plainrole; +-- must revoke privileges before dropping role +REVOKE CREATE ON DATABASE regression FROM regress_createrole CASCADE; +-- ok, should be able to drop non-superuser roles we created +DROP ROLE regress_replication_bypassrls; +DROP ROLE regress_replication; +DROP ROLE regress_bypassrls; +DROP ROLE regress_createdb; +DROP ROLE regress_createrole; +DROP ROLE regress_login; +DROP ROLE regress_inherit; +DROP ROLE regress_connection_limit; +DROP ROLE regress_encrypted_password; +DROP ROLE regress_password_null; +DROP ROLE regress_noiseword; +DROP ROLE regress_inroles; +DROP ROLE regress_adminroles; +-- fail, cannot drop ourself, nor superusers or roles we lack ADMIN for +DROP ROLE regress_role_super; +ERROR: permission denied to drop role +DETAIL: Only roles with the SUPERUSER attribute may drop roles with the SUPERUSER attribute. 
+DROP ROLE regress_role_admin; +ERROR: current user cannot be dropped +DROP ROLE regress_rolecreator; +ERROR: permission denied to drop role +DETAIL: Only roles with the CREATEROLE attribute and the ADMIN option on role "regress_rolecreator" may drop this role. +-- ok +RESET SESSION AUTHORIZATION; +REVOKE CREATE ON DATABASE regression FROM regress_role_admin CASCADE; +DROP INDEX tenant_idx; +DROP TABLE tenant_table; +DROP VIEW tenant_view; +DROP SCHEMA regress_tenant2_schema; +-- check for duplicated drop +DROP ROLE regress_tenant, regress_tenant; +DROP ROLE regress_tenant2; +DROP ROLE regress_rolecreator; +DROP ROLE regress_role_admin; +DROP ROLE regress_role_limited_admin; +DROP ROLE regress_role_super; +DROP ROLE regress_role_normal; diff --git a/src/test/regress/expected/create_schema.out b/src/test/regress/expected/create_schema.out new file mode 100644 index 0000000..93302a0 --- /dev/null +++ b/src/test/regress/expected/create_schema.out @@ -0,0 +1,98 @@ +-- +-- CREATE_SCHEMA +-- +-- Schema creation with elements. +CREATE ROLE regress_create_schema_role SUPERUSER; +-- Cases where schema creation fails as objects are qualified with a schema +-- that does not match with what's expected. +-- This checks all the object types that include schema qualifications. +CREATE SCHEMA AUTHORIZATION regress_create_schema_role + CREATE SEQUENCE schema_not_existing.seq; +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION regress_create_schema_role + CREATE TABLE schema_not_existing.tab (id int); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION regress_create_schema_role + CREATE VIEW schema_not_existing.view AS SELECT 1; +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION regress_create_schema_role + CREATE INDEX ON schema_not_existing.tab (id); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION regress_create_schema_role + CREATE TRIGGER schema_trig BEFORE INSERT ON schema_not_existing.tab + EXECUTE FUNCTION schema_trig.no_func(); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +-- Again, with a role specification and no schema names. 
+SET ROLE regress_create_schema_role; +CREATE SCHEMA AUTHORIZATION CURRENT_ROLE + CREATE SEQUENCE schema_not_existing.seq; +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION CURRENT_ROLE + CREATE TABLE schema_not_existing.tab (id int); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION CURRENT_ROLE + CREATE VIEW schema_not_existing.view AS SELECT 1; +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION CURRENT_ROLE + CREATE INDEX ON schema_not_existing.tab (id); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +CREATE SCHEMA AUTHORIZATION CURRENT_ROLE + CREATE TRIGGER schema_trig BEFORE INSERT ON schema_not_existing.tab + EXECUTE FUNCTION schema_trig.no_func(); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_create_schema_role) +-- Again, with a schema name and a role specification. +CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE + CREATE SEQUENCE schema_not_existing.seq; +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_schema_1) +CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE + CREATE TABLE schema_not_existing.tab (id int); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_schema_1) +CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE + CREATE VIEW schema_not_existing.view AS SELECT 1; +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_schema_1) +CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE + CREATE INDEX ON schema_not_existing.tab (id); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_schema_1) +CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE + CREATE TRIGGER schema_trig BEFORE INSERT ON schema_not_existing.tab + EXECUTE FUNCTION schema_trig.no_func(); +ERROR: CREATE specifies a schema (schema_not_existing) different from the one being created (regress_schema_1) +RESET ROLE; +-- Cases where the schema creation succeeds. +-- The schema created matches the role name. +CREATE SCHEMA AUTHORIZATION regress_create_schema_role + CREATE TABLE regress_create_schema_role.tab (id int); +\d regress_create_schema_role.tab + Table "regress_create_schema_role.tab" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + +DROP SCHEMA regress_create_schema_role CASCADE; +NOTICE: drop cascades to table regress_create_schema_role.tab +-- Again, with a different role specification and no schema names. +SET ROLE regress_create_schema_role; +CREATE SCHEMA AUTHORIZATION CURRENT_ROLE + CREATE TABLE regress_create_schema_role.tab (id int); +\d regress_create_schema_role.tab + Table "regress_create_schema_role.tab" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + +DROP SCHEMA regress_create_schema_role CASCADE; +NOTICE: drop cascades to table tab +-- Again, with a schema name and a role specification. 
+CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE + CREATE TABLE regress_schema_1.tab (id int); +\d regress_schema_1.tab + Table "regress_schema_1.tab" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + +DROP SCHEMA regress_schema_1 CASCADE; +NOTICE: drop cascades to table regress_schema_1.tab +RESET ROLE; +-- Clean up +DROP ROLE regress_create_schema_role; diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out new file mode 100644 index 0000000..1c3ef2b --- /dev/null +++ b/src/test/regress/expected/create_table.out @@ -0,0 +1,1120 @@ +-- +-- CREATE_TABLE +-- +-- Error cases +CREATE TABLE unknowntab ( + u unknown -- fail +); +ERROR: column "u" has pseudo-type unknown +CREATE TYPE unknown_comptype AS ( + u unknown -- fail +); +ERROR: column "u" has pseudo-type unknown +-- invalid: non-lowercase quoted reloptions identifiers +CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a; +ERROR: unrecognized parameter "Fillfactor" +CREATE UNLOGGED TABLE unlogged1 (a int primary key); -- OK +CREATE TEMPORARY TABLE unlogged2 (a int primary key); -- OK +SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname; + relname | relkind | relpersistence +----------------+---------+---------------- + unlogged1 | r | u + unlogged1_pkey | i | u + unlogged2 | r | t + unlogged2_pkey | i | t +(4 rows) + +REINDEX INDEX unlogged1_pkey; +REINDEX INDEX unlogged2_pkey; +SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged\d' ORDER BY relname; + relname | relkind | relpersistence +----------------+---------+---------------- + unlogged1 | r | u + unlogged1_pkey | i | u + unlogged2 | r | t + unlogged2_pkey | i | t +(4 rows) + +DROP TABLE unlogged2; +INSERT INTO unlogged1 VALUES (42); +CREATE UNLOGGED TABLE public.unlogged2 (a int primary key); -- also OK +CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); -- not OK +ERROR: only temporary relations may be created in temporary schemas +LINE 1: CREATE UNLOGGED TABLE pg_temp.unlogged3 (a int primary key); + ^ +CREATE TABLE pg_temp.implicitly_temp (a int primary key); -- OK +CREATE TEMP TABLE explicitly_temp (a int primary key); -- also OK +CREATE TEMP TABLE pg_temp.doubly_temp (a int primary key); -- also OK +CREATE TEMP TABLE public.temp_to_perm (a int primary key); -- not OK +ERROR: cannot create temporary relation in non-temporary schema +LINE 1: CREATE TEMP TABLE public.temp_to_perm (a int primary key); + ^ +DROP TABLE unlogged1, public.unlogged2; +CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r'; +CREATE TABLE as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r'; +ERROR: relation "as_select1" already exists +CREATE TABLE IF NOT EXISTS as_select1 AS SELECT * FROM pg_class WHERE relkind = 'r'; +NOTICE: relation "as_select1" already exists, skipping +DROP TABLE as_select1; +PREPARE select1 AS SELECT 1 as a; +CREATE TABLE as_select1 AS EXECUTE select1; +CREATE TABLE as_select1 AS EXECUTE select1; +ERROR: relation "as_select1" already exists +SELECT * FROM as_select1; + a +--- + 1 +(1 row) + +CREATE TABLE IF NOT EXISTS as_select1 AS EXECUTE select1; +NOTICE: relation "as_select1" already exists, skipping +DROP TABLE as_select1; +DEALLOCATE select1; +-- create an extra wide table to test for issues related to that +-- (temporarily hide query, to avoid the long CREATE TABLE stmt) +\set ECHO none +INSERT INTO extra_wide_table(firstc, lastc) 
VALUES('first col', 'last col'); +SELECT firstc, lastc FROM extra_wide_table; + firstc | lastc +-----------+---------- + first col | last col +(1 row) + +-- check that tables with oids cannot be created anymore +CREATE TABLE withoid() WITH OIDS; +ERROR: syntax error at or near "OIDS" +LINE 1: CREATE TABLE withoid() WITH OIDS; + ^ +CREATE TABLE withoid() WITH (oids); +ERROR: tables declared WITH OIDS are not supported +CREATE TABLE withoid() WITH (oids = true); +ERROR: tables declared WITH OIDS are not supported +-- but explicitly not adding oids is still supported +CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid; +CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid; +-- check restriction with default expressions +-- invalid use of column reference in default expressions +CREATE TABLE default_expr_column (id int DEFAULT (id)); +ERROR: cannot use column reference in DEFAULT expression +LINE 1: CREATE TABLE default_expr_column (id int DEFAULT (id)); + ^ +CREATE TABLE default_expr_column (id int DEFAULT (bar.id)); +ERROR: cannot use column reference in DEFAULT expression +LINE 1: CREATE TABLE default_expr_column (id int DEFAULT (bar.id)); + ^ +CREATE TABLE default_expr_agg_column (id int DEFAULT (avg(id))); +ERROR: cannot use column reference in DEFAULT expression +LINE 1: ...TE TABLE default_expr_agg_column (id int DEFAULT (avg(id))); + ^ +-- invalid column definition +CREATE TABLE default_expr_non_column (a int DEFAULT (avg(non_existent))); +ERROR: cannot use column reference in DEFAULT expression +LINE 1: ...TABLE default_expr_non_column (a int DEFAULT (avg(non_existe... + ^ +-- invalid use of aggregate +CREATE TABLE default_expr_agg (a int DEFAULT (avg(1))); +ERROR: aggregate functions are not allowed in DEFAULT expressions +LINE 1: CREATE TABLE default_expr_agg (a int DEFAULT (avg(1))); + ^ +-- invalid use of subquery +CREATE TABLE default_expr_agg (a int DEFAULT (select 1)); +ERROR: cannot use subquery in DEFAULT expression +LINE 1: CREATE TABLE default_expr_agg (a int DEFAULT (select 1)); + ^ +-- invalid use of set-returning function +CREATE TABLE default_expr_agg (a int DEFAULT (generate_series(1,3))); +ERROR: set-returning functions are not allowed in DEFAULT expressions +LINE 1: CREATE TABLE default_expr_agg (a int DEFAULT (generate_serie... + ^ +-- Verify that subtransaction rollback restores rd_createSubid. +BEGIN; +CREATE TABLE remember_create_subid (c int); +SAVEPOINT q; DROP TABLE remember_create_subid; ROLLBACK TO q; +COMMIT; +DROP TABLE remember_create_subid; +-- Verify that subtransaction rollback restores rd_firstRelfilenodeSubid. 
+CREATE TABLE remember_node_subid (c int); +BEGIN; +ALTER TABLE remember_node_subid ALTER c TYPE bigint; +SAVEPOINT q; DROP TABLE remember_node_subid; ROLLBACK TO q; +COMMIT; +DROP TABLE remember_node_subid; +-- +-- Partitioned tables +-- +-- cannot combine INHERITS and PARTITION BY (although grammar allows) +CREATE TABLE partitioned ( + a int +) INHERITS (some_table) PARTITION BY LIST (a); +ERROR: cannot create partitioned table as inheritance child +-- cannot use more than 1 column as partition key for list partitioned table +CREATE TABLE partitioned ( + a1 int, + a2 int +) PARTITION BY LIST (a1, a2); -- fail +ERROR: cannot use "list" partition strategy with more than one column +-- unsupported constraint type for partitioned tables +CREATE TABLE partitioned ( + a int, + EXCLUDE USING gist (a WITH &&) +) PARTITION BY RANGE (a); +ERROR: exclusion constraints are not supported on partitioned tables +LINE 3: EXCLUDE USING gist (a WITH &&) + ^ +-- prevent using prohibited expressions in the key +CREATE FUNCTION retset (a int) RETURNS SETOF int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE; +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE (retset(a)); +ERROR: set-returning functions are not allowed in partition key expressions +DROP FUNCTION retset(int); +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE ((avg(a))); +ERROR: aggregate functions are not allowed in partition key expressions +CREATE TABLE partitioned ( + a int, + b int +) PARTITION BY RANGE ((avg(a) OVER (PARTITION BY b))); +ERROR: window functions are not allowed in partition key expressions +CREATE TABLE partitioned ( + a int +) PARTITION BY LIST ((a LIKE (SELECT 1))); +ERROR: cannot use subquery in partition key expression +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE ((42)); +ERROR: cannot use constant expression as partition key +CREATE FUNCTION const_func () RETURNS int AS $$ SELECT 1; $$ LANGUAGE SQL IMMUTABLE; +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE (const_func()); +ERROR: cannot use constant expression as partition key +DROP FUNCTION const_func(); +-- only accept valid partitioning strategy +CREATE TABLE partitioned ( + a int +) PARTITION BY MAGIC (a); +ERROR: unrecognized partitioning strategy "magic" +-- specified column must be present in the table +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE (b); +ERROR: column "b" named in partition key does not exist +LINE 3: ) PARTITION BY RANGE (b); + ^ +-- cannot use system columns in partition key +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE (xmin); +ERROR: cannot use system column "xmin" in partition key +LINE 3: ) PARTITION BY RANGE (xmin); + ^ +-- cannot use pseudotypes +CREATE TABLE partitioned ( + a int, + b int +) PARTITION BY RANGE (((a, b))); +ERROR: partition key column 1 has pseudo-type record +CREATE TABLE partitioned ( + a int, + b int +) PARTITION BY RANGE (a, ('unknown')); +ERROR: partition key column 2 has pseudo-type unknown +-- functions in key must be immutable +CREATE FUNCTION immut_func (a int) RETURNS int AS $$ SELECT a + random()::int; $$ LANGUAGE SQL; +CREATE TABLE partitioned ( + a int +) PARTITION BY RANGE (immut_func(a)); +ERROR: functions in partition key expression must be marked IMMUTABLE +DROP FUNCTION immut_func(int); +-- prevent using columns of unsupported types in key (type must have a btree operator class) +CREATE TABLE partitioned ( + a point +) PARTITION BY LIST (a); +ERROR: data type point has no default operator class for access method "btree" +HINT: You must specify a 
btree operator class or define a default btree operator class for the data type. +CREATE TABLE partitioned ( + a point +) PARTITION BY LIST (a point_ops); +ERROR: operator class "point_ops" does not exist for access method "btree" +CREATE TABLE partitioned ( + a point +) PARTITION BY RANGE (a); +ERROR: data type point has no default operator class for access method "btree" +HINT: You must specify a btree operator class or define a default btree operator class for the data type. +CREATE TABLE partitioned ( + a point +) PARTITION BY RANGE (a point_ops); +ERROR: operator class "point_ops" does not exist for access method "btree" +-- cannot add NO INHERIT constraints to partitioned tables +CREATE TABLE partitioned ( + a int, + CONSTRAINT check_a CHECK (a > 0) NO INHERIT +) PARTITION BY RANGE (a); +ERROR: cannot add NO INHERIT constraint to partitioned table "partitioned" +-- some checks after successful creation of a partitioned table +CREATE FUNCTION plusone(a int) RETURNS INT AS $$ SELECT a+1; $$ LANGUAGE SQL; +CREATE TABLE partitioned ( + a int, + b int, + c text, + d text +) PARTITION BY RANGE (a oid_ops, plusone(b), c collate "default", d collate "C"); +-- check relkind +SELECT relkind FROM pg_class WHERE relname = 'partitioned'; + relkind +--------- + p +(1 row) + +-- prevent a function referenced in partition key from being dropped +DROP FUNCTION plusone(int); +ERROR: cannot drop function plusone(integer) because other objects depend on it +DETAIL: table partitioned depends on function plusone(integer) +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- partitioned table cannot participate in regular inheritance +CREATE TABLE partitioned2 ( + a int, + b text +) PARTITION BY RANGE ((a+1), substr(b, 1, 5)); +CREATE TABLE fail () INHERITS (partitioned2); +ERROR: cannot inherit from partitioned table "partitioned2" +-- Partition key in describe output +\d partitioned + Partitioned table "public.partitioned" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + d | text | | | +Partition key: RANGE (a oid_ops, plusone(b), c, d COLLATE "C") +Number of partitions: 0 + +\d+ partitioned2 + Partitioned table "public.partitioned2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | | | plain | | + b | text | | | | extended | | +Partition key: RANGE (((a + 1)), substr(b, 1, 5)) +Number of partitions: 0 + +INSERT INTO partitioned2 VALUES (1, 'hello'); +ERROR: no partition of relation "partitioned2" found for row +DETAIL: Partition key of the failing row contains ((a + 1), substr(b, 1, 5)) = (2, hello). 
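-- [Editor's sketch; not part of the expected regression output. Illustrative only.]
-- The DETAIL above shows how tuple routing evaluates the partition key for the
-- row (1, 'hello'): ((a + 1), substr(b, 1, 5)) = (2, 'hello'), and no partition
-- covers that value yet.  Once part2_1 is created just below, spanning
-- ('-1', 'aaaaa') to (100, 'ccccc'), the same row would be accepted and routed
-- into it; a hypothetical re-run after that point would be:
INSERT INTO partitioned2 VALUES (1, 'hello');  -- key (2, 'hello') falls inside part2_1's bounds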
+CREATE TABLE part2_1 PARTITION OF partitioned2 FOR VALUES FROM (-1, 'aaaaa') TO (100, 'ccccc'); +\d+ part2_1 + Table "public.part2_1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | | | plain | | + b | text | | | | extended | | +Partition of: partitioned2 FOR VALUES FROM ('-1', 'aaaaa') TO (100, 'ccccc') +Partition constraint: (((a + 1) IS NOT NULL) AND (substr(b, 1, 5) IS NOT NULL) AND (((a + 1) > '-1'::integer) OR (((a + 1) = '-1'::integer) AND (substr(b, 1, 5) >= 'aaaaa'::text))) AND (((a + 1) < 100) OR (((a + 1) = 100) AND (substr(b, 1, 5) < 'ccccc'::text)))) + +DROP TABLE partitioned, partitioned2; +-- check reference to partitioned table's rowtype in partition descriptor +create table partitioned (a int, b int) + partition by list ((row(a, b)::partitioned)); +create table partitioned1 + partition of partitioned for values in ('(1,2)'::partitioned); +create table partitioned2 + partition of partitioned for values in ('(2,4)'::partitioned); +explain (costs off) +select * from partitioned where row(a,b)::partitioned = '(1,2)'::partitioned; + QUERY PLAN +----------------------------------------------------------- + Seq Scan on partitioned1 partitioned + Filter: (ROW(a, b)::partitioned = '(1,2)'::partitioned) +(2 rows) + +drop table partitioned; +-- whole-row Var in partition key works too +create table partitioned (a int, b int) + partition by list ((partitioned)); +create table partitioned1 + partition of partitioned for values in ('(1,2)'); +create table partitioned2 + partition of partitioned for values in ('(2,4)'); +explain (costs off) +select * from partitioned where partitioned = '(1,2)'::partitioned; + QUERY PLAN +----------------------------------------------------------------- + Seq Scan on partitioned1 partitioned + Filter: ((partitioned.*)::partitioned = '(1,2)'::partitioned) +(2 rows) + +\d+ partitioned1 + Table "public.partitioned1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | +Partition of: partitioned FOR VALUES IN ('(1,2)') +Partition constraint: (((partitioned1.*)::partitioned IS DISTINCT FROM NULL) AND ((partitioned1.*)::partitioned = '(1,2)'::partitioned)) + +drop table partitioned; +-- check that dependencies of partition columns are handled correctly +create domain intdom1 as int; +create table partitioned ( + a intdom1, + b text +) partition by range (a); +alter table partitioned drop column a; -- fail +ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned" +drop domain intdom1; -- fail, requires cascade +ERROR: cannot drop type intdom1 because other objects depend on it +DETAIL: table partitioned depends on type intdom1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+drop domain intdom1 cascade; +NOTICE: drop cascades to table partitioned +table partitioned; -- gone +ERROR: relation "partitioned" does not exist +LINE 1: table partitioned; + ^ +-- likewise for columns used in partition expressions +create domain intdom1 as int; +create table partitioned ( + a intdom1, + b text +) partition by range (plusone(a)); +alter table partitioned drop column a; -- fail +ERROR: cannot drop column "a" because it is part of the partition key of relation "partitioned" +drop domain intdom1; -- fail, requires cascade +ERROR: cannot drop type intdom1 because other objects depend on it +DETAIL: table partitioned depends on type intdom1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop domain intdom1 cascade; +NOTICE: drop cascades to table partitioned +table partitioned; -- gone +ERROR: relation "partitioned" does not exist +LINE 1: table partitioned; + ^ +-- +-- Partitions +-- +-- check partition bound syntax +CREATE TABLE list_parted ( + a int +) PARTITION BY LIST (a); +CREATE TABLE part_p1 PARTITION OF list_parted FOR VALUES IN ('1'); +CREATE TABLE part_p2 PARTITION OF list_parted FOR VALUES IN (2); +CREATE TABLE part_p3 PARTITION OF list_parted FOR VALUES IN ((2+1)); +CREATE TABLE part_null PARTITION OF list_parted FOR VALUES IN (null); +\d+ list_parted + Partitioned table "public.list_parted" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition key: LIST (a) +Partitions: part_null FOR VALUES IN (NULL), + part_p1 FOR VALUES IN (1), + part_p2 FOR VALUES IN (2), + part_p3 FOR VALUES IN (3) + +-- forbidden expressions for partition bound with list partitioned table +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename); +ERROR: cannot use column reference in partition bound expression +LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (somename); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename.somename); +ERROR: cannot use column reference in partition bound expression +LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (somename.s... + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (a); +ERROR: cannot use column reference in partition bound expression +LINE 1: ..._bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (a); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(a)); +ERROR: cannot use column reference in partition bound expression +LINE 1: ...s_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(a)); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(somename)); +ERROR: cannot use column reference in partition bound expression +LINE 1: ..._fail PARTITION OF list_parted FOR VALUES IN (sum(somename))... + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(1)); +ERROR: aggregate functions are not allowed in partition bound +LINE 1: ...s_expr_fail PARTITION OF list_parted FOR VALUES IN (sum(1)); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((select 1)); +ERROR: cannot use subquery in partition bound +LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN ((select 1)... 
+ ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (generate_series(4, 6)); +ERROR: set-returning functions are not allowed in partition bound +LINE 1: ...expr_fail PARTITION OF list_parted FOR VALUES IN (generate_s... + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "POSIX"); +ERROR: collations are not supported by type integer +LINE 1: ...ail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "P... + ^ +-- syntax does not allow empty list of values for list partitions +CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN (); +ERROR: syntax error at or near ")" +LINE 1: ...E TABLE fail_part PARTITION OF list_parted FOR VALUES IN (); + ^ +-- trying to specify range for list partitioned table +CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) TO (2); +ERROR: invalid bound specification for a list partition +LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) T... + ^ +-- trying to specify modulus and remainder for list partitioned table +CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); +ERROR: invalid bound specification for a list partition +LINE 1: ...BLE fail_part PARTITION OF list_parted FOR VALUES WITH (MODU... + ^ +-- check default partition cannot be created more than once +CREATE TABLE part_default PARTITION OF list_parted DEFAULT; +CREATE TABLE fail_default_part PARTITION OF list_parted DEFAULT; +ERROR: partition "fail_default_part" conflicts with existing default partition "part_default" +LINE 1: ...TE TABLE fail_default_part PARTITION OF list_parted DEFAULT; + ^ +-- specified literal can't be cast to the partition column data type +CREATE TABLE bools ( + a bool +) PARTITION BY LIST (a); +CREATE TABLE bools_true PARTITION OF bools FOR VALUES IN (1); +ERROR: specified value cannot be cast to type boolean for column "a" +LINE 1: ...REATE TABLE bools_true PARTITION OF bools FOR VALUES IN (1); + ^ +DROP TABLE bools; +-- specified literal can be cast, and the cast might not be immutable +CREATE TABLE moneyp ( + a money +) PARTITION BY LIST (a); +CREATE TABLE moneyp_10 PARTITION OF moneyp FOR VALUES IN (10); +CREATE TABLE moneyp_11 PARTITION OF moneyp FOR VALUES IN ('11'); +CREATE TABLE moneyp_12 PARTITION OF moneyp FOR VALUES IN (to_char(12, '99')::int); +DROP TABLE moneyp; +-- cast is immutable +CREATE TABLE bigintp ( + a bigint +) PARTITION BY LIST (a); +CREATE TABLE bigintp_10 PARTITION OF bigintp FOR VALUES IN (10); +-- fails due to overlap: +CREATE TABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10'); +ERROR: partition "bigintp_10_2" would overlap partition "bigintp_10" +LINE 1: ...ABLE bigintp_10_2 PARTITION OF bigintp FOR VALUES IN ('10'); + ^ +DROP TABLE bigintp; +CREATE TABLE range_parted ( + a date +) PARTITION BY RANGE (a); +-- forbidden expressions for partition bounds with range partitioned table +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (somename) TO ('2019-01-01'); +ERROR: cannot use column reference in partition bound expression +LINE 2: FOR VALUES FROM (somename) TO ('2019-01-01'); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (somename.somename) TO ('2019-01-01'); +ERROR: cannot use column reference in partition bound expression +LINE 2: FOR VALUES FROM (somename.somename) TO ('2019-01-01'); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (a) TO ('2019-01-01'); +ERROR: cannot use column 
reference in partition bound expression +LINE 2: FOR VALUES FROM (a) TO ('2019-01-01'); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (max(a)) TO ('2019-01-01'); +ERROR: cannot use column reference in partition bound expression +LINE 2: FOR VALUES FROM (max(a)) TO ('2019-01-01'); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (max(somename)) TO ('2019-01-01'); +ERROR: cannot use column reference in partition bound expression +LINE 2: FOR VALUES FROM (max(somename)) TO ('2019-01-01'); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (max('2019-02-01'::date)) TO ('2019-01-01'); +ERROR: aggregate functions are not allowed in partition bound +LINE 2: FOR VALUES FROM (max('2019-02-01'::date)) TO ('2019-01-01'... + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM ((select 1)) TO ('2019-01-01'); +ERROR: cannot use subquery in partition bound +LINE 2: FOR VALUES FROM ((select 1)) TO ('2019-01-01'); + ^ +CREATE TABLE part_bogus_expr_fail PARTITION OF range_parted + FOR VALUES FROM (generate_series(1, 3)) TO ('2019-01-01'); +ERROR: set-returning functions are not allowed in partition bound +LINE 2: FOR VALUES FROM (generate_series(1, 3)) TO ('2019-01-01'); + ^ +-- trying to specify list for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES IN ('a'); +ERROR: invalid bound specification for a range partition +LINE 1: ...BLE fail_part PARTITION OF range_parted FOR VALUES IN ('a'); + ^ +-- trying to specify modulus and remainder for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); +ERROR: invalid bound specification for a range partition +LINE 1: ...LE fail_part PARTITION OF range_parted FOR VALUES WITH (MODU... + ^ +-- each of start and end bounds must have same number of values as the +-- length of the partition key +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('z'); +ERROR: FROM must specify exactly one value per partitioning column +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM ('a') TO ('z', 1); +ERROR: TO must specify exactly one value per partitioning column +-- cannot specify null values in range bounds +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES FROM (null) TO (maxvalue); +ERROR: cannot specify NULL in range bound +-- trying to specify modulus and remainder for range partitioned table +CREATE TABLE fail_part PARTITION OF range_parted FOR VALUES WITH (MODULUS 10, REMAINDER 1); +ERROR: invalid bound specification for a range partition +LINE 1: ...LE fail_part PARTITION OF range_parted FOR VALUES WITH (MODU... + ^ +-- check partition bound syntax for the hash partition +CREATE TABLE hash_parted ( + a int +) PARTITION BY HASH (a); +CREATE TABLE hpart_1 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 0); +CREATE TABLE hpart_2 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 50, REMAINDER 1); +CREATE TABLE hpart_3 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 200, REMAINDER 2); +CREATE TABLE hpart_4 PARTITION OF hash_parted FOR VALUES WITH (MODULUS 10, REMAINDER 3); +-- modulus 25 is factor of modulus of 50 but 10 is not a factor of 25. 
+CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 25, REMAINDER 3); +ERROR: every hash partition modulus must be a factor of the next larger modulus +DETAIL: The new modulus 25 is not divisible by 10, the modulus of existing partition "hpart_4". +-- previous modulus 50 is factor of 150 but this modulus is not a factor of next modulus 200. +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 150, REMAINDER 3); +ERROR: every hash partition modulus must be a factor of the next larger modulus +DETAIL: The new modulus 150 is not a factor of 200, the modulus of existing partition "hpart_3". +-- overlapping remainders +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODULUS 100, REMAINDER 3); +ERROR: partition "fail_part" would overlap partition "hpart_4" +LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES WITH (MODU... + ^ +-- trying to specify range for the hash partitioned table +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a', 1) TO ('z'); +ERROR: invalid bound specification for a hash partition +LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES FROM ('a',... + ^ +-- trying to specify list value for the hash partitioned table +CREATE TABLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000); +ERROR: invalid bound specification for a hash partition +LINE 1: ...BLE fail_part PARTITION OF hash_parted FOR VALUES IN (1000); + ^ +-- trying to create default partition for the hash partitioned table +CREATE TABLE fail_default_part PARTITION OF hash_parted DEFAULT; +ERROR: a hash-partitioned table may not have a default partition +-- check if compatible with the specified parent +-- cannot create as partition of a non-partitioned table +CREATE TABLE unparted ( + a int +); +CREATE TABLE fail_part PARTITION OF unparted FOR VALUES IN ('a'); +ERROR: "unparted" is not partitioned +CREATE TABLE fail_part PARTITION OF unparted FOR VALUES WITH (MODULUS 2, REMAINDER 1); +ERROR: "unparted" is not partitioned +DROP TABLE unparted; +-- cannot create a permanent rel as partition of a temp rel +CREATE TEMP TABLE temp_parted ( + a int +) PARTITION BY LIST (a); +CREATE TABLE fail_part PARTITION OF temp_parted FOR VALUES IN ('a'); +ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted" +DROP TABLE temp_parted; +-- check for partition bound overlap and other invalid specifications +CREATE TABLE list_parted2 ( + a varchar +) PARTITION BY LIST (a); +CREATE TABLE part_null_z PARTITION OF list_parted2 FOR VALUES IN (null, 'z'); +CREATE TABLE part_ab PARTITION OF list_parted2 FOR VALUES IN ('a', 'b'); +CREATE TABLE list_parted2_def PARTITION OF list_parted2 DEFAULT; +CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN (null); +ERROR: partition "fail_part" would overlap partition "part_null_z" +LINE 1: ...LE fail_part PARTITION OF list_parted2 FOR VALUES IN (null); + ^ +CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c'); +ERROR: partition "fail_part" would overlap partition "part_ab" +LINE 1: ...ail_part PARTITION OF list_parted2 FOR VALUES IN ('b', 'c'); + ^ +-- check default partition overlap +INSERT INTO list_parted2 VALUES('X'); +CREATE TABLE fail_part PARTITION OF list_parted2 FOR VALUES IN ('W', 'X', 'Y'); +ERROR: updated partition constraint for default partition "list_parted2_def" would be violated by some row +CREATE TABLE range_parted2 ( + a int +) PARTITION BY RANGE (a); +-- trying to create range partition with empty range +CREATE TABLE 
fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0); +ERROR: empty range bound specified for partition "fail_part" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (0); + ^ +DETAIL: Specified lower bound (1) is greater than or equal to upper bound (0). +-- note that the range '[1, 1)' has no elements +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1); +ERROR: empty range bound specified for partition "fail_part" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (1) TO (1); + ^ +DETAIL: Specified lower bound (1) is greater than or equal to upper bound (1). +CREATE TABLE part0 PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (1); +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) TO (2); +ERROR: partition "fail_part" would overlap partition "part0" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (minvalue) ... + ^ +CREATE TABLE part1 PARTITION OF range_parted2 FOR VALUES FROM (1) TO (10); +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (-1) TO (1); +ERROR: partition "fail_part" would overlap partition "part0" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (-1) TO (1)... + ^ +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (maxvalue); +ERROR: partition "fail_part" would overlap partition "part1" +LINE 1: ..._part PARTITION OF range_parted2 FOR VALUES FROM (9) TO (max... + ^ +CREATE TABLE part2 PARTITION OF range_parted2 FOR VALUES FROM (20) TO (30); +CREATE TABLE part3 PARTITION OF range_parted2 FOR VALUES FROM (30) TO (40); +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30); +ERROR: partition "fail_part" would overlap partition "part2" +LINE 1: ...art PARTITION OF range_parted2 FOR VALUES FROM (10) TO (30); + ^ +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50); +ERROR: partition "fail_part" would overlap partition "part2" +LINE 1: ...art PARTITION OF range_parted2 FOR VALUES FROM (10) TO (50); + ^ +-- Create a default partition for range partitioned table +CREATE TABLE range2_default PARTITION OF range_parted2 DEFAULT; +-- More than one default partition is not allowed, so this should give error +CREATE TABLE fail_default_part PARTITION OF range_parted2 DEFAULT; +ERROR: partition "fail_default_part" conflicts with existing default partition "range2_default" +LINE 1: ... TABLE fail_default_part PARTITION OF range_parted2 DEFAULT; + ^ +-- Check if the range for default partitions overlap +INSERT INTO range_parted2 VALUES (85); +CREATE TABLE fail_part PARTITION OF range_parted2 FOR VALUES FROM (80) TO (90); +ERROR: updated partition constraint for default partition "range2_default" would be violated by some row +CREATE TABLE part4 PARTITION OF range_parted2 FOR VALUES FROM (90) TO (100); +-- now check for multi-column range partition key +CREATE TABLE range_parted3 ( + a int, + b int +) PARTITION BY RANGE (a, (b+1)); +CREATE TABLE part00 PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, maxvalue); +CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalue) TO (0, 1); +ERROR: partition "fail_part" would overlap partition "part00" +LINE 1: ..._part PARTITION OF range_parted3 FOR VALUES FROM (0, minvalu... 
+ ^ +CREATE TABLE part10 PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, 1); +CREATE TABLE part11 PARTITION OF range_parted3 FOR VALUES FROM (1, 1) TO (1, 10); +CREATE TABLE part12 PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, maxvalue); +CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1, 20); +ERROR: partition "fail_part" would overlap partition "part12" +LINE 1: ...rt PARTITION OF range_parted3 FOR VALUES FROM (1, 10) TO (1,... + ^ +CREATE TABLE range3_default PARTITION OF range_parted3 DEFAULT; +-- cannot create a partition that says column b is allowed to range +-- from -infinity to +infinity, while there exist partitions that have +-- more specific ranges +CREATE TABLE fail_part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalue) TO (1, maxvalue); +ERROR: partition "fail_part" would overlap partition "part10" +LINE 1: ..._part PARTITION OF range_parted3 FOR VALUES FROM (1, minvalu... + ^ +-- check for partition bound overlap and other invalid specifications for the hash partition +CREATE TABLE hash_parted2 ( + a varchar +) PARTITION BY HASH (a); +CREATE TABLE h2part_1 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 4, REMAINDER 2); +CREATE TABLE h2part_2 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 0); +CREATE TABLE h2part_3 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 4); +CREATE TABLE h2part_4 PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 5); +-- overlap with part_4 +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +ERROR: partition "fail_part" would overlap partition "h2part_4" +LINE 1: ...LE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODU... + ^ +-- modulus must be greater than zero +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 0, REMAINDER 1); +ERROR: modulus for hash partition must be an integer value greater than zero +-- remainder must be greater than or equal to zero and less than modulus +CREATE TABLE fail_part PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 8, REMAINDER 8); +ERROR: remainder for hash partition must be less than modulus +-- check schema propagation from parent +CREATE TABLE parted ( + a text, + b int NOT NULL DEFAULT 0, + CONSTRAINT check_a CHECK (length(a) > 0) +) PARTITION BY LIST (a); +CREATE TABLE part_a PARTITION OF parted FOR VALUES IN ('a'); +-- only inherited attributes (never local ones) +SELECT attname, attislocal, attinhcount FROM pg_attribute + WHERE attrelid = 'part_a'::regclass and attnum > 0 + ORDER BY attnum; + attname | attislocal | attinhcount +---------+------------+------------- + a | f | 1 + b | f | 1 +(2 rows) + +-- able to specify column default, column constraint, and table constraint +-- first check the "column specified more than once" error +CREATE TABLE part_b PARTITION OF parted ( + b NOT NULL, + b DEFAULT 1, + b CHECK (b >= 0), + CONSTRAINT check_a CHECK (length(a) > 0) +) FOR VALUES IN ('b'); +ERROR: column "b" specified more than once +CREATE TABLE part_b PARTITION OF parted ( + b NOT NULL DEFAULT 1, + CONSTRAINT check_a CHECK (length(a) > 0), + CONSTRAINT check_b CHECK (b >= 0) +) FOR VALUES IN ('b'); +NOTICE: merging constraint "check_a" with inherited definition +-- conislocal should be false for any merged constraints, true otherwise +SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass ORDER BY conislocal, coninhcount; + conislocal | coninhcount +------------+------------- + f | 1 + t | 
0 +(2 rows) + +-- Once check_b is added to the parent, it should be made non-local for part_b +ALTER TABLE parted ADD CONSTRAINT check_b CHECK (b >= 0); +NOTICE: merging constraint "check_b" with inherited definition +SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass; + conislocal | coninhcount +------------+------------- + f | 1 + f | 1 +(2 rows) + +-- Neither check_a nor check_b are droppable from part_b +ALTER TABLE part_b DROP CONSTRAINT check_a; +ERROR: cannot drop inherited constraint "check_a" of relation "part_b" +ALTER TABLE part_b DROP CONSTRAINT check_b; +ERROR: cannot drop inherited constraint "check_b" of relation "part_b" +-- And dropping it from parted should leave no trace of them on part_b, unlike +-- traditional inheritance where they will be left behind, because they would +-- be local constraints. +ALTER TABLE parted DROP CONSTRAINT check_a, DROP CONSTRAINT check_b; +SELECT conislocal, coninhcount FROM pg_constraint WHERE conrelid = 'part_b'::regclass; + conislocal | coninhcount +------------+------------- +(0 rows) + +-- specify PARTITION BY for a partition +CREATE TABLE fail_part_col_not_found PARTITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c); +ERROR: column "c" named in partition key does not exist +LINE 1: ...TITION OF parted FOR VALUES IN ('c') PARTITION BY RANGE (c); + ^ +CREATE TABLE part_c PARTITION OF parted (b WITH OPTIONS NOT NULL DEFAULT 0) FOR VALUES IN ('c') PARTITION BY RANGE ((b)); +-- create a level-2 partition +CREATE TABLE part_c_1_10 PARTITION OF part_c FOR VALUES FROM (1) TO (10); +-- check that NOT NULL and default value are inherited correctly +create table parted_notnull_inh_test (a int default 1, b int not null default 0) partition by list (a); +create table parted_notnull_inh_test1 partition of parted_notnull_inh_test (a not null, b default 1) for values in (1); +insert into parted_notnull_inh_test (b) values (null); +ERROR: null value in column "b" of relation "parted_notnull_inh_test1" violates not-null constraint +DETAIL: Failing row contains (1, null). 
+-- note that while b's default is overridden, a's default is preserved +\d parted_notnull_inh_test1 + Table "public.parted_notnull_inh_test1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | 1 + b | integer | | not null | 1 +Partition of: parted_notnull_inh_test FOR VALUES IN (1) + +drop table parted_notnull_inh_test; +-- check that collations are assigned in partition bound expressions +create table parted_boolean_col (a bool, b text) partition by list(a); +create table parted_boolean_less partition of parted_boolean_col + for values in ('foo' < 'bar'); +create table parted_boolean_greater partition of parted_boolean_col + for values in ('foo' > 'bar'); +drop table parted_boolean_col; +-- check for a conflicting COLLATE clause +create table parted_collate_must_match (a text collate "C", b text collate "C") + partition by range (a); +-- on the partition key +create table parted_collate_must_match1 partition of parted_collate_must_match + (a collate "POSIX") for values from ('a') to ('m'); +-- on another column +create table parted_collate_must_match2 partition of parted_collate_must_match + (b collate "POSIX") for values from ('m') to ('z'); +drop table parted_collate_must_match; +-- check that non-matching collations for partition bound +-- expressions are coerced to the right collation +create table test_part_coll_posix (a text) partition by range (a collate "POSIX"); +-- ok, collation is implicitly coerced +create table test_part_coll partition of test_part_coll_posix for values from ('a' collate "C") to ('g'); +-- ok +create table test_part_coll2 partition of test_part_coll_posix for values from ('g') to ('m'); +-- ok, collation is implicitly coerced +create table test_part_coll_cast partition of test_part_coll_posix for values from (name 'm' collate "C") to ('s'); +-- ok; partition collation silently overrides the default collation of type 'name' +create table test_part_coll_cast2 partition of test_part_coll_posix for values from (name 's') to ('z'); +drop table test_part_coll_posix; +-- Partition bound in describe output +\d+ part_b + Table "public.part_b" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | not null | 1 | plain | | +Partition of: parted FOR VALUES IN ('b') +Partition constraint: ((a IS NOT NULL) AND (a = 'b'::text)) + +-- Both partition bound and partition key in describe output +\d+ part_c + Partitioned table "public.part_c" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | not null | 0 | plain | | +Partition of: parted FOR VALUES IN ('c') +Partition constraint: ((a IS NOT NULL) AND (a = 'c'::text)) +Partition key: RANGE (b) +Partitions: part_c_1_10 FOR VALUES FROM (1) TO (10) + +-- a level-2 partition's constraint will include the parent's expressions +\d+ part_c_1_10 + Table "public.part_c_1_10" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | not null | 0 | plain | | +Partition of: part_c FOR VALUES FROM (1) TO (10) +Partition constraint: ((a IS NOT 
NULL) AND (a = 'c'::text) AND (b IS NOT NULL) AND (b >= 1) AND (b < 10)) + +-- Show partition count in the parent's describe output +-- Tempted to include \d+ output listing partitions with bound info but +-- output could vary depending on the order in which partition oids are +-- returned. +\d parted + Partitioned table "public.parted" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | text | | | + b | integer | | not null | 0 +Partition key: LIST (a) +Number of partitions: 3 (Use \d+ to list them.) + +\d hash_parted + Partitioned table "public.hash_parted" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition key: HASH (a) +Number of partitions: 4 (Use \d+ to list them.) + +-- check that we get the expected partition constraints +CREATE TABLE range_parted4 (a int, b int, c int) PARTITION BY RANGE (abs(a), abs(b), c); +CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE); +\d+ unbounded_range_part + Table "public.unbounded_range_part" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | + c | integer | | | | plain | | +Partition of: range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE) +Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL)) + +DROP TABLE unbounded_range_part; +CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE); +\d+ range_parted4_1 + Table "public.range_parted4_1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | + c | integer | | | | plain | | +Partition of: range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE) +Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND (abs(a) <= 1)) + +CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE); +\d+ range_parted4_2 + Table "public.range_parted4_2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | + c | integer | | | | plain | | +Partition of: range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE) +Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 3) OR ((abs(a) = 3) AND (abs(b) > 4)) OR ((abs(a) = 3) AND (abs(b) = 4) AND (c >= 5))) AND ((abs(a) < 6) OR ((abs(a) = 6) AND (abs(b) <= 7)))) + +CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE); +\d+ range_parted4_3 + Table "public.range_parted4_3" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | + c | integer | 
| | | plain | | +Partition of: range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE) +Partition constraint: ((abs(a) IS NOT NULL) AND (abs(b) IS NOT NULL) AND (c IS NOT NULL) AND ((abs(a) > 6) OR ((abs(a) = 6) AND (abs(b) >= 8))) AND (abs(a) <= 9)) + +DROP TABLE range_parted4; +-- user-defined operator class in partition key +CREATE FUNCTION my_int4_sort(int4,int4) RETURNS int LANGUAGE sql + AS $$ SELECT CASE WHEN $1 = $2 THEN 0 WHEN $1 > $2 THEN 1 ELSE -1 END; $$; +CREATE OPERATOR CLASS test_int4_ops FOR TYPE int4 USING btree AS + OPERATOR 1 < (int4,int4), OPERATOR 2 <= (int4,int4), + OPERATOR 3 = (int4,int4), OPERATOR 4 >= (int4,int4), + OPERATOR 5 > (int4,int4), FUNCTION 1 my_int4_sort(int4,int4); +CREATE TABLE partkey_t (a int4) PARTITION BY RANGE (a test_int4_ops); +CREATE TABLE partkey_t_1 PARTITION OF partkey_t FOR VALUES FROM (0) TO (1000); +INSERT INTO partkey_t VALUES (100); +INSERT INTO partkey_t VALUES (200); +-- cleanup +DROP TABLE parted, list_parted, range_parted, list_parted2, range_parted2, range_parted3; +DROP TABLE partkey_t, hash_parted, hash_parted2; +DROP OPERATOR CLASS test_int4_ops USING btree; +DROP FUNCTION my_int4_sort(int4,int4); +-- comments on partitioned tables columns +CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a); +COMMENT ON TABLE parted_col_comment IS 'Am partitioned table'; +COMMENT ON COLUMN parted_col_comment.a IS 'Partition key'; +SELECT obj_description('parted_col_comment'::regclass); + obj_description +---------------------- + Am partitioned table +(1 row) + +\d+ parted_col_comment + Partitioned table "public.parted_col_comment" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+--------------- + a | integer | | | | plain | | Partition key + b | text | | | | extended | | +Partition key: LIST (a) +Number of partitions: 0 + +DROP TABLE parted_col_comment; +-- specifying storage parameters for partitioned tables is not supported +CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a) WITH (fillfactor=100); +ERROR: cannot specify storage parameters for a partitioned table +HINT: Specify storage parameters for its leaf partitions instead. 
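-- [Editor's sketch; not part of the expected regression output. Illustrative only,
--  with hypothetical table names.]  Following the HINT above, storage parameters
-- such as fillfactor are accepted on the leaf partitions themselves, since those
-- are ordinary heap tables:
CREATE TABLE parted_with_ff (a int, b text) PARTITION BY LIST (a);
CREATE TABLE parted_with_ff_1 PARTITION OF parted_with_ff
  FOR VALUES IN (1) WITH (fillfactor = 100);
DROP TABLE parted_with_ff;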
+-- list partitioning on array type column +CREATE TABLE arrlp (a int[]) PARTITION BY LIST (a); +CREATE TABLE arrlp12 PARTITION OF arrlp FOR VALUES IN ('{1}', '{2}'); +\d+ arrlp12 + Table "public.arrlp12" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-----------+-----------+----------+---------+----------+--------------+------------- + a | integer[] | | | | extended | | +Partition of: arrlp FOR VALUES IN ('{1}', '{2}') +Partition constraint: ((a IS NOT NULL) AND ((a = '{1}'::integer[]) OR (a = '{2}'::integer[]))) + +DROP TABLE arrlp; +-- partition on boolean column +create table boolspart (a bool) partition by list (a); +create table boolspart_t partition of boolspart for values in (true); +create table boolspart_f partition of boolspart for values in (false); +\d+ boolspart + Partitioned table "public.boolspart" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | boolean | | | | plain | | +Partition key: LIST (a) +Partitions: boolspart_f FOR VALUES IN (false), + boolspart_t FOR VALUES IN (true) + +drop table boolspart; +-- partitions mixing temporary and permanent relations +create table perm_parted (a int) partition by list (a); +create temporary table temp_parted (a int) partition by list (a); +create table perm_part partition of temp_parted default; -- error +ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted" +create temp table temp_part partition of perm_parted default; -- error +ERROR: cannot create a temporary relation as partition of permanent relation "perm_parted" +create temp table temp_part partition of temp_parted default; -- ok +drop table perm_parted cascade; +drop table temp_parted cascade; +-- check that adding partitions to a table while it is being used is prevented +create table tab_part_create (a int) partition by list (a); +create or replace function func_part_create() returns trigger + language plpgsql as $$ + begin + execute 'create table tab_part_create_1 partition of tab_part_create for values in (1)'; + return null; + end $$; +create trigger trig_part_create before insert on tab_part_create + for each statement execute procedure func_part_create(); +insert into tab_part_create values (1); +ERROR: cannot CREATE TABLE .. 
PARTITION OF "tab_part_create" because it is being used by active queries in this session +CONTEXT: SQL statement "create table tab_part_create_1 partition of tab_part_create for values in (1)" +PL/pgSQL function func_part_create() line 3 at EXECUTE +drop table tab_part_create; +drop function func_part_create(); +-- test using a volatile expression as partition bound +create table volatile_partbound_test (partkey timestamp) partition by range (partkey); +create table volatile_partbound_test1 partition of volatile_partbound_test for values from (minvalue) to (current_timestamp); +create table volatile_partbound_test2 partition of volatile_partbound_test for values from (current_timestamp) to (maxvalue); +-- this should go into the partition volatile_partbound_test2 +insert into volatile_partbound_test values (current_timestamp); +select tableoid::regclass from volatile_partbound_test; + tableoid +-------------------------- + volatile_partbound_test2 +(1 row) + +drop table volatile_partbound_test; +-- test the case where a check constraint on default partition allows +-- to avoid scanning it when adding a new partition +create table defcheck (a int, b int) partition by list (b); +create table defcheck_def (a int, c int, b int); +alter table defcheck_def drop c; +alter table defcheck attach partition defcheck_def default; +alter table defcheck_def add check (b <= 0 and b is not null); +create table defcheck_1 partition of defcheck for values in (1, null); +-- test that complex default partition constraints are enforced correctly +insert into defcheck_def values (0, 0); +create table defcheck_0 partition of defcheck for values in (0); +ERROR: updated partition constraint for default partition "defcheck_def" would be violated by some row +drop table defcheck; +-- tests of column drop with partition tables and indexes using +-- predicates and expressions. +create table part_column_drop ( + useless_1 int, + id int, + useless_2 int, + d int, + b int, + useless_3 int +) partition by range (id); +alter table part_column_drop drop column useless_1; +alter table part_column_drop drop column useless_2; +alter table part_column_drop drop column useless_3; +create index part_column_drop_b_pred on part_column_drop(b) where b = 1; +create index part_column_drop_b_expr on part_column_drop((b = 1)); +create index part_column_drop_d_pred on part_column_drop(d) where d = 2; +create index part_column_drop_d_expr on part_column_drop((d = 2)); +create table part_column_drop_1_10 partition of + part_column_drop for values from (1) to (10); +\d part_column_drop + Partitioned table "public.part_column_drop" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + d | integer | | | + b | integer | | | +Partition key: RANGE (id) +Indexes: + "part_column_drop_b_expr" btree ((b = 1)) + "part_column_drop_b_pred" btree (b) WHERE b = 1 + "part_column_drop_d_expr" btree ((d = 2)) + "part_column_drop_d_pred" btree (d) WHERE d = 2 +Number of partitions: 1 (Use \d+ to list them.) 
+ +\d part_column_drop_1_10 + Table "public.part_column_drop_1_10" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + d | integer | | | + b | integer | | | +Partition of: part_column_drop FOR VALUES FROM (1) TO (10) +Indexes: + "part_column_drop_1_10_b_idx" btree (b) WHERE b = 1 + "part_column_drop_1_10_d_idx" btree (d) WHERE d = 2 + "part_column_drop_1_10_expr_idx" btree ((b = 1)) + "part_column_drop_1_10_expr_idx1" btree ((d = 2)) + +drop table part_column_drop; diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out new file mode 100644 index 0000000..0ed94f1 --- /dev/null +++ b/src/test/regress/expected/create_table_like.out @@ -0,0 +1,520 @@ +/* Test inheritance of structure (LIKE) */ +CREATE TABLE inhx (xx text DEFAULT 'text'); +/* + * Test double inheritance + * + * Ensure that defaults are NOT included unless + * INCLUDING DEFAULTS is specified + */ +CREATE TABLE ctla (aa TEXT); +CREATE TABLE ctlb (bb TEXT) INHERITS (ctla); +CREATE TABLE foo (LIKE nonexistent); +ERROR: relation "nonexistent" does not exist +LINE 1: CREATE TABLE foo (LIKE nonexistent); + ^ +CREATE TABLE inhe (ee text, LIKE inhx) inherits (ctlb); +INSERT INTO inhe VALUES ('ee-col1', 'ee-col2', DEFAULT, 'ee-col4'); +SELECT * FROM inhe; /* Columns aa, bb, xx value NULL, ee */ + aa | bb | ee | xx +---------+---------+----+--------- + ee-col1 | ee-col2 | | ee-col4 +(1 row) + +SELECT * FROM inhx; /* Empty set since LIKE inherits structure only */ + xx +---- +(0 rows) + +SELECT * FROM ctlb; /* Has ee entry */ + aa | bb +---------+--------- + ee-col1 | ee-col2 +(1 row) + +SELECT * FROM ctla; /* Has ee entry */ + aa +--------- + ee-col1 +(1 row) + +CREATE TABLE inhf (LIKE inhx, LIKE inhx); /* Throw error */ +ERROR: column "xx" specified more than once +CREATE TABLE inhf (LIKE inhx INCLUDING DEFAULTS INCLUDING CONSTRAINTS); +INSERT INTO inhf DEFAULT VALUES; +SELECT * FROM inhf; /* Single entry with value 'text' */ + xx +------ + text +(1 row) + +ALTER TABLE inhx add constraint foo CHECK (xx = 'text'); +ALTER TABLE inhx ADD PRIMARY KEY (xx); +CREATE TABLE inhg (LIKE inhx); /* Doesn't copy constraint */ +INSERT INTO inhg VALUES ('foo'); +DROP TABLE inhg; +CREATE TABLE inhg (x text, LIKE inhx INCLUDING CONSTRAINTS, y text); /* Copies constraints */ +INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds */ +INSERT INTO inhg VALUES ('x', 'text', 'y'); /* Succeeds -- Unique constraints not copied */ +INSERT INTO inhg VALUES ('x', 'foo', 'y'); /* fails due to constraint */ +ERROR: new row for relation "inhg" violates check constraint "foo" +DETAIL: Failing row contains (x, foo, y). 
+SELECT * FROM inhg; /* Two records with three columns in order x=x, xx=text, y=y */ + x | xx | y +---+------+--- + x | text | y + x | text | y +(2 rows) + +DROP TABLE inhg; +CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); +\d test_like_id_1 + Table "public.test_like_id_1" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------ + a | bigint | | not null | generated always as identity + b | text | | | + +INSERT INTO test_like_id_1 (b) VALUES ('b1'); +SELECT * FROM test_like_id_1; + a | b +---+---- + 1 | b1 +(1 row) + +CREATE TABLE test_like_id_2 (LIKE test_like_id_1); +\d test_like_id_2 + Table "public.test_like_id_2" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+--------- + a | bigint | | not null | + b | text | | | + +INSERT INTO test_like_id_2 (b) VALUES ('b2'); +ERROR: null value in column "a" of relation "test_like_id_2" violates not-null constraint +DETAIL: Failing row contains (null, b2). +SELECT * FROM test_like_id_2; -- identity was not copied + a | b +---+--- +(0 rows) + +CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY); +\d test_like_id_3 + Table "public.test_like_id_3" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------ + a | bigint | | not null | generated always as identity + b | text | | | + +INSERT INTO test_like_id_3 (b) VALUES ('b3'); +SELECT * FROM test_like_id_3; -- identity was copied and applied + a | b +---+---- + 1 | b3 +(1 row) + +DROP TABLE test_like_id_1, test_like_id_2, test_like_id_3; +CREATE TABLE test_like_gen_1 (a int, b int GENERATED ALWAYS AS (a * 2) STORED); +\d test_like_gen_1 + Table "public.test_like_gen_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | | + b | integer | | | generated always as (a * 2) stored + +INSERT INTO test_like_gen_1 (a) VALUES (1); +SELECT * FROM test_like_gen_1; + a | b +---+--- + 1 | 2 +(1 row) + +CREATE TABLE test_like_gen_2 (LIKE test_like_gen_1); +\d test_like_gen_2 + Table "public.test_like_gen_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + +INSERT INTO test_like_gen_2 (a) VALUES (1); +SELECT * FROM test_like_gen_2; + a | b +---+--- + 1 | +(1 row) + +CREATE TABLE test_like_gen_3 (LIKE test_like_gen_1 INCLUDING GENERATED); +\d test_like_gen_3 + Table "public.test_like_gen_3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | | + b | integer | | | generated always as (a * 2) stored + +INSERT INTO test_like_gen_3 (a) VALUES (1); +SELECT * FROM test_like_gen_3; + a | b +---+--- + 1 | 2 +(1 row) + +DROP TABLE test_like_gen_1, test_like_gen_2, test_like_gen_3; +-- also test generated column with a "forward" reference (bug #16342) +CREATE TABLE test_like_4 (b int DEFAULT 42, + c int GENERATED ALWAYS AS (a * 2) STORED, + a int CHECK (a > 0)); +\d test_like_4 + Table "public.test_like_4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + b | integer | | | 42 + c | integer | | | generated always as (a * 2) stored + a | integer | | | +Check constraints: + "test_like_4_a_check" CHECK (a > 0) + +CREATE TABLE test_like_4a (LIKE test_like_4); 
+CREATE TABLE test_like_4b (LIKE test_like_4 INCLUDING DEFAULTS); +CREATE TABLE test_like_4c (LIKE test_like_4 INCLUDING GENERATED); +CREATE TABLE test_like_4d (LIKE test_like_4 INCLUDING DEFAULTS INCLUDING GENERATED); +\d test_like_4a + Table "public.test_like_4a" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | integer | | | + c | integer | | | + a | integer | | | + +INSERT INTO test_like_4a (a) VALUES(11); +SELECT a, b, c FROM test_like_4a; + a | b | c +----+---+--- + 11 | | +(1 row) + +\d test_like_4b + Table "public.test_like_4b" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | integer | | | 42 + c | integer | | | + a | integer | | | + +INSERT INTO test_like_4b (a) VALUES(11); +SELECT a, b, c FROM test_like_4b; + a | b | c +----+----+--- + 11 | 42 | +(1 row) + +\d test_like_4c + Table "public.test_like_4c" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + b | integer | | | + c | integer | | | generated always as (a * 2) stored + a | integer | | | + +INSERT INTO test_like_4c (a) VALUES(11); +SELECT a, b, c FROM test_like_4c; + a | b | c +----+---+---- + 11 | | 22 +(1 row) + +\d test_like_4d + Table "public.test_like_4d" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + b | integer | | | 42 + c | integer | | | generated always as (a * 2) stored + a | integer | | | + +INSERT INTO test_like_4d (a) VALUES(11); +SELECT a, b, c FROM test_like_4d; + a | b | c +----+----+---- + 11 | 42 | 22 +(1 row) + +-- Test renumbering of Vars when combining LIKE with inheritance +CREATE TABLE test_like_5 (x point, y point, z point); +CREATE TABLE test_like_5x (p int CHECK (p > 0), + q int GENERATED ALWAYS AS (p * 2) STORED); +CREATE TABLE test_like_5c (LIKE test_like_4 INCLUDING ALL) + INHERITS (test_like_5, test_like_5x); +\d test_like_5c + Table "public.test_like_5c" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + x | point | | | + y | point | | | + z | point | | | + p | integer | | | + q | integer | | | generated always as (p * 2) stored + b | integer | | | 42 + c | integer | | | generated always as (a * 2) stored + a | integer | | | +Check constraints: + "test_like_4_a_check" CHECK (a > 0) + "test_like_5x_p_check" CHECK (p > 0) +Inherits: test_like_5, + test_like_5x + +DROP TABLE test_like_4, test_like_4a, test_like_4b, test_like_4c, test_like_4d; +DROP TABLE test_like_5, test_like_5x, test_like_5c; +CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, y text); /* copies indexes */ +INSERT INTO inhg VALUES (5, 10); +INSERT INTO inhg VALUES (20, 10); -- should fail +ERROR: duplicate key value violates unique constraint "inhg_pkey" +DETAIL: Key (xx)=(10) already exists. 
+DROP TABLE inhg; +/* Multiple primary keys creation should fail */ +CREATE TABLE inhg (x text, LIKE inhx INCLUDING INDEXES, PRIMARY KEY(x)); /* fails */ +ERROR: multiple primary keys for table "inhg" are not allowed +CREATE TABLE inhz (xx text DEFAULT 'text', yy int UNIQUE); +CREATE UNIQUE INDEX inhz_xx_idx on inhz (xx) WHERE xx <> 'test'; +/* Ok to create multiple unique indexes */ +CREATE TABLE inhg (x text UNIQUE, LIKE inhz INCLUDING INDEXES); +INSERT INTO inhg (xx, yy, x) VALUES ('test', 5, 10); +INSERT INTO inhg (xx, yy, x) VALUES ('test', 10, 15); +INSERT INTO inhg (xx, yy, x) VALUES ('foo', 10, 15); -- should fail +ERROR: duplicate key value violates unique constraint "inhg_x_key" +DETAIL: Key (x)=(15) already exists. +DROP TABLE inhg; +DROP TABLE inhz; +/* Use primary key imported by LIKE for self-referential FK constraint */ +CREATE TABLE inhz (x text REFERENCES inhz, LIKE inhx INCLUDING INDEXES); +\d inhz + Table "public.inhz" + Column | Type | Collation | Nullable | Default +--------+------+-----------+----------+--------- + x | text | | | + xx | text | | not null | +Indexes: + "inhz_pkey" PRIMARY KEY, btree (xx) +Foreign-key constraints: + "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx) +Referenced by: + TABLE "inhz" CONSTRAINT "inhz_x_fkey" FOREIGN KEY (x) REFERENCES inhz(xx) + +DROP TABLE inhz; +-- including storage and comments +CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) PRIMARY KEY, b text); +CREATE INDEX ctlt1_b_key ON ctlt1 (b); +CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); +CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; +CREATE STATISTICS ctlt1_expr_stat ON (a || b) FROM ctlt1; +COMMENT ON STATISTICS ctlt1_a_b_stat IS 'ab stats'; +COMMENT ON STATISTICS ctlt1_expr_stat IS 'ab expr stats'; +COMMENT ON COLUMN ctlt1.a IS 'A'; +COMMENT ON COLUMN ctlt1.b IS 'B'; +COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check'; +COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; +COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; +ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; +CREATE TABLE ctlt2 (c text); +ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; +COMMENT ON COLUMN ctlt2.c IS 'C'; +CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7)); +ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; +ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN; +CREATE INDEX ctlt3_fnidx ON ctlt3 ((a || c)); +COMMENT ON COLUMN ctlt3.a IS 'A3'; +COMMENT ON COLUMN ctlt3.c IS 'C'; +COMMENT ON CONSTRAINT ctlt3_a_check ON ctlt3 IS 't3_a_check'; +CREATE TABLE ctlt4 (a text, c text); +ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL; +CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE); +\d+ ctlt12_storage + Table "public.ctlt12_storage" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | + b | text | | | | extended | | + c | text | | | | external | | + +CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS); +\d+ ctlt12_comments + Table "public.ctlt12_comments" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | extended | | A + b | text | | | | extended | | B + c | text | | | | extended | | C + +CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING 
COMMENTS) INHERITS (ctlt1); +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "b" with inherited definition +NOTICE: merging constraint "ctlt1_a_check" with inherited definition +\d+ ctlt1_inh + Table "public.ctlt1_inh" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | A + b | text | | | | extended | | B +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) +Inherits: ctlt1 + +SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass; + description +------------- + t1_a_check +(1 row) + +CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3); +NOTICE: merging multiple inherited definitions of column "a" +\d+ ctlt13_inh + Table "public.ctlt13_inh" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | + b | text | | | | extended | | + c | text | | | | external | | +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) + "ctlt3_a_check" CHECK (length(a) < 5) + "ctlt3_c_check" CHECK (length(c) < 7) +Inherits: ctlt1, + ctlt3 + +CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1); +NOTICE: merging column "a" with inherited definition +\d+ ctlt13_like + Table "public.ctlt13_like" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | A3 + b | text | | | | extended | | + c | text | | | | external | | C +Indexes: + "ctlt13_like_expr_idx" btree ((a || c)) +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) + "ctlt3_a_check" CHECK (length(a) < 5) + "ctlt3_c_check" CHECK (length(c) < 7) +Inherits: ctlt1 + +SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass; + description +------------- + t3_a_check +(1 row) + +CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL); +\d+ ctlt_all + Table "public.ctlt_all" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | A + b | text | | | | extended | | B +Indexes: + "ctlt_all_pkey" PRIMARY KEY, btree (a) + "ctlt_all_b_idx" btree (b) + "ctlt_all_expr_idx" btree ((a || b)) +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) +Statistics objects: + "public.ctlt_all_a_b_stat" ON a, b FROM ctlt_all + "public.ctlt_all_expr_stat" ON (a || b) FROM ctlt_all + +SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid; + relname | objsubid | description +----------------+----------+------------- + ctlt_all_b_idx | 0 | index b_key + ctlt_all_pkey | 0 | index pkey +(2 rows) + +SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 
'ctlt_all'::regclass ORDER BY s.stxname, objsubid; + stxname | objsubid | description +--------------------+----------+--------------- + ctlt_all_a_b_stat | 0 | ab stats + ctlt_all_expr_stat | 0 | ab expr stats +(2 rows) + +CREATE TABLE inh_error1 () INHERITS (ctlt1, ctlt4); +NOTICE: merging multiple inherited definitions of column "a" +ERROR: inherited column "a" has a storage parameter conflict +DETAIL: MAIN versus EXTENDED +CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); +NOTICE: merging column "a" with inherited definition +ERROR: column "a" has a storage parameter conflict +DETAIL: MAIN versus EXTENDED +-- Check that LIKE isn't confused by a system catalog of the same name +CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); +\d+ public.pg_attrdef + Table "public.pg_attrdef" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | A + b | text | | | | extended | | B +Indexes: + "pg_attrdef_pkey" PRIMARY KEY, btree (a) + "pg_attrdef_b_idx" btree (b) + "pg_attrdef_expr_idx" btree ((a || b)) +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) +Statistics objects: + "public.pg_attrdef_a_b_stat" ON a, b FROM public.pg_attrdef + "public.pg_attrdef_expr_stat" ON (a || b) FROM public.pg_attrdef + +DROP TABLE public.pg_attrdef; +-- Check that LIKE isn't confused when new table masks the old, either +BEGIN; +CREATE SCHEMA ctl_schema; +SET LOCAL search_path = ctl_schema, public; +CREATE TABLE ctlt1 (LIKE ctlt1 INCLUDING ALL); +\d+ ctlt1 + Table "ctl_schema.ctlt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------+-----------+----------+---------+----------+--------------+------------- + a | text | | not null | | main | | A + b | text | | | | extended | | B +Indexes: + "ctlt1_pkey" PRIMARY KEY, btree (a) + "ctlt1_b_idx" btree (b) + "ctlt1_expr_idx" btree ((a || b)) +Check constraints: + "ctlt1_a_check" CHECK (length(a) > 2) +Statistics objects: + "ctl_schema.ctlt1_a_b_stat" ON a, b FROM ctlt1 + "ctl_schema.ctlt1_expr_stat" ON (a || b) FROM ctlt1 + +ROLLBACK; +DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE; +NOTICE: drop cascades to table inhe +-- LIKE must respect NO INHERIT property of constraints +CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT); +CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS); +\d noinh_con_copy1 + Table "public.noinh_con_copy1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Check constraints: + "noinh_con_copy_a_check" CHECK (a > 0) NO INHERIT + +-- fail, as partitioned tables don't allow NO INHERIT constraints +CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL) + PARTITION BY LIST (a); +ERROR: cannot add NO INHERIT constraint to partitioned table "noinh_con_copy1_parted" +DROP TABLE noinh_con_copy, noinh_con_copy1; +/* LIKE with other relation kinds */ +CREATE TABLE ctlt4 (a int, b text); +CREATE SEQUENCE ctlseq1; +CREATE TABLE ctlt10 (LIKE ctlseq1); -- fail +ERROR: relation "ctlseq1" is invalid in LIKE clause +LINE 1: CREATE TABLE ctlt10 (LIKE ctlseq1); + ^ +DETAIL: This operation is not supported for sequences. 
+CREATE VIEW ctlv1 AS SELECT * FROM ctlt4; +CREATE TABLE ctlt11 (LIKE ctlv1); +CREATE TABLE ctlt11a (LIKE ctlv1 INCLUDING ALL); +CREATE TYPE ctlty1 AS (a int, b text); +CREATE TABLE ctlt12 (LIKE ctlty1); +DROP SEQUENCE ctlseq1; +DROP TYPE ctlty1; +DROP VIEW ctlv1; +DROP TABLE IF EXISTS ctlt4, ctlt10, ctlt11, ctlt11a, ctlt12; +NOTICE: table "ctlt10" does not exist, skipping diff --git a/src/test/regress/expected/create_type.out b/src/test/regress/expected/create_type.out new file mode 100644 index 0000000..7383fcd --- /dev/null +++ b/src/test/regress/expected/create_type.out @@ -0,0 +1,407 @@ +-- +-- CREATE_TYPE +-- +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +-- +-- Test the "old style" approach of making the I/O functions first, +-- with no explicit shell type creation. +-- +CREATE FUNCTION widget_in(cstring) + RETURNS widget + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: type "widget" is not yet defined +DETAIL: Creating a shell type definition. +CREATE FUNCTION widget_out(widget) + RETURNS cstring + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: argument type widget is only a shell +CREATE FUNCTION int44in(cstring) + RETURNS city_budget + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: type "city_budget" is not yet defined +DETAIL: Creating a shell type definition. +CREATE FUNCTION int44out(city_budget) + RETURNS cstring + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: argument type city_budget is only a shell +CREATE TYPE widget ( + internallength = 24, + input = widget_in, + output = widget_out, + typmod_in = numerictypmodin, + typmod_out = numerictypmodout, + alignment = double +); +CREATE TYPE city_budget ( + internallength = 16, + input = int44in, + output = int44out, + element = int4, + category = 'x', -- just to verify the system will take it + preferred = true -- ditto +); +-- Test creation and destruction of shell types +CREATE TYPE shell; +CREATE TYPE shell; -- fail, type already present +ERROR: type "shell" already exists +DROP TYPE shell; +DROP TYPE shell; -- fail, type not exist +ERROR: type "shell" does not exist +-- also, let's leave one around for purposes of pg_dump testing +CREATE TYPE myshell; +-- +-- Test type-related default values (broken in releases before PG 7.2) +-- +-- This part of the test also exercises the "new style" approach of making +-- a shell type and then filling it in. 
+-- +CREATE TYPE int42; +CREATE TYPE text_w_default; +-- Make dummy I/O routines using the existing internal support for int4, text +CREATE FUNCTION int42_in(cstring) + RETURNS int42 + AS 'int4in' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: return type int42 is only a shell +CREATE FUNCTION int42_out(int42) + RETURNS cstring + AS 'int4out' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: argument type int42 is only a shell +CREATE FUNCTION text_w_default_in(cstring) + RETURNS text_w_default + AS 'textin' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: return type text_w_default is only a shell +CREATE FUNCTION text_w_default_out(text_w_default) + RETURNS cstring + AS 'textout' + LANGUAGE internal STRICT IMMUTABLE; +NOTICE: argument type text_w_default is only a shell +CREATE TYPE int42 ( + internallength = 4, + input = int42_in, + output = int42_out, + alignment = int4, + default = 42, + passedbyvalue +); +CREATE TYPE text_w_default ( + internallength = variable, + input = text_w_default_in, + output = text_w_default_out, + alignment = int4, + default = 'zippo' +); +CREATE TABLE default_test (f1 text_w_default, f2 int42); +INSERT INTO default_test DEFAULT VALUES; +SELECT * FROM default_test; + f1 | f2 +-------+---- + zippo | 42 +(1 row) + +-- We need a shell type to test some CREATE TYPE failure cases with +CREATE TYPE bogus_type; +-- invalid: non-lowercase quoted identifiers +CREATE TYPE bogus_type ( + "Internallength" = 4, + "Input" = int42_in, + "Output" = int42_out, + "Alignment" = int4, + "Default" = 42, + "Passedbyvalue" +); +WARNING: type attribute "Internallength" not recognized +LINE 2: "Internallength" = 4, + ^ +WARNING: type attribute "Input" not recognized +LINE 3: "Input" = int42_in, + ^ +WARNING: type attribute "Output" not recognized +LINE 4: "Output" = int42_out, + ^ +WARNING: type attribute "Alignment" not recognized +LINE 5: "Alignment" = int4, + ^ +WARNING: type attribute "Default" not recognized +LINE 6: "Default" = 42, + ^ +WARNING: type attribute "Passedbyvalue" not recognized +LINE 7: "Passedbyvalue" + ^ +ERROR: type input function must be specified +-- invalid: input/output function incompatibility +CREATE TYPE bogus_type (INPUT = array_in, + OUTPUT = array_out, + ELEMENT = int, + INTERNALLENGTH = 32); +ERROR: type input function array_in must return type bogus_type +DROP TYPE bogus_type; +-- It no longer is possible to issue CREATE TYPE without making a shell first +CREATE TYPE bogus_type (INPUT = array_in, + OUTPUT = array_out, + ELEMENT = int, + INTERNALLENGTH = 32); +ERROR: type "bogus_type" does not exist +HINT: Create the type as a shell type, then create its I/O functions, then do a full CREATE TYPE. 
+-- Test stand-alone composite type +CREATE TYPE default_test_row AS (f1 text_w_default, f2 int42); +CREATE FUNCTION get_default_test() RETURNS SETOF default_test_row AS ' + SELECT * FROM default_test; +' LANGUAGE SQL; +SELECT * FROM get_default_test(); + f1 | f2 +-------+---- + zippo | 42 +(1 row) + +-- Test comments +COMMENT ON TYPE bad IS 'bad comment'; +ERROR: type "bad" does not exist +COMMENT ON TYPE default_test_row IS 'good comment'; +COMMENT ON TYPE default_test_row IS NULL; +COMMENT ON COLUMN default_test_row.nope IS 'bad comment'; +ERROR: column "nope" of relation "default_test_row" does not exist +COMMENT ON COLUMN default_test_row.f1 IS 'good comment'; +COMMENT ON COLUMN default_test_row.f1 IS NULL; +-- Check shell type create for existing types +CREATE TYPE text_w_default; -- should fail +ERROR: type "text_w_default" already exists +DROP TYPE default_test_row CASCADE; +NOTICE: drop cascades to function get_default_test() +DROP TABLE default_test; +-- Check dependencies are established when creating a new type +CREATE TYPE base_type; +CREATE FUNCTION base_fn_in(cstring) RETURNS base_type AS 'boolin' + LANGUAGE internal IMMUTABLE STRICT; +NOTICE: return type base_type is only a shell +CREATE FUNCTION base_fn_out(base_type) RETURNS cstring AS 'boolout' + LANGUAGE internal IMMUTABLE STRICT; +NOTICE: argument type base_type is only a shell +CREATE TYPE base_type(INPUT = base_fn_in, OUTPUT = base_fn_out); +DROP FUNCTION base_fn_in(cstring); -- error +ERROR: cannot drop function base_fn_in(cstring) because other objects depend on it +DETAIL: type base_type depends on function base_fn_in(cstring) +function base_fn_out(base_type) depends on type base_type +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP FUNCTION base_fn_out(base_type); -- error +ERROR: cannot drop function base_fn_out(base_type) because other objects depend on it +DETAIL: type base_type depends on function base_fn_out(base_type) +function base_fn_in(cstring) depends on type base_type +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TYPE base_type; -- error +ERROR: cannot drop type base_type because other objects depend on it +DETAIL: function base_fn_in(cstring) depends on type base_type +function base_fn_out(base_type) depends on type base_type +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+DROP TYPE base_type CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to function base_fn_in(cstring) +drop cascades to function base_fn_out(base_type) +-- Check usage of typmod with a user-defined type +-- (we have borrowed numeric's typmod functions) +CREATE TEMP TABLE mytab (foo widget(42,13,7)); -- should fail +ERROR: invalid NUMERIC type modifier +LINE 1: CREATE TEMP TABLE mytab (foo widget(42,13,7)); + ^ +CREATE TEMP TABLE mytab (foo widget(42,13)); +SELECT format_type(atttypid,atttypmod) FROM pg_attribute +WHERE attrelid = 'mytab'::regclass AND attnum > 0; + format_type +--------------- + widget(42,13) +(1 row) + +-- might as well exercise the widget type while we're here +INSERT INTO mytab VALUES ('(1,2,3)'), ('(-44,5.5,12)'); +TABLE mytab; + foo +-------------- + (1,2,3) + (-44,5.5,12) +(2 rows) + +-- and test format_type() a bit more, too +select format_type('varchar'::regtype, 42); + format_type +----------------------- + character varying(38) +(1 row) + +select format_type('bpchar'::regtype, null); + format_type +------------- + character +(1 row) + +-- this behavior difference is intentional +select format_type('bpchar'::regtype, -1); + format_type +------------- + bpchar +(1 row) + +-- Test non-error-throwing APIs using widget, which still throws errors +SELECT pg_input_is_valid('(1,2,3)', 'widget'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('(1,2)', 'widget'); -- hard error expected +ERROR: invalid input syntax for type widget: "(1,2)" +SELECT pg_input_is_valid('{"(1,2,3)"}', 'widget[]'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('{"(1,2)"}', 'widget[]'); -- hard error expected +ERROR: invalid input syntax for type widget: "(1,2)" +SELECT pg_input_is_valid('("(1,2,3)")', 'mytab'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('("(1,2)")', 'mytab'); -- hard error expected +ERROR: invalid input syntax for type widget: "(1,2)" +-- Test creation of an operator over a user-defined type +CREATE FUNCTION pt_in_widget(point, widget) + RETURNS bool + AS :'regresslib' + LANGUAGE C STRICT; +CREATE OPERATOR <% ( + leftarg = point, + rightarg = widget, + procedure = pt_in_widget, + commutator = >% , + negator = >=% +); +SELECT point '(1,2)' <% widget '(0,0,3)' AS t, + point '(1,2)' <% widget '(0,0,1)' AS f; + t | f +---+--- + t | f +(1 row) + +-- exercise city_budget type +CREATE TABLE city ( + name name, + location box, + budget city_budget +); +INSERT INTO city VALUES +('Podunk', '(1,2),(3,4)', '100,127,1000'), +('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789'); +TABLE city; + name | location | budget +--------+----------------------+----------------------- + Podunk | (3,4),(1,2) | 100,127,1000,0 + Gotham | (1100,334),(1000,34) | 123456,127,-1000,6789 +(2 rows) + +-- +-- Test CREATE/ALTER TYPE using a type that's compatible with varchar, +-- so we can re-use those support functions +-- +CREATE TYPE myvarchar; +CREATE FUNCTION myvarcharin(cstring, oid, integer) RETURNS myvarchar +LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharin'; +NOTICE: return type myvarchar is only a shell +CREATE FUNCTION myvarcharout(myvarchar) RETURNS cstring +LANGUAGE internal IMMUTABLE PARALLEL SAFE STRICT AS 'varcharout'; +NOTICE: argument type myvarchar is only a shell +CREATE FUNCTION myvarcharsend(myvarchar) RETURNS bytea +LANGUAGE internal STABLE PARALLEL SAFE STRICT AS 'varcharsend'; +NOTICE: argument type myvarchar is only a shell +CREATE 
FUNCTION myvarcharrecv(internal, oid, integer) RETURNS myvarchar +LANGUAGE internal STABLE PARALLEL SAFE STRICT AS 'varcharrecv'; +NOTICE: return type myvarchar is only a shell +-- fail, it's still a shell: +ALTER TYPE myvarchar SET (storage = extended); +ERROR: type "myvarchar" is only a shell +CREATE TYPE myvarchar ( + input = myvarcharin, + output = myvarcharout, + alignment = integer, + storage = main +); +-- want to check updating of a domain over the target type, too +CREATE DOMAIN myvarchardom AS myvarchar; +ALTER TYPE myvarchar SET (storage = plain); -- not allowed +ERROR: cannot change type's storage to PLAIN +ALTER TYPE myvarchar SET (storage = extended); +ALTER TYPE myvarchar SET ( + send = myvarcharsend, + receive = myvarcharrecv, + typmod_in = varchartypmodin, + typmod_out = varchartypmodout, + -- these are bogus, but it's safe as long as we don't use the type: + analyze = ts_typanalyze, + subscript = raw_array_subscript_handler +); +SELECT typinput, typoutput, typreceive, typsend, typmodin, typmodout, + typanalyze, typsubscript, typstorage +FROM pg_type WHERE typname = 'myvarchar'; + typinput | typoutput | typreceive | typsend | typmodin | typmodout | typanalyze | typsubscript | typstorage +-------------+--------------+---------------+---------------+-----------------+------------------+---------------+-----------------------------+------------ + myvarcharin | myvarcharout | myvarcharrecv | myvarcharsend | varchartypmodin | varchartypmodout | ts_typanalyze | raw_array_subscript_handler | x +(1 row) + +SELECT typinput, typoutput, typreceive, typsend, typmodin, typmodout, + typanalyze, typsubscript, typstorage +FROM pg_type WHERE typname = '_myvarchar'; + typinput | typoutput | typreceive | typsend | typmodin | typmodout | typanalyze | typsubscript | typstorage +----------+-----------+------------+------------+-----------------+------------------+------------------+-------------------------+------------ + array_in | array_out | array_recv | array_send | varchartypmodin | varchartypmodout | array_typanalyze | array_subscript_handler | x +(1 row) + +SELECT typinput, typoutput, typreceive, typsend, typmodin, typmodout, + typanalyze, typsubscript, typstorage +FROM pg_type WHERE typname = 'myvarchardom'; + typinput | typoutput | typreceive | typsend | typmodin | typmodout | typanalyze | typsubscript | typstorage +-----------+--------------+-------------+---------------+----------+-----------+---------------+--------------+------------ + domain_in | myvarcharout | domain_recv | myvarcharsend | - | - | ts_typanalyze | - | x +(1 row) + +SELECT typinput, typoutput, typreceive, typsend, typmodin, typmodout, + typanalyze, typsubscript, typstorage +FROM pg_type WHERE typname = '_myvarchardom'; + typinput | typoutput | typreceive | typsend | typmodin | typmodout | typanalyze | typsubscript | typstorage +----------+-----------+------------+------------+----------+-----------+------------------+-------------------------+------------ + array_in | array_out | array_recv | array_send | - | - | array_typanalyze | array_subscript_handler | x +(1 row) + +-- ensure dependencies are straight +DROP FUNCTION myvarcharsend(myvarchar); -- fail +ERROR: cannot drop function myvarcharsend(myvarchar) because other objects depend on it +DETAIL: type myvarchar depends on function myvarcharsend(myvarchar) +function myvarcharin(cstring,oid,integer) depends on type myvarchar +function myvarcharout(myvarchar) depends on type myvarchar +function myvarcharrecv(internal,oid,integer) depends on type myvarchar +type 
myvarchardom depends on function myvarcharsend(myvarchar) +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TYPE myvarchar; -- fail +ERROR: cannot drop type myvarchar because other objects depend on it +DETAIL: function myvarcharin(cstring,oid,integer) depends on type myvarchar +function myvarcharout(myvarchar) depends on type myvarchar +function myvarcharsend(myvarchar) depends on type myvarchar +function myvarcharrecv(internal,oid,integer) depends on type myvarchar +type myvarchardom depends on type myvarchar +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TYPE myvarchar CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to function myvarcharin(cstring,oid,integer) +drop cascades to function myvarcharout(myvarchar) +drop cascades to function myvarcharsend(myvarchar) +drop cascades to function myvarcharrecv(internal,oid,integer) +drop cascades to type myvarchardom diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out new file mode 100644 index 0000000..61825ef --- /dev/null +++ b/src/test/regress/expected/create_view.out @@ -0,0 +1,2313 @@ +-- +-- CREATE_VIEW +-- Virtual class definitions +-- (this also tests the query rewrite system) +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION interpt_pp(path, path) + RETURNS point + AS :'regresslib' + LANGUAGE C STRICT; +CREATE TABLE real_city ( + pop int4, + cname text, + outline path +); +\set filename :abs_srcdir '/data/real_city.data' +COPY real_city FROM :'filename'; +ANALYZE real_city; +SELECT * + INTO TABLE ramp + FROM ONLY road + WHERE name ~ '.*Ramp'; +CREATE VIEW street AS + SELECT r.name, r.thepath, c.cname AS cname + FROM ONLY road r, real_city c + WHERE c.outline ?# r.thepath; +CREATE VIEW iexit AS + SELECT ih.name, ih.thepath, + interpt_pp(ih.thepath, r.thepath) AS exit + FROM ihighway ih, ramp r + WHERE ih.thepath ?# r.thepath; +CREATE VIEW toyemp AS + SELECT name, age, location, 12*salary AS annualsal + FROM emp; +-- Test comments +COMMENT ON VIEW noview IS 'no view'; +ERROR: relation "noview" does not exist +COMMENT ON VIEW toyemp IS 'is a view'; +COMMENT ON VIEW toyemp IS NULL; +-- These views are left around mainly to exercise special cases in pg_dump. +CREATE TABLE view_base_table (key int PRIMARY KEY, data varchar(20)); +CREATE VIEW key_dependent_view AS + SELECT * FROM view_base_table GROUP BY key; +ALTER TABLE view_base_table DROP CONSTRAINT view_base_table_pkey; -- fails +ERROR: cannot drop constraint view_base_table_pkey on table view_base_table because other objects depend on it +DETAIL: view key_dependent_view depends on constraint view_base_table_pkey on table view_base_table +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+CREATE VIEW key_dependent_view_no_cols AS + SELECT FROM view_base_table GROUP BY key HAVING length(data) > 0; +-- +-- CREATE OR REPLACE VIEW +-- +CREATE TABLE viewtest_tbl (a int, b int, c numeric(10,1), d text COLLATE "C"); +COPY viewtest_tbl FROM stdin; +CREATE OR REPLACE VIEW viewtest AS + SELECT * FROM viewtest_tbl; +CREATE OR REPLACE VIEW viewtest AS + SELECT * FROM viewtest_tbl WHERE a > 10; +SELECT * FROM viewtest; + a | b | c | d +----+----+-----+------- + 15 | 20 | 3.3 | xyzz + 20 | 25 | 4.4 | xyzzy +(2 rows) + +CREATE OR REPLACE VIEW viewtest AS + SELECT a, b, c, d FROM viewtest_tbl WHERE a > 5 ORDER BY b DESC; +SELECT * FROM viewtest; + a | b | c | d +----+----+-----+------- + 20 | 25 | 4.4 | xyzzy + 15 | 20 | 3.3 | xyzz + 10 | 15 | 2.2 | xyz +(3 rows) + +-- should fail +CREATE OR REPLACE VIEW viewtest AS + SELECT a FROM viewtest_tbl WHERE a <> 20; +ERROR: cannot drop columns from view +-- should fail +CREATE OR REPLACE VIEW viewtest AS + SELECT 1, * FROM viewtest_tbl; +ERROR: cannot change name of view column "a" to "?column?" +HINT: Use ALTER VIEW ... RENAME COLUMN ... to change name of view column instead. +-- should fail +CREATE OR REPLACE VIEW viewtest AS + SELECT a, b::numeric, c, d FROM viewtest_tbl; +ERROR: cannot change data type of view column "b" from integer to numeric +-- should fail +CREATE OR REPLACE VIEW viewtest AS + SELECT a, b, c::numeric(10,2), d FROM viewtest_tbl; +ERROR: cannot change data type of view column "c" from numeric(10,1) to numeric(10,2) +-- should fail +CREATE OR REPLACE VIEW viewtest AS + SELECT a, b, c, d COLLATE "POSIX" FROM viewtest_tbl; +ERROR: cannot change collation of view column "d" from "C" to "POSIX" +-- should work +CREATE OR REPLACE VIEW viewtest AS + SELECT a, b, c, d, 0 AS e FROM viewtest_tbl; +DROP VIEW viewtest; +DROP TABLE viewtest_tbl; +-- tests for temporary views +CREATE SCHEMA temp_view_test + CREATE TABLE base_table (a int, id int) + CREATE TABLE base_table2 (a int, id int); +SET search_path TO temp_view_test, public; +CREATE TEMPORARY TABLE temp_table (a int, id int); +-- should be created in temp_view_test schema +CREATE VIEW v1 AS SELECT * FROM base_table; +-- should be created in temp object schema +CREATE VIEW v1_temp AS SELECT * FROM temp_table; +NOTICE: view "v1_temp" will be a temporary view +-- should be created in temp object schema +CREATE TEMP VIEW v2_temp AS SELECT * FROM base_table; +-- should be created in temp_views schema +CREATE VIEW temp_view_test.v2 AS SELECT * FROM base_table; +-- should fail +CREATE VIEW temp_view_test.v3_temp AS SELECT * FROM temp_table; +NOTICE: view "v3_temp" will be a temporary view +ERROR: cannot create temporary relation in non-temporary schema +-- should fail +CREATE SCHEMA test_view_schema + CREATE TEMP VIEW testview AS SELECT 1; +ERROR: cannot create temporary relation in non-temporary schema +-- joins: if any of the join relations are temporary, the view +-- should also be temporary +-- should be non-temp +CREATE VIEW v3 AS + SELECT t1.a AS t1_a, t2.a AS t2_a + FROM base_table t1, base_table2 t2 + WHERE t1.id = t2.id; +-- should be temp (one join rel is temp) +CREATE VIEW v4_temp AS + SELECT t1.a AS t1_a, t2.a AS t2_a + FROM base_table t1, temp_table t2 + WHERE t1.id = t2.id; +NOTICE: view "v4_temp" will be a temporary view +-- should be temp +CREATE VIEW v5_temp AS + SELECT t1.a AS t1_a, t2.a AS t2_a, t3.a AS t3_a + FROM base_table t1, base_table2 t2, temp_table t3 + WHERE t1.id = t2.id and t2.id = t3.id; +NOTICE: view "v5_temp" will be a temporary view +-- subqueries 
+CREATE VIEW v4 AS SELECT * FROM base_table WHERE id IN (SELECT id FROM base_table2); +CREATE VIEW v5 AS SELECT t1.id, t2.a FROM base_table t1, (SELECT * FROM base_table2) t2; +CREATE VIEW v6 AS SELECT * FROM base_table WHERE EXISTS (SELECT 1 FROM base_table2); +CREATE VIEW v7 AS SELECT * FROM base_table WHERE NOT EXISTS (SELECT 1 FROM base_table2); +CREATE VIEW v8 AS SELECT * FROM base_table WHERE EXISTS (SELECT 1); +CREATE VIEW v6_temp AS SELECT * FROM base_table WHERE id IN (SELECT id FROM temp_table); +NOTICE: view "v6_temp" will be a temporary view +CREATE VIEW v7_temp AS SELECT t1.id, t2.a FROM base_table t1, (SELECT * FROM temp_table) t2; +NOTICE: view "v7_temp" will be a temporary view +CREATE VIEW v8_temp AS SELECT * FROM base_table WHERE EXISTS (SELECT 1 FROM temp_table); +NOTICE: view "v8_temp" will be a temporary view +CREATE VIEW v9_temp AS SELECT * FROM base_table WHERE NOT EXISTS (SELECT 1 FROM temp_table); +NOTICE: view "v9_temp" will be a temporary view +-- a view should also be temporary if it references a temporary view +CREATE VIEW v10_temp AS SELECT * FROM v7_temp; +NOTICE: view "v10_temp" will be a temporary view +CREATE VIEW v11_temp AS SELECT t1.id, t2.a FROM base_table t1, v10_temp t2; +NOTICE: view "v11_temp" will be a temporary view +CREATE VIEW v12_temp AS SELECT true FROM v11_temp; +NOTICE: view "v12_temp" will be a temporary view +-- a view should also be temporary if it references a temporary sequence +CREATE SEQUENCE seq1; +CREATE TEMPORARY SEQUENCE seq1_temp; +CREATE VIEW v9 AS SELECT seq1.is_called FROM seq1; +CREATE VIEW v13_temp AS SELECT seq1_temp.is_called FROM seq1_temp; +NOTICE: view "v13_temp" will be a temporary view +SELECT relname FROM pg_class + WHERE relname LIKE 'v_' + AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'temp_view_test') + ORDER BY relname; + relname +--------- + v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 +(9 rows) + +SELECT relname FROM pg_class + WHERE relname LIKE 'v%' + AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%') + ORDER BY relname; + relname +---------- + v10_temp + v11_temp + v12_temp + v13_temp + v1_temp + v2_temp + v4_temp + v5_temp + v6_temp + v7_temp + v8_temp + v9_temp +(12 rows) + +CREATE SCHEMA testviewschm2; +SET search_path TO testviewschm2, public; +CREATE TABLE t1 (num int, name text); +CREATE TABLE t2 (num2 int, value text); +CREATE TEMP TABLE tt (num2 int, value text); +CREATE VIEW nontemp1 AS SELECT * FROM t1 CROSS JOIN t2; +CREATE VIEW temporal1 AS SELECT * FROM t1 CROSS JOIN tt; +NOTICE: view "temporal1" will be a temporary view +CREATE VIEW nontemp2 AS SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num2; +CREATE VIEW temporal2 AS SELECT * FROM t1 INNER JOIN tt ON t1.num = tt.num2; +NOTICE: view "temporal2" will be a temporary view +CREATE VIEW nontemp3 AS SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num2; +CREATE VIEW temporal3 AS SELECT * FROM t1 LEFT JOIN tt ON t1.num = tt.num2; +NOTICE: view "temporal3" will be a temporary view +CREATE VIEW nontemp4 AS SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num2 AND t2.value = 'xxx'; +CREATE VIEW temporal4 AS SELECT * FROM t1 LEFT JOIN tt ON t1.num = tt.num2 AND tt.value = 'xxx'; +NOTICE: view "temporal4" will be a temporary view +SELECT relname FROM pg_class + WHERE relname LIKE 'nontemp%' + AND relnamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'testviewschm2') + ORDER BY relname; + relname +---------- + nontemp1 + nontemp2 + nontemp3 + nontemp4 +(4 rows) + +SELECT relname FROM pg_class + WHERE relname 
LIKE 'temporal%' + AND relnamespace IN (SELECT oid FROM pg_namespace WHERE nspname LIKE 'pg_temp%') + ORDER BY relname; + relname +----------- + temporal1 + temporal2 + temporal3 + temporal4 +(4 rows) + +CREATE TABLE tbl1 ( a int, b int); +CREATE TABLE tbl2 (c int, d int); +CREATE TABLE tbl3 (e int, f int); +CREATE TABLE tbl4 (g int, h int); +CREATE TEMP TABLE tmptbl (i int, j int); +--Should be in testviewschm2 +CREATE VIEW pubview AS SELECT * FROM tbl1 WHERE tbl1.a +BETWEEN (SELECT d FROM tbl2 WHERE c = 1) AND (SELECT e FROM tbl3 WHERE f = 2) +AND EXISTS (SELECT g FROM tbl4 LEFT JOIN tbl3 ON tbl4.h = tbl3.f); +SELECT count(*) FROM pg_class where relname = 'pubview' +AND relnamespace IN (SELECT OID FROM pg_namespace WHERE nspname = 'testviewschm2'); + count +------- + 1 +(1 row) + +--Should be in temp object schema +CREATE VIEW mytempview AS SELECT * FROM tbl1 WHERE tbl1.a +BETWEEN (SELECT d FROM tbl2 WHERE c = 1) AND (SELECT e FROM tbl3 WHERE f = 2) +AND EXISTS (SELECT g FROM tbl4 LEFT JOIN tbl3 ON tbl4.h = tbl3.f) +AND NOT EXISTS (SELECT g FROM tbl4 LEFT JOIN tmptbl ON tbl4.h = tmptbl.j); +NOTICE: view "mytempview" will be a temporary view +SELECT count(*) FROM pg_class where relname LIKE 'mytempview' +And relnamespace IN (SELECT OID FROM pg_namespace WHERE nspname LIKE 'pg_temp%'); + count +------- + 1 +(1 row) + +-- +-- CREATE VIEW and WITH(...) clause +-- +CREATE VIEW mysecview1 + AS SELECT * FROM tbl1 WHERE a = 0; +CREATE VIEW mysecview2 WITH (security_barrier=true) + AS SELECT * FROM tbl1 WHERE a > 0; +CREATE VIEW mysecview3 WITH (security_barrier=false) + AS SELECT * FROM tbl1 WHERE a < 0; +CREATE VIEW mysecview4 WITH (security_barrier) + AS SELECT * FROM tbl1 WHERE a <> 0; +CREATE VIEW mysecview5 WITH (security_barrier=100) -- Error + AS SELECT * FROM tbl1 WHERE a > 100; +ERROR: invalid value for boolean option "security_barrier": 100 +CREATE VIEW mysecview6 WITH (invalid_option) -- Error + AS SELECT * FROM tbl1 WHERE a < 100; +ERROR: unrecognized parameter "invalid_option" +CREATE VIEW mysecview7 WITH (security_invoker=true) + AS SELECT * FROM tbl1 WHERE a = 100; +CREATE VIEW mysecview8 WITH (security_invoker=false, security_barrier=true) + AS SELECT * FROM tbl1 WHERE a > 100; +CREATE VIEW mysecview9 WITH (security_invoker) + AS SELECT * FROM tbl1 WHERE a < 100; +CREATE VIEW mysecview10 WITH (security_invoker=100) -- Error + AS SELECT * FROM tbl1 WHERE a <> 100; +ERROR: invalid value for boolean option "security_invoker": 100 +SELECT relname, relkind, reloptions FROM pg_class + WHERE oid in ('mysecview1'::regclass, 'mysecview2'::regclass, + 'mysecview3'::regclass, 'mysecview4'::regclass, + 'mysecview7'::regclass, 'mysecview8'::regclass, + 'mysecview9'::regclass) + ORDER BY relname; + relname | relkind | reloptions +------------+---------+------------------------------------------------ + mysecview1 | v | + mysecview2 | v | {security_barrier=true} + mysecview3 | v | {security_barrier=false} + mysecview4 | v | {security_barrier=true} + mysecview7 | v | {security_invoker=true} + mysecview8 | v | {security_invoker=false,security_barrier=true} + mysecview9 | v | {security_invoker=true} +(7 rows) + +CREATE OR REPLACE VIEW mysecview1 + AS SELECT * FROM tbl1 WHERE a = 256; +CREATE OR REPLACE VIEW mysecview2 + AS SELECT * FROM tbl1 WHERE a > 256; +CREATE OR REPLACE VIEW mysecview3 WITH (security_barrier=true) + AS SELECT * FROM tbl1 WHERE a < 256; +CREATE OR REPLACE VIEW mysecview4 WITH (security_barrier=false) + AS SELECT * FROM tbl1 WHERE a <> 256; +CREATE OR REPLACE VIEW mysecview7 + 
AS SELECT * FROM tbl1 WHERE a > 256; +CREATE OR REPLACE VIEW mysecview8 WITH (security_invoker=true) + AS SELECT * FROM tbl1 WHERE a < 256; +CREATE OR REPLACE VIEW mysecview9 WITH (security_invoker=false, security_barrier=true) + AS SELECT * FROM tbl1 WHERE a <> 256; +SELECT relname, relkind, reloptions FROM pg_class + WHERE oid in ('mysecview1'::regclass, 'mysecview2'::regclass, + 'mysecview3'::regclass, 'mysecview4'::regclass, + 'mysecview7'::regclass, 'mysecview8'::regclass, + 'mysecview9'::regclass) + ORDER BY relname; + relname | relkind | reloptions +------------+---------+------------------------------------------------ + mysecview1 | v | + mysecview2 | v | + mysecview3 | v | {security_barrier=true} + mysecview4 | v | {security_barrier=false} + mysecview7 | v | + mysecview8 | v | {security_invoker=true} + mysecview9 | v | {security_invoker=false,security_barrier=true} +(7 rows) + +-- Check that unknown literals are converted to "text" in CREATE VIEW, +-- so that we don't end up with unknown-type columns. +CREATE VIEW unspecified_types AS + SELECT 42 as i, 42.5 as num, 'foo' as u, 'foo'::unknown as u2, null as n; +\d+ unspecified_types + View "testviewschm2.unspecified_types" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + i | integer | | | | plain | + num | numeric | | | | main | + u | text | | | | extended | + u2 | text | | | | extended | + n | text | | | | extended | +View definition: + SELECT 42 AS i, + 42.5 AS num, + 'foo'::text AS u, + 'foo'::text AS u2, + NULL::text AS n; + +SELECT * FROM unspecified_types; + i | num | u | u2 | n +----+------+-----+-----+--- + 42 | 42.5 | foo | foo | +(1 row) + +-- This test checks that proper typmods are assigned in a multi-row VALUES +CREATE VIEW tt1 AS + SELECT * FROM ( + VALUES + ('abc'::varchar(3), '0123456789', 42, 'abcd'::varchar(4)), + ('0123456789', 'abc'::varchar(3), 42.12, 'abc'::varchar(4)) + ) vv(a,b,c,d); +\d+ tt1 + View "testviewschm2.tt1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+----------------------+-----------+----------+---------+----------+------------- + a | character varying | | | | extended | + b | character varying | | | | extended | + c | numeric | | | | main | + d | character varying(4) | | | | extended | +View definition: + SELECT a, + b, + c, + d + FROM ( VALUES ('abc'::character varying(3),'0123456789'::character varying,42,'abcd'::character varying(4)), ('0123456789'::character varying,'abc'::character varying(3),42.12,'abc'::character varying(4))) vv(a, b, c, d); + +SELECT * FROM tt1; + a | b | c | d +------------+------------+-------+------ + abc | 0123456789 | 42 | abcd + 0123456789 | abc | 42.12 | abc +(2 rows) + +SELECT a::varchar(3) FROM tt1; + a +----- + abc + 012 +(2 rows) + +DROP VIEW tt1; +-- Test view decompilation in the face of relation renaming conflicts +CREATE TABLE tt1 (f1 int, f2 int, f3 text); +CREATE TABLE tx1 (x1 int, x2 int, x3 text); +CREATE TABLE temp_view_test.tt1 (y1 int, f2 int, f3 text); +CREATE VIEW aliased_view_1 AS + select * from tt1 + where exists (select 1 from tx1 where tt1.f1 = tx1.x1); +CREATE VIEW aliased_view_2 AS + select * from tt1 a1 + where exists (select 1 from tx1 where a1.f1 = tx1.x1); +CREATE VIEW aliased_view_3 AS + select * from tt1 + where exists (select 1 from tx1 a2 where tt1.f1 = a2.x1); +CREATE VIEW aliased_view_4 AS + select * from temp_view_test.tt1 + where exists (select 1 from tt1 where temp_view_test.tt1.y1 = 
tt1.f1); +\d+ aliased_view_1 + View "testviewschm2.aliased_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM tt1 + WHERE (EXISTS ( SELECT 1 + FROM tx1 + WHERE tt1.f1 = tx1.x1)); + +\d+ aliased_view_2 + View "testviewschm2.aliased_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM tt1 a1 + WHERE (EXISTS ( SELECT 1 + FROM tx1 + WHERE a1.f1 = tx1.x1)); + +\d+ aliased_view_3 + View "testviewschm2.aliased_view_3" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM tt1 + WHERE (EXISTS ( SELECT 1 + FROM tx1 a2 + WHERE tt1.f1 = a2.x1)); + +\d+ aliased_view_4 + View "testviewschm2.aliased_view_4" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + y1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT y1, + f2, + f3 + FROM temp_view_test.tt1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 tt1_1 + WHERE tt1.y1 = tt1_1.f1)); + +ALTER TABLE tx1 RENAME TO a1; +\d+ aliased_view_1 + View "testviewschm2.aliased_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM tt1 + WHERE (EXISTS ( SELECT 1 + FROM a1 + WHERE tt1.f1 = a1.x1)); + +\d+ aliased_view_2 + View "testviewschm2.aliased_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM tt1 a1 + WHERE (EXISTS ( SELECT 1 + FROM a1 a1_1 + WHERE a1.f1 = a1_1.x1)); + +\d+ aliased_view_3 + View "testviewschm2.aliased_view_3" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM tt1 + WHERE (EXISTS ( SELECT 1 + FROM a1 a2 + WHERE tt1.f1 = a2.x1)); + +\d+ aliased_view_4 + View "testviewschm2.aliased_view_4" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + y1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT y1, + f2, + f3 + FROM temp_view_test.tt1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 tt1_1 + WHERE tt1.y1 = tt1_1.f1)); + +ALTER TABLE tt1 RENAME TO a2; +\d+ aliased_view_1 + View 
"testviewschm2.aliased_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM a2 + WHERE (EXISTS ( SELECT 1 + FROM a1 + WHERE a2.f1 = a1.x1)); + +\d+ aliased_view_2 + View "testviewschm2.aliased_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM a2 a1 + WHERE (EXISTS ( SELECT 1 + FROM a1 a1_1 + WHERE a1.f1 = a1_1.x1)); + +\d+ aliased_view_3 + View "testviewschm2.aliased_view_3" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM a2 + WHERE (EXISTS ( SELECT 1 + FROM a1 a2_1 + WHERE a2.f1 = a2_1.x1)); + +\d+ aliased_view_4 + View "testviewschm2.aliased_view_4" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + y1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT y1, + f2, + f3 + FROM temp_view_test.tt1 + WHERE (EXISTS ( SELECT 1 + FROM a2 + WHERE tt1.y1 = a2.f1)); + +ALTER TABLE a1 RENAME TO tt1; +\d+ aliased_view_1 + View "testviewschm2.aliased_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM a2 + WHERE (EXISTS ( SELECT 1 + FROM tt1 + WHERE a2.f1 = tt1.x1)); + +\d+ aliased_view_2 + View "testviewschm2.aliased_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM a2 a1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 + WHERE a1.f1 = tt1.x1)); + +\d+ aliased_view_3 + View "testviewschm2.aliased_view_3" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM a2 + WHERE (EXISTS ( SELECT 1 + FROM tt1 a2_1 + WHERE a2.f1 = a2_1.x1)); + +\d+ aliased_view_4 + View "testviewschm2.aliased_view_4" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + y1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT y1, + f2, + f3 + FROM temp_view_test.tt1 + WHERE (EXISTS ( SELECT 1 + FROM a2 + WHERE tt1.y1 = a2.f1)); + +ALTER TABLE a2 RENAME TO tx1; +ALTER TABLE tx1 SET SCHEMA temp_view_test; +\d+ aliased_view_1 + View "testviewschm2.aliased_view_1" + 
Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM temp_view_test.tx1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 + WHERE tx1.f1 = tt1.x1)); + +\d+ aliased_view_2 + View "testviewschm2.aliased_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM temp_view_test.tx1 a1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 + WHERE a1.f1 = tt1.x1)); + +\d+ aliased_view_3 + View "testviewschm2.aliased_view_3" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM temp_view_test.tx1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 a2 + WHERE tx1.f1 = a2.x1)); + +\d+ aliased_view_4 + View "testviewschm2.aliased_view_4" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + y1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT y1, + f2, + f3 + FROM temp_view_test.tt1 + WHERE (EXISTS ( SELECT 1 + FROM temp_view_test.tx1 + WHERE tt1.y1 = tx1.f1)); + +ALTER TABLE temp_view_test.tt1 RENAME TO tmp1; +ALTER TABLE temp_view_test.tmp1 SET SCHEMA testviewschm2; +ALTER TABLE tmp1 RENAME TO tx1; +\d+ aliased_view_1 + View "testviewschm2.aliased_view_1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM temp_view_test.tx1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 + WHERE tx1.f1 = tt1.x1)); + +\d+ aliased_view_2 + View "testviewschm2.aliased_view_2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM temp_view_test.tx1 a1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 + WHERE a1.f1 = tt1.x1)); + +\d+ aliased_view_3 + View "testviewschm2.aliased_view_3" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + f1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT f1, + f2, + f3 + FROM temp_view_test.tx1 + WHERE (EXISTS ( SELECT 1 + FROM tt1 a2 + WHERE tx1.f1 = a2.x1)); + +\d+ aliased_view_4 + View "testviewschm2.aliased_view_4" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+----------+------------- + y1 | integer | | | | plain | + f2 | integer | | | | plain | + f3 | text | | | | extended | +View definition: + SELECT y1, + f2, + f3 + FROM tx1 + WHERE (EXISTS ( SELECT 1 + FROM 
temp_view_test.tx1 tx1_1 + WHERE tx1.y1 = tx1_1.f1)); + +-- Test aliasing of joins +create view view_of_joins as +select * from + (select * from (tbl1 cross join tbl2) same) ss, + (tbl3 cross join tbl4) same; +\d+ view_of_joins + View "testviewschm2.view_of_joins" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | + b | integer | | | | plain | + c | integer | | | | plain | + d | integer | | | | plain | + e | integer | | | | plain | + f | integer | | | | plain | + g | integer | | | | plain | + h | integer | | | | plain | +View definition: + SELECT ss.a, + ss.b, + ss.c, + ss.d, + same.e, + same.f, + same.g, + same.h + FROM ( SELECT same_1.a, + same_1.b, + same_1.c, + same_1.d + FROM (tbl1 + CROSS JOIN tbl2) same_1) ss, + (tbl3 + CROSS JOIN tbl4) same; + +create table tbl1a (a int, c int); +create view view_of_joins_2a as select * from tbl1 join tbl1a using (a); +create view view_of_joins_2b as select * from tbl1 join tbl1a using (a) as x; +create view view_of_joins_2c as select * from (tbl1 join tbl1a using (a)) as y; +create view view_of_joins_2d as select * from (tbl1 join tbl1a using (a) as x) as y; +select pg_get_viewdef('view_of_joins_2a', true); + pg_get_viewdef +---------------------------- + SELECT tbl1.a, + + tbl1.b, + + tbl1a.c + + FROM tbl1 + + JOIN tbl1a USING (a); +(1 row) + +select pg_get_viewdef('view_of_joins_2b', true); + pg_get_viewdef +--------------------------------- + SELECT tbl1.a, + + tbl1.b, + + tbl1a.c + + FROM tbl1 + + JOIN tbl1a USING (a) AS x; +(1 row) + +select pg_get_viewdef('view_of_joins_2c', true); + pg_get_viewdef +------------------------------- + SELECT y.a, + + y.b, + + y.c + + FROM (tbl1 + + JOIN tbl1a USING (a)) y; +(1 row) + +select pg_get_viewdef('view_of_joins_2d', true); + pg_get_viewdef +------------------------------------ + SELECT y.a, + + y.b, + + y.c + + FROM (tbl1 + + JOIN tbl1a USING (a) AS x) y; +(1 row) + +-- Test view decompilation in the face of column addition/deletion/renaming +create table tt2 (a int, b int, c int); +create table tt3 (ax int8, b int2, c numeric); +create table tt4 (ay int, b int, q int); +create view v1 as select * from tt2 natural join tt3; +create view v1a as select * from (tt2 natural join tt3) j; +create view v2 as select * from tt2 join tt3 using (b,c) join tt4 using (b); +create view v2a as select * from (tt2 join tt3 using (b,c) join tt4 using (b)) j; +create view v3 as select * from tt2 join tt3 using (b,c) full join tt4 using (b); +select pg_get_viewdef('v1', true); + pg_get_viewdef +----------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax + + FROM tt2 + + JOIN tt3 USING (b, c); +(1 row) + +select pg_get_viewdef('v1a', true); + pg_get_viewdef +-------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax + + FROM (tt2 + + JOIN tt3 USING (b, c)) j; +(1 row) + +select pg_get_viewdef('v2', true); + pg_get_viewdef +---------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 USING (b, c)+ + JOIN tt4 USING (b); +(1 row) + +select pg_get_viewdef('v2a', true); + pg_get_viewdef +----------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax, + + j.ay, + + j.q + + FROM (tt2 + + JOIN tt3 USING (b, c) + + JOIN tt4 USING (b)) j; +(1 row) + +select pg_get_viewdef('v3', true); + pg_get_viewdef +------------------------------- + SELECT b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + 
tt4.q + + FROM tt2 + + JOIN tt3 USING (b, c) + + FULL JOIN tt4 USING (b); +(1 row) + +alter table tt2 add column d int; +alter table tt2 add column e int; +select pg_get_viewdef('v1', true); + pg_get_viewdef +----------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax + + FROM tt2 + + JOIN tt3 USING (b, c); +(1 row) + +select pg_get_viewdef('v1a', true); + pg_get_viewdef +-------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax + + FROM (tt2 + + JOIN tt3 USING (b, c)) j; +(1 row) + +select pg_get_viewdef('v2', true); + pg_get_viewdef +---------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 USING (b, c)+ + JOIN tt4 USING (b); +(1 row) + +select pg_get_viewdef('v2a', true); + pg_get_viewdef +----------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax, + + j.ay, + + j.q + + FROM (tt2 + + JOIN tt3 USING (b, c) + + JOIN tt4 USING (b)) j; +(1 row) + +select pg_get_viewdef('v3', true); + pg_get_viewdef +------------------------------- + SELECT b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 USING (b, c) + + FULL JOIN tt4 USING (b); +(1 row) + +alter table tt3 rename c to d; +select pg_get_viewdef('v1', true); + pg_get_viewdef +------------------------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax + + FROM tt2 + + JOIN tt3 tt3(ax, b, c) USING (b, c); +(1 row) + +select pg_get_viewdef('v1a', true); + pg_get_viewdef +---------------------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax + + FROM (tt2 + + JOIN tt3 tt3(ax, b, c) USING (b, c)) j; +(1 row) + +select pg_get_viewdef('v2', true); + pg_get_viewdef +------------------------------------------ + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 tt3(ax, b, c) USING (b, c)+ + JOIN tt4 USING (b); +(1 row) + +select pg_get_viewdef('v2a', true); + pg_get_viewdef +------------------------------------------ + SELECT j.b, + + j.c, + + j.a, + + j.ax, + + j.ay, + + j.q + + FROM (tt2 + + JOIN tt3 tt3(ax, b, c) USING (b, c)+ + JOIN tt4 USING (b)) j; +(1 row) + +select pg_get_viewdef('v3', true); + pg_get_viewdef +------------------------------------------ + SELECT b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 tt3(ax, b, c) USING (b, c)+ + FULL JOIN tt4 USING (b); +(1 row) + +alter table tt3 add column c int; +alter table tt3 add column e int; +select pg_get_viewdef('v1', true); + pg_get_viewdef +--------------------------------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c); +(1 row) + +select pg_get_viewdef('v1a', true); + pg_get_viewdef +----------------------------------------------------------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax + + FROM (tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c)) j(b, c, a, d, e, ax, c_1, e_1); +(1 row) + +select pg_get_viewdef('v2', true); + pg_get_viewdef +-------------------------------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c)+ + JOIN tt4 USING (b); +(1 row) + +select pg_get_viewdef('v2a', true); + pg_get_viewdef +----------------------------------------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax, + + j.ay, + + j.q + + FROM (tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c) + + JOIN tt4 USING (b)) j(b, 
c, a, d, e, ax, c_1, e_1, ay, q); +(1 row) + +select pg_get_viewdef('v3', true); + pg_get_viewdef +-------------------------------------------------- + SELECT b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c)+ + FULL JOIN tt4 USING (b); +(1 row) + +alter table tt2 drop column d; +select pg_get_viewdef('v1', true); + pg_get_viewdef +--------------------------------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c); +(1 row) + +select pg_get_viewdef('v1a', true); + pg_get_viewdef +-------------------------------------------------------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax + + FROM (tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c)) j(b, c, a, e, ax, c_1, e_1); +(1 row) + +select pg_get_viewdef('v2', true); + pg_get_viewdef +-------------------------------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c)+ + JOIN tt4 USING (b); +(1 row) + +select pg_get_viewdef('v2a', true); + pg_get_viewdef +-------------------------------------------------------------- + SELECT j.b, + + j.c, + + j.a, + + j.ax, + + j.ay, + + j.q + + FROM (tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c) + + JOIN tt4 USING (b)) j(b, c, a, e, ax, c_1, e_1, ay, q); +(1 row) + +select pg_get_viewdef('v3', true); + pg_get_viewdef +-------------------------------------------------- + SELECT b, + + tt3.c, + + tt2.a, + + tt3.ax, + + tt4.ay, + + tt4.q + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c)+ + FULL JOIN tt4 USING (b); +(1 row) + +create table tt5 (a int, b int); +create table tt6 (c int, d int); +create view vv1 as select * from (tt5 cross join tt6) j(aa,bb,cc,dd); +select pg_get_viewdef('vv1', true); + pg_get_viewdef +----------------------------------------- + SELECT j.aa, + + j.bb, + + j.cc, + + j.dd + + FROM (tt5 + + CROSS JOIN tt6) j(aa, bb, cc, dd); +(1 row) + +alter table tt5 add column c int; +select pg_get_viewdef('vv1', true); + pg_get_viewdef +-------------------------------------------- + SELECT j.aa, + + j.bb, + + j.cc, + + j.dd + + FROM (tt5 + + CROSS JOIN tt6) j(aa, bb, c, cc, dd); +(1 row) + +alter table tt5 add column cc int; +select pg_get_viewdef('vv1', true); + pg_get_viewdef +-------------------------------------------------- + SELECT j.aa, + + j.bb, + + j.cc, + + j.dd + + FROM (tt5 + + CROSS JOIN tt6) j(aa, bb, c, cc_1, cc, dd); +(1 row) + +alter table tt5 drop column c; +select pg_get_viewdef('vv1', true); + pg_get_viewdef +----------------------------------------------- + SELECT j.aa, + + j.bb, + + j.cc, + + j.dd + + FROM (tt5 + + CROSS JOIN tt6) j(aa, bb, cc_1, cc, dd); +(1 row) + +create view v4 as select * from v1; +alter view v1 rename column a to x; +select pg_get_viewdef('v1', true); + pg_get_viewdef +--------------------------------------------------- + SELECT tt2.b, + + tt3.c, + + tt2.a AS x, + + tt3.ax + + FROM tt2 + + JOIN tt3 tt3(ax, b, c, c_1, e) USING (b, c); +(1 row) + +select pg_get_viewdef('v4', true); + pg_get_viewdef +---------------- + SELECT b, + + c, + + x AS a, + + ax + + FROM v1; +(1 row) + +-- Unnamed FULL JOIN USING is lots of fun too +create table tt7 (x int, xx int, y int); +alter table tt7 drop column xx; +create table tt8 (x int, z int); +create view vv2 as +select * from (values(1,2,3,4,5)) v(a,b,c,d,e) +union all +select * from tt7 full join tt8 using (x), tt8 tt8x; +select 
pg_get_viewdef('vv2', true); + pg_get_viewdef +------------------------------------------------ + SELECT v.a, + + v.b, + + v.c, + + v.d, + + v.e + + FROM ( VALUES (1,2,3,4,5)) v(a, b, c, d, e)+ + UNION ALL + + SELECT x AS a, + + tt7.y AS b, + + tt8.z AS c, + + tt8x.x_1 AS d, + + tt8x.z AS e + + FROM tt7 + + FULL JOIN tt8 USING (x), + + tt8 tt8x(x_1, z); +(1 row) + +create view vv3 as +select * from (values(1,2,3,4,5,6)) v(a,b,c,x,e,f) +union all +select * from + tt7 full join tt8 using (x), + tt7 tt7x full join tt8 tt8x using (x); +select pg_get_viewdef('vv3', true); + pg_get_viewdef +----------------------------------------------------- + SELECT v.a, + + v.b, + + v.c, + + v.x, + + v.e, + + v.f + + FROM ( VALUES (1,2,3,4,5,6)) v(a, b, c, x, e, f)+ + UNION ALL + + SELECT x AS a, + + tt7.y AS b, + + tt8.z AS c, + + x_1 AS x, + + tt7x.y AS e, + + tt8x.z AS f + + FROM tt7 + + FULL JOIN tt8 USING (x), + + tt7 tt7x(x_1, y) + + FULL JOIN tt8 tt8x(x_1, z) USING (x_1); +(1 row) + +create view vv4 as +select * from (values(1,2,3,4,5,6,7)) v(a,b,c,x,e,f,g) +union all +select * from + tt7 full join tt8 using (x), + tt7 tt7x full join tt8 tt8x using (x) full join tt8 tt8y using (x); +select pg_get_viewdef('vv4', true); + pg_get_viewdef +---------------------------------------------------------- + SELECT v.a, + + v.b, + + v.c, + + v.x, + + v.e, + + v.f, + + v.g + + FROM ( VALUES (1,2,3,4,5,6,7)) v(a, b, c, x, e, f, g)+ + UNION ALL + + SELECT x AS a, + + tt7.y AS b, + + tt8.z AS c, + + x_1 AS x, + + tt7x.y AS e, + + tt8x.z AS f, + + tt8y.z AS g + + FROM tt7 + + FULL JOIN tt8 USING (x), + + tt7 tt7x(x_1, y) + + FULL JOIN tt8 tt8x(x_1, z) USING (x_1) + + FULL JOIN tt8 tt8y(x_1, z) USING (x_1); +(1 row) + +alter table tt7 add column zz int; +alter table tt7 add column z int; +alter table tt7 drop column zz; +alter table tt8 add column z2 int; +select pg_get_viewdef('vv2', true); + pg_get_viewdef +------------------------------------------------ + SELECT v.a, + + v.b, + + v.c, + + v.d, + + v.e + + FROM ( VALUES (1,2,3,4,5)) v(a, b, c, d, e)+ + UNION ALL + + SELECT x AS a, + + tt7.y AS b, + + tt8.z AS c, + + tt8x.x_1 AS d, + + tt8x.z AS e + + FROM tt7 + + FULL JOIN tt8 USING (x), + + tt8 tt8x(x_1, z, z2); +(1 row) + +select pg_get_viewdef('vv3', true); + pg_get_viewdef +----------------------------------------------------- + SELECT v.a, + + v.b, + + v.c, + + v.x, + + v.e, + + v.f + + FROM ( VALUES (1,2,3,4,5,6)) v(a, b, c, x, e, f)+ + UNION ALL + + SELECT x AS a, + + tt7.y AS b, + + tt8.z AS c, + + x_1 AS x, + + tt7x.y AS e, + + tt8x.z AS f + + FROM tt7 + + FULL JOIN tt8 USING (x), + + tt7 tt7x(x_1, y, z) + + FULL JOIN tt8 tt8x(x_1, z, z2) USING (x_1); +(1 row) + +select pg_get_viewdef('vv4', true); + pg_get_viewdef +---------------------------------------------------------- + SELECT v.a, + + v.b, + + v.c, + + v.x, + + v.e, + + v.f, + + v.g + + FROM ( VALUES (1,2,3,4,5,6,7)) v(a, b, c, x, e, f, g)+ + UNION ALL + + SELECT x AS a, + + tt7.y AS b, + + tt8.z AS c, + + x_1 AS x, + + tt7x.y AS e, + + tt8x.z AS f, + + tt8y.z AS g + + FROM tt7 + + FULL JOIN tt8 USING (x), + + tt7 tt7x(x_1, y, z) + + FULL JOIN tt8 tt8x(x_1, z, z2) USING (x_1) + + FULL JOIN tt8 tt8y(x_1, z, z2) USING (x_1); +(1 row) + +-- Implicit coercions in a JOIN USING create issues similar to FULL JOIN +create table tt7a (x date, xx int, y int); +alter table tt7a drop column xx; +create table tt8a (x timestamptz, z int); +create view vv2a as +select * from (values(now(),2,3,now(),5)) v(a,b,c,d,e) +union all +select * from tt7a left join tt8a using 
(x), tt8a tt8ax; +select pg_get_viewdef('vv2a', true); + pg_get_viewdef +-------------------------------------------------------- + SELECT v.a, + + v.b, + + v.c, + + v.d, + + v.e + + FROM ( VALUES (now(),2,3,now(),5)) v(a, b, c, d, e)+ + UNION ALL + + SELECT x AS a, + + tt7a.y AS b, + + tt8a.z AS c, + + tt8ax.x_1 AS d, + + tt8ax.z AS e + + FROM tt7a + + LEFT JOIN tt8a USING (x), + + tt8a tt8ax(x_1, z); +(1 row) + +-- +-- Also check dropping a column that existed when the view was made +-- +create table tt9 (x int, xx int, y int); +create table tt10 (x int, z int); +create view vv5 as select x,y,z from tt9 join tt10 using(x); +select pg_get_viewdef('vv5', true); + pg_get_viewdef +--------------------------- + SELECT tt9.x, + + tt9.y, + + tt10.z + + FROM tt9 + + JOIN tt10 USING (x); +(1 row) + +alter table tt9 drop column xx; +select pg_get_viewdef('vv5', true); + pg_get_viewdef +--------------------------- + SELECT tt9.x, + + tt9.y, + + tt10.z + + FROM tt9 + + JOIN tt10 USING (x); +(1 row) + +-- +-- Another corner case is that we might add a column to a table below a +-- JOIN USING, and thereby make the USING column name ambiguous +-- +create table tt11 (x int, y int); +create table tt12 (x int, z int); +create table tt13 (z int, q int); +create view vv6 as select x,y,z,q from + (tt11 join tt12 using(x)) join tt13 using(z); +select pg_get_viewdef('vv6', true); + pg_get_viewdef +--------------------------- + SELECT tt11.x, + + tt11.y, + + tt12.z, + + tt13.q + + FROM tt11 + + JOIN tt12 USING (x) + + JOIN tt13 USING (z); +(1 row) + +alter table tt11 add column z int; +select pg_get_viewdef('vv6', true); + pg_get_viewdef +------------------------------ + SELECT tt11.x, + + tt11.y, + + tt12.z, + + tt13.q + + FROM tt11 tt11(x, y, z_1)+ + JOIN tt12 USING (x) + + JOIN tt13 USING (z); +(1 row) + +-- +-- Check cases involving dropped/altered columns in a function's rowtype result +-- +create table tt14t (f1 text, f2 text, f3 text, f4 text); +insert into tt14t values('foo', 'bar', 'baz', '42'); +alter table tt14t drop column f2; +create function tt14f() returns setof tt14t as +$$ +declare + rec1 record; +begin + for rec1 in select * from tt14t + loop + return next rec1; + end loop; +end; +$$ +language plpgsql; +create view tt14v as select t.* from tt14f() t; +select pg_get_viewdef('tt14v', true); + pg_get_viewdef +-------------------------------- + SELECT f1, + + f3, + + f4 + + FROM tt14f() t(f1, f3, f4); +(1 row) + +select * from tt14v; + f1 | f3 | f4 +-----+-----+---- + foo | baz | 42 +(1 row) + +alter table tt14t drop column f3; -- fail, view has explicit reference to f3 +ERROR: cannot drop column f3 of table tt14t because other objects depend on it +DETAIL: view tt14v depends on column f3 of table tt14t +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- We used to have a bug that would allow the above to succeed, posing +-- hazards for later execution of the view. Check that the internal +-- defenses for those hazards haven't bit-rotted, in case some other +-- bug with similar symptoms emerges. 
+begin; +-- destroy the dependency entry that prevents the DROP: +delete from pg_depend where + objid = (select oid from pg_rewrite + where ev_class = 'tt14v'::regclass and rulename = '_RETURN') + and refobjsubid = 3 +returning pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as ref, + deptype; + obj | ref | deptype +----------------------------+--------------------------+--------- + rule _RETURN on view tt14v | column f3 of table tt14t | n +(1 row) + +-- this will now succeed: +alter table tt14t drop column f3; +-- column f3 is still in the view, sort of ... +select pg_get_viewdef('tt14v', true); + pg_get_viewdef +------------------------------- + SELECT f1, + + "?dropped?column?" AS f3,+ + f4 + + FROM tt14f() t(f1, f4); +(1 row) + +-- ... and you can even EXPLAIN it ... +explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f3, t.f4 + Function Call: tt14f() +(3 rows) + +-- but it will fail at execution +select f1, f4 from tt14v; + f1 | f4 +-----+---- + foo | 42 +(1 row) + +select * from tt14v; +ERROR: attribute 3 of type record has been dropped +rollback; +-- likewise, altering a referenced column's type is prohibited ... +alter table tt14t alter column f4 type integer using f4::integer; -- fail +ERROR: cannot alter type of a column used by a view or rule +DETAIL: rule _RETURN on view tt14v depends on column "f4" +-- ... but some bug might let it happen, so check defenses +begin; +-- destroy the dependency entry that prevents the ALTER: +delete from pg_depend where + objid = (select oid from pg_rewrite + where ev_class = 'tt14v'::regclass and rulename = '_RETURN') + and refobjsubid = 4 +returning pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as ref, + deptype; + obj | ref | deptype +----------------------------+--------------------------+--------- + rule _RETURN on view tt14v | column f4 of table tt14t | n +(1 row) + +-- this will now succeed: +alter table tt14t alter column f4 type integer using f4::integer; +-- f4 is still in the view ... +select pg_get_viewdef('tt14v', true); + pg_get_viewdef +-------------------------------- + SELECT f1, + + f3, + + f4 + + FROM tt14f() t(f1, f3, f4); +(1 row) + +-- but will fail at execution +select f1, f3 from tt14v; + f1 | f3 +-----+----- + foo | baz +(1 row) + +select * from tt14v; +ERROR: attribute 4 of type record has wrong type +DETAIL: Table has type integer, but query expects text. 
+rollback; +drop view tt14v; +create view tt14v as select t.f1, t.f4 from tt14f() t; +select pg_get_viewdef('tt14v', true); + pg_get_viewdef +-------------------------------- + SELECT f1, + + f4 + + FROM tt14f() t(f1, f3, f4); +(1 row) + +select * from tt14v; + f1 | f4 +-----+---- + foo | 42 +(1 row) + +alter table tt14t drop column f3; -- ok +select pg_get_viewdef('tt14v', true); + pg_get_viewdef +---------------------------- + SELECT f1, + + f4 + + FROM tt14f() t(f1, f4); +(1 row) + +explain (verbose, costs off) select * from tt14v; + QUERY PLAN +---------------------------------------- + Function Scan on testviewschm2.tt14f t + Output: t.f1, t.f4 + Function Call: tt14f() +(3 rows) + +select * from tt14v; + f1 | f4 +-----+---- + foo | 42 +(1 row) + +-- check display of whole-row variables in some corner cases +create type nestedcomposite as (x int8_tbl); +create view tt15v as select row(i)::nestedcomposite from int8_tbl i; +select * from tt15v; + row +------------------------------------------ + ("(123,456)") + ("(123,4567890123456789)") + ("(4567890123456789,123)") + ("(4567890123456789,4567890123456789)") + ("(4567890123456789,-4567890123456789)") +(5 rows) + +select pg_get_viewdef('tt15v', true); + pg_get_viewdef +------------------------------------------------------ + SELECT ROW(i.*::int8_tbl)::nestedcomposite AS "row"+ + FROM int8_tbl i; +(1 row) + +select row(i.*::int8_tbl)::nestedcomposite from int8_tbl i; + row +------------------------------------------ + ("(123,456)") + ("(123,4567890123456789)") + ("(4567890123456789,123)") + ("(4567890123456789,4567890123456789)") + ("(4567890123456789,-4567890123456789)") +(5 rows) + +create view tt16v as select * from int8_tbl i, lateral(values(i)) ss; +select * from tt16v; + q1 | q2 | column1 +------------------+-------------------+-------------------------------------- + 123 | 456 | (123,456) + 123 | 4567890123456789 | (123,4567890123456789) + 4567890123456789 | 123 | (4567890123456789,123) + 4567890123456789 | 4567890123456789 | (4567890123456789,4567890123456789) + 4567890123456789 | -4567890123456789 | (4567890123456789,-4567890123456789) +(5 rows) + +select pg_get_viewdef('tt16v', true); + pg_get_viewdef +------------------------------------------- + SELECT i.q1, + + i.q2, + + ss.column1 + + FROM int8_tbl i, + + LATERAL ( VALUES (i.*::int8_tbl)) ss; +(1 row) + +select * from int8_tbl i, lateral(values(i.*::int8_tbl)) ss; + q1 | q2 | column1 +------------------+-------------------+-------------------------------------- + 123 | 456 | (123,456) + 123 | 4567890123456789 | (123,4567890123456789) + 4567890123456789 | 123 | (4567890123456789,123) + 4567890123456789 | 4567890123456789 | (4567890123456789,4567890123456789) + 4567890123456789 | -4567890123456789 | (4567890123456789,-4567890123456789) +(5 rows) + +create view tt17v as select * from int8_tbl i where i in (values(i)); +select * from tt17v; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +select pg_get_viewdef('tt17v', true); + pg_get_viewdef +--------------------------------------------- + SELECT q1, + + q2 + + FROM int8_tbl i + + WHERE (i.* IN ( VALUES (i.*::int8_tbl))); +(1 row) + +select * from int8_tbl i where i.* in (values(i.*::int8_tbl)); + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 
-4567890123456789 +(5 rows) + +create table tt15v_log(o tt15v, n tt15v, incr bool); +create rule updlog as on update to tt15v do also + insert into tt15v_log values(old, new, row(old,old) < row(new,new)); +\d+ tt15v + View "testviewschm2.tt15v" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+-----------------+-----------+----------+---------+----------+------------- + row | nestedcomposite | | | | extended | +View definition: + SELECT ROW(i.*::int8_tbl)::nestedcomposite AS "row" + FROM int8_tbl i; +Rules: + updlog AS + ON UPDATE TO tt15v DO INSERT INTO tt15v_log (o, n, incr) + VALUES (old.*::tt15v, new.*::tt15v, (ROW(old.*::tt15v, old.*::tt15v) < ROW(new.*::tt15v, new.*::tt15v))) + +-- check unique-ification of overlength names +create view tt18v as + select * from int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy + union all + select * from int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz; +NOTICE: identifier "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxy" will be truncated to "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +NOTICE: identifier "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxz" will be truncated to "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +select pg_get_viewdef('tt18v', true); + pg_get_viewdef +----------------------------------------------------------------------------------- + SELECT xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q1, + + xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q2 + + FROM int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + + UNION ALL + + SELECT xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q1, + + xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.q2 + + FROM int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx; +(1 row) + +explain (costs off) select * from tt18v; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Append + -> Seq Scan on int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + -> Seq Scan on int8_tbl xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1 +(3 rows) + +-- check display of ScalarArrayOp with a sub-select +select 'foo'::text = any(array['abc','def','foo']::text[]); + ?column? +---------- + t +(1 row) + +select 'foo'::text = any((select array['abc','def','foo']::text[])); -- fail +ERROR: operator does not exist: text = text[] +LINE 1: select 'foo'::text = any((select array['abc','def','foo']::t... + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +select 'foo'::text = any((select array['abc','def','foo']::text[])::text[]); + ?column? 
+---------- + t +(1 row) + +create view tt19v as +select 'foo'::text = any(array['abc','def','foo']::text[]) c1, + 'foo'::text = any((select array['abc','def','foo']::text[])::text[]) c2; +select pg_get_viewdef('tt19v', true); + pg_get_viewdef +------------------------------------------------------------------------------------------------------------ + SELECT 'foo'::text = ANY (ARRAY['abc'::text, 'def'::text, 'foo'::text]) AS c1, + + 'foo'::text = ANY ((( SELECT ARRAY['abc'::text, 'def'::text, 'foo'::text] AS "array"))::text[]) AS c2; +(1 row) + +-- check display of assorted RTE_FUNCTION expressions +create view tt20v as +select * from + coalesce(1,2) as c, + collation for ('x'::text) col, + current_date as d, + localtimestamp(3) as t, + cast(1+2 as int4) as i4, + cast(1+2 as int8) as i8; +select pg_get_viewdef('tt20v', true); + pg_get_viewdef +--------------------------------------------- + SELECT c.c, + + col.col, + + d.d, + + t.t, + + i4.i4, + + i8.i8 + + FROM COALESCE(1, 2) c(c), + + COLLATION FOR ('x'::text) col(col), + + CURRENT_DATE d(d), + + LOCALTIMESTAMP(3) t(t), + + CAST(1 + 2 AS integer) i4(i4), + + CAST((1 + 2)::bigint AS bigint) i8(i8); +(1 row) + +-- reverse-listing of various special function syntaxes required by SQL +create view tt201v as +select + ('2022-12-01'::date + '1 day'::interval) at time zone 'UTC' as atz, + extract(day from now()) as extr, + (now(), '1 day'::interval) overlaps + (current_timestamp(2), '1 day'::interval) as o, + 'foo' is normalized isn, + 'foo' is nfkc normalized isnn, + normalize('foo') as n, + normalize('foo', nfkd) as nfkd, + overlay('foo' placing 'bar' from 2) as ovl, + overlay('foo' placing 'bar' from 2 for 3) as ovl2, + position('foo' in 'foobar') as p, + substring('foo' from 2 for 3) as s, + substring('foo' similar 'f' escape '#') as ss, + substring('foo' from 'oo') as ssf, -- historically-permitted abuse + trim(' ' from ' foo ') as bt, + trim(leading ' ' from ' foo ') as lt, + trim(trailing ' foo ') as rt, + trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea) as btb, + trim(leading E'\\000'::bytea from E'\\000Tom\\000'::bytea) as ltb, + trim(trailing E'\\000'::bytea from E'\\000Tom\\000'::bytea) as rtb, + CURRENT_DATE as cd, + (select * from CURRENT_DATE) as cd2, + CURRENT_TIME as ct, + (select * from CURRENT_TIME) as ct2, + CURRENT_TIME (1) as ct3, + (select * from CURRENT_TIME (1)) as ct4, + CURRENT_TIMESTAMP as ct5, + (select * from CURRENT_TIMESTAMP) as ct6, + CURRENT_TIMESTAMP (1) as ct7, + (select * from CURRENT_TIMESTAMP (1)) as ct8, + LOCALTIME as lt1, + (select * from LOCALTIME) as lt2, + LOCALTIME (1) as lt3, + (select * from LOCALTIME (1)) as lt4, + LOCALTIMESTAMP as lt5, + (select * from LOCALTIMESTAMP) as lt6, + LOCALTIMESTAMP (1) as lt7, + (select * from LOCALTIMESTAMP (1)) as lt8, + CURRENT_CATALOG as ca, + (select * from CURRENT_CATALOG) as ca2, + CURRENT_ROLE as cr, + (select * from CURRENT_ROLE) as cr2, + CURRENT_SCHEMA as cs, + (select * from CURRENT_SCHEMA) as cs2, + CURRENT_USER as cu, + (select * from CURRENT_USER) as cu2, + USER as us, + (select * from USER) as us2, + SESSION_USER seu, + (select * from SESSION_USER) as seu2, + SYSTEM_USER as su, + (select * from SYSTEM_USER) as su2; +select pg_get_viewdef('tt201v', true); + pg_get_viewdef +----------------------------------------------------------------------------------------------- + SELECT (('12-01-2022'::date + '@ 1 day'::interval) AT TIME ZONE 'UTC'::text) AS atz, + + EXTRACT(day FROM now()) AS extr, + + ((now(), '@ 1 day'::interval) OVERLAPS 
(CURRENT_TIMESTAMP(2), '@ 1 day'::interval)) AS o,+ + ('foo'::text IS NORMALIZED) AS isn, + + ('foo'::text IS NFKC NORMALIZED) AS isnn, + + NORMALIZE('foo'::text) AS n, + + NORMALIZE('foo'::text, NFKD) AS nfkd, + + OVERLAY('foo'::text PLACING 'bar'::text FROM 2) AS ovl, + + OVERLAY('foo'::text PLACING 'bar'::text FROM 2 FOR 3) AS ovl2, + + POSITION(('foo'::text) IN ('foobar'::text)) AS p, + + SUBSTRING('foo'::text FROM 2 FOR 3) AS s, + + SUBSTRING('foo'::text SIMILAR 'f'::text ESCAPE '#'::text) AS ss, + + "substring"('foo'::text, 'oo'::text) AS ssf, + + TRIM(BOTH ' '::text FROM ' foo '::text) AS bt, + + TRIM(LEADING ' '::text FROM ' foo '::text) AS lt, + + TRIM(TRAILING FROM ' foo '::text) AS rt, + + TRIM(BOTH '\x00'::bytea FROM '\x00546f6d00'::bytea) AS btb, + + TRIM(LEADING '\x00'::bytea FROM '\x00546f6d00'::bytea) AS ltb, + + TRIM(TRAILING '\x00'::bytea FROM '\x00546f6d00'::bytea) AS rtb, + + CURRENT_DATE AS cd, + + ( SELECT "current_date"."current_date" + + FROM CURRENT_DATE "current_date"("current_date")) AS cd2, + + CURRENT_TIME AS ct, + + ( SELECT "current_time"."current_time" + + FROM CURRENT_TIME "current_time"("current_time")) AS ct2, + + CURRENT_TIME(1) AS ct3, + + ( SELECT "current_time"."current_time" + + FROM CURRENT_TIME(1) "current_time"("current_time")) AS ct4, + + CURRENT_TIMESTAMP AS ct5, + + ( SELECT "current_timestamp"."current_timestamp" + + FROM CURRENT_TIMESTAMP "current_timestamp"("current_timestamp")) AS ct6, + + CURRENT_TIMESTAMP(1) AS ct7, + + ( SELECT "current_timestamp"."current_timestamp" + + FROM CURRENT_TIMESTAMP(1) "current_timestamp"("current_timestamp")) AS ct8, + + LOCALTIME AS lt1, + + ( SELECT "localtime"."localtime" + + FROM LOCALTIME "localtime"("localtime")) AS lt2, + + LOCALTIME(1) AS lt3, + + ( SELECT "localtime"."localtime" + + FROM LOCALTIME(1) "localtime"("localtime")) AS lt4, + + LOCALTIMESTAMP AS lt5, + + ( SELECT "localtimestamp"."localtimestamp" + + FROM LOCALTIMESTAMP "localtimestamp"("localtimestamp")) AS lt6, + + LOCALTIMESTAMP(1) AS lt7, + + ( SELECT "localtimestamp"."localtimestamp" + + FROM LOCALTIMESTAMP(1) "localtimestamp"("localtimestamp")) AS lt8, + + CURRENT_CATALOG AS ca, + + ( SELECT "current_catalog"."current_catalog" + + FROM CURRENT_CATALOG "current_catalog"("current_catalog")) AS ca2, + + CURRENT_ROLE AS cr, + + ( SELECT "current_role"."current_role" + + FROM CURRENT_ROLE "current_role"("current_role")) AS cr2, + + CURRENT_SCHEMA AS cs, + + ( SELECT "current_schema"."current_schema" + + FROM CURRENT_SCHEMA "current_schema"("current_schema")) AS cs2, + + CURRENT_USER AS cu, + + ( SELECT "current_user"."current_user" + + FROM CURRENT_USER "current_user"("current_user")) AS cu2, + + USER AS us, + + ( SELECT "user"."user" + + FROM USER "user"("user")) AS us2, + + SESSION_USER AS seu, + + ( SELECT "session_user"."session_user" + + FROM SESSION_USER "session_user"("session_user")) AS seu2, + + SYSTEM_USER AS su, + + ( SELECT "system_user"."system_user" + + FROM SYSTEM_USER "system_user"("system_user")) AS su2; +(1 row) + +-- corner cases with empty join conditions +create view tt21v as +select * from tt5 natural inner join tt6; +select pg_get_viewdef('tt21v', true); + pg_get_viewdef +---------------------- + SELECT tt5.a, + + tt5.b, + + tt5.cc, + + tt6.c, + + tt6.d + + FROM tt5 + + CROSS JOIN tt6; +(1 row) + +create view tt22v as +select * from tt5 natural left join tt6; +select pg_get_viewdef('tt22v', true); + pg_get_viewdef +----------------------------- + SELECT tt5.a, + + tt5.b, + + tt5.cc, + + tt6.c, + + tt6.d + + FROM tt5 
+ + LEFT JOIN tt6 ON TRUE; +(1 row) + +-- check handling of views with immediately-renamed columns +create view tt23v (col_a, col_b) as +select q1 as other_name1, q2 as other_name2 from int8_tbl +union +select 42, 43; +select pg_get_viewdef('tt23v', true); + pg_get_viewdef +------------------------------- + SELECT int8_tbl.q1 AS col_a,+ + int8_tbl.q2 AS col_b + + FROM int8_tbl + + UNION + + SELECT 42 AS col_a, + + 43 AS col_b; +(1 row) + +select pg_get_ruledef(oid, true) from pg_rewrite + where ev_class = 'tt23v'::regclass and ev_type = '1'; + pg_get_ruledef +----------------------------------------------------------------- + CREATE RULE "_RETURN" AS + + ON SELECT TO tt23v DO INSTEAD SELECT int8_tbl.q1 AS col_a,+ + int8_tbl.q2 AS col_b + + FROM int8_tbl + + UNION + + SELECT 42 AS col_a, + + 43 AS col_b; +(1 row) + +-- test extraction of FieldSelect field names (get_name_for_var_field) +create view tt24v as +with cte as materialized (select r from (values(1,2),(3,4)) r) +select (r).column2 as col_a, (rr).column2 as col_b from + cte join (select rr from (values(1,7),(3,8)) rr limit 2) ss + on (r).column1 = (rr).column1; +select pg_get_viewdef('tt24v', true); + pg_get_viewdef +------------------------------------------------------------ + WITH cte AS MATERIALIZED ( + + SELECT r.*::record AS r + + FROM ( VALUES (1,2), (3,4)) r + + ) + + SELECT (cte.r).column2 AS col_a, + + (ss.rr).column2 AS col_b + + FROM cte + + JOIN ( SELECT rr.*::record AS rr + + FROM ( VALUES (1,7), (3,8)) rr + + LIMIT 2) ss ON (cte.r).column1 = (ss.rr).column1; +(1 row) + +create view tt25v as +with cte as materialized (select pg_get_keywords() k) +select (k).word from cte; +select pg_get_viewdef('tt25v', true); + pg_get_viewdef +---------------------------------------- + WITH cte AS MATERIALIZED ( + + SELECT pg_get_keywords() AS k+ + ) + + SELECT (k).word AS word + + FROM cte; +(1 row) + +-- also check cases seen only in EXPLAIN +explain (verbose, costs off) +select * from tt24v; + QUERY PLAN +------------------------------------------------------------------------------------------ + Hash Join + Output: (cte.r).column2, ((ROW("*VALUES*".column1, "*VALUES*".column2))).column2 + Hash Cond: ((cte.r).column1 = ((ROW("*VALUES*".column1, "*VALUES*".column2))).column1) + CTE cte + -> Values Scan on "*VALUES*_1" + Output: ROW("*VALUES*_1".column1, "*VALUES*_1".column2) + -> CTE Scan on cte + Output: cte.r + -> Hash + Output: (ROW("*VALUES*".column1, "*VALUES*".column2)) + -> Limit + Output: (ROW("*VALUES*".column1, "*VALUES*".column2)) + -> Values Scan on "*VALUES*" + Output: ROW("*VALUES*".column1, "*VALUES*".column2) +(14 rows) + +explain (verbose, costs off) +select (r).column2 from (select r from (values(1,2),(3,4)) r limit 1) ss; + QUERY PLAN +------------------------------------------------------------------- + Subquery Scan on ss + Output: (ss.r).column2 + -> Limit + Output: (ROW("*VALUES*".column1, "*VALUES*".column2)) + -> Values Scan on "*VALUES*" + Output: ROW("*VALUES*".column1, "*VALUES*".column2) +(6 rows) + +-- test pretty-print parenthesization rules, and SubLink deparsing +create view tt26v as +select x + y + z as c1, + (x * y) + z as c2, + x + (y * z) as c3, + (x + y) * z as c4, + x * (y + z) as c5, + x + (y + z) as c6, + x + (y # z) as c7, + (x > y) AND (y > z OR x > z) as c8, + (x > y) OR (y > z AND NOT (x > z)) as c9, + (x,y) <> ALL (values(1,2),(3,4)) as c10, + (x,y) <= ANY (values(1,2),(3,4)) as c11 +from (values(1,2,3)) v(x,y,z); +select pg_get_viewdef('tt26v', true); + pg_get_viewdef 
+---------------------------------------------------- + SELECT x + y + z AS c1, + + x * y + z AS c2, + + x + y * z AS c3, + + (x + y) * z AS c4, + + x * (y + z) AS c5, + + x + (y + z) AS c6, + + x + (y # z) AS c7, + + x > y AND (y > z OR x > z) AS c8, + + x > y OR y > z AND NOT x > z AS c9, + + ((x, y) <> ALL ( VALUES (1,2), (3,4))) AS c10,+ + ((x, y) <= ANY ( VALUES (1,2), (3,4))) AS c11 + + FROM ( VALUES (1,2,3)) v(x, y, z); +(1 row) + +-- clean up all the random objects we made above +DROP SCHEMA temp_view_test CASCADE; +NOTICE: drop cascades to 27 other objects +DETAIL: drop cascades to table temp_view_test.base_table +drop cascades to view v2_temp +drop cascades to view v4_temp +drop cascades to view v6_temp +drop cascades to view v7_temp +drop cascades to view v10_temp +drop cascades to view v8_temp +drop cascades to view v9_temp +drop cascades to view v11_temp +drop cascades to view v12_temp +drop cascades to table temp_view_test.base_table2 +drop cascades to view v5_temp +drop cascades to view temp_view_test.v1 +drop cascades to view temp_view_test.v2 +drop cascades to view temp_view_test.v3 +drop cascades to view temp_view_test.v4 +drop cascades to view temp_view_test.v5 +drop cascades to view temp_view_test.v6 +drop cascades to view temp_view_test.v7 +drop cascades to view temp_view_test.v8 +drop cascades to sequence temp_view_test.seq1 +drop cascades to view temp_view_test.v9 +drop cascades to table temp_view_test.tx1 +drop cascades to view aliased_view_1 +drop cascades to view aliased_view_2 +drop cascades to view aliased_view_3 +drop cascades to view aliased_view_4 +DROP SCHEMA testviewschm2 CASCADE; +NOTICE: drop cascades to 77 other objects +DETAIL: drop cascades to table t1 +drop cascades to view temporal1 +drop cascades to view temporal2 +drop cascades to view temporal3 +drop cascades to view temporal4 +drop cascades to table t2 +drop cascades to view nontemp1 +drop cascades to view nontemp2 +drop cascades to view nontemp3 +drop cascades to view nontemp4 +drop cascades to table tbl1 +drop cascades to table tbl2 +drop cascades to table tbl3 +drop cascades to table tbl4 +drop cascades to view mytempview +drop cascades to view pubview +drop cascades to view mysecview1 +drop cascades to view mysecview2 +drop cascades to view mysecview3 +drop cascades to view mysecview4 +drop cascades to view mysecview7 +drop cascades to view mysecview8 +drop cascades to view mysecview9 +drop cascades to view unspecified_types +drop cascades to table tt1 +drop cascades to table tx1 +drop cascades to view view_of_joins +drop cascades to table tbl1a +drop cascades to view view_of_joins_2a +drop cascades to view view_of_joins_2b +drop cascades to view view_of_joins_2c +drop cascades to view view_of_joins_2d +drop cascades to table tt2 +drop cascades to table tt3 +drop cascades to table tt4 +drop cascades to view v1 +drop cascades to view v1a +drop cascades to view v2 +drop cascades to view v2a +drop cascades to view v3 +drop cascades to table tt5 +drop cascades to table tt6 +drop cascades to view vv1 +drop cascades to view v4 +drop cascades to table tt7 +drop cascades to table tt8 +drop cascades to view vv2 +drop cascades to view vv3 +drop cascades to view vv4 +drop cascades to table tt7a +drop cascades to table tt8a +drop cascades to view vv2a +drop cascades to table tt9 +drop cascades to table tt10 +drop cascades to view vv5 +drop cascades to table tt11 +drop cascades to table tt12 +drop cascades to table tt13 +drop cascades to view vv6 +drop cascades to table tt14t +drop cascades to function 
tt14f() +drop cascades to view tt14v +drop cascades to type nestedcomposite +drop cascades to view tt15v +drop cascades to view tt16v +drop cascades to view tt17v +drop cascades to table tt15v_log +drop cascades to view tt18v +drop cascades to view tt19v +drop cascades to view tt20v +drop cascades to view tt201v +drop cascades to view tt21v +drop cascades to view tt22v +drop cascades to view tt23v +drop cascades to view tt24v +drop cascades to view tt25v +drop cascades to view tt26v diff --git a/src/test/regress/expected/date.out b/src/test/regress/expected/date.out new file mode 100644 index 0000000..f5949f3 --- /dev/null +++ b/src/test/regress/expected/date.out @@ -0,0 +1,1534 @@ +-- +-- DATE +-- +CREATE TABLE DATE_TBL (f1 date); +INSERT INTO DATE_TBL VALUES ('1957-04-09'); +INSERT INTO DATE_TBL VALUES ('1957-06-13'); +INSERT INTO DATE_TBL VALUES ('1996-02-28'); +INSERT INTO DATE_TBL VALUES ('1996-02-29'); +INSERT INTO DATE_TBL VALUES ('1996-03-01'); +INSERT INTO DATE_TBL VALUES ('1996-03-02'); +INSERT INTO DATE_TBL VALUES ('1997-02-28'); +INSERT INTO DATE_TBL VALUES ('1997-02-29'); +ERROR: date/time field value out of range: "1997-02-29" +LINE 1: INSERT INTO DATE_TBL VALUES ('1997-02-29'); + ^ +INSERT INTO DATE_TBL VALUES ('1997-03-01'); +INSERT INTO DATE_TBL VALUES ('1997-03-02'); +INSERT INTO DATE_TBL VALUES ('2000-04-01'); +INSERT INTO DATE_TBL VALUES ('2000-04-02'); +INSERT INTO DATE_TBL VALUES ('2000-04-03'); +INSERT INTO DATE_TBL VALUES ('2038-04-08'); +INSERT INTO DATE_TBL VALUES ('2039-04-09'); +INSERT INTO DATE_TBL VALUES ('2040-04-10'); +INSERT INTO DATE_TBL VALUES ('2040-04-10 BC'); +SELECT f1 FROM DATE_TBL; + f1 +--------------- + 04-09-1957 + 06-13-1957 + 02-28-1996 + 02-29-1996 + 03-01-1996 + 03-02-1996 + 02-28-1997 + 03-01-1997 + 03-02-1997 + 04-01-2000 + 04-02-2000 + 04-03-2000 + 04-08-2038 + 04-09-2039 + 04-10-2040 + 04-10-2040 BC +(16 rows) + +SELECT f1 FROM DATE_TBL WHERE f1 < '2000-01-01'; + f1 +--------------- + 04-09-1957 + 06-13-1957 + 02-28-1996 + 02-29-1996 + 03-01-1996 + 03-02-1996 + 02-28-1997 + 03-01-1997 + 03-02-1997 + 04-10-2040 BC +(10 rows) + +SELECT f1 FROM DATE_TBL + WHERE f1 BETWEEN '2000-01-01' AND '2001-01-01'; + f1 +------------ + 04-01-2000 + 04-02-2000 + 04-03-2000 +(3 rows) + +-- +-- Check all the documented input formats +-- +SET datestyle TO iso; -- display results in ISO +SET datestyle TO ymd; +SELECT date 'January 8, 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-18'; + date +------------ + 1999-01-18 +(1 row) + +SELECT date '1/8/1999'; +ERROR: date/time field value out of range: "1/8/1999" +LINE 1: SELECT date '1/8/1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1/18/1999'; +ERROR: date/time field value out of range: "1/18/1999" +LINE 1: SELECT date '1/18/1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '18/1/1999'; +ERROR: date/time field value out of range: "18/1/1999" +LINE 1: SELECT date '18/1/1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. 
+SELECT date '01/02/03'; + date +------------ + 2001-02-03 +(1 row) + +SELECT date '19990108'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '990108'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999.008'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'J2451187'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'January 8, 99 BC'; +ERROR: date/time field value out of range: "January 8, 99 BC" +LINE 1: SELECT date 'January 8, 99 BC'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '99-Jan-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-Jan-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-Jan-99'; +ERROR: date/time field value out of range: "08-Jan-99" +LINE 1: SELECT date '08-Jan-99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '08-Jan-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan-08-99'; +ERROR: date/time field value out of range: "Jan-08-99" +LINE 1: SELECT date 'Jan-08-99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date 'Jan-08-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-08-Jan'; +ERROR: invalid input syntax for type date: "99-08-Jan" +LINE 1: SELECT date '99-08-Jan'; + ^ +SELECT date '1999-08-Jan'; +ERROR: invalid input syntax for type date: "1999-08-Jan" +LINE 1: SELECT date '1999-08-Jan'; + ^ +SELECT date '99 Jan 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999 Jan 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 Jan 99'; +ERROR: date/time field value out of range: "08 Jan 99" +LINE 1: SELECT date '08 Jan 99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '08 Jan 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan 08 99'; +ERROR: date/time field value out of range: "Jan 08 99" +LINE 1: SELECT date 'Jan 08 99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date 'Jan 08 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99 08 Jan'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999 08 Jan'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-01-99'; +ERROR: date/time field value out of range: "08-01-99" +LINE 1: SELECT date '08-01-99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '08-01-1999'; +ERROR: date/time field value out of range: "08-01-1999" +LINE 1: SELECT date '08-01-1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '01-08-99'; +ERROR: date/time field value out of range: "01-08-99" +LINE 1: SELECT date '01-08-99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '01-08-1999'; +ERROR: date/time field value out of range: "01-08-1999" +LINE 1: SELECT date '01-08-1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '99-08-01'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '1999-08-01'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '99 01 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999 01 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 01 99'; +ERROR: date/time field value out of range: "08 01 99" +LINE 1: SELECT date '08 01 99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. 
+SELECT date '08 01 1999'; +ERROR: date/time field value out of range: "08 01 1999" +LINE 1: SELECT date '08 01 1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '01 08 99'; +ERROR: date/time field value out of range: "01 08 99" +LINE 1: SELECT date '01 08 99'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '01 08 1999'; +ERROR: date/time field value out of range: "01 08 1999" +LINE 1: SELECT date '01 08 1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '99 08 01'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '1999 08 01'; + date +------------ + 1999-08-01 +(1 row) + +SET datestyle TO dmy; +SELECT date 'January 8, 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-18'; + date +------------ + 1999-01-18 +(1 row) + +SELECT date '1/8/1999'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '1/18/1999'; +ERROR: date/time field value out of range: "1/18/1999" +LINE 1: SELECT date '1/18/1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '18/1/1999'; + date +------------ + 1999-01-18 +(1 row) + +SELECT date '01/02/03'; + date +------------ + 2003-02-01 +(1 row) + +SELECT date '19990108'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '990108'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999.008'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'J2451187'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'January 8, 99 BC'; + date +--------------- + 0099-01-08 BC +(1 row) + +SELECT date '99-Jan-08'; +ERROR: date/time field value out of range: "99-Jan-08" +LINE 1: SELECT date '99-Jan-08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999-Jan-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-Jan-99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-Jan-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan-08-99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan-08-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-08-Jan'; +ERROR: invalid input syntax for type date: "99-08-Jan" +LINE 1: SELECT date '99-08-Jan'; + ^ +SELECT date '1999-08-Jan'; +ERROR: invalid input syntax for type date: "1999-08-Jan" +LINE 1: SELECT date '1999-08-Jan'; + ^ +SELECT date '99 Jan 08'; +ERROR: date/time field value out of range: "99 Jan 08" +LINE 1: SELECT date '99 Jan 08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999 Jan 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 Jan 99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 Jan 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan 08 99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan 08 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99 08 Jan'; +ERROR: invalid input syntax for type date: "99 08 Jan" +LINE 1: SELECT date '99 08 Jan'; + ^ +SELECT date '1999 08 Jan'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-01-08'; +ERROR: date/time field value out of range: "99-01-08" +LINE 1: SELECT date '99-01-08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. 
+SELECT date '1999-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-01-99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-01-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '01-08-99'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '01-08-1999'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '99-08-01'; +ERROR: date/time field value out of range: "99-08-01" +LINE 1: SELECT date '99-08-01'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999-08-01'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '99 01 08'; +ERROR: date/time field value out of range: "99 01 08" +LINE 1: SELECT date '99 01 08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999 01 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 01 99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 01 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '01 08 99'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '01 08 1999'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '99 08 01'; +ERROR: date/time field value out of range: "99 08 01" +LINE 1: SELECT date '99 08 01'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999 08 01'; + date +------------ + 1999-08-01 +(1 row) + +SET datestyle TO mdy; +SELECT date 'January 8, 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999-01-18'; + date +------------ + 1999-01-18 +(1 row) + +SELECT date '1/8/1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1/18/1999'; + date +------------ + 1999-01-18 +(1 row) + +SELECT date '18/1/1999'; +ERROR: date/time field value out of range: "18/1/1999" +LINE 1: SELECT date '18/1/1999'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '01/02/03'; + date +------------ + 2003-01-02 +(1 row) + +SELECT date '19990108'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '990108'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '1999.008'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'J2451187'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'January 8, 99 BC'; + date +--------------- + 0099-01-08 BC +(1 row) + +SELECT date '99-Jan-08'; +ERROR: date/time field value out of range: "99-Jan-08" +LINE 1: SELECT date '99-Jan-08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. 
+SELECT date '1999-Jan-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-Jan-99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-Jan-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan-08-99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan-08-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-08-Jan'; +ERROR: invalid input syntax for type date: "99-08-Jan" +LINE 1: SELECT date '99-08-Jan'; + ^ +SELECT date '1999-08-Jan'; +ERROR: invalid input syntax for type date: "1999-08-Jan" +LINE 1: SELECT date '1999-08-Jan'; + ^ +SELECT date '99 Jan 08'; +ERROR: invalid input syntax for type date: "99 Jan 08" +LINE 1: SELECT date '99 Jan 08'; + ^ +SELECT date '1999 Jan 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 Jan 99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 Jan 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan 08 99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date 'Jan 08 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99 08 Jan'; +ERROR: invalid input syntax for type date: "99 08 Jan" +LINE 1: SELECT date '99 08 Jan'; + ^ +SELECT date '1999 08 Jan'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-01-08'; +ERROR: date/time field value out of range: "99-01-08" +LINE 1: SELECT date '99-01-08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999-01-08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08-01-99'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '08-01-1999'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '01-08-99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '01-08-1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99-08-01'; +ERROR: date/time field value out of range: "99-08-01" +LINE 1: SELECT date '99-08-01'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999-08-01'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '99 01 08'; +ERROR: date/time field value out of range: "99 01 08" +LINE 1: SELECT date '99 01 08'; + ^ +HINT: Perhaps you need a different "datestyle" setting. +SELECT date '1999 01 08'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '08 01 99'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '08 01 1999'; + date +------------ + 1999-08-01 +(1 row) + +SELECT date '01 08 99'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '01 08 1999'; + date +------------ + 1999-01-08 +(1 row) + +SELECT date '99 08 01'; +ERROR: date/time field value out of range: "99 08 01" +LINE 1: SELECT date '99 08 01'; + ^ +HINT: Perhaps you need a different "datestyle" setting. 
+SELECT date '1999 08 01'; + date +------------ + 1999-08-01 +(1 row) + +-- Check upper and lower limits of date range +SELECT date '4714-11-24 BC'; + date +--------------- + 4714-11-24 BC +(1 row) + +SELECT date '4714-11-23 BC'; -- out of range +ERROR: date out of range: "4714-11-23 BC" +LINE 1: SELECT date '4714-11-23 BC'; + ^ +SELECT date '5874897-12-31'; + date +--------------- + 5874897-12-31 +(1 row) + +SELECT date '5874898-01-01'; -- out of range +ERROR: date out of range: "5874898-01-01" +LINE 1: SELECT date '5874898-01-01'; + ^ +-- Test non-error-throwing API +SELECT pg_input_is_valid('now', 'date'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('garbage', 'date'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('6874898-01-01', 'date'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('garbage', 'date'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------+------+---------------- + invalid input syntax for type date: "garbage" | | | 22007 +(1 row) + +SELECT * FROM pg_input_error_info('6874898-01-01', 'date'); + message | detail | hint | sql_error_code +------------------------------------+--------+------+---------------- + date out of range: "6874898-01-01" | | | 22008 +(1 row) + +RESET datestyle; +-- +-- Simple math +-- Leave most of it for the horology tests +-- +SELECT f1 - date '2000-01-01' AS "Days From 2K" FROM DATE_TBL; + Days From 2K +-------------- + -15607 + -15542 + -1403 + -1402 + -1401 + -1400 + -1037 + -1036 + -1035 + 91 + 92 + 93 + 13977 + 14343 + 14710 + -1475115 +(16 rows) + +SELECT f1 - date 'epoch' AS "Days From Epoch" FROM DATE_TBL; + Days From Epoch +----------------- + -4650 + -4585 + 9554 + 9555 + 9556 + 9557 + 9920 + 9921 + 9922 + 11048 + 11049 + 11050 + 24934 + 25300 + 25667 + -1464158 +(16 rows) + +SELECT date 'yesterday' - date 'today' AS "One day"; + One day +--------- + -1 +(1 row) + +SELECT date 'today' - date 'tomorrow' AS "One day"; + One day +--------- + -1 +(1 row) + +SELECT date 'yesterday' - date 'tomorrow' AS "Two days"; + Two days +---------- + -2 +(1 row) + +SELECT date 'tomorrow' - date 'today' AS "One day"; + One day +--------- + 1 +(1 row) + +SELECT date 'today' - date 'yesterday' AS "One day"; + One day +--------- + 1 +(1 row) + +SELECT date 'tomorrow' - date 'yesterday' AS "Two days"; + Two days +---------- + 2 +(1 row) + +-- +-- test extract! 
+-- +SELECT f1 as "date", + date_part('year', f1) AS year, + date_part('month', f1) AS month, + date_part('day', f1) AS day, + date_part('quarter', f1) AS quarter, + date_part('decade', f1) AS decade, + date_part('century', f1) AS century, + date_part('millennium', f1) AS millennium, + date_part('isoyear', f1) AS isoyear, + date_part('week', f1) AS week, + date_part('dow', f1) AS dow, + date_part('isodow', f1) AS isodow, + date_part('doy', f1) AS doy, + date_part('julian', f1) AS julian, + date_part('epoch', f1) AS epoch + FROM date_tbl; + date | year | month | day | quarter | decade | century | millennium | isoyear | week | dow | isodow | doy | julian | epoch +---------------+-------+-------+-----+---------+--------+---------+------------+---------+------+-----+--------+-----+---------+--------------- + 04-09-1957 | 1957 | 4 | 9 | 2 | 195 | 20 | 2 | 1957 | 15 | 2 | 2 | 99 | 2435938 | -401760000 + 06-13-1957 | 1957 | 6 | 13 | 2 | 195 | 20 | 2 | 1957 | 24 | 4 | 4 | 164 | 2436003 | -396144000 + 02-28-1996 | 1996 | 2 | 28 | 1 | 199 | 20 | 2 | 1996 | 9 | 3 | 3 | 59 | 2450142 | 825465600 + 02-29-1996 | 1996 | 2 | 29 | 1 | 199 | 20 | 2 | 1996 | 9 | 4 | 4 | 60 | 2450143 | 825552000 + 03-01-1996 | 1996 | 3 | 1 | 1 | 199 | 20 | 2 | 1996 | 9 | 5 | 5 | 61 | 2450144 | 825638400 + 03-02-1996 | 1996 | 3 | 2 | 1 | 199 | 20 | 2 | 1996 | 9 | 6 | 6 | 62 | 2450145 | 825724800 + 02-28-1997 | 1997 | 2 | 28 | 1 | 199 | 20 | 2 | 1997 | 9 | 5 | 5 | 59 | 2450508 | 857088000 + 03-01-1997 | 1997 | 3 | 1 | 1 | 199 | 20 | 2 | 1997 | 9 | 6 | 6 | 60 | 2450509 | 857174400 + 03-02-1997 | 1997 | 3 | 2 | 1 | 199 | 20 | 2 | 1997 | 9 | 0 | 7 | 61 | 2450510 | 857260800 + 04-01-2000 | 2000 | 4 | 1 | 2 | 200 | 20 | 2 | 2000 | 13 | 6 | 6 | 92 | 2451636 | 954547200 + 04-02-2000 | 2000 | 4 | 2 | 2 | 200 | 20 | 2 | 2000 | 13 | 0 | 7 | 93 | 2451637 | 954633600 + 04-03-2000 | 2000 | 4 | 3 | 2 | 200 | 20 | 2 | 2000 | 14 | 1 | 1 | 94 | 2451638 | 954720000 + 04-08-2038 | 2038 | 4 | 8 | 2 | 203 | 21 | 3 | 2038 | 14 | 4 | 4 | 98 | 2465522 | 2154297600 + 04-09-2039 | 2039 | 4 | 9 | 2 | 203 | 21 | 3 | 2039 | 14 | 6 | 6 | 99 | 2465888 | 2185920000 + 04-10-2040 | 2040 | 4 | 10 | 2 | 204 | 21 | 3 | 2040 | 15 | 2 | 2 | 101 | 2466255 | 2217628800 + 04-10-2040 BC | -2040 | 4 | 10 | 2 | -204 | -21 | -3 | -2040 | 15 | 1 | 1 | 100 | 976430 | -126503251200 +(16 rows) + +-- +-- epoch +-- +SELECT EXTRACT(EPOCH FROM DATE '1970-01-01'); -- 0 + extract +--------- + 0 +(1 row) + +-- +-- century +-- +SELECT EXTRACT(CENTURY FROM DATE '0101-12-31 BC'); -- -2 + extract +--------- + -2 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '0100-12-31 BC'); -- -1 + extract +--------- + -1 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '0001-12-31 BC'); -- -1 + extract +--------- + -1 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '0001-01-01'); -- 1 + extract +--------- + 1 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '0001-01-01 AD'); -- 1 + extract +--------- + 1 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '1900-12-31'); -- 19 + extract +--------- + 19 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '1901-01-01'); -- 20 + extract +--------- + 20 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '2000-12-31'); -- 20 + extract +--------- + 20 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '2001-01-01'); -- 21 + extract +--------- + 21 +(1 row) + +SELECT EXTRACT(CENTURY FROM CURRENT_DATE)>=21 AS True; -- true + true +------ + t +(1 row) + +-- +-- millennium +-- +SELECT EXTRACT(MILLENNIUM FROM DATE '0001-12-31 BC'); -- -1 + extract +--------- + -1 +(1 row) + +SELECT EXTRACT(MILLENNIUM 
FROM DATE '0001-01-01 AD'); -- 1 + extract +--------- + 1 +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE '1000-12-31'); -- 1 + extract +--------- + 1 +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE '1001-01-01'); -- 2 + extract +--------- + 2 +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE '2000-12-31'); -- 2 + extract +--------- + 2 +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE '2001-01-01'); -- 3 + extract +--------- + 3 +(1 row) + +-- next test to be fixed on the turn of the next millennium;-) +SELECT EXTRACT(MILLENNIUM FROM CURRENT_DATE); -- 3 + extract +--------- + 3 +(1 row) + +-- +-- decade +-- +SELECT EXTRACT(DECADE FROM DATE '1994-12-25'); -- 199 + extract +--------- + 199 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '0010-01-01'); -- 1 + extract +--------- + 1 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '0009-12-31'); -- 0 + extract +--------- + 0 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '0001-01-01 BC'); -- 0 + extract +--------- + 0 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '0002-12-31 BC'); -- -1 + extract +--------- + -1 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '0011-01-01 BC'); -- -1 + extract +--------- + -1 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '0012-12-31 BC'); -- -2 + extract +--------- + -2 +(1 row) + +-- +-- all possible fields +-- +SELECT EXTRACT(MICROSECONDS FROM DATE '2020-08-11'); +ERROR: unit "microseconds" not supported for type date +SELECT EXTRACT(MILLISECONDS FROM DATE '2020-08-11'); +ERROR: unit "milliseconds" not supported for type date +SELECT EXTRACT(SECOND FROM DATE '2020-08-11'); +ERROR: unit "second" not supported for type date +SELECT EXTRACT(MINUTE FROM DATE '2020-08-11'); +ERROR: unit "minute" not supported for type date +SELECT EXTRACT(HOUR FROM DATE '2020-08-11'); +ERROR: unit "hour" not supported for type date +SELECT EXTRACT(DAY FROM DATE '2020-08-11'); + extract +--------- + 11 +(1 row) + +SELECT EXTRACT(MONTH FROM DATE '2020-08-11'); + extract +--------- + 8 +(1 row) + +SELECT EXTRACT(YEAR FROM DATE '2020-08-11'); + extract +--------- + 2020 +(1 row) + +SELECT EXTRACT(YEAR FROM DATE '2020-08-11 BC'); + extract +--------- + -2020 +(1 row) + +SELECT EXTRACT(DECADE FROM DATE '2020-08-11'); + extract +--------- + 202 +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE '2020-08-11'); + extract +--------- + 21 +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE '2020-08-11'); + extract +--------- + 3 +(1 row) + +SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11'); + extract +--------- + 2020 +(1 row) + +SELECT EXTRACT(ISOYEAR FROM DATE '2020-08-11 BC'); + extract +--------- + -2020 +(1 row) + +SELECT EXTRACT(QUARTER FROM DATE '2020-08-11'); + extract +--------- + 3 +(1 row) + +SELECT EXTRACT(WEEK FROM DATE '2020-08-11'); + extract +--------- + 33 +(1 row) + +SELECT EXTRACT(DOW FROM DATE '2020-08-11'); + extract +--------- + 2 +(1 row) + +SELECT EXTRACT(DOW FROM DATE '2020-08-16'); + extract +--------- + 0 +(1 row) + +SELECT EXTRACT(ISODOW FROM DATE '2020-08-11'); + extract +--------- + 2 +(1 row) + +SELECT EXTRACT(ISODOW FROM DATE '2020-08-16'); + extract +--------- + 7 +(1 row) + +SELECT EXTRACT(DOY FROM DATE '2020-08-11'); + extract +--------- + 224 +(1 row) + +SELECT EXTRACT(TIMEZONE FROM DATE '2020-08-11'); +ERROR: unit "timezone" not supported for type date +SELECT EXTRACT(TIMEZONE_M FROM DATE '2020-08-11'); +ERROR: unit "timezone_m" not supported for type date +SELECT EXTRACT(TIMEZONE_H FROM DATE '2020-08-11'); +ERROR: unit "timezone_h" not supported for type date +SELECT EXTRACT(EPOCH FROM DATE '2020-08-11'); + extract +------------ + 
1597104000 +(1 row) + +SELECT EXTRACT(JULIAN FROM DATE '2020-08-11'); + extract +--------- + 2459073 +(1 row) + +-- +-- test trunc function! +-- +SELECT DATE_TRUNC('MILLENNIUM', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1001 + date_trunc +-------------------------- + Thu Jan 01 00:00:00 1001 +(1 row) + +SELECT DATE_TRUNC('MILLENNIUM', DATE '1970-03-20'); -- 1001-01-01 + date_trunc +------------------------------ + Thu Jan 01 00:00:00 1001 PST +(1 row) + +SELECT DATE_TRUNC('CENTURY', TIMESTAMP '1970-03-20 04:30:00.00000'); -- 1901 + date_trunc +-------------------------- + Tue Jan 01 00:00:00 1901 +(1 row) + +SELECT DATE_TRUNC('CENTURY', DATE '1970-03-20'); -- 1901 + date_trunc +------------------------------ + Tue Jan 01 00:00:00 1901 PST +(1 row) + +SELECT DATE_TRUNC('CENTURY', DATE '2004-08-10'); -- 2001-01-01 + date_trunc +------------------------------ + Mon Jan 01 00:00:00 2001 PST +(1 row) + +SELECT DATE_TRUNC('CENTURY', DATE '0002-02-04'); -- 0001-01-01 + date_trunc +------------------------------ + Mon Jan 01 00:00:00 0001 PST +(1 row) + +SELECT DATE_TRUNC('CENTURY', DATE '0055-08-10 BC'); -- 0100-01-01 BC + date_trunc +--------------------------------- + Tue Jan 01 00:00:00 0100 PST BC +(1 row) + +SELECT DATE_TRUNC('DECADE', DATE '1993-12-25'); -- 1990-01-01 + date_trunc +------------------------------ + Mon Jan 01 00:00:00 1990 PST +(1 row) + +SELECT DATE_TRUNC('DECADE', DATE '0004-12-25'); -- 0001-01-01 BC + date_trunc +--------------------------------- + Sat Jan 01 00:00:00 0001 PST BC +(1 row) + +SELECT DATE_TRUNC('DECADE', DATE '0002-12-31 BC'); -- 0011-01-01 BC + date_trunc +--------------------------------- + Mon Jan 01 00:00:00 0011 PST BC +(1 row) + +-- +-- test infinity +-- +select 'infinity'::date, '-infinity'::date; + date | date +----------+----------- + infinity | -infinity +(1 row) + +select 'infinity'::date > 'today'::date as t; + t +--- + t +(1 row) + +select '-infinity'::date < 'today'::date as t; + t +--- + t +(1 row) + +select isfinite('infinity'::date), isfinite('-infinity'::date), isfinite('today'::date); + isfinite | isfinite | isfinite +----------+----------+---------- + f | f | t +(1 row) + +select 'infinity'::date = '+infinity'::date as t; + t +--- + t +(1 row) + +-- +-- oscillating fields from non-finite date: +-- +SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(DAY FROM DATE '-infinity'); -- NULL + extract +--------- + +(1 row) + +-- all supported fields +SELECT EXTRACT(DAY FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(MONTH FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(QUARTER FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(WEEK FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(DOW FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(ISODOW FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +SELECT EXTRACT(DOY FROM DATE 'infinity'); -- NULL + extract +--------- + +(1 row) + +-- +-- monotonic fields from non-finite date: +-- +SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +SELECT EXTRACT(EPOCH FROM DATE '-infinity'); -- -Infinity + extract +----------- + -Infinity +(1 row) + +-- all supported fields +SELECT EXTRACT(YEAR FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +SELECT EXTRACT(DECADE FROM DATE 'infinity'); -- Infinity + extract +---------- 
+ Infinity +(1 row) + +SELECT EXTRACT(CENTURY FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +SELECT EXTRACT(MILLENNIUM FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +SELECT EXTRACT(JULIAN FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +SELECT EXTRACT(ISOYEAR FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +SELECT EXTRACT(EPOCH FROM DATE 'infinity'); -- Infinity + extract +---------- + Infinity +(1 row) + +-- +-- wrong fields from non-finite date: +-- +SELECT EXTRACT(MICROSEC FROM DATE 'infinity'); -- error +ERROR: unit "microsec" not recognized for type date +-- test constructors +select make_date(2013, 7, 15); + make_date +------------ + 07-15-2013 +(1 row) + +select make_date(-44, 3, 15); + make_date +--------------- + 03-15-0044 BC +(1 row) + +select make_time(8, 20, 0.0); + make_time +----------- + 08:20:00 +(1 row) + +-- should fail +select make_date(0, 7, 15); +ERROR: date field value out of range: 0-07-15 +select make_date(2013, 2, 30); +ERROR: date field value out of range: 2013-02-30 +select make_date(2013, 13, 1); +ERROR: date field value out of range: 2013-13-01 +select make_date(2013, 11, -1); +ERROR: date field value out of range: 2013-11--1 +select make_time(10, 55, 100.1); +ERROR: time field value out of range: 10:55:100.1 +select make_time(24, 0, 2.1); +ERROR: time field value out of range: 24:00:2.1 diff --git a/src/test/regress/expected/dbsize.out b/src/test/regress/expected/dbsize.out new file mode 100644 index 0000000..f1121a8 --- /dev/null +++ b/src/test/regress/expected/dbsize.out @@ -0,0 +1,195 @@ +SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM + (VALUES (10::bigint), (1000::bigint), (1000000::bigint), + (1000000000::bigint), (1000000000000::bigint), + (1000000000000000::bigint)) x(size); + size | pg_size_pretty | pg_size_pretty +------------------+----------------+---------------- + 10 | 10 bytes | -10 bytes + 1000 | 1000 bytes | -1000 bytes + 1000000 | 977 kB | -977 kB + 1000000000 | 954 MB | -954 MB + 1000000000000 | 931 GB | -931 GB + 1000000000000000 | 909 TB | -909 TB +(6 rows) + +SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM + (VALUES (10::numeric), (1000::numeric), (1000000::numeric), + (1000000000::numeric), (1000000000000::numeric), + (1000000000000000::numeric), + (10.5::numeric), (1000.5::numeric), (1000000.5::numeric), + (1000000000.5::numeric), (1000000000000.5::numeric), + (1000000000000000.5::numeric)) x(size); + size | pg_size_pretty | pg_size_pretty +--------------------+----------------+---------------- + 10 | 10 bytes | -10 bytes + 1000 | 1000 bytes | -1000 bytes + 1000000 | 977 kB | -977 kB + 1000000000 | 954 MB | -954 MB + 1000000000000 | 931 GB | -931 GB + 1000000000000000 | 909 TB | -909 TB + 10.5 | 10.5 bytes | -10.5 bytes + 1000.5 | 1000.5 bytes | -1000.5 bytes + 1000000.5 | 977 kB | -977 kB + 1000000000.5 | 954 MB | -954 MB + 1000000000000.5 | 931 GB | -931 GB + 1000000000000000.5 | 909 TB | -909 TB +(12 rows) + +-- test where units change up +SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM + (VALUES (10239::bigint), (10240::bigint), + (10485247::bigint), (10485248::bigint), + (10736893951::bigint), (10736893952::bigint), + (10994579406847::bigint), (10994579406848::bigint), + (11258449312612351::bigint), (11258449312612352::bigint)) x(size); + size | pg_size_pretty | pg_size_pretty +-------------------+----------------+---------------- + 10239 | 10239 bytes 
| -10239 bytes + 10240 | 10 kB | -10 kB + 10485247 | 10239 kB | -10239 kB + 10485248 | 10 MB | -10 MB + 10736893951 | 10239 MB | -10239 MB + 10736893952 | 10 GB | -10 GB + 10994579406847 | 10239 GB | -10239 GB + 10994579406848 | 10 TB | -10 TB + 11258449312612351 | 10239 TB | -10239 TB + 11258449312612352 | 10 PB | -10 PB +(10 rows) + +SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM + (VALUES (10239::numeric), (10240::numeric), + (10485247::numeric), (10485248::numeric), + (10736893951::numeric), (10736893952::numeric), + (10994579406847::numeric), (10994579406848::numeric), + (11258449312612351::numeric), (11258449312612352::numeric), + (11528652096115048447::numeric), (11528652096115048448::numeric)) x(size); + size | pg_size_pretty | pg_size_pretty +----------------------+----------------+---------------- + 10239 | 10239 bytes | -10239 bytes + 10240 | 10 kB | -10 kB + 10485247 | 10239 kB | -10239 kB + 10485248 | 10 MB | -10 MB + 10736893951 | 10239 MB | -10239 MB + 10736893952 | 10 GB | -10 GB + 10994579406847 | 10239 GB | -10239 GB + 10994579406848 | 10 TB | -10 TB + 11258449312612351 | 10239 TB | -10239 TB + 11258449312612352 | 10 PB | -10 PB + 11528652096115048447 | 10239 PB | -10239 PB + 11528652096115048448 | 10240 PB | -10240 PB +(12 rows) + +-- pg_size_bytes() tests +SELECT size, pg_size_bytes(size) FROM + (VALUES ('1'), ('123bytes'), ('256 B'), ('1kB'), ('1MB'), (' 1 GB'), ('1.5 GB '), + ('1TB'), ('3000 TB'), ('1e6 MB'), ('99 PB')) x(size); + size | pg_size_bytes +----------+-------------------- + 1 | 1 + 123bytes | 123 + 256 B | 256 + 1kB | 1024 + 1MB | 1048576 + 1 GB | 1073741824 + 1.5 GB | 1610612736 + 1TB | 1099511627776 + 3000 TB | 3298534883328000 + 1e6 MB | 1048576000000 + 99 PB | 111464090777419776 +(11 rows) + +-- case-insensitive units are supported +SELECT size, pg_size_bytes(size) FROM + (VALUES ('1'), ('123bYteS'), ('1kb'), ('1mb'), (' 1 Gb'), ('1.5 gB '), + ('1tb'), ('3000 tb'), ('1e6 mb'), ('99 pb')) x(size); + size | pg_size_bytes +----------+-------------------- + 1 | 1 + 123bYteS | 123 + 1kb | 1024 + 1mb | 1048576 + 1 Gb | 1073741824 + 1.5 gB | 1610612736 + 1tb | 1099511627776 + 3000 tb | 3298534883328000 + 1e6 mb | 1048576000000 + 99 pb | 111464090777419776 +(10 rows) + +-- negative numbers are supported +SELECT size, pg_size_bytes(size) FROM + (VALUES ('-1'), ('-123bytes'), ('-1kb'), ('-1mb'), (' -1 Gb'), ('-1.5 gB '), + ('-1tb'), ('-3000 TB'), ('-10e-1 MB'), ('-99 PB')) x(size); + size | pg_size_bytes +-----------+--------------------- + -1 | -1 + -123bytes | -123 + -1kb | -1024 + -1mb | -1048576 + -1 Gb | -1073741824 + -1.5 gB | -1610612736 + -1tb | -1099511627776 + -3000 TB | -3298534883328000 + -10e-1 MB | -1048576 + -99 PB | -111464090777419776 +(10 rows) + +-- different cases with allowed points +SELECT size, pg_size_bytes(size) FROM + (VALUES ('-1.'), ('-1.kb'), ('-1. kb'), ('-0. gb'), + ('-.1'), ('-.1kb'), ('-.1 kb'), ('-.0 gb')) x(size); + size | pg_size_bytes +--------+--------------- + -1. | -1 + -1.kb | -1024 + -1. kb | -1024 + -0. gb | 0 + -.1 | 0 + -.1kb | -102 + -.1 kb | -102 + -.0 gb | 0 +(8 rows) + +-- invalid inputs +SELECT pg_size_bytes('1 AB'); +ERROR: invalid size: "1 AB" +DETAIL: Invalid size unit: "AB". +HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". +SELECT pg_size_bytes('1 AB A'); +ERROR: invalid size: "1 AB A" +DETAIL: Invalid size unit: "AB A". +HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". 
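(Editorial aside, not part of the expected-output file.) pg_size_bytes() accepts the same unit vocabulary that pg_size_pretty() emits, so the two can be composed; the boundary cases in the tables above pin down exactly where the units change. A small sketch whose results follow directly from the values shown there:

  SELECT pg_size_bytes('1.5 GB');                 -- 1610612736, per the pg_size_bytes() table above
  SELECT pg_size_pretty(pg_size_bytes('10 MB'));  -- 10485760 bytes, which formats back as '10 MB' (above the 10485248 threshold shown above)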
+SELECT pg_size_bytes('1 AB A '); +ERROR: invalid size: "1 AB A " +DETAIL: Invalid size unit: "AB A". +HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". +SELECT pg_size_bytes('9223372036854775807.9'); +ERROR: bigint out of range +SELECT pg_size_bytes('1e100'); +ERROR: bigint out of range +SELECT pg_size_bytes('1e1000000000000000000'); +ERROR: value overflows numeric format +SELECT pg_size_bytes('1 byte'); -- the singular "byte" is not supported +ERROR: invalid size: "1 byte" +DETAIL: Invalid size unit: "byte". +HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". +SELECT pg_size_bytes(''); +ERROR: invalid size: "" +SELECT pg_size_bytes('kb'); +ERROR: invalid size: "kb" +SELECT pg_size_bytes('..'); +ERROR: invalid size: ".." +SELECT pg_size_bytes('-.'); +ERROR: invalid size: "-." +SELECT pg_size_bytes('-.kb'); +ERROR: invalid size: "-.kb" +SELECT pg_size_bytes('-. kb'); +ERROR: invalid size: "-. kb" +SELECT pg_size_bytes('.+912'); +ERROR: invalid size: ".+912" +SELECT pg_size_bytes('+912+ kB'); +ERROR: invalid size: "+912+ kB" +DETAIL: Invalid size unit: "+ kB". +HINT: Valid units are "bytes", "B", "kB", "MB", "GB", "TB", and "PB". +SELECT pg_size_bytes('++123 kB'); +ERROR: invalid size: "++123 kB" diff --git a/src/test/regress/expected/delete.out b/src/test/regress/expected/delete.out new file mode 100644 index 0000000..e7eb328 --- /dev/null +++ b/src/test/regress/expected/delete.out @@ -0,0 +1,33 @@ +CREATE TABLE delete_test ( + id SERIAL PRIMARY KEY, + a INT, + b text +); +INSERT INTO delete_test (a) VALUES (10); +INSERT INTO delete_test (a, b) VALUES (50, repeat('x', 10000)); +INSERT INTO delete_test (a) VALUES (100); +-- allow an alias to be specified for DELETE's target table +DELETE FROM delete_test AS dt WHERE dt.a > 75; +-- if an alias is specified, don't allow the original table name +-- to be referenced +DELETE FROM delete_test dt WHERE delete_test.a > 25; +ERROR: invalid reference to FROM-clause entry for table "delete_test" +LINE 1: DELETE FROM delete_test dt WHERE delete_test.a > 25; + ^ +HINT: Perhaps you meant to reference the table alias "dt". 
+SELECT id, a, char_length(b) FROM delete_test; + id | a | char_length +----+----+------------- + 1 | 10 | + 2 | 50 | 10000 +(2 rows) + +-- delete a row with a TOASTed value +DELETE FROM delete_test WHERE a > 25; +SELECT id, a, char_length(b) FROM delete_test; + id | a | char_length +----+----+------------- + 1 | 10 | +(1 row) + +DROP TABLE delete_test; diff --git a/src/test/regress/expected/dependency.out b/src/test/regress/expected/dependency.out new file mode 100644 index 0000000..6d9498c --- /dev/null +++ b/src/test/regress/expected/dependency.out @@ -0,0 +1,154 @@ +-- +-- DEPENDENCIES +-- +CREATE USER regress_dep_user; +CREATE USER regress_dep_user2; +CREATE USER regress_dep_user3; +CREATE GROUP regress_dep_group; +CREATE TABLE deptest (f1 serial primary key, f2 text); +GRANT SELECT ON TABLE deptest TO GROUP regress_dep_group; +GRANT ALL ON TABLE deptest TO regress_dep_user, regress_dep_user2; +-- can't drop neither because they have privileges somewhere +DROP USER regress_dep_user; +ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it +DETAIL: privileges for table deptest +DROP GROUP regress_dep_group; +ERROR: role "regress_dep_group" cannot be dropped because some objects depend on it +DETAIL: privileges for table deptest +-- if we revoke the privileges we can drop the group +REVOKE SELECT ON deptest FROM GROUP regress_dep_group; +DROP GROUP regress_dep_group; +-- can't drop the user if we revoke the privileges partially +REVOKE SELECT, INSERT, UPDATE, DELETE, TRUNCATE, REFERENCES ON deptest FROM regress_dep_user; +DROP USER regress_dep_user; +ERROR: role "regress_dep_user" cannot be dropped because some objects depend on it +DETAIL: privileges for table deptest +-- now we are OK to drop him +REVOKE TRIGGER ON deptest FROM regress_dep_user; +DROP USER regress_dep_user; +-- we are OK too if we drop the privileges all at once +REVOKE ALL ON deptest FROM regress_dep_user2; +DROP USER regress_dep_user2; +-- can't drop the owner of an object +-- the error message detail here would include a pg_toast_nnn name that +-- is not constant, so suppress it +\set VERBOSITY terse +ALTER TABLE deptest OWNER TO regress_dep_user3; +DROP USER regress_dep_user3; +ERROR: role "regress_dep_user3" cannot be dropped because some objects depend on it +\set VERBOSITY default +-- if we drop the object, we can drop the user too +DROP TABLE deptest; +DROP USER regress_dep_user3; +-- Test DROP OWNED +CREATE USER regress_dep_user0; +CREATE USER regress_dep_user1; +CREATE USER regress_dep_user2; +SET SESSION AUTHORIZATION regress_dep_user0; +-- permission denied +DROP OWNED BY regress_dep_user1; +ERROR: permission denied to drop objects +DETAIL: Only roles with privileges of role "regress_dep_user1" may drop objects owned by it. +DROP OWNED BY regress_dep_user0, regress_dep_user2; +ERROR: permission denied to drop objects +DETAIL: Only roles with privileges of role "regress_dep_user2" may drop objects owned by it. +REASSIGN OWNED BY regress_dep_user0 TO regress_dep_user1; +ERROR: permission denied to reassign objects +DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects to it. +REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user0; +ERROR: permission denied to reassign objects +DETAIL: Only roles with privileges of role "regress_dep_user1" may reassign objects owned by it. 
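(Editorial aside, not part of the expected-output file.) The permission-denied errors above show that DROP OWNED and REASSIGN OWNED require the privileges of the role whose objects are affected; the statements that follow demonstrate the cleanup order once that requirement is met. A hypothetical sketch of that pattern, with invented role names:

  -- run as a superuser, or as a role having the privileges of doomed_role
  REASSIGN OWNED BY doomed_role TO surviving_role;  -- hand objects that should survive to another owner
  DROP OWNED BY doomed_role;                         -- drop its remaining objects and revoke its privileges
  DROP ROLE doomed_role;                             -- now nothing depends on the role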
+-- this one is allowed +DROP OWNED BY regress_dep_user0; +CREATE TABLE deptest1 (f1 int unique); +GRANT ALL ON deptest1 TO regress_dep_user1 WITH GRANT OPTION; +SET SESSION AUTHORIZATION regress_dep_user1; +CREATE TABLE deptest (a serial primary key, b text); +GRANT ALL ON deptest1 TO regress_dep_user2; +RESET SESSION AUTHORIZATION; +\z deptest1 + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+----------+-------+----------------------------------------------------+-------------------+---------- + public | deptest1 | table | regress_dep_user0=arwdDxt/regress_dep_user0 +| | + | | | regress_dep_user1=a*r*w*d*D*x*t*/regress_dep_user0+| | + | | | regress_dep_user2=arwdDxt/regress_dep_user1 | | +(1 row) + +DROP OWNED BY regress_dep_user1; +-- all grants revoked +\z deptest1 + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+----------+-------+---------------------------------------------+-------------------+---------- + public | deptest1 | table | regress_dep_user0=arwdDxt/regress_dep_user0 | | +(1 row) + +-- table was dropped +\d deptest +-- Test REASSIGN OWNED +GRANT ALL ON deptest1 TO regress_dep_user1; +GRANT CREATE ON DATABASE regression TO regress_dep_user1; +SET SESSION AUTHORIZATION regress_dep_user1; +CREATE SCHEMA deptest; +CREATE TABLE deptest (a serial primary key, b text); +ALTER DEFAULT PRIVILEGES FOR ROLE regress_dep_user1 IN SCHEMA deptest + GRANT ALL ON TABLES TO regress_dep_user2; +CREATE FUNCTION deptest_func() RETURNS void LANGUAGE plpgsql + AS $$ BEGIN END; $$; +CREATE TYPE deptest_enum AS ENUM ('red'); +CREATE TYPE deptest_range AS RANGE (SUBTYPE = int4); +CREATE TABLE deptest2 (f1 int); +-- make a serial column the hard way +CREATE SEQUENCE ss1; +ALTER TABLE deptest2 ALTER f1 SET DEFAULT nextval('ss1'); +ALTER SEQUENCE ss1 OWNED BY deptest2.f1; +-- When reassigning ownership of a composite type, its pg_class entry +-- should match +CREATE TYPE deptest_t AS (a int); +SELECT typowner = relowner +FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; + ?column? +---------- + t +(1 row) + +RESET SESSION AUTHORIZATION; +REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user2; +\dt deptest + List of relations + Schema | Name | Type | Owner +--------+---------+-------+------------------- + public | deptest | table | regress_dep_user2 +(1 row) + +SELECT typowner = relowner +FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; + ?column? 
+---------- + t +(1 row) + +-- doesn't work: grant still exists +DROP USER regress_dep_user1; +ERROR: role "regress_dep_user1" cannot be dropped because some objects depend on it +DETAIL: privileges for database regression +privileges for table deptest1 +owner of default privileges on new relations belonging to role regress_dep_user1 in schema deptest +DROP OWNED BY regress_dep_user1; +DROP USER regress_dep_user1; +DROP USER regress_dep_user2; +ERROR: role "regress_dep_user2" cannot be dropped because some objects depend on it +DETAIL: owner of schema deptest +owner of sequence deptest_a_seq +owner of table deptest +owner of function deptest_func() +owner of type deptest_enum +owner of type deptest_multirange +owner of type deptest_range +owner of table deptest2 +owner of sequence ss1 +owner of type deptest_t +DROP OWNED BY regress_dep_user2, regress_dep_user0; +DROP USER regress_dep_user2; +DROP USER regress_dep_user0; diff --git a/src/test/regress/expected/domain.out b/src/test/regress/expected/domain.out new file mode 100644 index 0000000..6d94e84 --- /dev/null +++ b/src/test/regress/expected/domain.out @@ -0,0 +1,1209 @@ +-- +-- Test domains. +-- +-- Test Comment / Drop +create domain domaindroptest int4; +comment on domain domaindroptest is 'About to drop this..'; +create domain dependenttypetest domaindroptest; +-- fail because of dependent type +drop domain domaindroptest; +ERROR: cannot drop type domaindroptest because other objects depend on it +DETAIL: type dependenttypetest depends on type domaindroptest +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop domain domaindroptest cascade; +NOTICE: drop cascades to type dependenttypetest +-- this should fail because already gone +drop domain domaindroptest cascade; +ERROR: type "domaindroptest" does not exist +-- Test domain input. +-- Note: the point of checking both INSERT and COPY FROM is that INSERT +-- exercises CoerceToDomain while COPY exercises domain_in. 
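(Editorial aside, not part of the test file.) The note above distinguishes the two input paths a domain check must cover: INSERT goes through CoerceToDomain, while COPY goes through the domain's input function (domain_in). A minimal hypothetical sketch of the same distinction; names are invented, the real tests below use domainvarchar:

  create domain d_short as varchar(5);
  create temp table d_demo (v d_short);
  insert into d_demo values ('123456');   -- CoerceToDomain path: fails with "value too long for type character varying(5)"
  -- COPY d_demo FROM stdin with an over-length value fails the same way via domain_in, as the tests below show
  drop table d_demo;
  drop domain d_short;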
+create domain domainvarchar varchar(5); +create domain domainnumeric numeric(8,2); +create domain domainint4 int4; +create domain domaintext text; +-- Test explicit coercions --- these should succeed (and truncate) +SELECT cast('123456' as domainvarchar); + domainvarchar +--------------- + 12345 +(1 row) + +SELECT cast('12345' as domainvarchar); + domainvarchar +--------------- + 12345 +(1 row) + +-- Test tables using domains +create table basictest + ( testint4 domainint4 + , testtext domaintext + , testvarchar domainvarchar + , testnumeric domainnumeric + ); +INSERT INTO basictest values ('88', 'haha', 'short', '123.12'); -- Good +INSERT INTO basictest values ('88', 'haha', 'short text', '123.12'); -- Bad varchar +ERROR: value too long for type character varying(5) +INSERT INTO basictest values ('88', 'haha', 'short', '123.1212'); -- Truncate numeric +-- Test copy +COPY basictest (testvarchar) FROM stdin; -- fail +ERROR: value too long for type character varying(5) +CONTEXT: COPY basictest, line 1, column testvarchar: "notsoshorttext" +COPY basictest (testvarchar) FROM stdin; +select * from basictest; + testint4 | testtext | testvarchar | testnumeric +----------+----------+-------------+------------- + 88 | haha | short | 123.12 + 88 | haha | short | 123.12 + | | short | +(3 rows) + +-- check that domains inherit operations from base types +select testtext || testvarchar as concat, testnumeric + 42 as sum +from basictest; + concat | sum +-----------+-------- + hahashort | 165.12 + hahashort | 165.12 + | +(3 rows) + +-- check that union/case/coalesce type resolution handles domains properly +select pg_typeof(coalesce(4::domainint4, 7)); + pg_typeof +----------- + integer +(1 row) + +select pg_typeof(coalesce(4::domainint4, 7::domainint4)); + pg_typeof +------------ + domainint4 +(1 row) + +drop table basictest; +drop domain domainvarchar restrict; +drop domain domainnumeric restrict; +drop domain domainint4 restrict; +drop domain domaintext; +-- Test non-error-throwing input +create domain positiveint int4 check(value > 0); +create domain weirdfloat float8 check((1 / value) < 10); +select pg_input_is_valid('1', 'positiveint'); + pg_input_is_valid +------------------- + t +(1 row) + +select pg_input_is_valid('junk', 'positiveint'); + pg_input_is_valid +------------------- + f +(1 row) + +select pg_input_is_valid('-1', 'positiveint'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('junk', 'positiveint'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------+------+---------------- + invalid input syntax for type integer: "junk" | | | 22P02 +(1 row) + +select * from pg_input_error_info('-1', 'positiveint'); + message | detail | hint | sql_error_code +----------------------------------------------------------------------------+--------+------+---------------- + value for domain positiveint violates check constraint "positiveint_check" | | | 23514 +(1 row) + +select * from pg_input_error_info('junk', 'weirdfloat'); + message | detail | hint | sql_error_code +--------------------------------------------------------+--------+------+---------------- + invalid input syntax for type double precision: "junk" | | | 22P02 +(1 row) + +select * from pg_input_error_info('0.01', 'weirdfloat'); + message | detail | hint | sql_error_code +--------------------------------------------------------------------------+--------+------+---------------- + value for domain weirdfloat violates check constraint 
"weirdfloat_check" | | | 23514 +(1 row) + +-- We currently can't trap errors raised in the CHECK expression itself +select * from pg_input_error_info('0', 'weirdfloat'); +ERROR: division by zero +drop domain positiveint; +drop domain weirdfloat; +-- Test domains over array types +create domain domainint4arr int4[1]; +create domain domainchar4arr varchar(4)[2][3]; +create table domarrtest + ( testint4arr domainint4arr + , testchar4arr domainchar4arr + ); +INSERT INTO domarrtest values ('{2,2}', '{{"a","b"},{"c","d"}}'); +INSERT INTO domarrtest values ('{{2,2},{2,2}}', '{{"a","b"}}'); +INSERT INTO domarrtest values ('{2,2}', '{{"a","b"},{"c","d"},{"e","f"}}'); +INSERT INTO domarrtest values ('{2,2}', '{{"a"},{"c"}}'); +INSERT INTO domarrtest values (NULL, '{{"a","b","c"},{"d","e","f"}}'); +INSERT INTO domarrtest values (NULL, '{{"toolong","b","c"},{"d","e","f"}}'); +ERROR: value too long for type character varying(4) +INSERT INTO domarrtest (testint4arr[1], testint4arr[3]) values (11,22); +select * from domarrtest; + testint4arr | testchar4arr +---------------+--------------------- + {2,2} | {{a,b},{c,d}} + {{2,2},{2,2}} | {{a,b}} + {2,2} | {{a,b},{c,d},{e,f}} + {2,2} | {{a},{c}} + | {{a,b,c},{d,e,f}} + {11,NULL,22} | +(6 rows) + +select testint4arr[1], testchar4arr[2:2] from domarrtest; + testint4arr | testchar4arr +-------------+-------------- + 2 | {{c,d}} + | {} + 2 | {{c,d}} + 2 | {{c}} + | {{d,e,f}} + 11 | +(6 rows) + +select array_dims(testint4arr), array_dims(testchar4arr) from domarrtest; + array_dims | array_dims +------------+------------ + [1:2] | [1:2][1:2] + [1:2][1:2] | [1:1][1:2] + [1:2] | [1:3][1:2] + [1:2] | [1:2][1:1] + | [1:2][1:3] + [1:3] | +(6 rows) + +COPY domarrtest FROM stdin; +COPY domarrtest FROM stdin; -- fail +ERROR: value too long for type character varying(4) +CONTEXT: COPY domarrtest, line 1, column testchar4arr: "{qwerty,w,e}" +select * from domarrtest; + testint4arr | testchar4arr +---------------+--------------------- + {2,2} | {{a,b},{c,d}} + {{2,2},{2,2}} | {{a,b}} + {2,2} | {{a,b},{c,d},{e,f}} + {2,2} | {{a},{c}} + | {{a,b,c},{d,e,f}} + {11,NULL,22} | + {3,4} | {q,w,e} + | +(8 rows) + +update domarrtest set + testint4arr[1] = testint4arr[1] + 1, + testint4arr[3] = testint4arr[3] - 1 +where testchar4arr is null; +select * from domarrtest where testchar4arr is null; + testint4arr | testchar4arr +------------------+-------------- + {12,NULL,21} | + {NULL,NULL,NULL} | +(2 rows) + +drop table domarrtest; +drop domain domainint4arr restrict; +drop domain domainchar4arr restrict; +create domain dia as int[]; +select '{1,2,3}'::dia; + dia +--------- + {1,2,3} +(1 row) + +select array_dims('{1,2,3}'::dia); + array_dims +------------ + [1:3] +(1 row) + +select pg_typeof('{1,2,3}'::dia); + pg_typeof +----------- + dia +(1 row) + +select pg_typeof('{1,2,3}'::dia || 42); -- should be int[] not dia + pg_typeof +----------- + integer[] +(1 row) + +drop domain dia; +-- Test domains over composites +create type comptype as (r float8, i float8); +create domain dcomptype as comptype; +create table dcomptable (d1 dcomptype unique); +insert into dcomptable values (row(1,2)::dcomptype); +insert into dcomptable values (row(3,4)::comptype); +insert into dcomptable values (row(1,2)::dcomptype); -- fail on uniqueness +ERROR: duplicate key value violates unique constraint "dcomptable_d1_key" +DETAIL: Key (d1)=((1,2)) already exists. 
+insert into dcomptable (d1.r) values(11); +select * from dcomptable; + d1 +------- + (1,2) + (3,4) + (11,) +(3 rows) + +select (d1).r, (d1).i, (d1).* from dcomptable; + r | i | r | i +----+---+----+--- + 1 | 2 | 1 | 2 + 3 | 4 | 3 | 4 + 11 | | 11 | +(3 rows) + +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; +select * from dcomptable; + d1 +------- + (11,) + (2,2) + (4,4) +(3 rows) + +alter domain dcomptype add constraint c1 check ((value).r <= (value).i); +alter domain dcomptype add constraint c2 check ((value).r > (value).i); -- fail +ERROR: column "d1" of table "dcomptable" contains values that violate the new constraint +select row(2,1)::dcomptype; -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +insert into dcomptable values (row(1,2)::comptype); +insert into dcomptable values (row(2,1)::comptype); -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +insert into dcomptable (d1.r) values(99); +insert into dcomptable (d1.r, d1.i) values(99, 100); +insert into dcomptable (d1.r, d1.i) values(100, 99); -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +update dcomptable set d1.r = (d1).r + 1 where (d1).i > 0; -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +select * from dcomptable; + d1 +---------- + (11,) + (99,) + (1,3) + (3,5) + (0,3) + (98,101) +(6 rows) + +explain (verbose, costs off) + update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Update on public.dcomptable + -> Seq Scan on public.dcomptable + Output: ROW(((d1).r - '1'::double precision), ((d1).i + '1'::double precision)), ctid + Filter: ((dcomptable.d1).i > '0'::double precision) +(4 rows) + +create rule silly as on delete to dcomptable do instead + update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +\d+ dcomptable + Table "public.dcomptable" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-----------+-----------+----------+---------+----------+--------------+------------- + d1 | dcomptype | | | | extended | | +Indexes: + "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1) +Rules: + silly AS + ON DELETE TO dcomptable DO INSTEAD UPDATE dcomptable SET d1.r = (dcomptable.d1).r - 1::double precision, d1.i = (dcomptable.d1).i + 1::double precision + WHERE (dcomptable.d1).i > 0::double precision + +create function makedcomp(r float8, i float8) returns dcomptype +as 'select row(r, i)' language sql; +select makedcomp(1,2); + makedcomp +----------- + (1,2) +(1 row) + +select makedcomp(2,1); -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +select * from makedcomp(1,2) m; + r | i +---+--- + 1 | 2 +(1 row) + +select m, m is not null from makedcomp(1,2) m; + m | ?column? 
+-------+---------- + (1,2) | t +(1 row) + +drop function makedcomp(float8, float8); +drop table dcomptable; +drop type comptype cascade; +NOTICE: drop cascades to type dcomptype +-- check altering and dropping columns used by domain constraints +create type comptype as (r float8, i float8); +create domain dcomptype as comptype; +alter domain dcomptype add constraint c1 check ((value).r > 0); +comment on constraint c1 on domain dcomptype is 'random commentary'; +select row(0,1)::dcomptype; -- fail +ERROR: value for domain dcomptype violates check constraint "c1" +alter type comptype alter attribute r type varchar; -- fail +ERROR: operator does not exist: character varying > double precision +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +alter type comptype alter attribute r type bigint; +alter type comptype drop attribute r; -- fail +ERROR: cannot drop column r of composite type comptype because other objects depend on it +DETAIL: constraint c1 depends on column r of composite type comptype +HINT: Use DROP ... CASCADE to drop the dependent objects too. +alter type comptype drop attribute i; +select conname, obj_description(oid, 'pg_constraint') from pg_constraint + where contypid = 'dcomptype'::regtype; -- check comment is still there + conname | obj_description +---------+------------------- + c1 | random commentary +(1 row) + +drop type comptype cascade; +NOTICE: drop cascades to type dcomptype +-- Test domains over arrays of composite +create type comptype as (r float8, i float8); +create domain dcomptypea as comptype[]; +create table dcomptable (d1 dcomptypea unique); +insert into dcomptable values (array[row(1,2)]::dcomptypea); +insert into dcomptable values (array[row(3,4), row(5,6)]::comptype[]); +insert into dcomptable values (array[row(7,8)::comptype, row(9,10)::comptype]); +insert into dcomptable values (array[row(1,2)]::dcomptypea); -- fail on uniqueness +ERROR: duplicate key value violates unique constraint "dcomptable_d1_key" +DETAIL: Key (d1)=({"(1,2)"}) already exists. 
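(Editorial aside, not part of the test file.) The updates that follow assign to an element and a field inside a domain over an array of composites (d1[1].r and so on). A self-contained hypothetical sketch of that subscript-and-field assignment, with invented names standing in for comptype/dcomptypea:

  create type pt as (r float8, i float8);
  create domain dpts as pt[];
  create temp table dpts_demo (d dpts);
  insert into dpts_demo values (array[row(1,2)]::dpts);
  update dpts_demo set d[1].r = d[1].r + 1;   -- mirrors the d1[1].r updates in the surrounding tests
  select * from dpts_demo;                    -- expected: {"(2,2)"}
  drop table dpts_demo;
  drop type pt cascade;                       -- also drops dpts, with a cascade notice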
+insert into dcomptable (d1[1]) values(row(9,10)); +insert into dcomptable (d1[1].r) values(11); +select * from dcomptable; + d1 +-------------------- + {"(1,2)"} + {"(3,4)","(5,6)"} + {"(7,8)","(9,10)"} + {"(9,10)"} + {"(11,)"} +(5 rows) + +select d1[2], d1[1].r, d1[1].i from dcomptable; + d1 | r | i +--------+----+---- + | 1 | 2 + (5,6) | 3 | 4 + (9,10) | 7 | 8 + | 9 | 10 + | 11 | +(5 rows) + +update dcomptable set d1[2] = row(d1[2].i, d1[2].r); +select * from dcomptable; + d1 +-------------------- + {"(1,2)","(,)"} + {"(3,4)","(6,5)"} + {"(7,8)","(10,9)"} + {"(9,10)","(,)"} + {"(11,)","(,)"} +(5 rows) + +update dcomptable set d1[1].r = d1[1].r + 1 where d1[1].i > 0; +select * from dcomptable; + d1 +-------------------- + {"(11,)","(,)"} + {"(2,2)","(,)"} + {"(4,4)","(6,5)"} + {"(8,8)","(10,9)"} + {"(10,10)","(,)"} +(5 rows) + +alter domain dcomptypea add constraint c1 check (value[1].r <= value[1].i); +alter domain dcomptypea add constraint c2 check (value[1].r > value[1].i); -- fail +ERROR: column "d1" of table "dcomptable" contains values that violate the new constraint +select array[row(2,1)]::dcomptypea; -- fail +ERROR: value for domain dcomptypea violates check constraint "c1" +insert into dcomptable values (array[row(1,2)]::comptype[]); +insert into dcomptable values (array[row(2,1)]::comptype[]); -- fail +ERROR: value for domain dcomptypea violates check constraint "c1" +insert into dcomptable (d1[1].r) values(99); +insert into dcomptable (d1[1].r, d1[1].i) values(99, 100); +insert into dcomptable (d1[1].r, d1[1].i) values(100, 99); -- fail +ERROR: value for domain dcomptypea violates check constraint "c1" +update dcomptable set d1[1].r = d1[1].r + 1 where d1[1].i > 0; -- fail +ERROR: value for domain dcomptypea violates check constraint "c1" +update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1 + where d1[1].i > 0; +select * from dcomptable; + d1 +-------------------- + {"(11,)","(,)"} + {"(99,)"} + {"(1,3)","(,)"} + {"(3,5)","(6,5)"} + {"(7,9)","(10,9)"} + {"(9,11)","(,)"} + {"(0,3)"} + {"(98,101)"} +(8 rows) + +explain (verbose, costs off) + update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1 + where d1[1].i > 0; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Update on public.dcomptable + -> Seq Scan on public.dcomptable + Output: (d1[1].r := (d1[1].r - '1'::double precision))[1].i := (d1[1].i + '1'::double precision), ctid + Filter: (dcomptable.d1[1].i > '0'::double precision) +(4 rows) + +create rule silly as on delete to dcomptable do instead + update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1 + where d1[1].i > 0; +\d+ dcomptable + Table "public.dcomptable" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+------------+-----------+----------+---------+----------+--------------+------------- + d1 | dcomptypea | | | | extended | | +Indexes: + "dcomptable_d1_key" UNIQUE CONSTRAINT, btree (d1) +Rules: + silly AS + ON DELETE TO dcomptable DO INSTEAD UPDATE dcomptable SET d1[1].r = dcomptable.d1[1].r - 1::double precision, d1[1].i = dcomptable.d1[1].i + 1::double precision + WHERE dcomptable.d1[1].i > 0::double precision + +drop table dcomptable; +drop type comptype cascade; +NOTICE: drop cascades to type dcomptypea +-- Test arrays over domains +create domain posint as int check (value > 0); +create table pitable (f1 posint[]); +insert into pitable values(array[42]); +insert into pitable values(array[-1]); 
-- fail +ERROR: value for domain posint violates check constraint "posint_check" +insert into pitable values('{0}'); -- fail +ERROR: value for domain posint violates check constraint "posint_check" +LINE 1: insert into pitable values('{0}'); + ^ +update pitable set f1[1] = f1[1] + 1; +update pitable set f1[1] = 0; -- fail +ERROR: value for domain posint violates check constraint "posint_check" +select * from pitable; + f1 +------ + {43} +(1 row) + +drop table pitable; +create domain vc4 as varchar(4); +create table vc4table (f1 vc4[]); +insert into vc4table values(array['too long']); -- fail +ERROR: value too long for type character varying(4) +insert into vc4table values(array['too long']::vc4[]); -- cast truncates +select * from vc4table; + f1 +---------- + {"too "} +(1 row) + +drop table vc4table; +drop type vc4; +-- You can sort of fake arrays-of-arrays by putting a domain in between +create domain dposinta as posint[]; +create table dposintatable (f1 dposinta[]); +insert into dposintatable values(array[array[42]]); -- fail +ERROR: column "f1" is of type dposinta[] but expression is of type integer[] +LINE 1: insert into dposintatable values(array[array[42]]); + ^ +HINT: You will need to rewrite or cast the expression. +insert into dposintatable values(array[array[42]::posint[]]); -- still fail +ERROR: column "f1" is of type dposinta[] but expression is of type posint[] +LINE 1: insert into dposintatable values(array[array[42]::posint[]])... + ^ +HINT: You will need to rewrite or cast the expression. +insert into dposintatable values(array[array[42]::dposinta]); -- but this works +select f1, f1[1], (f1[1])[1] from dposintatable; + f1 | f1 | f1 +----------+------+---- + {"{42}"} | {42} | 42 +(1 row) + +select pg_typeof(f1) from dposintatable; + pg_typeof +------------ + dposinta[] +(1 row) + +select pg_typeof(f1[1]) from dposintatable; + pg_typeof +----------- + dposinta +(1 row) + +select pg_typeof(f1[1][1]) from dposintatable; + pg_typeof +----------- + dposinta +(1 row) + +select pg_typeof((f1[1])[1]) from dposintatable; + pg_typeof +----------- + posint +(1 row) + +update dposintatable set f1[2] = array[99]; +select f1, f1[1], (f1[2])[1] from dposintatable; + f1 | f1 | f1 +-----------------+------+---- + {"{42}","{99}"} | {42} | 99 +(1 row) + +-- it'd be nice if you could do something like this, but for now you can't: +update dposintatable set f1[2][1] = array[97]; +ERROR: wrong number of array subscripts +-- maybe someday we can make this syntax work: +update dposintatable set (f1[2])[1] = array[98]; +ERROR: syntax error at or near "[" +LINE 1: update dposintatable set (f1[2])[1] = array[98]; + ^ +drop table dposintatable; +drop domain posint cascade; +NOTICE: drop cascades to type dposinta +-- Test arrays over domains of composite +create type comptype as (cf1 int, cf2 int); +create domain dcomptype as comptype check ((value).cf1 > 0); +create table dcomptable (f1 dcomptype[]); +insert into dcomptable values (null); +update dcomptable set f1[1].cf2 = 5; +table dcomptable; + f1 +---------- + {"(,5)"} +(1 row) + +update dcomptable set f1[1].cf1 = -1; -- fail +ERROR: value for domain dcomptype violates check constraint "dcomptype_check" +update dcomptable set f1[1].cf1 = 1; +table dcomptable; + f1 +----------- + {"(1,5)"} +(1 row) + +-- if there's no constraints, a different code path is taken: +alter domain dcomptype drop constraint dcomptype_check; +update dcomptable set f1[1].cf1 = -1; -- now ok +table dcomptable; + f1 +------------ + {"(-1,5)"} +(1 row) + +drop table dcomptable; 
+drop type comptype cascade; +NOTICE: drop cascades to type dcomptype +-- Test not-null restrictions +create domain dnotnull varchar(15) NOT NULL; +create domain dnull varchar(15); +create domain dcheck varchar(15) NOT NULL CHECK (VALUE = 'a' OR VALUE = 'c' OR VALUE = 'd'); +create table nulltest + ( col1 dnotnull + , col2 dnotnull NULL -- NOT NULL in the domain cannot be overridden + , col3 dnull NOT NULL + , col4 dnull + , col5 dcheck CHECK (col5 IN ('c', 'd')) + ); +INSERT INTO nulltest DEFAULT VALUES; +ERROR: domain dnotnull does not allow null values +INSERT INTO nulltest values ('a', 'b', 'c', 'd', 'c'); -- Good +insert into nulltest values ('a', 'b', 'c', 'd', NULL); +ERROR: domain dcheck does not allow null values +insert into nulltest values ('a', 'b', 'c', 'd', 'a'); +ERROR: new row for relation "nulltest" violates check constraint "nulltest_col5_check" +DETAIL: Failing row contains (a, b, c, d, a). +INSERT INTO nulltest values (NULL, 'b', 'c', 'd', 'd'); +ERROR: domain dnotnull does not allow null values +INSERT INTO nulltest values ('a', NULL, 'c', 'd', 'c'); +ERROR: domain dnotnull does not allow null values +INSERT INTO nulltest values ('a', 'b', NULL, 'd', 'c'); +ERROR: null value in column "col3" of relation "nulltest" violates not-null constraint +DETAIL: Failing row contains (a, b, null, d, c). +INSERT INTO nulltest values ('a', 'b', 'c', NULL, 'd'); -- Good +-- Test copy +COPY nulltest FROM stdin; --fail +ERROR: null value in column "col3" of relation "nulltest" violates not-null constraint +DETAIL: Failing row contains (a, b, null, d, d). +CONTEXT: COPY nulltest, line 1: "a b \N d d" +COPY nulltest FROM stdin; --fail +ERROR: domain dcheck does not allow null values +CONTEXT: COPY nulltest, line 1, column col5: null input +-- Last row is bad +COPY nulltest FROM stdin; +ERROR: new row for relation "nulltest" violates check constraint "nulltest_col5_check" +DETAIL: Failing row contains (a, b, c, null, a). +CONTEXT: COPY nulltest, line 3: "a b c \N a" +select * from nulltest; + col1 | col2 | col3 | col4 | col5 +------+------+------+------+------ + a | b | c | d | c + a | b | c | | d +(2 rows) + +-- Test out coerced (casted) constraints +SELECT cast('1' as dnotnull); + dnotnull +---------- + 1 +(1 row) + +SELECT cast(NULL as dnotnull); -- fail +ERROR: domain dnotnull does not allow null values +SELECT cast(cast(NULL as dnull) as dnotnull); -- fail +ERROR: domain dnotnull does not allow null values +SELECT cast(col4 as dnotnull) from nulltest; -- fail +ERROR: domain dnotnull does not allow null values +-- cleanup +drop table nulltest; +drop domain dnotnull restrict; +drop domain dnull restrict; +drop domain dcheck restrict; +create domain ddef1 int4 DEFAULT 3; +create domain ddef2 oid DEFAULT '12'; +-- Type mixing, function returns int8 +create domain ddef3 text DEFAULT 5; +create sequence ddef4_seq; +create domain ddef4 int4 DEFAULT nextval('ddef4_seq'); +create domain ddef5 numeric(8,2) NOT NULL DEFAULT '12.12'; +create table defaulttest + ( col1 ddef1 + , col2 ddef2 + , col3 ddef3 + , col4 ddef4 PRIMARY KEY + , col5 ddef1 NOT NULL DEFAULT NULL + , col6 ddef2 DEFAULT '88' + , col7 ddef4 DEFAULT 8000 + , col8 ddef5 + ); +insert into defaulttest(col4) values(0); -- fails, col5 defaults to null +ERROR: null value in column "col5" of relation "defaulttest" violates not-null constraint +DETAIL: Failing row contains (3, 12, 5, 0, null, 88, 8000, 12.12). 
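(Editorial aside, not part of the test file.) The defaulttest exercise above shows that a column picks up its domain's default when it has no default of its own, and that a NOT NULL column whose own default resolves to null fails at insert time. A minimal hypothetical recap of the first point, before the test sequence continues:

  create domain d3 as int4 default 3;   -- plays the role of ddef1 above
  create temp table d3_demo (c d3);
  insert into d3_demo default values;   -- stores 3, the domain default
  select * from d3_demo;                -- c = 3
  drop table d3_demo;
  drop domain d3;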
+alter table defaulttest alter column col5 drop default; +insert into defaulttest default values; -- succeeds, inserts domain default +-- We used to treat SET DEFAULT NULL as equivalent to DROP DEFAULT; wrong +alter table defaulttest alter column col5 set default null; +insert into defaulttest(col4) values(0); -- fails +ERROR: null value in column "col5" of relation "defaulttest" violates not-null constraint +DETAIL: Failing row contains (3, 12, 5, 0, null, 88, 8000, 12.12). +alter table defaulttest alter column col5 drop default; +insert into defaulttest default values; +insert into defaulttest default values; +-- Test defaults with copy +COPY defaulttest(col5) FROM stdin; +select * from defaulttest; + col1 | col2 | col3 | col4 | col5 | col6 | col7 | col8 +------+------+------+------+------+------+------+------- + 3 | 12 | 5 | 1 | 3 | 88 | 8000 | 12.12 + 3 | 12 | 5 | 2 | 3 | 88 | 8000 | 12.12 + 3 | 12 | 5 | 3 | 3 | 88 | 8000 | 12.12 + 3 | 12 | 5 | 4 | 42 | 88 | 8000 | 12.12 +(4 rows) + +drop table defaulttest cascade; +-- Test ALTER DOMAIN .. NOT NULL +create domain dnotnulltest integer; +create table domnotnull +( col1 dnotnulltest +, col2 dnotnulltest +); +insert into domnotnull default values; +alter domain dnotnulltest set not null; -- fails +ERROR: column "col1" of table "domnotnull" contains null values +update domnotnull set col1 = 5; +alter domain dnotnulltest set not null; -- fails +ERROR: column "col2" of table "domnotnull" contains null values +update domnotnull set col2 = 6; +alter domain dnotnulltest set not null; +update domnotnull set col1 = null; -- fails +ERROR: domain dnotnulltest does not allow null values +alter domain dnotnulltest drop not null; +update domnotnull set col1 = null; +drop domain dnotnulltest cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to column col2 of table domnotnull +drop cascades to column col1 of table domnotnull +-- Test ALTER DOMAIN .. DEFAULT .. +create table domdeftest (col1 ddef1); +insert into domdeftest default values; +select * from domdeftest; + col1 +------ + 3 +(1 row) + +alter domain ddef1 set default '42'; +insert into domdeftest default values; +select * from domdeftest; + col1 +------ + 3 + 42 +(2 rows) + +alter domain ddef1 drop default; +insert into domdeftest default values; +select * from domdeftest; + col1 +------ + 3 + 42 + +(3 rows) + +drop table domdeftest; +-- Test ALTER DOMAIN .. CONSTRAINT .. +create domain con as integer; +create table domcontest (col1 con); +insert into domcontest values (1); +insert into domcontest values (2); +alter domain con add constraint t check (VALUE < 1); -- fails +ERROR: column "col1" of table "domcontest" contains values that violate the new constraint +alter domain con add constraint t check (VALUE < 34); +alter domain con add check (VALUE > 0); +insert into domcontest values (-5); -- fails +ERROR: value for domain con violates check constraint "con_check" +insert into domcontest values (42); -- fails +ERROR: value for domain con violates check constraint "t" +insert into domcontest values (5); +alter domain con drop constraint t; +insert into domcontest values (-5); --fails +ERROR: value for domain con violates check constraint "con_check" +insert into domcontest values (42); +alter domain con drop constraint nonexistent; +ERROR: constraint "nonexistent" of domain "con" does not exist +alter domain con drop constraint if exists nonexistent; +NOTICE: constraint "nonexistent" of domain "con" does not exist, skipping +-- Test ALTER DOMAIN .. CONSTRAINT .. 
NOT VALID +create domain things AS INT; +CREATE TABLE thethings (stuff things); +INSERT INTO thethings (stuff) VALUES (55); +ALTER DOMAIN things ADD CONSTRAINT meow CHECK (VALUE < 11); +ERROR: column "stuff" of table "thethings" contains values that violate the new constraint +ALTER DOMAIN things ADD CONSTRAINT meow CHECK (VALUE < 11) NOT VALID; +ALTER DOMAIN things VALIDATE CONSTRAINT meow; +ERROR: column "stuff" of table "thethings" contains values that violate the new constraint +UPDATE thethings SET stuff = 10; +ALTER DOMAIN things VALIDATE CONSTRAINT meow; +-- Confirm ALTER DOMAIN with RULES. +create table domtab (col1 integer); +create domain dom as integer; +create view domview as select cast(col1 as dom) from domtab; +insert into domtab (col1) values (null); +insert into domtab (col1) values (5); +select * from domview; + col1 +------ + + 5 +(2 rows) + +alter domain dom set not null; +select * from domview; -- fail +ERROR: domain dom does not allow null values +alter domain dom drop not null; +select * from domview; + col1 +------ + + 5 +(2 rows) + +alter domain dom add constraint domchkgt6 check(value > 6); +select * from domview; --fail +ERROR: value for domain dom violates check constraint "domchkgt6" +alter domain dom drop constraint domchkgt6 restrict; +select * from domview; + col1 +------ + + 5 +(2 rows) + +-- cleanup +drop domain ddef1 restrict; +drop domain ddef2 restrict; +drop domain ddef3 restrict; +drop domain ddef4 restrict; +drop domain ddef5 restrict; +drop sequence ddef4_seq; +-- Test domains over domains +create domain vchar4 varchar(4); +create domain dinter vchar4 check (substring(VALUE, 1, 1) = 'x'); +create domain dtop dinter check (substring(VALUE, 2, 1) = '1'); +select 'x123'::dtop; + dtop +------ + x123 +(1 row) + +select 'x1234'::dtop; -- explicit coercion should truncate + dtop +------ + x123 +(1 row) + +select 'y1234'::dtop; -- fail +ERROR: value for domain dtop violates check constraint "dinter_check" +select 'y123'::dtop; -- fail +ERROR: value for domain dtop violates check constraint "dinter_check" +select 'yz23'::dtop; -- fail +ERROR: value for domain dtop violates check constraint "dinter_check" +select 'xz23'::dtop; -- fail +ERROR: value for domain dtop violates check constraint "dtop_check" +create temp table dtest(f1 dtop); +insert into dtest values('x123'); +insert into dtest values('x1234'); -- fail, implicit coercion +ERROR: value too long for type character varying(4) +insert into dtest values('y1234'); -- fail, implicit coercion +ERROR: value too long for type character varying(4) +insert into dtest values('y123'); -- fail +ERROR: value for domain dtop violates check constraint "dinter_check" +insert into dtest values('yz23'); -- fail +ERROR: value for domain dtop violates check constraint "dinter_check" +insert into dtest values('xz23'); -- fail +ERROR: value for domain dtop violates check constraint "dtop_check" +drop table dtest; +drop domain vchar4 cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to type dinter +drop cascades to type dtop +-- Make sure that constraints of newly-added domain columns are +-- enforced correctly, even if there's no default value for the new +-- column. 
Per bug #1433 +create domain str_domain as text not null; +create table domain_test (a int, b int); +insert into domain_test values (1, 2); +insert into domain_test values (1, 2); +-- should fail +alter table domain_test add column c str_domain; +ERROR: domain str_domain does not allow null values +create domain str_domain2 as text check (value <> 'foo') default 'foo'; +-- should fail +alter table domain_test add column d str_domain2; +ERROR: value for domain str_domain2 violates check constraint "str_domain2_check" +-- Check that domain constraints on prepared statement parameters of +-- unknown type are enforced correctly. +create domain pos_int as int4 check (value > 0) not null; +prepare s1 as select $1::pos_int = 10 as "is_ten"; +execute s1(10); + is_ten +-------- + t +(1 row) + +execute s1(0); -- should fail +ERROR: value for domain pos_int violates check constraint "pos_int_check" +execute s1(NULL); -- should fail +ERROR: domain pos_int does not allow null values +-- Check that domain constraints on plpgsql function parameters, results, +-- and local variables are enforced correctly. +create function doubledecrement(p1 pos_int) returns pos_int as $$ +declare v pos_int; +begin + return p1; +end$$ language plpgsql; +select doubledecrement(3); -- fail because of implicit null assignment +ERROR: domain pos_int does not allow null values +CONTEXT: PL/pgSQL function doubledecrement(pos_int) line 2 during statement block local variable initialization +create or replace function doubledecrement(p1 pos_int) returns pos_int as $$ +declare v pos_int := 0; +begin + return p1; +end$$ language plpgsql; +select doubledecrement(3); -- fail at initialization assignment +ERROR: value for domain pos_int violates check constraint "pos_int_check" +CONTEXT: PL/pgSQL function doubledecrement(pos_int) line 2 during statement block local variable initialization +create or replace function doubledecrement(p1 pos_int) returns pos_int as $$ +declare v pos_int := 1; +begin + v := p1 - 1; + return v - 1; +end$$ language plpgsql; +select doubledecrement(null); -- fail before call +ERROR: domain pos_int does not allow null values +select doubledecrement(0); -- fail before call +ERROR: value for domain pos_int violates check constraint "pos_int_check" +select doubledecrement(1); -- fail at assignment to v +ERROR: value for domain pos_int violates check constraint "pos_int_check" +CONTEXT: PL/pgSQL function doubledecrement(pos_int) line 4 at assignment +select doubledecrement(2); -- fail at return +ERROR: value for domain pos_int violates check constraint "pos_int_check" +CONTEXT: PL/pgSQL function doubledecrement(pos_int) while casting return value to function's return type +select doubledecrement(3); -- good + doubledecrement +----------------- + 1 +(1 row) + +-- Check that ALTER DOMAIN tests columns of derived types +create domain posint as int4; +-- Currently, this doesn't work for composite types, but verify it complains +create type ddtest1 as (f1 posint); +create table ddtest2(f1 ddtest1); +insert into ddtest2 values(row(-1)); +alter domain posint add constraint c1 check(value >= 0); +ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it +drop table ddtest2; +-- Likewise for domains within arrays of composite +create table ddtest2(f1 ddtest1[]); +insert into ddtest2 values('{(-1)}'); +alter domain posint add constraint c1 check(value >= 0); +ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it +drop table ddtest2; +-- Likewise for domains within domains over composite +create 
domain ddtest1d as ddtest1; +create table ddtest2(f1 ddtest1d); +insert into ddtest2 values('(-1)'); +alter domain posint add constraint c1 check(value >= 0); +ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it +drop table ddtest2; +drop domain ddtest1d; +-- Likewise for domains within domains over array of composite +create domain ddtest1d as ddtest1[]; +create table ddtest2(f1 ddtest1d); +insert into ddtest2 values('{(-1)}'); +alter domain posint add constraint c1 check(value >= 0); +ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it +drop table ddtest2; +drop domain ddtest1d; +-- Doesn't work for ranges, either +create type rposint as range (subtype = posint); +create table ddtest2(f1 rposint); +insert into ddtest2 values('(-1,3]'); +alter domain posint add constraint c1 check(value >= 0); +ERROR: cannot alter type "posint" because column "ddtest2.f1" uses it +drop table ddtest2; +drop type rposint; +alter domain posint add constraint c1 check(value >= 0); +create domain posint2 as posint check (value % 2 = 0); +create table ddtest2(f1 posint2); +insert into ddtest2 values(11); -- fail +ERROR: value for domain posint2 violates check constraint "posint2_check" +insert into ddtest2 values(-2); -- fail +ERROR: value for domain posint2 violates check constraint "c1" +insert into ddtest2 values(2); +alter domain posint add constraint c2 check(value >= 10); -- fail +ERROR: column "f1" of table "ddtest2" contains values that violate the new constraint +alter domain posint add constraint c2 check(value > 0); -- OK +drop table ddtest2; +drop type ddtest1; +drop domain posint cascade; +NOTICE: drop cascades to type posint2 +-- +-- Check enforcement of domain-related typmod in plpgsql (bug #5717) +-- +create or replace function array_elem_check(numeric) returns numeric as $$ +declare + x numeric(4,2)[1]; +begin + x[1] := $1; + return x[1]; +end$$ language plpgsql; +select array_elem_check(121.00); +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 2 must round to an absolute value less than 10^2. +CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment +select array_elem_check(1.23456); + array_elem_check +------------------ + 1.23 +(1 row) + +create domain mynums as numeric(4,2)[1]; +create or replace function array_elem_check(numeric) returns numeric as $$ +declare + x mynums; +begin + x[1] := $1; + return x[1]; +end$$ language plpgsql; +select array_elem_check(121.00); +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 2 must round to an absolute value less than 10^2. +CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment +select array_elem_check(1.23456); + array_elem_check +------------------ + 1.23 +(1 row) + +create domain mynums2 as mynums; +create or replace function array_elem_check(numeric) returns numeric as $$ +declare + x mynums2; +begin + x[1] := $1; + return x[1]; +end$$ language plpgsql; +select array_elem_check(121.00); +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 2 must round to an absolute value less than 10^2. 
+CONTEXT: PL/pgSQL function array_elem_check(numeric) line 5 at assignment +select array_elem_check(1.23456); + array_elem_check +------------------ + 1.23 +(1 row) + +drop function array_elem_check(numeric); +-- +-- Check enforcement of array-level domain constraints +-- +create domain orderedpair as int[2] check (value[1] < value[2]); +select array[1,2]::orderedpair; + array +------- + {1,2} +(1 row) + +select array[2,1]::orderedpair; -- fail +ERROR: value for domain orderedpair violates check constraint "orderedpair_check" +create temp table op (f1 orderedpair); +insert into op values (array[1,2]); +insert into op values (array[2,1]); -- fail +ERROR: value for domain orderedpair violates check constraint "orderedpair_check" +update op set f1[2] = 3; +update op set f1[2] = 0; -- fail +ERROR: value for domain orderedpair violates check constraint "orderedpair_check" +select * from op; + f1 +------- + {1,3} +(1 row) + +create or replace function array_elem_check(int) returns int as $$ +declare + x orderedpair := '{1,2}'; +begin + x[2] := $1; + return x[2]; +end$$ language plpgsql; +select array_elem_check(3); + array_elem_check +------------------ + 3 +(1 row) + +select array_elem_check(-1); +ERROR: value for domain orderedpair violates check constraint "orderedpair_check" +CONTEXT: PL/pgSQL function array_elem_check(integer) line 5 at assignment +drop function array_elem_check(int); +-- +-- Check enforcement of changing constraints in plpgsql +-- +create domain di as int; +create function dom_check(int) returns di as $$ +declare d di; +begin + d := $1::di; + return d; +end +$$ language plpgsql immutable; +select dom_check(0); + dom_check +----------- + 0 +(1 row) + +alter domain di add constraint pos check (value > 0); +select dom_check(0); -- fail +ERROR: value for domain di violates check constraint "pos" +CONTEXT: PL/pgSQL function dom_check(integer) line 4 at assignment +alter domain di drop constraint pos; +select dom_check(0); + dom_check +----------- + 0 +(1 row) + +-- implicit cast during assignment is a separate code path, test that too +create or replace function dom_check(int) returns di as $$ +declare d di; +begin + d := $1; + return d; +end +$$ language plpgsql immutable; +select dom_check(0); + dom_check +----------- + 0 +(1 row) + +alter domain di add constraint pos check (value > 0); +select dom_check(0); -- fail +ERROR: value for domain di violates check constraint "pos" +CONTEXT: PL/pgSQL function dom_check(integer) line 4 at assignment +alter domain di drop constraint pos; +select dom_check(0); + dom_check +----------- + 0 +(1 row) + +drop function dom_check(int); +drop domain di; +-- +-- Check use of a (non-inline-able) SQL function in a domain constraint; +-- this has caused issues in the past +-- +create function sql_is_distinct_from(anyelement, anyelement) +returns boolean language sql +as 'select $1 is distinct from $2 limit 1'; +create domain inotnull int + check (sql_is_distinct_from(value, null)); +select 1::inotnull; + inotnull +---------- + 1 +(1 row) + +select null::inotnull; +ERROR: value for domain inotnull violates check constraint "inotnull_check" +create table dom_table (x inotnull); +insert into dom_table values ('1'); +insert into dom_table values (1); +insert into dom_table values (null); +ERROR: value for domain inotnull violates check constraint "inotnull_check" +drop table dom_table; +drop domain inotnull; +drop function sql_is_distinct_from(anyelement, anyelement); +-- +-- Renaming +-- +create domain testdomain1 as int; +alter domain testdomain1 
rename to testdomain2; +alter type testdomain2 rename to testdomain3; -- alter type also works +drop domain testdomain3; +-- +-- Renaming domain constraints +-- +create domain testdomain1 as int constraint unsigned check (value > 0); +alter domain testdomain1 rename constraint unsigned to unsigned_foo; +alter domain testdomain1 drop constraint unsigned_foo; +drop domain testdomain1; diff --git a/src/test/regress/expected/drop_if_exists.out b/src/test/regress/expected/drop_if_exists.out new file mode 100644 index 0000000..5e44c2c --- /dev/null +++ b/src/test/regress/expected/drop_if_exists.out @@ -0,0 +1,342 @@ +-- +-- IF EXISTS tests +-- +-- table (will be really dropped at the end) +DROP TABLE test_exists; +ERROR: table "test_exists" does not exist +DROP TABLE IF EXISTS test_exists; +NOTICE: table "test_exists" does not exist, skipping +CREATE TABLE test_exists (a int, b text); +-- view +DROP VIEW test_view_exists; +ERROR: view "test_view_exists" does not exist +DROP VIEW IF EXISTS test_view_exists; +NOTICE: view "test_view_exists" does not exist, skipping +CREATE VIEW test_view_exists AS select * from test_exists; +DROP VIEW IF EXISTS test_view_exists; +DROP VIEW test_view_exists; +ERROR: view "test_view_exists" does not exist +-- index +DROP INDEX test_index_exists; +ERROR: index "test_index_exists" does not exist +DROP INDEX IF EXISTS test_index_exists; +NOTICE: index "test_index_exists" does not exist, skipping +CREATE INDEX test_index_exists on test_exists(a); +DROP INDEX IF EXISTS test_index_exists; +DROP INDEX test_index_exists; +ERROR: index "test_index_exists" does not exist +-- sequence +DROP SEQUENCE test_sequence_exists; +ERROR: sequence "test_sequence_exists" does not exist +DROP SEQUENCE IF EXISTS test_sequence_exists; +NOTICE: sequence "test_sequence_exists" does not exist, skipping +CREATE SEQUENCE test_sequence_exists; +DROP SEQUENCE IF EXISTS test_sequence_exists; +DROP SEQUENCE test_sequence_exists; +ERROR: sequence "test_sequence_exists" does not exist +-- schema +DROP SCHEMA test_schema_exists; +ERROR: schema "test_schema_exists" does not exist +DROP SCHEMA IF EXISTS test_schema_exists; +NOTICE: schema "test_schema_exists" does not exist, skipping +CREATE SCHEMA test_schema_exists; +DROP SCHEMA IF EXISTS test_schema_exists; +DROP SCHEMA test_schema_exists; +ERROR: schema "test_schema_exists" does not exist +-- type +DROP TYPE test_type_exists; +ERROR: type "test_type_exists" does not exist +DROP TYPE IF EXISTS test_type_exists; +NOTICE: type "test_type_exists" does not exist, skipping +CREATE type test_type_exists as (a int, b text); +DROP TYPE IF EXISTS test_type_exists; +DROP TYPE test_type_exists; +ERROR: type "test_type_exists" does not exist +-- domain +DROP DOMAIN test_domain_exists; +ERROR: type "test_domain_exists" does not exist +DROP DOMAIN IF EXISTS test_domain_exists; +NOTICE: type "test_domain_exists" does not exist, skipping +CREATE domain test_domain_exists as int not null check (value > 0); +DROP DOMAIN IF EXISTS test_domain_exists; +DROP DOMAIN test_domain_exists; +ERROR: type "test_domain_exists" does not exist +--- +--- role/user/group +--- +CREATE USER regress_test_u1; +CREATE ROLE regress_test_r1; +CREATE GROUP regress_test_g1; +DROP USER regress_test_u2; +ERROR: role "regress_test_u2" does not exist +DROP USER IF EXISTS regress_test_u1, regress_test_u2; +NOTICE: role "regress_test_u2" does not exist, skipping +DROP USER regress_test_u1; +ERROR: role "regress_test_u1" does not exist +DROP ROLE regress_test_r2; +ERROR: role "regress_test_r2" does 
not exist +DROP ROLE IF EXISTS regress_test_r1, regress_test_r2; +NOTICE: role "regress_test_r2" does not exist, skipping +DROP ROLE regress_test_r1; +ERROR: role "regress_test_r1" does not exist +DROP GROUP regress_test_g2; +ERROR: role "regress_test_g2" does not exist +DROP GROUP IF EXISTS regress_test_g1, regress_test_g2; +NOTICE: role "regress_test_g2" does not exist, skipping +DROP GROUP regress_test_g1; +ERROR: role "regress_test_g1" does not exist +-- collation +DROP COLLATION IF EXISTS test_collation_exists; +NOTICE: collation "test_collation_exists" does not exist, skipping +-- conversion +DROP CONVERSION test_conversion_exists; +ERROR: conversion "test_conversion_exists" does not exist +DROP CONVERSION IF EXISTS test_conversion_exists; +NOTICE: conversion "test_conversion_exists" does not exist, skipping +CREATE CONVERSION test_conversion_exists + FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8; +DROP CONVERSION test_conversion_exists; +-- text search parser +DROP TEXT SEARCH PARSER test_tsparser_exists; +ERROR: text search parser "test_tsparser_exists" does not exist +DROP TEXT SEARCH PARSER IF EXISTS test_tsparser_exists; +NOTICE: text search parser "test_tsparser_exists" does not exist, skipping +-- text search dictionary +DROP TEXT SEARCH DICTIONARY test_tsdict_exists; +ERROR: text search dictionary "test_tsdict_exists" does not exist +DROP TEXT SEARCH DICTIONARY IF EXISTS test_tsdict_exists; +NOTICE: text search dictionary "test_tsdict_exists" does not exist, skipping +CREATE TEXT SEARCH DICTIONARY test_tsdict_exists ( + Template=ispell, + DictFile=ispell_sample, + AffFile=ispell_sample +); +DROP TEXT SEARCH DICTIONARY test_tsdict_exists; +-- test search template +DROP TEXT SEARCH TEMPLATE test_tstemplate_exists; +ERROR: text search template "test_tstemplate_exists" does not exist +DROP TEXT SEARCH TEMPLATE IF EXISTS test_tstemplate_exists; +NOTICE: text search template "test_tstemplate_exists" does not exist, skipping +-- text search configuration +DROP TEXT SEARCH CONFIGURATION test_tsconfig_exists; +ERROR: text search configuration "test_tsconfig_exists" does not exist +DROP TEXT SEARCH CONFIGURATION IF EXISTS test_tsconfig_exists; +NOTICE: text search configuration "test_tsconfig_exists" does not exist, skipping +CREATE TEXT SEARCH CONFIGURATION test_tsconfig_exists (COPY=english); +DROP TEXT SEARCH CONFIGURATION test_tsconfig_exists; +-- extension +DROP EXTENSION test_extension_exists; +ERROR: extension "test_extension_exists" does not exist +DROP EXTENSION IF EXISTS test_extension_exists; +NOTICE: extension "test_extension_exists" does not exist, skipping +-- functions +DROP FUNCTION test_function_exists(); +ERROR: function test_function_exists() does not exist +DROP FUNCTION IF EXISTS test_function_exists(); +NOTICE: function test_function_exists() does not exist, skipping +DROP FUNCTION test_function_exists(int, text, int[]); +ERROR: function test_function_exists(integer, text, integer[]) does not exist +DROP FUNCTION IF EXISTS test_function_exists(int, text, int[]); +NOTICE: function test_function_exists(pg_catalog.int4,text,pg_catalog.int4[]) does not exist, skipping +-- aggregate +DROP AGGREGATE test_aggregate_exists(*); +ERROR: aggregate test_aggregate_exists(*) does not exist +DROP AGGREGATE IF EXISTS test_aggregate_exists(*); +NOTICE: aggregate test_aggregate_exists() does not exist, skipping +DROP AGGREGATE test_aggregate_exists(int); +ERROR: aggregate test_aggregate_exists(integer) does not exist +DROP AGGREGATE IF EXISTS test_aggregate_exists(int); +NOTICE: 
aggregate test_aggregate_exists(pg_catalog.int4) does not exist, skipping +-- operator +DROP OPERATOR @#@ (int, int); +ERROR: operator does not exist: integer @#@ integer +DROP OPERATOR IF EXISTS @#@ (int, int); +NOTICE: operator @#@ does not exist, skipping +CREATE OPERATOR @#@ + (leftarg = int8, rightarg = int8, procedure = int8xor); +DROP OPERATOR @#@ (int8, int8); +-- language +DROP LANGUAGE test_language_exists; +ERROR: language "test_language_exists" does not exist +DROP LANGUAGE IF EXISTS test_language_exists; +NOTICE: language "test_language_exists" does not exist, skipping +-- cast +DROP CAST (text AS text); +ERROR: cast from type text to type text does not exist +DROP CAST IF EXISTS (text AS text); +NOTICE: cast from type text to type text does not exist, skipping +-- trigger +DROP TRIGGER test_trigger_exists ON test_exists; +ERROR: trigger "test_trigger_exists" for table "test_exists" does not exist +DROP TRIGGER IF EXISTS test_trigger_exists ON test_exists; +NOTICE: trigger "test_trigger_exists" for relation "test_exists" does not exist, skipping +DROP TRIGGER test_trigger_exists ON no_such_table; +ERROR: relation "no_such_table" does not exist +DROP TRIGGER IF EXISTS test_trigger_exists ON no_such_table; +NOTICE: relation "no_such_table" does not exist, skipping +DROP TRIGGER test_trigger_exists ON no_such_schema.no_such_table; +ERROR: schema "no_such_schema" does not exist +DROP TRIGGER IF EXISTS test_trigger_exists ON no_such_schema.no_such_table; +NOTICE: schema "no_such_schema" does not exist, skipping +CREATE TRIGGER test_trigger_exists + BEFORE UPDATE ON test_exists + FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); +DROP TRIGGER test_trigger_exists ON test_exists; +-- rule +DROP RULE test_rule_exists ON test_exists; +ERROR: rule "test_rule_exists" for relation "test_exists" does not exist +DROP RULE IF EXISTS test_rule_exists ON test_exists; +NOTICE: rule "test_rule_exists" for relation "test_exists" does not exist, skipping +DROP RULE test_rule_exists ON no_such_table; +ERROR: relation "no_such_table" does not exist +DROP RULE IF EXISTS test_rule_exists ON no_such_table; +NOTICE: relation "no_such_table" does not exist, skipping +DROP RULE test_rule_exists ON no_such_schema.no_such_table; +ERROR: schema "no_such_schema" does not exist +DROP RULE IF EXISTS test_rule_exists ON no_such_schema.no_such_table; +NOTICE: schema "no_such_schema" does not exist, skipping +CREATE RULE test_rule_exists AS ON INSERT TO test_exists + DO INSTEAD + INSERT INTO test_exists VALUES (NEW.a, NEW.b || NEW.a::text); +DROP RULE test_rule_exists ON test_exists; +-- foreign data wrapper +DROP FOREIGN DATA WRAPPER test_fdw_exists; +ERROR: foreign-data wrapper "test_fdw_exists" does not exist +DROP FOREIGN DATA WRAPPER IF EXISTS test_fdw_exists; +NOTICE: foreign-data wrapper "test_fdw_exists" does not exist, skipping +-- foreign server +DROP SERVER test_server_exists; +ERROR: server "test_server_exists" does not exist +DROP SERVER IF EXISTS test_server_exists; +NOTICE: server "test_server_exists" does not exist, skipping +-- operator class +DROP OPERATOR CLASS test_operator_class USING btree; +ERROR: operator class "test_operator_class" does not exist for access method "btree" +DROP OPERATOR CLASS IF EXISTS test_operator_class USING btree; +NOTICE: operator class "test_operator_class" does not exist for access method "btree", skipping +DROP OPERATOR CLASS test_operator_class USING no_such_am; +ERROR: access method "no_such_am" does not exist +DROP OPERATOR CLASS IF EXISTS 
test_operator_class USING no_such_am; +ERROR: access method "no_such_am" does not exist +-- operator family +DROP OPERATOR FAMILY test_operator_family USING btree; +ERROR: operator family "test_operator_family" does not exist for access method "btree" +DROP OPERATOR FAMILY IF EXISTS test_operator_family USING btree; +NOTICE: operator family "test_operator_family" does not exist for access method "btree", skipping +DROP OPERATOR FAMILY test_operator_family USING no_such_am; +ERROR: access method "no_such_am" does not exist +DROP OPERATOR FAMILY IF EXISTS test_operator_family USING no_such_am; +ERROR: access method "no_such_am" does not exist +-- access method +DROP ACCESS METHOD no_such_am; +ERROR: access method "no_such_am" does not exist +DROP ACCESS METHOD IF EXISTS no_such_am; +NOTICE: access method "no_such_am" does not exist, skipping +-- drop the table +DROP TABLE IF EXISTS test_exists; +DROP TABLE test_exists; +ERROR: table "test_exists" does not exist +-- be tolerant with missing schemas, types, etc +DROP AGGREGATE IF EXISTS no_such_schema.foo(int); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP AGGREGATE IF EXISTS foo(no_such_type); +NOTICE: type "no_such_type" does not exist, skipping +DROP AGGREGATE IF EXISTS foo(no_such_schema.no_such_type); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP CAST IF EXISTS (INTEGER AS no_such_type2); +NOTICE: type "no_such_type2" does not exist, skipping +DROP CAST IF EXISTS (no_such_type1 AS INTEGER); +NOTICE: type "no_such_type1" does not exist, skipping +DROP CAST IF EXISTS (INTEGER AS no_such_schema.bar); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP CAST IF EXISTS (no_such_schema.foo AS INTEGER); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP COLLATION IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP CONVERSION IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP DOMAIN IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP FOREIGN TABLE IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP FUNCTION IF EXISTS no_such_schema.foo(); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP FUNCTION IF EXISTS foo(no_such_type); +NOTICE: type "no_such_type" does not exist, skipping +DROP FUNCTION IF EXISTS foo(no_such_schema.no_such_type); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP INDEX IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP MATERIALIZED VIEW IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP OPERATOR IF EXISTS no_such_schema.+ (int, int); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP OPERATOR IF EXISTS + (no_such_type, no_such_type); +NOTICE: type "no_such_type" does not exist, skipping +DROP OPERATOR IF EXISTS + (no_such_schema.no_such_type, no_such_schema.no_such_type); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP OPERATOR IF EXISTS # (NONE, no_such_schema.no_such_type); +NOTICE: schema "no_such_schema" does not exist, skipping +DROP OPERATOR CLASS IF EXISTS no_such_schema.widget_ops USING btree; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP OPERATOR FAMILY IF EXISTS no_such_schema.float_ops USING btree; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP RULE IF EXISTS foo ON no_such_schema.bar; +NOTICE: schema 
"no_such_schema" does not exist, skipping +DROP SEQUENCE IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TABLE IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TEXT SEARCH CONFIGURATION IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TEXT SEARCH DICTIONARY IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TEXT SEARCH PARSER IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TEXT SEARCH TEMPLATE IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TRIGGER IF EXISTS foo ON no_such_schema.bar; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP TYPE IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +DROP VIEW IF EXISTS no_such_schema.foo; +NOTICE: schema "no_such_schema" does not exist, skipping +-- Check we receive an ambiguous function error when there are +-- multiple matching functions. +CREATE FUNCTION test_ambiguous_funcname(int) returns int as $$ select $1; $$ language sql; +CREATE FUNCTION test_ambiguous_funcname(text) returns text as $$ select $1; $$ language sql; +DROP FUNCTION test_ambiguous_funcname; +ERROR: function name "test_ambiguous_funcname" is not unique +HINT: Specify the argument list to select the function unambiguously. +DROP FUNCTION IF EXISTS test_ambiguous_funcname; +ERROR: function name "test_ambiguous_funcname" is not unique +HINT: Specify the argument list to select the function unambiguously. +-- cleanup +DROP FUNCTION test_ambiguous_funcname(int); +DROP FUNCTION test_ambiguous_funcname(text); +-- Likewise for procedures. +CREATE PROCEDURE test_ambiguous_procname(int) as $$ begin end; $$ language plpgsql; +CREATE PROCEDURE test_ambiguous_procname(text) as $$ begin end; $$ language plpgsql; +DROP PROCEDURE test_ambiguous_procname; +ERROR: procedure name "test_ambiguous_procname" is not unique +HINT: Specify the argument list to select the procedure unambiguously. +DROP PROCEDURE IF EXISTS test_ambiguous_procname; +ERROR: procedure name "test_ambiguous_procname" is not unique +HINT: Specify the argument list to select the procedure unambiguously. +-- Check we get a similar error if we use ROUTINE instead of PROCEDURE. +DROP ROUTINE IF EXISTS test_ambiguous_procname; +ERROR: routine name "test_ambiguous_procname" is not unique +HINT: Specify the argument list to select the routine unambiguously. +-- cleanup +DROP PROCEDURE test_ambiguous_procname(int); +DROP PROCEDURE test_ambiguous_procname(text); +-- This test checks both the functionality of 'if exists' and the syntax +-- of the drop database command. 
+drop database test_database_exists (force); +ERROR: database "test_database_exists" does not exist +drop database test_database_exists with (force); +ERROR: database "test_database_exists" does not exist +drop database if exists test_database_exists (force); +NOTICE: database "test_database_exists" does not exist, skipping +drop database if exists test_database_exists with (force); +NOTICE: database "test_database_exists" does not exist, skipping diff --git a/src/test/regress/expected/drop_operator.out b/src/test/regress/expected/drop_operator.out new file mode 100644 index 0000000..cc8f5e7 --- /dev/null +++ b/src/test/regress/expected/drop_operator.out @@ -0,0 +1,61 @@ +CREATE OPERATOR === ( + PROCEDURE = int8eq, + LEFTARG = bigint, + RIGHTARG = bigint, + COMMUTATOR = === +); +CREATE OPERATOR !== ( + PROCEDURE = int8ne, + LEFTARG = bigint, + RIGHTARG = bigint, + NEGATOR = ===, + COMMUTATOR = !== +); +DROP OPERATOR !==(bigint, bigint); +SELECT ctid, oprcom +FROM pg_catalog.pg_operator fk +WHERE oprcom != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprcom); + ctid | oprcom +------+-------- +(0 rows) + +SELECT ctid, oprnegate +FROM pg_catalog.pg_operator fk +WHERE oprnegate != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprnegate); + ctid | oprnegate +------+----------- +(0 rows) + +DROP OPERATOR ===(bigint, bigint); +CREATE OPERATOR <| ( + PROCEDURE = int8lt, + LEFTARG = bigint, + RIGHTARG = bigint +); +CREATE OPERATOR |> ( + PROCEDURE = int8gt, + LEFTARG = bigint, + RIGHTARG = bigint, + NEGATOR = <|, + COMMUTATOR = <| +); +DROP OPERATOR |>(bigint, bigint); +SELECT ctid, oprcom +FROM pg_catalog.pg_operator fk +WHERE oprcom != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprcom); + ctid | oprcom +------+-------- +(0 rows) + +SELECT ctid, oprnegate +FROM pg_catalog.pg_operator fk +WHERE oprnegate != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_operator pk WHERE pk.oid = fk.oprnegate); + ctid | oprnegate +------+----------- +(0 rows) + +DROP OPERATOR <|(bigint, bigint); diff --git a/src/test/regress/expected/enum.out b/src/test/regress/expected/enum.out new file mode 100644 index 0000000..0115968 --- /dev/null +++ b/src/test/regress/expected/enum.out @@ -0,0 +1,719 @@ +-- +-- Enum tests +-- +CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple'); +-- +-- Did it create the right number of rows? 
+-- +SELECT COUNT(*) FROM pg_enum WHERE enumtypid = 'rainbow'::regtype; + count +------- + 6 +(1 row) + +-- +-- I/O functions +-- +SELECT 'red'::rainbow; + rainbow +--------- + red +(1 row) + +SELECT 'mauve'::rainbow; +ERROR: invalid input value for enum rainbow: "mauve" +LINE 1: SELECT 'mauve'::rainbow; + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('red', 'rainbow'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('mauve', 'rainbow'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('mauve', 'rainbow'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------+------+---------------- + invalid input value for enum rainbow: "mauve" | | | 22P02 +(1 row) + +\x +SELECT * FROM pg_input_error_info(repeat('too_long', 32), 'rainbow'); +-[ RECORD 1 ]--+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +message | invalid input value for enum rainbow: "too_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_longtoo_long" +detail | +hint | +sql_error_code | 22P02 + +\x +-- +-- adding new values +-- +CREATE TYPE planets AS ENUM ( 'venus', 'earth', 'mars' ); +SELECT enumlabel, enumsortorder +FROM pg_enum +WHERE enumtypid = 'planets'::regtype +ORDER BY 2; + enumlabel | enumsortorder +-----------+--------------- + venus | 1 + earth | 2 + mars | 3 +(3 rows) + +ALTER TYPE planets ADD VALUE 'uranus'; +SELECT enumlabel, enumsortorder +FROM pg_enum +WHERE enumtypid = 'planets'::regtype +ORDER BY 2; + enumlabel | enumsortorder +-----------+--------------- + venus | 1 + earth | 2 + mars | 3 + uranus | 4 +(4 rows) + +ALTER TYPE planets ADD VALUE 'mercury' BEFORE 'venus'; +ALTER TYPE planets ADD VALUE 'saturn' BEFORE 'uranus'; +ALTER TYPE planets ADD VALUE 'jupiter' AFTER 'mars'; +ALTER TYPE planets ADD VALUE 'neptune' AFTER 'uranus'; +SELECT enumlabel, enumsortorder +FROM pg_enum +WHERE enumtypid = 'planets'::regtype +ORDER BY 2; + enumlabel | enumsortorder +-----------+--------------- + mercury | 0 + venus | 1 + earth | 2 + mars | 3 + jupiter | 3.25 + saturn | 3.5 + uranus | 4 + neptune | 5 +(8 rows) + +SELECT enumlabel, enumsortorder +FROM pg_enum +WHERE enumtypid = 'planets'::regtype +ORDER BY enumlabel::planets; + enumlabel | enumsortorder +-----------+--------------- + mercury | 0 + venus | 1 + earth | 2 + mars | 3 + jupiter | 3.25 + saturn | 3.5 + uranus | 4 + neptune | 5 +(8 rows) + +-- errors for adding labels +ALTER TYPE planets ADD VALUE + 'plutoplutoplutoplutoplutoplutoplutoplutoplutoplutoplutoplutoplutopluto'; +ERROR: invalid enum label "plutoplutoplutoplutoplutoplutoplutoplutoplutoplutoplutoplutoplutopluto" +DETAIL: Labels must be 63 bytes or less. 
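Aside, not part of the regression output: the 63-byte limit reported in the DETAIL above matches the "name" type used to store labels in pg_enum (NAMEDATALEN - 1 bytes by default), so a label can be pre-checked by byte length. A minimal sketch; the literal is just the rejected label shortened for readability:
select octet_length('plutoplutoplutopluto') as label_bytes,
       octet_length('plutoplutoplutopluto') <= 63 as label_fits;
-- octet_length counts bytes, which is what the limit is defined in, so a
-- multi-byte label can exceed the limit with fewer than 63 characters.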
+ALTER TYPE planets ADD VALUE 'pluto' AFTER 'zeus'; +ERROR: "zeus" is not an existing enum label +-- if not exists tests +-- existing value gives error +ALTER TYPE planets ADD VALUE 'mercury'; +ERROR: enum label "mercury" already exists +-- unless IF NOT EXISTS is specified +ALTER TYPE planets ADD VALUE IF NOT EXISTS 'mercury'; +NOTICE: enum label "mercury" already exists, skipping +-- should be neptune, not mercury +SELECT enum_last(NULL::planets); + enum_last +----------- + neptune +(1 row) + +ALTER TYPE planets ADD VALUE IF NOT EXISTS 'pluto'; +-- should be pluto, i.e. the new value +SELECT enum_last(NULL::planets); + enum_last +----------- + pluto +(1 row) + +-- +-- Test inserting so many values that we have to renumber +-- +create type insenum as enum ('L1', 'L2'); +alter type insenum add value 'i1' before 'L2'; +alter type insenum add value 'i2' before 'L2'; +alter type insenum add value 'i3' before 'L2'; +alter type insenum add value 'i4' before 'L2'; +alter type insenum add value 'i5' before 'L2'; +alter type insenum add value 'i6' before 'L2'; +alter type insenum add value 'i7' before 'L2'; +alter type insenum add value 'i8' before 'L2'; +alter type insenum add value 'i9' before 'L2'; +alter type insenum add value 'i10' before 'L2'; +alter type insenum add value 'i11' before 'L2'; +alter type insenum add value 'i12' before 'L2'; +alter type insenum add value 'i13' before 'L2'; +alter type insenum add value 'i14' before 'L2'; +alter type insenum add value 'i15' before 'L2'; +alter type insenum add value 'i16' before 'L2'; +alter type insenum add value 'i17' before 'L2'; +alter type insenum add value 'i18' before 'L2'; +alter type insenum add value 'i19' before 'L2'; +alter type insenum add value 'i20' before 'L2'; +alter type insenum add value 'i21' before 'L2'; +alter type insenum add value 'i22' before 'L2'; +alter type insenum add value 'i23' before 'L2'; +alter type insenum add value 'i24' before 'L2'; +alter type insenum add value 'i25' before 'L2'; +alter type insenum add value 'i26' before 'L2'; +alter type insenum add value 'i27' before 'L2'; +alter type insenum add value 'i28' before 'L2'; +alter type insenum add value 'i29' before 'L2'; +alter type insenum add value 'i30' before 'L2'; +-- The exact values of enumsortorder will now depend on the local properties +-- of float4, but in any reasonable implementation we should get at least +-- 20 splits before having to renumber; so only hide values > 20. 
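Before the query below, a brief sketch of the midpoint behaviour that comment relies on (an editorial aside, not part of the expected output; demo_enum is a throwaway name). Each interior insertion takes the midpoint of its neighbours' sort orders, so repeated insertions before the same label halve the remaining float4 gap each time until it can no longer be split exactly, at which point the entries are renumbered, which is why the visible sort orders in the result below are whole numbers:
create type demo_enum as enum ('lo', 'hi');
alter type demo_enum add value 'mid1' before 'hi';
alter type demo_enum add value 'mid2' before 'hi';
select enumlabel, enumsortorder from pg_enum
 where enumtypid = 'demo_enum'::regtype order by enumsortorder;
-- expected: lo = 1, mid1 = 1.5, mid2 = 1.75, hi = 2
drop type demo_enum;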
+SELECT enumlabel, + case when enumsortorder > 20 then null else enumsortorder end as so +FROM pg_enum +WHERE enumtypid = 'insenum'::regtype +ORDER BY enumsortorder; + enumlabel | so +-----------+---- + L1 | 1 + i1 | 2 + i2 | 3 + i3 | 4 + i4 | 5 + i5 | 6 + i6 | 7 + i7 | 8 + i8 | 9 + i9 | 10 + i10 | 11 + i11 | 12 + i12 | 13 + i13 | 14 + i14 | 15 + i15 | 16 + i16 | 17 + i17 | 18 + i18 | 19 + i19 | 20 + i20 | + i21 | + i22 | + i23 | + i24 | + i25 | + i26 | + i27 | + i28 | + i29 | + i30 | + L2 | +(32 rows) + +-- +-- Basic table creation, row selection +-- +CREATE TABLE enumtest (col rainbow); +INSERT INTO enumtest values ('red'), ('orange'), ('yellow'), ('green'); +COPY enumtest FROM stdin; +SELECT * FROM enumtest; + col +-------- + red + orange + yellow + green + blue + purple +(6 rows) + +-- +-- Operators, no index +-- +SELECT * FROM enumtest WHERE col = 'orange'; + col +-------- + orange +(1 row) + +SELECT * FROM enumtest WHERE col <> 'orange' ORDER BY col; + col +-------- + red + yellow + green + blue + purple +(5 rows) + +SELECT * FROM enumtest WHERE col > 'yellow' ORDER BY col; + col +-------- + green + blue + purple +(3 rows) + +SELECT * FROM enumtest WHERE col >= 'yellow' ORDER BY col; + col +-------- + yellow + green + blue + purple +(4 rows) + +SELECT * FROM enumtest WHERE col < 'green' ORDER BY col; + col +-------- + red + orange + yellow +(3 rows) + +SELECT * FROM enumtest WHERE col <= 'green' ORDER BY col; + col +-------- + red + orange + yellow + green +(4 rows) + +-- +-- Cast to/from text +-- +SELECT 'red'::rainbow::text || 'hithere'; + ?column? +------------ + redhithere +(1 row) + +SELECT 'red'::text::rainbow = 'red'::rainbow; + ?column? +---------- + t +(1 row) + +-- +-- Aggregates +-- +SELECT min(col) FROM enumtest; + min +----- + red +(1 row) + +SELECT max(col) FROM enumtest; + max +-------- + purple +(1 row) + +SELECT max(col) FROM enumtest WHERE col < 'green'; + max +-------- + yellow +(1 row) + +-- +-- Index tests, force use of index +-- +SET enable_seqscan = off; +SET enable_bitmapscan = off; +-- +-- Btree index / opclass with the various operators +-- +CREATE UNIQUE INDEX enumtest_btree ON enumtest USING btree (col); +SELECT * FROM enumtest WHERE col = 'orange'; + col +-------- + orange +(1 row) + +SELECT * FROM enumtest WHERE col <> 'orange' ORDER BY col; + col +-------- + red + yellow + green + blue + purple +(5 rows) + +SELECT * FROM enumtest WHERE col > 'yellow' ORDER BY col; + col +-------- + green + blue + purple +(3 rows) + +SELECT * FROM enumtest WHERE col >= 'yellow' ORDER BY col; + col +-------- + yellow + green + blue + purple +(4 rows) + +SELECT * FROM enumtest WHERE col < 'green' ORDER BY col; + col +-------- + red + orange + yellow +(3 rows) + +SELECT * FROM enumtest WHERE col <= 'green' ORDER BY col; + col +-------- + red + orange + yellow + green +(4 rows) + +SELECT min(col) FROM enumtest; + min +----- + red +(1 row) + +SELECT max(col) FROM enumtest; + max +-------- + purple +(1 row) + +SELECT max(col) FROM enumtest WHERE col < 'green'; + max +-------- + yellow +(1 row) + +DROP INDEX enumtest_btree; +-- +-- Hash index / opclass with the = operator +-- +CREATE INDEX enumtest_hash ON enumtest USING hash (col); +SELECT * FROM enumtest WHERE col = 'orange'; + col +-------- + orange +(1 row) + +DROP INDEX enumtest_hash; +-- +-- End index tests +-- +RESET enable_seqscan; +RESET enable_bitmapscan; +-- +-- Domains over enums +-- +CREATE DOMAIN rgb AS rainbow CHECK (VALUE IN ('red', 'green', 'blue')); +SELECT 'red'::rgb; + rgb +----- + red +(1 row) + +SELECT 
'purple'::rgb; +ERROR: value for domain rgb violates check constraint "rgb_check" +SELECT 'purple'::rainbow::rgb; +ERROR: value for domain rgb violates check constraint "rgb_check" +DROP DOMAIN rgb; +-- +-- Arrays +-- +SELECT '{red,green,blue}'::rainbow[]; + rainbow +------------------ + {red,green,blue} +(1 row) + +SELECT ('{red,green,blue}'::rainbow[])[2]; + rainbow +--------- + green +(1 row) + +SELECT 'red' = ANY ('{red,green,blue}'::rainbow[]); + ?column? +---------- + t +(1 row) + +SELECT 'yellow' = ANY ('{red,green,blue}'::rainbow[]); + ?column? +---------- + f +(1 row) + +SELECT 'red' = ALL ('{red,green,blue}'::rainbow[]); + ?column? +---------- + f +(1 row) + +SELECT 'red' = ALL ('{red,red}'::rainbow[]); + ?column? +---------- + t +(1 row) + +-- +-- Support functions +-- +SELECT enum_first(NULL::rainbow); + enum_first +------------ + red +(1 row) + +SELECT enum_last('green'::rainbow); + enum_last +----------- + purple +(1 row) + +SELECT enum_range(NULL::rainbow); + enum_range +--------------------------------------- + {red,orange,yellow,green,blue,purple} +(1 row) + +SELECT enum_range('orange'::rainbow, 'green'::rainbow); + enum_range +----------------------- + {orange,yellow,green} +(1 row) + +SELECT enum_range(NULL, 'green'::rainbow); + enum_range +--------------------------- + {red,orange,yellow,green} +(1 row) + +SELECT enum_range('orange'::rainbow, NULL); + enum_range +----------------------------------- + {orange,yellow,green,blue,purple} +(1 row) + +SELECT enum_range(NULL::rainbow, NULL); + enum_range +--------------------------------------- + {red,orange,yellow,green,blue,purple} +(1 row) + +-- +-- User functions, can't test perl/python etc here since may not be compiled. +-- +CREATE FUNCTION echo_me(anyenum) RETURNS text AS $$ +BEGIN +RETURN $1::text || 'omg'; +END +$$ LANGUAGE plpgsql; +SELECT echo_me('red'::rainbow); + echo_me +--------- + redomg +(1 row) + +-- +-- Concrete function should override generic one +-- +CREATE FUNCTION echo_me(rainbow) RETURNS text AS $$ +BEGIN +RETURN $1::text || 'wtf'; +END +$$ LANGUAGE plpgsql; +SELECT echo_me('red'::rainbow); + echo_me +--------- + redwtf +(1 row) + +-- +-- If we drop the original generic one, we don't have to qualify the type +-- anymore, since there's only one match +-- +DROP FUNCTION echo_me(anyenum); +SELECT echo_me('red'); + echo_me +--------- + redwtf +(1 row) + +DROP FUNCTION echo_me(rainbow); +-- +-- RI triggers on enum types +-- +CREATE TABLE enumtest_parent (id rainbow PRIMARY KEY); +CREATE TABLE enumtest_child (parent rainbow REFERENCES enumtest_parent); +INSERT INTO enumtest_parent VALUES ('red'); +INSERT INTO enumtest_child VALUES ('red'); +INSERT INTO enumtest_child VALUES ('blue'); -- fail +ERROR: insert or update on table "enumtest_child" violates foreign key constraint "enumtest_child_parent_fkey" +DETAIL: Key (parent)=(blue) is not present in table "enumtest_parent". +DELETE FROM enumtest_parent; -- fail +ERROR: update or delete on table "enumtest_parent" violates foreign key constraint "enumtest_child_parent_fkey" on table "enumtest_child" +DETAIL: Key (id)=(red) is still referenced from table "enumtest_child". +-- +-- cross-type RI should fail +-- +CREATE TYPE bogus AS ENUM('good', 'bad', 'ugly'); +CREATE TABLE enumtest_bogus_child(parent bogus REFERENCES enumtest_parent); +ERROR: foreign key constraint "enumtest_bogus_child_parent_fkey" cannot be implemented +DETAIL: Key columns "parent" and "id" are of incompatible types: bogus and rainbow. 
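One way to read the failure above, offered as an aside rather than as part of the test: the foreign-key machinery needs an equality operator joining the two column types, and no cross-type equality operator exists between two distinct enum types. A quick catalog check (hypothetical, run while the bogus type still exists) makes that visible:
select count(*) as cross_type_eq_ops
  from pg_operator
 where oprname = '='
   and oprleft = 'bogus'::regtype
   and oprright = 'rainbow'::regtype;
-- expected: 0, hence "cannot be implemented"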
+DROP TYPE bogus; +-- check renaming a value +ALTER TYPE rainbow RENAME VALUE 'red' TO 'crimson'; +SELECT enumlabel, enumsortorder +FROM pg_enum +WHERE enumtypid = 'rainbow'::regtype +ORDER BY 2; + enumlabel | enumsortorder +-----------+--------------- + crimson | 1 + orange | 2 + yellow | 3 + green | 4 + blue | 5 + purple | 6 +(6 rows) + +-- check that renaming a non-existent value fails +ALTER TYPE rainbow RENAME VALUE 'red' TO 'crimson'; +ERROR: "red" is not an existing enum label +-- check that renaming to an existent value fails +ALTER TYPE rainbow RENAME VALUE 'blue' TO 'green'; +ERROR: enum label "green" already exists +-- +-- check transactional behaviour of ALTER TYPE ... ADD VALUE +-- +CREATE TYPE bogus AS ENUM('good'); +-- check that we can add new values to existing enums in a transaction +-- but we can't use them +BEGIN; +ALTER TYPE bogus ADD VALUE 'new'; +SAVEPOINT x; +SELECT 'new'::bogus; -- unsafe +ERROR: unsafe use of new value "new" of enum type bogus +LINE 1: SELECT 'new'::bogus; + ^ +HINT: New enum values must be committed before they can be used. +ROLLBACK TO x; +SELECT enum_first(null::bogus); -- safe + enum_first +------------ + good +(1 row) + +SELECT enum_last(null::bogus); -- unsafe +ERROR: unsafe use of new value "new" of enum type bogus +HINT: New enum values must be committed before they can be used. +ROLLBACK TO x; +SELECT enum_range(null::bogus); -- unsafe +ERROR: unsafe use of new value "new" of enum type bogus +HINT: New enum values must be committed before they can be used. +ROLLBACK TO x; +COMMIT; +SELECT 'new'::bogus; -- now safe + bogus +------- + new +(1 row) + +SELECT enumlabel, enumsortorder +FROM pg_enum +WHERE enumtypid = 'bogus'::regtype +ORDER BY 2; + enumlabel | enumsortorder +-----------+--------------- + good | 1 + new | 2 +(2 rows) + +-- check that we recognize the case where the enum already existed but was +-- modified in the current txn; this should not be considered safe +BEGIN; +ALTER TYPE bogus RENAME TO bogon; +ALTER TYPE bogon ADD VALUE 'bad'; +SELECT 'bad'::bogon; +ERROR: unsafe use of new value "bad" of enum type bogon +LINE 1: SELECT 'bad'::bogon; + ^ +HINT: New enum values must be committed before they can be used. +ROLLBACK; +-- but a renamed value is safe to use later in same transaction +BEGIN; +ALTER TYPE bogus RENAME VALUE 'good' to 'bad'; +SELECT 'bad'::bogus; + bogus +------- + bad +(1 row) + +ROLLBACK; +DROP TYPE bogus; +-- check that values created during CREATE TYPE can be used in any case +BEGIN; +CREATE TYPE bogus AS ENUM('good','bad','ugly'); +ALTER TYPE bogus RENAME TO bogon; +select enum_range(null::bogon); + enum_range +----------------- + {good,bad,ugly} +(1 row) + +ROLLBACK; +-- ideally, we'd allow this usage; but it requires keeping track of whether +-- the enum type was created in the current transaction, which is expensive +BEGIN; +CREATE TYPE bogus AS ENUM('good'); +ALTER TYPE bogus RENAME TO bogon; +ALTER TYPE bogon ADD VALUE 'bad'; +ALTER TYPE bogon ADD VALUE 'ugly'; +select enum_range(null::bogon); -- fails +ERROR: unsafe use of new value "bad" of enum type bogon +HINT: New enum values must be committed before they can be used. 
+ROLLBACK; +-- +-- Cleanup +-- +DROP TABLE enumtest_child; +DROP TABLE enumtest_parent; +DROP TABLE enumtest; +DROP TYPE rainbow; +-- +-- Verify properly cleaned up +-- +SELECT COUNT(*) FROM pg_type WHERE typname = 'rainbow'; + count +------- + 0 +(1 row) + +SELECT * FROM pg_enum WHERE NOT EXISTS + (SELECT 1 FROM pg_type WHERE pg_type.oid = enumtypid); + oid | enumtypid | enumsortorder | enumlabel +-----+-----------+---------------+----------- +(0 rows) + diff --git a/src/test/regress/expected/equivclass.out b/src/test/regress/expected/equivclass.out new file mode 100644 index 0000000..126f704 --- /dev/null +++ b/src/test/regress/expected/equivclass.out @@ -0,0 +1,453 @@ +-- +-- Tests for the planner's "equivalence class" mechanism +-- +-- One thing that's not tested well during normal querying is the logic +-- for handling "broken" ECs. This is because an EC can only become broken +-- if its underlying btree operator family doesn't include a complete set +-- of cross-type equality operators. There are not (and should not be) +-- any such families built into Postgres; so we have to hack things up +-- to create one. We do this by making two alias types that are really +-- int8 (so we need no new C code) and adding only some operators for them +-- into the standard integer_ops opfamily. +create type int8alias1; +create function int8alias1in(cstring) returns int8alias1 + strict immutable language internal as 'int8in'; +NOTICE: return type int8alias1 is only a shell +create function int8alias1out(int8alias1) returns cstring + strict immutable language internal as 'int8out'; +NOTICE: argument type int8alias1 is only a shell +create type int8alias1 ( + input = int8alias1in, + output = int8alias1out, + like = int8 +); +create type int8alias2; +create function int8alias2in(cstring) returns int8alias2 + strict immutable language internal as 'int8in'; +NOTICE: return type int8alias2 is only a shell +create function int8alias2out(int8alias2) returns cstring + strict immutable language internal as 'int8out'; +NOTICE: argument type int8alias2 is only a shell +create type int8alias2 ( + input = int8alias2in, + output = int8alias2out, + like = int8 +); +create cast (int8 as int8alias1) without function; +create cast (int8 as int8alias2) without function; +create cast (int8alias1 as int8) without function; +create cast (int8alias2 as int8) without function; +create function int8alias1eq(int8alias1, int8alias1) returns bool + strict immutable language internal as 'int8eq'; +create operator = ( + procedure = int8alias1eq, + leftarg = int8alias1, rightarg = int8alias1, + commutator = =, + restrict = eqsel, join = eqjoinsel, + merges +); +alter operator family integer_ops using btree add + operator 3 = (int8alias1, int8alias1); +create function int8alias2eq(int8alias2, int8alias2) returns bool + strict immutable language internal as 'int8eq'; +create operator = ( + procedure = int8alias2eq, + leftarg = int8alias2, rightarg = int8alias2, + commutator = =, + restrict = eqsel, join = eqjoinsel, + merges +); +alter operator family integer_ops using btree add + operator 3 = (int8alias2, int8alias2); +create function int8alias1eq(int8, int8alias1) returns bool + strict immutable language internal as 'int8eq'; +create operator = ( + procedure = int8alias1eq, + leftarg = int8, rightarg = int8alias1, + restrict = eqsel, join = eqjoinsel, + merges +); +alter operator family integer_ops using btree add + operator 3 = (int8, int8alias1); +create function int8alias1eq(int8alias1, int8alias2) returns bool + strict 
immutable language internal as 'int8eq'; +create operator = ( + procedure = int8alias1eq, + leftarg = int8alias1, rightarg = int8alias2, + restrict = eqsel, join = eqjoinsel, + merges +); +alter operator family integer_ops using btree add + operator 3 = (int8alias1, int8alias2); +create function int8alias1lt(int8alias1, int8alias1) returns bool + strict immutable language internal as 'int8lt'; +create operator < ( + procedure = int8alias1lt, + leftarg = int8alias1, rightarg = int8alias1 +); +alter operator family integer_ops using btree add + operator 1 < (int8alias1, int8alias1); +create function int8alias1cmp(int8, int8alias1) returns int + strict immutable language internal as 'btint8cmp'; +alter operator family integer_ops using btree add + function 1 int8alias1cmp (int8, int8alias1); +create table ec0 (ff int8 primary key, f1 int8, f2 int8); +create table ec1 (ff int8 primary key, f1 int8alias1, f2 int8alias2); +create table ec2 (xf int8 primary key, x1 int8alias1, x2 int8alias2); +-- for the moment we only want to look at nestloop plans +set enable_hashjoin = off; +set enable_mergejoin = off; +-- +-- Note that for cases where there's a missing operator, we don't care so +-- much whether the plan is ideal as that we don't fail or generate an +-- outright incorrect plan. +-- +explain (costs off) + select * from ec0 where ff = f1 and f1 = '42'::int8; + QUERY PLAN +----------------------------------- + Index Scan using ec0_pkey on ec0 + Index Cond: (ff = '42'::bigint) + Filter: (f1 = '42'::bigint) +(3 rows) + +explain (costs off) + select * from ec0 where ff = f1 and f1 = '42'::int8alias1; + QUERY PLAN +--------------------------------------- + Index Scan using ec0_pkey on ec0 + Index Cond: (ff = '42'::int8alias1) + Filter: (f1 = '42'::int8alias1) +(3 rows) + +explain (costs off) + select * from ec1 where ff = f1 and f1 = '42'::int8alias1; + QUERY PLAN +--------------------------------------- + Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::int8alias1) + Filter: (f1 = '42'::int8alias1) +(3 rows) + +explain (costs off) + select * from ec1 where ff = f1 and f1 = '42'::int8alias2; + QUERY PLAN +--------------------------------------------------- + Seq Scan on ec1 + Filter: ((ff = f1) AND (f1 = '42'::int8alias2)) +(2 rows) + +explain (costs off) + select * from ec1, ec2 where ff = x1 and ff = '42'::int8; + QUERY PLAN +------------------------------------------------------------------- + Nested Loop + Join Filter: (ec1.ff = ec2.x1) + -> Index Scan using ec1_pkey on ec1 + Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint)) + -> Seq Scan on ec2 +(5 rows) + +explain (costs off) + select * from ec1, ec2 where ff = x1 and ff = '42'::int8alias1; + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::int8alias1) + -> Seq Scan on ec2 + Filter: (x1 = '42'::int8alias1) +(5 rows) + +explain (costs off) + select * from ec1, ec2 where ff = x1 and '42'::int8 = x1; + QUERY PLAN +----------------------------------------- + Nested Loop + Join Filter: (ec1.ff = ec2.x1) + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::bigint) + -> Seq Scan on ec2 + Filter: ('42'::bigint = x1) +(6 rows) + +explain (costs off) + select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias1; + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::int8alias1) + -> Seq Scan on ec2 + Filter: (x1 = '42'::int8alias1) +(5 rows) + +explain 
(costs off) + select * from ec1, ec2 where ff = x1 and x1 = '42'::int8alias2; + QUERY PLAN +----------------------------------------- + Nested Loop + -> Seq Scan on ec2 + Filter: (x1 = '42'::int8alias2) + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = ec2.x1) +(5 rows) + +create unique index ec1_expr1 on ec1((ff + 1)); +create unique index ec1_expr2 on ec1((ff + 2 + 1)); +create unique index ec1_expr3 on ec1((ff + 3 + 1)); +create unique index ec1_expr4 on ec1((ff + 4)); +explain (costs off) + select * from ec1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss1 + where ss1.x = ec1.f1 and ec1.ff = 42::int8; + QUERY PLAN +----------------------------------------------------- + Nested Loop + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::bigint) + -> Append + -> Index Scan using ec1_expr2 on ec1 ec1_1 + Index Cond: (((ff + 2) + 1) = ec1.f1) + -> Index Scan using ec1_expr3 on ec1 ec1_2 + Index Cond: (((ff + 3) + 1) = ec1.f1) + -> Index Scan using ec1_expr4 on ec1 ec1_3 + Index Cond: ((ff + 4) = ec1.f1) +(10 rows) + +explain (costs off) + select * from ec1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss1 + where ss1.x = ec1.f1 and ec1.ff = 42::int8 and ec1.ff = ec1.f1; + QUERY PLAN +------------------------------------------------------------------- + Nested Loop + Join Filter: ((((ec1_1.ff + 2) + 1)) = ec1.f1) + -> Index Scan using ec1_pkey on ec1 + Index Cond: ((ff = '42'::bigint) AND (ff = '42'::bigint)) + Filter: (ff = f1) + -> Append + -> Index Scan using ec1_expr2 on ec1 ec1_1 + Index Cond: (((ff + 2) + 1) = '42'::bigint) + -> Index Scan using ec1_expr3 on ec1 ec1_2 + Index Cond: (((ff + 3) + 1) = '42'::bigint) + -> Index Scan using ec1_expr4 on ec1 ec1_3 + Index Cond: ((ff + 4) = '42'::bigint) +(12 rows) + +explain (costs off) + select * from ec1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss2 + where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8; + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop + -> Nested Loop + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::bigint) + -> Append + -> Index Scan using ec1_expr2 on ec1 ec1_1 + Index Cond: (((ff + 2) + 1) = ec1.f1) + -> Index Scan using ec1_expr3 on ec1 ec1_2 + Index Cond: (((ff + 3) + 1) = ec1.f1) + -> Index Scan using ec1_expr4 on ec1 ec1_3 + Index Cond: ((ff + 4) = ec1.f1) + -> Append + -> Index Scan using ec1_expr2 on ec1 ec1_4 + Index Cond: (((ff + 2) + 1) = (((ec1_1.ff + 2) + 1))) + -> Index Scan using ec1_expr3 on ec1 ec1_5 + Index Cond: (((ff + 3) + 1) = (((ec1_1.ff + 2) + 1))) + -> Index Scan using ec1_expr4 on ec1 ec1_6 + Index Cond: ((ff + 4) = (((ec1_1.ff + 2) + 1))) +(18 rows) + +-- let's try that as a mergejoin +set enable_mergejoin = on; +set enable_nestloop = off; +explain (costs off) + select * from ec1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff 
from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss2 + where ss1.x = ec1.f1 and ss1.x = ss2.x and ec1.ff = 42::int8; + QUERY PLAN +----------------------------------------------------------------- + Merge Join + Merge Cond: ((((ec1_4.ff + 2) + 1)) = (((ec1_1.ff + 2) + 1))) + -> Merge Append + Sort Key: (((ec1_4.ff + 2) + 1)) + -> Index Scan using ec1_expr2 on ec1 ec1_4 + -> Index Scan using ec1_expr3 on ec1 ec1_5 + -> Index Scan using ec1_expr4 on ec1 ec1_6 + -> Materialize + -> Merge Join + Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1) + -> Merge Append + Sort Key: (((ec1_1.ff + 2) + 1)) + -> Index Scan using ec1_expr2 on ec1 ec1_1 + -> Index Scan using ec1_expr3 on ec1 ec1_2 + -> Index Scan using ec1_expr4 on ec1 ec1_3 + -> Sort + Sort Key: ec1.f1 USING < + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::bigint) +(19 rows) + +-- check partially indexed scan +set enable_nestloop = on; +set enable_mergejoin = off; +drop index ec1_expr3; +explain (costs off) + select * from ec1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss1 + where ss1.x = ec1.f1 and ec1.ff = 42::int8; + QUERY PLAN +----------------------------------------------------- + Nested Loop + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::bigint) + -> Append + -> Index Scan using ec1_expr2 on ec1 ec1_1 + Index Cond: (((ff + 2) + 1) = ec1.f1) + -> Seq Scan on ec1 ec1_2 + Filter: (((ff + 3) + 1) = ec1.f1) + -> Index Scan using ec1_expr4 on ec1 ec1_3 + Index Cond: ((ff + 4) = ec1.f1) +(10 rows) + +-- let's try that as a mergejoin +set enable_mergejoin = on; +set enable_nestloop = off; +explain (costs off) + select * from ec1, + (select ff + 1 as x from + (select ff + 2 as ff from ec1 + union all + select ff + 3 as ff from ec1) ss0 + union all + select ff + 4 as x from ec1) as ss1 + where ss1.x = ec1.f1 and ec1.ff = 42::int8; + QUERY PLAN +----------------------------------------------------- + Merge Join + Merge Cond: ((((ec1_1.ff + 2) + 1)) = ec1.f1) + -> Merge Append + Sort Key: (((ec1_1.ff + 2) + 1)) + -> Index Scan using ec1_expr2 on ec1 ec1_1 + -> Sort + Sort Key: (((ec1_2.ff + 3) + 1)) + -> Seq Scan on ec1 ec1_2 + -> Index Scan using ec1_expr4 on ec1 ec1_3 + -> Sort + Sort Key: ec1.f1 USING < + -> Index Scan using ec1_pkey on ec1 + Index Cond: (ff = '42'::bigint) +(13 rows) + +-- check effects of row-level security +set enable_nestloop = on; +set enable_mergejoin = off; +alter table ec1 enable row level security; +create policy p1 on ec1 using (f1 < '5'::int8alias1); +create user regress_user_ectest; +grant select on ec0 to regress_user_ectest; +grant select on ec1 to regress_user_ectest; +-- without any RLS, we'll treat {a.ff, b.ff, 43} as an EquivalenceClass +explain (costs off) + select * from ec0 a, ec1 b + where a.ff = b.ff and a.ff = 43::bigint::int8alias1; + QUERY PLAN +--------------------------------------------- + Nested Loop + -> Index Scan using ec0_pkey on ec0 a + Index Cond: (ff = '43'::int8alias1) + -> Index Scan using ec1_pkey on ec1 b + Index Cond: (ff = '43'::int8alias1) +(5 rows) + +set session authorization regress_user_ectest; +-- with RLS active, the non-leakproof a.ff = 43 clause is not treated +-- as a suitable source for an EquivalenceClass; currently, this is true +-- even though the RLS clause has nothing to do directly with the EC +explain (costs off) + select * from ec0 a, ec1 b + where a.ff = b.ff and a.ff = 43::bigint::int8alias1; + QUERY PLAN 
+--------------------------------------------- + Nested Loop + -> Index Scan using ec0_pkey on ec0 a + Index Cond: (ff = '43'::int8alias1) + -> Index Scan using ec1_pkey on ec1 b + Index Cond: (ff = a.ff) + Filter: (f1 < '5'::int8alias1) +(6 rows) + +reset session authorization; +revoke select on ec0 from regress_user_ectest; +revoke select on ec1 from regress_user_ectest; +drop user regress_user_ectest; +-- check that X=X is converted to X IS NOT NULL when appropriate +explain (costs off) + select * from tenk1 where unique1 = unique1 and unique2 = unique2; + QUERY PLAN +------------------------------------------------------------- + Seq Scan on tenk1 + Filter: ((unique1 IS NOT NULL) AND (unique2 IS NOT NULL)) +(2 rows) + +-- this could be converted, but isn't at present +explain (costs off) + select * from tenk1 where unique1 = unique1 or unique2 = unique2; + QUERY PLAN +-------------------------------------------------------- + Seq Scan on tenk1 + Filter: ((unique1 = unique1) OR (unique2 = unique2)) +(2 rows) + +-- check that we recognize equivalence with dummy domains in the way +create temp table undername (f1 name, f2 int); +create temp view overview as + select f1::information_schema.sql_identifier as sqli, f2 from undername; +explain (costs off) -- this should not require a sort + select * from overview where sqli = 'foo' order by sqli; + QUERY PLAN +------------------------------ + Seq Scan on undername + Filter: (f1 = 'foo'::name) +(2 rows) + diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out new file mode 100644 index 0000000..8c52747 --- /dev/null +++ b/src/test/regress/expected/errors.out @@ -0,0 +1,447 @@ +-- +-- ERRORS +-- +-- bad in postquel, but ok in PostgreSQL +select 1; + ?column? +---------- + 1 +(1 row) + +-- +-- UNSUPPORTED STUFF +-- doesn't work +-- notify pg_class +-- +-- +-- SELECT +-- this used to be a syntax error, but now we allow an empty target list +select; +-- +(1 row) + +-- no such relation +select * from nonesuch; +ERROR: relation "nonesuch" does not exist +LINE 1: select * from nonesuch; + ^ +-- bad name in target list +select nonesuch from pg_database; +ERROR: column "nonesuch" does not exist +LINE 1: select nonesuch from pg_database; + ^ +-- empty distinct list isn't OK +select distinct from pg_database; +ERROR: syntax error at or near "from" +LINE 1: select distinct from pg_database; + ^ +-- bad attribute name on lhs of operator +select * from pg_database where nonesuch = pg_database.datname; +ERROR: column "nonesuch" does not exist +LINE 1: select * from pg_database where nonesuch = pg_database.datna... + ^ +-- bad attribute name on rhs of operator +select * from pg_database where pg_database.datname = nonesuch; +ERROR: column "nonesuch" does not exist +LINE 1: ...ect * from pg_database where pg_database.datname = nonesuch; + ^ +-- bad attribute name in select distinct on +select distinct on (foobar) * from pg_database; +ERROR: column "foobar" does not exist +LINE 1: select distinct on (foobar) * from pg_database; + ^ +-- grouping with FOR UPDATE +select null from pg_database group by datname for update; +ERROR: FOR UPDATE is not allowed with GROUP BY clause +select null from pg_database group by grouping sets (()) for update; +ERROR: FOR UPDATE is not allowed with GROUP BY clause +-- +-- DELETE +-- missing relation name (this had better not wildcard!) 
+delete from; +ERROR: syntax error at or near ";" +LINE 1: delete from; + ^ +-- no such relation +delete from nonesuch; +ERROR: relation "nonesuch" does not exist +LINE 1: delete from nonesuch; + ^ +-- +-- DROP +-- missing relation name (this had better not wildcard!) +drop table; +ERROR: syntax error at or near ";" +LINE 1: drop table; + ^ +-- no such relation +drop table nonesuch; +ERROR: table "nonesuch" does not exist +-- +-- ALTER TABLE +-- relation renaming +-- missing relation name +alter table rename; +ERROR: syntax error at or near ";" +LINE 1: alter table rename; + ^ +-- no such relation +alter table nonesuch rename to newnonesuch; +ERROR: relation "nonesuch" does not exist +-- no such relation +alter table nonesuch rename to stud_emp; +ERROR: relation "nonesuch" does not exist +-- conflict +alter table stud_emp rename to student; +ERROR: relation "student" already exists +-- self-conflict +alter table stud_emp rename to stud_emp; +ERROR: relation "stud_emp" already exists +-- attribute renaming +-- no such relation +alter table nonesuchrel rename column nonesuchatt to newnonesuchatt; +ERROR: relation "nonesuchrel" does not exist +-- no such attribute +alter table emp rename column nonesuchatt to newnonesuchatt; +ERROR: column "nonesuchatt" does not exist +-- conflict +alter table emp rename column salary to manager; +ERROR: column "manager" of relation "stud_emp" already exists +-- conflict +alter table emp rename column salary to ctid; +ERROR: column name "ctid" conflicts with a system column name +-- +-- TRANSACTION STUFF +-- not in a xact +abort; +WARNING: there is no transaction in progress +-- not in a xact +end; +WARNING: there is no transaction in progress +-- +-- CREATE AGGREGATE +-- sfunc/finalfunc type disagreement +create aggregate newavg2 (sfunc = int4pl, + basetype = int4, + stype = int4, + finalfunc = int2um, + initcond = '0'); +ERROR: function int2um(integer) does not exist +-- left out basetype +create aggregate newcnt1 (sfunc = int4inc, + stype = int4, + initcond = '0'); +ERROR: aggregate input type must be specified +-- +-- DROP INDEX +-- missing index name +drop index; +ERROR: syntax error at or near ";" +LINE 1: drop index; + ^ +-- bad index name +drop index 314159; +ERROR: syntax error at or near "314159" +LINE 1: drop index 314159; + ^ +-- no such index +drop index nonesuch; +ERROR: index "nonesuch" does not exist +-- +-- DROP AGGREGATE +-- missing aggregate name +drop aggregate; +ERROR: syntax error at or near ";" +LINE 1: drop aggregate; + ^ +-- missing aggregate type +drop aggregate newcnt1; +ERROR: syntax error at or near ";" +LINE 1: drop aggregate newcnt1; + ^ +-- bad aggregate name +drop aggregate 314159 (int); +ERROR: syntax error at or near "314159" +LINE 1: drop aggregate 314159 (int); + ^ +-- bad aggregate type +drop aggregate newcnt (nonesuch); +ERROR: type "nonesuch" does not exist +-- no such aggregate +drop aggregate nonesuch (int4); +ERROR: aggregate nonesuch(integer) does not exist +-- no such aggregate for type +drop aggregate newcnt (float4); +ERROR: aggregate newcnt(real) does not exist +-- +-- DROP FUNCTION +-- missing function name +drop function (); +ERROR: syntax error at or near "(" +LINE 1: drop function (); + ^ +-- bad function name +drop function 314159(); +ERROR: syntax error at or near "314159" +LINE 1: drop function 314159(); + ^ +-- no such function +drop function nonesuch(); +ERROR: function nonesuch() does not exist +-- +-- DROP TYPE +-- missing type name +drop type; +ERROR: syntax error at or near ";" +LINE 1: drop type; + 
^ +-- bad type name +drop type 314159; +ERROR: syntax error at or near "314159" +LINE 1: drop type 314159; + ^ +-- no such type +drop type nonesuch; +ERROR: type "nonesuch" does not exist +-- +-- DROP OPERATOR +-- missing everything +drop operator; +ERROR: syntax error at or near ";" +LINE 1: drop operator; + ^ +-- bad operator name +drop operator equals; +ERROR: syntax error at or near ";" +LINE 1: drop operator equals; + ^ +-- missing type list +drop operator ===; +ERROR: syntax error at or near ";" +LINE 1: drop operator ===; + ^ +-- missing parentheses +drop operator int4, int4; +ERROR: syntax error at or near "," +LINE 1: drop operator int4, int4; + ^ +-- missing operator name +drop operator (int4, int4); +ERROR: syntax error at or near "(" +LINE 1: drop operator (int4, int4); + ^ +-- missing type list contents +drop operator === (); +ERROR: syntax error at or near ")" +LINE 1: drop operator === (); + ^ +-- no such operator +drop operator === (int4); +ERROR: missing argument +LINE 1: drop operator === (int4); + ^ +HINT: Use NONE to denote the missing argument of a unary operator. +-- no such operator by that name +drop operator === (int4, int4); +ERROR: operator does not exist: integer === integer +-- no such type1 +drop operator = (nonesuch); +ERROR: missing argument +LINE 1: drop operator = (nonesuch); + ^ +HINT: Use NONE to denote the missing argument of a unary operator. +-- no such type1 +drop operator = ( , int4); +ERROR: syntax error at or near "," +LINE 1: drop operator = ( , int4); + ^ +-- no such type1 +drop operator = (nonesuch, int4); +ERROR: type "nonesuch" does not exist +-- no such type2 +drop operator = (int4, nonesuch); +ERROR: type "nonesuch" does not exist +-- no such type2 +drop operator = (int4, ); +ERROR: syntax error at or near ")" +LINE 1: drop operator = (int4, ); + ^ +-- +-- DROP RULE +-- missing rule name +drop rule; +ERROR: syntax error at or near ";" +LINE 1: drop rule; + ^ +-- bad rule name +drop rule 314159; +ERROR: syntax error at or near "314159" +LINE 1: drop rule 314159; + ^ +-- no such rule +drop rule nonesuch on noplace; +ERROR: relation "noplace" does not exist +-- these postquel variants are no longer supported +drop tuple rule nonesuch; +ERROR: syntax error at or near "tuple" +LINE 1: drop tuple rule nonesuch; + ^ +drop instance rule nonesuch on noplace; +ERROR: syntax error at or near "instance" +LINE 1: drop instance rule nonesuch on noplace; + ^ +drop rewrite rule nonesuch; +ERROR: syntax error at or near "rewrite" +LINE 1: drop rewrite rule nonesuch; + ^ +-- +-- Check that division-by-zero is properly caught. 
+-- +select 1/0; +ERROR: division by zero +select 1::int8/0; +ERROR: division by zero +select 1/0::int8; +ERROR: division by zero +select 1::int2/0; +ERROR: division by zero +select 1/0::int2; +ERROR: division by zero +select 1::numeric/0; +ERROR: division by zero +select 1/0::numeric; +ERROR: division by zero +select 1::float8/0; +ERROR: division by zero +select 1/0::float8; +ERROR: division by zero +select 1::float4/0; +ERROR: division by zero +select 1/0::float4; +ERROR: division by zero +-- +-- Test psql's reporting of syntax error location +-- +xxx; +ERROR: syntax error at or near "xxx" +LINE 1: xxx; + ^ +CREATE foo; +ERROR: syntax error at or near "foo" +LINE 1: CREATE foo; + ^ +CREATE TABLE ; +ERROR: syntax error at or near ";" +LINE 1: CREATE TABLE ; + ^ +CREATE TABLE +\g +ERROR: syntax error at end of input +LINE 1: CREATE TABLE + ^ +INSERT INTO foo VALUES(123) foo; +ERROR: syntax error at or near "foo" +LINE 1: INSERT INTO foo VALUES(123) foo; + ^ +INSERT INTO 123 +VALUES(123); +ERROR: syntax error at or near "123" +LINE 1: INSERT INTO 123 + ^ +INSERT INTO foo +VALUES(123) 123 +; +ERROR: syntax error at or near "123" +LINE 2: VALUES(123) 123 + ^ +-- with a tab +CREATE TABLE foo + (id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, + id3 INTEGER NOT NUL, + id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); +ERROR: syntax error at or near "NUL" +LINE 3: id3 INTEGER NOT NUL, + ^ +-- long line to be truncated on the left +CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); +ERROR: syntax error at or near "NUL" +LINE 1: ...OT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ^ +-- long line to be truncated on the right +CREATE TABLE foo( +id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY); +ERROR: syntax error at or near "NUL" +LINE 2: id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQ... + ^ +-- long line to be truncated both ways +CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); +ERROR: syntax error at or near "NUL" +LINE 1: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... + ^ +-- long line to be truncated on the left, many lines +CREATE +TEMPORARY +TABLE +foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +id4 INT4 +UNIQUE +NOT +NULL, +id5 TEXT +UNIQUE +NOT +NULL) +; +ERROR: syntax error at or near "NUL" +LINE 4: ...OT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ^ +-- long line to be truncated on the right, many lines +CREATE +TEMPORARY +TABLE +foo( +id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) +; +ERROR: syntax error at or near "NUL" +LINE 5: id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQ... + ^ +-- long line to be truncated both ways, many lines +CREATE +TEMPORARY +TABLE +foo +(id +INT4 +UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, +idv INT4 UNIQUE NOT NULL); +ERROR: syntax error at or near "NUL" +LINE 7: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... + ^ +-- more than 10 lines... 
+CREATE +TEMPORARY +TABLE +foo +(id +INT4 +UNIQUE +NOT +NULL +, +idm +INT4 +UNIQUE +NOT +NULL, +idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, +idv +INT4 +UNIQUE +NOT +NULL); +ERROR: syntax error at or near "NUL" +LINE 16: ...L, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 I... + ^ diff --git a/src/test/regress/expected/event_trigger.out b/src/test/regress/expected/event_trigger.out new file mode 100644 index 0000000..5a10958 --- /dev/null +++ b/src/test/regress/expected/event_trigger.out @@ -0,0 +1,616 @@ +-- should fail, return type mismatch +create event trigger regress_event_trigger + on ddl_command_start + execute procedure pg_backend_pid(); +ERROR: function pg_backend_pid must return type event_trigger +-- OK +create function test_event_trigger() returns event_trigger as $$ +BEGIN + RAISE NOTICE 'test_event_trigger: % %', tg_event, tg_tag; +END +$$ language plpgsql; +-- should fail, can't call it as a plain function +SELECT test_event_trigger(); +ERROR: trigger functions can only be called as triggers +CONTEXT: compilation of PL/pgSQL function "test_event_trigger" near line 1 +-- should fail, event triggers cannot have declared arguments +create function test_event_trigger_arg(name text) +returns event_trigger as $$ BEGIN RETURN 1; END $$ language plpgsql; +ERROR: event trigger functions cannot have declared arguments +CONTEXT: compilation of PL/pgSQL function "test_event_trigger_arg" near line 1 +-- should fail, SQL functions cannot be event triggers +create function test_event_trigger_sql() returns event_trigger as $$ +SELECT 1 $$ language sql; +ERROR: SQL functions cannot return type event_trigger +-- should fail, no elephant_bootstrap entry point +create event trigger regress_event_trigger on elephant_bootstrap + execute procedure test_event_trigger(); +ERROR: unrecognized event name "elephant_bootstrap" +-- OK +create event trigger regress_event_trigger on ddl_command_start + execute procedure test_event_trigger(); +-- OK +create event trigger regress_event_trigger_end on ddl_command_end + execute function test_event_trigger(); +-- should fail, food is not a valid filter variable +create event trigger regress_event_trigger2 on ddl_command_start + when food in ('sandwich') + execute procedure test_event_trigger(); +ERROR: unrecognized filter variable "food" +-- should fail, sandwich is not a valid command tag +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('sandwich') + execute procedure test_event_trigger(); +ERROR: filter value "sandwich" not recognized for filter variable "tag" +-- should fail, create skunkcabbage is not a valid command tag +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('create table', 'create skunkcabbage') + execute procedure test_event_trigger(); +ERROR: filter value "create skunkcabbage" not recognized for filter variable "tag" +-- should fail, can't have event triggers on event triggers +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('DROP EVENT TRIGGER') + execute procedure test_event_trigger(); +ERROR: event triggers are not supported for DROP EVENT TRIGGER +-- should fail, can't have event triggers on global objects +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('CREATE ROLE') + execute procedure test_event_trigger(); +ERROR: event triggers are not supported for CREATE ROLE 
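(Editorial aside, hedged: the failure cases just above and just below all probe the CREATE EVENT TRIGGER syntax — the event name, the filter variable, and the command tags must each be valid, and globally visible objects such as roles, databases, and tablespaces are excluded. For contrast, a minimal well-formed definition looks roughly like the sketch below; it is not part of the expected output, and the names log_ddl and log_ddl_trigger are illustrative only and do not appear in the regression suite.)

create function log_ddl() returns event_trigger
language plpgsql as $$
begin
  -- tg_event is the firing event (e.g. ddl_command_end),
  -- tg_tag is the command tag (e.g. CREATE TABLE)
  raise notice 'ddl: % %', tg_event, tg_tag;
end;
$$;

create event trigger log_ddl_trigger on ddl_command_end
  when tag in ('CREATE TABLE', 'DROP TABLE')
  execute function log_ddl();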
+-- should fail, can't have event triggers on global objects +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('CREATE DATABASE') + execute procedure test_event_trigger(); +ERROR: event triggers are not supported for CREATE DATABASE +-- should fail, can't have event triggers on global objects +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('CREATE TABLESPACE') + execute procedure test_event_trigger(); +ERROR: event triggers are not supported for CREATE TABLESPACE +-- should fail, can't have same filter variable twice +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('create table') and tag in ('CREATE FUNCTION') + execute procedure test_event_trigger(); +ERROR: filter variable "tag" specified more than once +-- should fail, can't have arguments +create event trigger regress_event_trigger2 on ddl_command_start + execute procedure test_event_trigger('argument not allowed'); +ERROR: syntax error at or near "'argument not allowed'" +LINE 2: execute procedure test_event_trigger('argument not allowe... + ^ +-- OK +create event trigger regress_event_trigger2 on ddl_command_start + when tag in ('create table', 'CREATE FUNCTION') + execute procedure test_event_trigger(); +-- OK +comment on event trigger regress_event_trigger is 'test comment'; +-- drop as non-superuser should fail +create role regress_evt_user; +set role regress_evt_user; +create event trigger regress_event_trigger_noperms on ddl_command_start + execute procedure test_event_trigger(); +ERROR: permission denied to create event trigger "regress_event_trigger_noperms" +HINT: Must be superuser to create an event trigger. +reset role; +-- test enabling and disabling +alter event trigger regress_event_trigger disable; +-- fires _trigger2 and _trigger_end should fire, but not _trigger +create table event_trigger_fire1 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE +alter event trigger regress_event_trigger enable; +set session_replication_role = replica; +-- fires nothing +create table event_trigger_fire2 (a int); +alter event trigger regress_event_trigger enable replica; +-- fires only _trigger +create table event_trigger_fire3 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +alter event trigger regress_event_trigger enable always; +-- fires only _trigger +create table event_trigger_fire4 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +reset session_replication_role; +-- fires all three +create table event_trigger_fire5 (a int); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE +-- non-top-level command +create function f1() returns int +language plpgsql +as $$ +begin + create table event_trigger_fire6 (a int); + return 0; +end $$; +NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION +NOTICE: test_event_trigger: ddl_command_start CREATE FUNCTION +NOTICE: test_event_trigger: ddl_command_end CREATE FUNCTION +select f1(); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE + f1 +---- + 0 +(1 row) + +-- non-top-level command +create procedure p1() +language plpgsql +as $$ +begin + create table event_trigger_fire7 (a int); +end $$; +NOTICE: test_event_trigger: 
ddl_command_start CREATE PROCEDURE +NOTICE: test_event_trigger: ddl_command_end CREATE PROCEDURE +call p1(); +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_start CREATE TABLE +NOTICE: test_event_trigger: ddl_command_end CREATE TABLE +-- clean up +alter event trigger regress_event_trigger disable; +drop table event_trigger_fire2, event_trigger_fire3, event_trigger_fire4, event_trigger_fire5, event_trigger_fire6, event_trigger_fire7; +NOTICE: test_event_trigger: ddl_command_end DROP TABLE +drop routine f1(), p1(); +NOTICE: test_event_trigger: ddl_command_end DROP ROUTINE +-- regress_event_trigger_end should fire on these commands +grant all on table event_trigger_fire1 to public; +NOTICE: test_event_trigger: ddl_command_end GRANT +comment on table event_trigger_fire1 is 'here is a comment'; +NOTICE: test_event_trigger: ddl_command_end COMMENT +revoke all on table event_trigger_fire1 from public; +NOTICE: test_event_trigger: ddl_command_end REVOKE +drop table event_trigger_fire1; +NOTICE: test_event_trigger: ddl_command_end DROP TABLE +create foreign data wrapper useless; +NOTICE: test_event_trigger: ddl_command_end CREATE FOREIGN DATA WRAPPER +create server useless_server foreign data wrapper useless; +NOTICE: test_event_trigger: ddl_command_end CREATE SERVER +create user mapping for regress_evt_user server useless_server; +NOTICE: test_event_trigger: ddl_command_end CREATE USER MAPPING +alter default privileges for role regress_evt_user + revoke delete on tables from regress_evt_user; +NOTICE: test_event_trigger: ddl_command_end ALTER DEFAULT PRIVILEGES +-- alter owner to non-superuser should fail +alter event trigger regress_event_trigger owner to regress_evt_user; +ERROR: permission denied to change owner of event trigger "regress_event_trigger" +HINT: The owner of an event trigger must be a superuser. 
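(Editorial aside, hedged: the enable/disable modes and the ownership rule exercised above are all recorded in the pg_event_trigger catalog. The query below is an illustrative way to inspect that state; it is not part of the expected output and its result shape will vary with the triggers present.)

select evtname, evtevent, evtenabled, evttags,
       pg_get_userbyid(evtowner) as owner
from pg_event_trigger
order by evtname;
-- evtenabled is 'O' (fires on origin), 'D' (disabled), 'R' (replica only),
-- or 'A' (always), matching the ALTER EVENT TRIGGER ... ENABLE/DISABLE forms above.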
+-- alter owner to superuser should work +alter role regress_evt_user superuser; +alter event trigger regress_event_trigger owner to regress_evt_user; +-- should fail, name collision +alter event trigger regress_event_trigger rename to regress_event_trigger2; +ERROR: event trigger "regress_event_trigger2" already exists +-- OK +alter event trigger regress_event_trigger rename to regress_event_trigger3; +-- should fail, doesn't exist any more +drop event trigger regress_event_trigger; +ERROR: event trigger "regress_event_trigger" does not exist +-- should fail, regress_evt_user owns some objects +drop role regress_evt_user; +ERROR: role "regress_evt_user" cannot be dropped because some objects depend on it +DETAIL: owner of event trigger regress_event_trigger3 +owner of user mapping for regress_evt_user on server useless_server +owner of default privileges on new relations belonging to role regress_evt_user +-- cleanup before next test +-- these are all OK; the second one should emit a NOTICE +drop event trigger if exists regress_event_trigger2; +drop event trigger if exists regress_event_trigger2; +NOTICE: event trigger "regress_event_trigger2" does not exist, skipping +drop event trigger regress_event_trigger3; +drop event trigger regress_event_trigger_end; +-- test support for dropped objects +CREATE SCHEMA schema_one authorization regress_evt_user; +CREATE SCHEMA schema_two authorization regress_evt_user; +CREATE SCHEMA audit_tbls authorization regress_evt_user; +CREATE TEMP TABLE a_temp_tbl (); +SET SESSION AUTHORIZATION regress_evt_user; +CREATE TABLE schema_one.table_one(a int); +CREATE TABLE schema_one."table two"(a int); +CREATE TABLE schema_one.table_three(a int); +CREATE TABLE audit_tbls.schema_one_table_two(the_value text); +CREATE TABLE schema_two.table_two(a int); +CREATE TABLE schema_two.table_three(a int, b text); +CREATE TABLE audit_tbls.schema_two_table_three(the_value text); +CREATE OR REPLACE FUNCTION schema_two.add(int, int) RETURNS int LANGUAGE plpgsql + CALLED ON NULL INPUT + AS $$ BEGIN RETURN coalesce($1,0) + coalesce($2,0); END; $$; +CREATE AGGREGATE schema_two.newton + (BASETYPE = int, SFUNC = schema_two.add, STYPE = int); +RESET SESSION AUTHORIZATION; +CREATE TABLE undroppable_objs ( + object_type text, + object_identity text +); +INSERT INTO undroppable_objs VALUES +('table', 'schema_one.table_three'), +('table', 'audit_tbls.schema_two_table_three'); +CREATE TABLE dropped_objects ( + type text, + schema text, + object text +); +-- This tests errors raised within event triggers; the one in audit_tbls +-- uses 2nd-level recursive invocation via test_evtrig_dropped_objects(). 
+CREATE OR REPLACE FUNCTION undroppable() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +DECLARE + obj record; +BEGIN + PERFORM 1 FROM pg_tables WHERE tablename = 'undroppable_objs'; + IF NOT FOUND THEN + RAISE NOTICE 'table undroppable_objs not found, skipping'; + RETURN; + END IF; + FOR obj IN + SELECT * FROM pg_event_trigger_dropped_objects() JOIN + undroppable_objs USING (object_type, object_identity) + LOOP + RAISE EXCEPTION 'object % of type % cannot be dropped', + obj.object_identity, obj.object_type; + END LOOP; +END; +$$; +CREATE EVENT TRIGGER undroppable ON sql_drop + EXECUTE PROCEDURE undroppable(); +CREATE OR REPLACE FUNCTION test_evtrig_dropped_objects() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +DECLARE + obj record; +BEGIN + FOR obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + IF obj.object_type = 'table' THEN + EXECUTE format('DROP TABLE IF EXISTS audit_tbls.%I', + format('%s_%s', obj.schema_name, obj.object_name)); + END IF; + + INSERT INTO dropped_objects + (type, schema, object) VALUES + (obj.object_type, obj.schema_name, obj.object_identity); + END LOOP; +END +$$; +CREATE EVENT TRIGGER regress_event_trigger_drop_objects ON sql_drop + WHEN TAG IN ('drop table', 'drop function', 'drop view', + 'drop owned', 'drop schema', 'alter table') + EXECUTE PROCEDURE test_evtrig_dropped_objects(); +ALTER TABLE schema_one.table_one DROP COLUMN a; +DROP SCHEMA schema_one, schema_two CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table schema_two.table_two +drop cascades to table schema_two.table_three +drop cascades to function schema_two.add(integer,integer) +drop cascades to function schema_two.newton(integer) +drop cascades to table schema_one.table_one +drop cascades to table schema_one."table two" +drop cascades to table schema_one.table_three +NOTICE: table "schema_two_table_two" does not exist, skipping +NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping +ERROR: object audit_tbls.schema_two_table_three of type table cannot be dropped +CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE +SQL statement "DROP TABLE IF EXISTS audit_tbls.schema_two_table_three" +PL/pgSQL function test_evtrig_dropped_objects() line 8 at EXECUTE +DELETE FROM undroppable_objs WHERE object_identity = 'audit_tbls.schema_two_table_three'; +DROP SCHEMA schema_one, schema_two CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table schema_two.table_two +drop cascades to table schema_two.table_three +drop cascades to function schema_two.add(integer,integer) +drop cascades to function schema_two.newton(integer) +drop cascades to table schema_one.table_one +drop cascades to table schema_one."table two" +drop cascades to table schema_one.table_three +NOTICE: table "schema_two_table_two" does not exist, skipping +NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping +NOTICE: table "schema_one_table_one" does not exist, skipping +NOTICE: table "schema_one_table two" does not exist, skipping +NOTICE: table "schema_one_table_three" does not exist, skipping +ERROR: object schema_one.table_three of type table cannot be dropped +CONTEXT: PL/pgSQL function undroppable() line 14 at RAISE +DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three'; +DROP SCHEMA schema_one, schema_two CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table schema_two.table_two +drop cascades to table schema_two.table_three +drop cascades to function 
schema_two.add(integer,integer) +drop cascades to function schema_two.newton(integer) +drop cascades to table schema_one.table_one +drop cascades to table schema_one."table two" +drop cascades to table schema_one.table_three +NOTICE: table "schema_two_table_two" does not exist, skipping +NOTICE: table "audit_tbls_schema_two_table_three" does not exist, skipping +NOTICE: table "schema_one_table_one" does not exist, skipping +NOTICE: table "schema_one_table two" does not exist, skipping +NOTICE: table "schema_one_table_three" does not exist, skipping +SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast'; + type | schema | object +--------------+------------+------------------------------------- + table column | schema_one | schema_one.table_one.a + schema | | schema_two + table | schema_two | schema_two.table_two + type | schema_two | schema_two.table_two + type | schema_two | schema_two.table_two[] + table | audit_tbls | audit_tbls.schema_two_table_three + type | audit_tbls | audit_tbls.schema_two_table_three + type | audit_tbls | audit_tbls.schema_two_table_three[] + table | schema_two | schema_two.table_three + type | schema_two | schema_two.table_three + type | schema_two | schema_two.table_three[] + function | schema_two | schema_two.add(integer,integer) + aggregate | schema_two | schema_two.newton(integer) + schema | | schema_one + table | schema_one | schema_one.table_one + type | schema_one | schema_one.table_one + type | schema_one | schema_one.table_one[] + table | schema_one | schema_one."table two" + type | schema_one | schema_one."table two" + type | schema_one | schema_one."table two"[] + table | schema_one | schema_one.table_three + type | schema_one | schema_one.table_three + type | schema_one | schema_one.table_three[] +(23 rows) + +DROP OWNED BY regress_evt_user; +NOTICE: schema "audit_tbls" does not exist, skipping +SELECT * FROM dropped_objects WHERE type = 'schema'; + type | schema | object +--------+--------+------------ + schema | | schema_two + schema | | schema_one + schema | | audit_tbls +(3 rows) + +DROP ROLE regress_evt_user; +DROP EVENT TRIGGER regress_event_trigger_drop_objects; +DROP EVENT TRIGGER undroppable; +-- Event triggers on relations. 
+CREATE OR REPLACE FUNCTION event_trigger_report_dropped() + RETURNS event_trigger + LANGUAGE plpgsql +AS $$ +DECLARE r record; +BEGIN + FOR r IN SELECT * from pg_event_trigger_dropped_objects() + LOOP + IF NOT r.normal AND NOT r.original THEN + CONTINUE; + END IF; + RAISE NOTICE 'NORMAL: orig=% normal=% istemp=% type=% identity=% name=% args=%', + r.original, r.normal, r.is_temporary, r.object_type, + r.object_identity, r.address_names, r.address_args; + END LOOP; +END; $$; +CREATE EVENT TRIGGER regress_event_trigger_report_dropped ON sql_drop + EXECUTE PROCEDURE event_trigger_report_dropped(); +CREATE OR REPLACE FUNCTION event_trigger_report_end() + RETURNS event_trigger + LANGUAGE plpgsql +AS $$ +DECLARE r RECORD; +BEGIN + FOR r IN SELECT * FROM pg_event_trigger_ddl_commands() + LOOP + RAISE NOTICE 'END: command_tag=% type=% identity=%', + r.command_tag, r.object_type, r.object_identity; + END LOOP; +END; $$; +CREATE EVENT TRIGGER regress_event_trigger_report_end ON ddl_command_end + EXECUTE PROCEDURE event_trigger_report_end(); +CREATE SCHEMA evttrig + CREATE TABLE one (col_a SERIAL PRIMARY KEY, col_b text DEFAULT 'forty two', col_c SERIAL) + CREATE INDEX one_idx ON one (col_b) + CREATE TABLE two (col_c INTEGER CHECK (col_c > 0) REFERENCES one DEFAULT 42) + CREATE TABLE id (col_d int NOT NULL GENERATED ALWAYS AS IDENTITY); +NOTICE: END: command_tag=CREATE SCHEMA type=schema identity=evttrig +NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_a_seq +NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.one_col_c_seq +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.one +NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_pkey +NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_a_seq +NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.one_col_c_seq +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.two +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two +NOTICE: END: command_tag=CREATE SEQUENCE type=sequence identity=evttrig.id_col_d_seq +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.id +NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq +NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.one_idx +-- Partitioned tables with a partitioned index +CREATE TABLE evttrig.parted ( + id int PRIMARY KEY) + PARTITION BY RANGE (id); +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.parted +NOTICE: END: command_tag=CREATE INDEX type=index identity=evttrig.parted_pkey +CREATE TABLE evttrig.part_1_10 PARTITION OF evttrig.parted (id) + FOR VALUES FROM (1) TO (10); +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_1_10 +CREATE TABLE evttrig.part_10_20 PARTITION OF evttrig.parted (id) + FOR VALUES FROM (10) TO (20) PARTITION BY RANGE (id); +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_20 +CREATE TABLE evttrig.part_10_15 PARTITION OF evttrig.part_10_20 (id) + FOR VALUES FROM (10) TO (15); +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_10_15 +CREATE TABLE evttrig.part_15_20 PARTITION OF evttrig.part_10_20 (id) + FOR VALUES FROM (15) TO (20); +NOTICE: END: command_tag=CREATE TABLE type=table identity=evttrig.part_15_20 +ALTER TABLE evttrig.two DROP COLUMN col_c; +NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.two.col_c name={evttrig,two,col_c} args={} +NOTICE: 
NORMAL: orig=f normal=t istemp=f type=table constraint identity=two_col_c_check on evttrig.two name={evttrig,two,two_col_c_check} args={} +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.two +ALTER TABLE evttrig.one ALTER COLUMN col_b DROP DEFAULT; +NOTICE: NORMAL: orig=t normal=f istemp=f type=default value identity=for evttrig.one.col_b name={evttrig,one,col_b} args={} +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one +ALTER TABLE evttrig.one DROP CONSTRAINT one_pkey; +NOTICE: NORMAL: orig=t normal=f istemp=f type=table constraint identity=one_pkey on evttrig.one name={evttrig,one,one_pkey} args={} +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one +ALTER TABLE evttrig.one DROP COLUMN col_c; +NOTICE: NORMAL: orig=t normal=f istemp=f type=table column identity=evttrig.one.col_c name={evttrig,one,col_c} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_c name={evttrig,one,col_c} args={} +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.one +ALTER TABLE evttrig.id ALTER COLUMN col_d SET DATA TYPE bigint; +NOTICE: END: command_tag=ALTER SEQUENCE type=sequence identity=evttrig.id_col_d_seq +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id +ALTER TABLE evttrig.id ALTER COLUMN col_d DROP IDENTITY, + ALTER COLUMN col_d SET DATA TYPE int; +NOTICE: END: command_tag=ALTER TABLE type=table identity=evttrig.id +DROP INDEX evttrig.one_idx; +NOTICE: NORMAL: orig=t normal=f istemp=f type=index identity=evttrig.one_idx name={evttrig,one_idx} args={} +DROP SCHEMA evttrig CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table evttrig.one +drop cascades to table evttrig.two +drop cascades to table evttrig.id +drop cascades to table evttrig.parted +NOTICE: NORMAL: orig=t normal=f istemp=f type=schema identity=evttrig name={evttrig} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.one name={evttrig,one} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=sequence identity=evttrig.one_col_a_seq name={evttrig,one_col_a_seq} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=default value identity=for evttrig.one.col_a name={evttrig,one,col_a} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.two name={evttrig,two} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.id name={evttrig,id} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.parted name={evttrig,parted} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_1_10 name={evttrig,part_1_10} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_20 name={evttrig,part_10_20} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_10_15 name={evttrig,part_10_15} args={} +NOTICE: NORMAL: orig=f normal=t istemp=f type=table identity=evttrig.part_15_20 name={evttrig,part_15_20} args={} +DROP TABLE a_temp_tbl; +NOTICE: NORMAL: orig=t normal=f istemp=t type=table identity=pg_temp.a_temp_tbl name={pg_temp,a_temp_tbl} args={} +-- CREATE OPERATOR CLASS without FAMILY clause should report +-- both CREATE OPERATOR FAMILY and CREATE OPERATOR CLASS +CREATE OPERATOR CLASS evttrigopclass FOR TYPE int USING btree AS STORAGE int; +NOTICE: END: command_tag=CREATE OPERATOR FAMILY type=operator family identity=public.evttrigopclass USING btree +NOTICE: END: command_tag=CREATE OPERATOR CLASS type=operator class 
identity=public.evttrigopclass USING btree +DROP EVENT TRIGGER regress_event_trigger_report_dropped; +DROP EVENT TRIGGER regress_event_trigger_report_end; +-- only allowed from within an event trigger function, should fail +select pg_event_trigger_table_rewrite_oid(); +ERROR: pg_event_trigger_table_rewrite_oid() can only be called in a table_rewrite event trigger function +-- test Table Rewrite Event Trigger +CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +BEGIN + RAISE EXCEPTION 'rewrites not allowed'; +END; +$$; +create event trigger no_rewrite_allowed on table_rewrite + execute procedure test_evtrig_no_rewrite(); +create table rewriteme (id serial primary key, foo float, bar timestamptz); +insert into rewriteme + select x * 1.001 from generate_series(1, 500) as t(x); +alter table rewriteme alter column foo type numeric; +ERROR: rewrites not allowed +CONTEXT: PL/pgSQL function test_evtrig_no_rewrite() line 3 at RAISE +alter table rewriteme add column baz int default 0; +-- test with more than one reason to rewrite a single table +CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Table ''%'' is being rewritten (reason = %)', + pg_event_trigger_table_rewrite_oid()::regclass, + pg_event_trigger_table_rewrite_reason(); +END; +$$; +alter table rewriteme + add column onemore int default 0, + add column another int default -1, + alter column foo type numeric(10,4); +NOTICE: Table 'rewriteme' is being rewritten (reason = 4) +-- matview rewrite when changing access method +CREATE MATERIALIZED VIEW heapmv USING heap AS SELECT 1 AS a; +ALTER MATERIALIZED VIEW heapmv SET ACCESS METHOD heap2; +NOTICE: Table 'heapmv' is being rewritten (reason = 8) +DROP MATERIALIZED VIEW heapmv; +-- shouldn't trigger a table_rewrite event +alter table rewriteme alter column foo type numeric(12,4); +begin; +set timezone to 'UTC'; +alter table rewriteme alter column bar type timestamp; +set timezone to '0'; +alter table rewriteme alter column bar type timestamptz; +set timezone to 'Europe/London'; +alter table rewriteme alter column bar type timestamp; -- does rewrite +NOTICE: Table 'rewriteme' is being rewritten (reason = 4) +rollback; +-- typed tables are rewritten when their type changes. Don't emit table +-- name, because firing order is not stable. 
+CREATE OR REPLACE FUNCTION test_evtrig_no_rewrite() RETURNS event_trigger +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'Table is being rewritten (reason = %)', + pg_event_trigger_table_rewrite_reason(); +END; +$$; +create type rewritetype as (a int); +create table rewritemetoo1 of rewritetype; +create table rewritemetoo2 of rewritetype; +alter type rewritetype alter attribute a type text cascade; +NOTICE: Table is being rewritten (reason = 4) +NOTICE: Table is being rewritten (reason = 4) +-- but this doesn't work +create table rewritemetoo3 (a rewritetype); +alter type rewritetype alter attribute a type varchar cascade; +ERROR: cannot alter type "rewritetype" because column "rewritemetoo3.a" uses it +drop table rewriteme; +drop event trigger no_rewrite_allowed; +drop function test_evtrig_no_rewrite(); +-- test Row Security Event Trigger +RESET SESSION AUTHORIZATION; +CREATE TABLE event_trigger_test (a integer, b text); +CREATE OR REPLACE FUNCTION start_command() +RETURNS event_trigger AS $$ +BEGIN +RAISE NOTICE '% - ddl_command_start', tg_tag; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION end_command() +RETURNS event_trigger AS $$ +BEGIN +RAISE NOTICE '% - ddl_command_end', tg_tag; +END; +$$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION drop_sql_command() +RETURNS event_trigger AS $$ +BEGIN +RAISE NOTICE '% - sql_drop', tg_tag; +END; +$$ LANGUAGE plpgsql; +CREATE EVENT TRIGGER start_rls_command ON ddl_command_start + WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE start_command(); +CREATE EVENT TRIGGER end_rls_command ON ddl_command_end + WHEN TAG IN ('CREATE POLICY', 'ALTER POLICY', 'DROP POLICY') EXECUTE PROCEDURE end_command(); +CREATE EVENT TRIGGER sql_drop_command ON sql_drop + WHEN TAG IN ('DROP POLICY') EXECUTE PROCEDURE drop_sql_command(); +CREATE POLICY p1 ON event_trigger_test USING (FALSE); +NOTICE: CREATE POLICY - ddl_command_start +NOTICE: CREATE POLICY - ddl_command_end +ALTER POLICY p1 ON event_trigger_test USING (TRUE); +NOTICE: ALTER POLICY - ddl_command_start +NOTICE: ALTER POLICY - ddl_command_end +ALTER POLICY p1 ON event_trigger_test RENAME TO p2; +NOTICE: ALTER POLICY - ddl_command_start +NOTICE: ALTER POLICY - ddl_command_end +DROP POLICY p2 ON event_trigger_test; +NOTICE: DROP POLICY - ddl_command_start +NOTICE: DROP POLICY - sql_drop +NOTICE: DROP POLICY - ddl_command_end +-- Check the object addresses of all the event triggers. 
+SELECT + e.evtname, + pg_describe_object('pg_event_trigger'::regclass, e.oid, 0) as descr, + b.type, b.object_names, b.object_args, + pg_identify_object(a.classid, a.objid, a.objsubid) as ident + FROM pg_event_trigger as e, + LATERAL pg_identify_object_as_address('pg_event_trigger'::regclass, e.oid, 0) as b, + LATERAL pg_get_object_address(b.type, b.object_names, b.object_args) as a + ORDER BY e.evtname; + evtname | descr | type | object_names | object_args | ident +-------------------+---------------------------------+---------------+---------------------+-------------+-------------------------------------------------------- + end_rls_command | event trigger end_rls_command | event trigger | {end_rls_command} | {} | ("event trigger",,end_rls_command,end_rls_command) + sql_drop_command | event trigger sql_drop_command | event trigger | {sql_drop_command} | {} | ("event trigger",,sql_drop_command,sql_drop_command) + start_rls_command | event trigger start_rls_command | event trigger | {start_rls_command} | {} | ("event trigger",,start_rls_command,start_rls_command) +(3 rows) + +DROP EVENT TRIGGER start_rls_command; +DROP EVENT TRIGGER end_rls_command; +DROP EVENT TRIGGER sql_drop_command; diff --git a/src/test/regress/expected/explain.out b/src/test/regress/expected/explain.out new file mode 100644 index 0000000..1aca774 --- /dev/null +++ b/src/test/regress/expected/explain.out @@ -0,0 +1,561 @@ +-- +-- EXPLAIN +-- +-- There are many test cases elsewhere that use EXPLAIN as a vehicle for +-- checking something else (usually planner behavior). This file is +-- concerned with testing EXPLAIN in its own right. +-- +-- To produce stable regression test output, it's usually necessary to +-- ignore details such as exact costs or row counts. These filter +-- functions replace changeable output details with fixed strings. +create function explain_filter(text) returns setof text +language plpgsql as +$$ +declare + ln text; +begin + for ln in execute $1 + loop + -- Replace any numeric word with just 'N' + ln := regexp_replace(ln, '-?\m\d+\M', 'N', 'g'); + -- In sort output, the above won't match units-suffixed numbers + ln := regexp_replace(ln, '\m\d+kB', 'NkB', 'g'); + -- Ignore text-mode buffers output because it varies depending + -- on the system state + CONTINUE WHEN (ln ~ ' +Buffers: .*'); + -- Ignore text-mode "Planning:" line because whether it's output + -- varies depending on the system state + CONTINUE WHEN (ln = 'Planning:'); + return next ln; + end loop; +end; +$$; +-- To produce valid JSON output, replace numbers with "0" or "0.0" not "N" +create function explain_filter_to_json(text) returns jsonb +language plpgsql as +$$ +declare + data text := ''; + ln text; +begin + for ln in execute $1 + loop + -- Replace any numeric word with just '0' + ln := regexp_replace(ln, '\m\d+\M', '0', 'g'); + data := data || ln; + end loop; + return data::jsonb; +end; +$$; +-- Disable JIT, or we'll get different output on machines where that's been +-- forced on +set jit = off; +-- Similarly, disable track_io_timing, to avoid output differences when +-- enabled. 
+set track_io_timing = off; +-- Simple cases +select explain_filter('explain select * from int8_tbl i8'); + explain_filter +--------------------------------------------------------- + Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) +(1 row) + +select explain_filter('explain (analyze) select * from int8_tbl i8'); + explain_filter +----------------------------------------------------------------------------------------------- + Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) + Planning Time: N.N ms + Execution Time: N.N ms +(3 rows) + +select explain_filter('explain (analyze, verbose) select * from int8_tbl i8'); + explain_filter +------------------------------------------------------------------------------------------------------ + Seq Scan on public.int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) + Output: q1, q2 + Planning Time: N.N ms + Execution Time: N.N ms +(4 rows) + +select explain_filter('explain (analyze, buffers, format text) select * from int8_tbl i8'); + explain_filter +----------------------------------------------------------------------------------------------- + Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) (actual time=N.N..N.N rows=N loops=N) + Planning Time: N.N ms + Execution Time: N.N ms +(3 rows) + +select explain_filter('explain (analyze, buffers, format xml) select * from int8_tbl i8'); + explain_filter +-------------------------------------------------------- + + + + + + + Seq Scan + + false + + false + + int8_tbl + + i8 + + N.N + + N.N + + N + + N + + N.N + + N.N + + N + + N + + N + + N + + N+ + N+ + N + + N + + N + + N + + N + + N + + + + + + N + + N + + N+ + N+ + N + + N + + N + + N + + N + + N + + + + N.N + + + + + + N.N + + + + +(1 row) + +select explain_filter('explain (analyze, buffers, format yaml) select * from int8_tbl i8'); + explain_filter +------------------------------- + - Plan: + + Node Type: "Seq Scan" + + Parallel Aware: false + + Async Capable: false + + Relation Name: "int8_tbl"+ + Alias: "i8" + + Startup Cost: N.N + + Total Cost: N.N + + Plan Rows: N + + Plan Width: N + + Actual Startup Time: N.N + + Actual Total Time: N.N + + Actual Rows: N + + Actual Loops: N + + Shared Hit Blocks: N + + Shared Read Blocks: N + + Shared Dirtied Blocks: N + + Shared Written Blocks: N + + Local Hit Blocks: N + + Local Read Blocks: N + + Local Dirtied Blocks: N + + Local Written Blocks: N + + Temp Read Blocks: N + + Temp Written Blocks: N + + Planning: + + Shared Hit Blocks: N + + Shared Read Blocks: N + + Shared Dirtied Blocks: N + + Shared Written Blocks: N + + Local Hit Blocks: N + + Local Read Blocks: N + + Local Dirtied Blocks: N + + Local Written Blocks: N + + Temp Read Blocks: N + + Temp Written Blocks: N + + Planning Time: N.N + + Triggers: + + Execution Time: N.N +(1 row) + +select explain_filter('explain (buffers, format text) select * from int8_tbl i8'); + explain_filter +--------------------------------------------------------- + Seq Scan on int8_tbl i8 (cost=N.N..N.N rows=N width=N) +(1 row) + +select explain_filter('explain (buffers, format json) select * from int8_tbl i8'); + explain_filter +------------------------------------ + [ + + { + + "Plan": { + + "Node Type": "Seq Scan", + + "Parallel Aware": false, + + "Async Capable": false, + + "Relation Name": "int8_tbl",+ + "Alias": "i8", + + "Startup Cost": N.N, + + "Total Cost": N.N, + + "Plan Rows": N, + + "Plan Width": N, + + "Shared Hit Blocks": N, + + "Shared Read Blocks": N, + + "Shared Dirtied 
Blocks": N, + + "Shared Written Blocks": N, + + "Local Hit Blocks": N, + + "Local Read Blocks": N, + + "Local Dirtied Blocks": N, + + "Local Written Blocks": N, + + "Temp Read Blocks": N, + + "Temp Written Blocks": N + + }, + + "Planning": { + + "Shared Hit Blocks": N, + + "Shared Read Blocks": N, + + "Shared Dirtied Blocks": N, + + "Shared Written Blocks": N, + + "Local Hit Blocks": N, + + "Local Read Blocks": N, + + "Local Dirtied Blocks": N, + + "Local Written Blocks": N, + + "Temp Read Blocks": N, + + "Temp Written Blocks": N + + } + + } + + ] +(1 row) + +-- Check output including I/O timings. These fields are conditional +-- but always set in JSON format, so check them only in this case. +set track_io_timing = on; +select explain_filter('explain (analyze, buffers, format json) select * from int8_tbl i8'); + explain_filter +------------------------------------ + [ + + { + + "Plan": { + + "Node Type": "Seq Scan", + + "Parallel Aware": false, + + "Async Capable": false, + + "Relation Name": "int8_tbl",+ + "Alias": "i8", + + "Startup Cost": N.N, + + "Total Cost": N.N, + + "Plan Rows": N, + + "Plan Width": N, + + "Actual Startup Time": N.N, + + "Actual Total Time": N.N, + + "Actual Rows": N, + + "Actual Loops": N, + + "Shared Hit Blocks": N, + + "Shared Read Blocks": N, + + "Shared Dirtied Blocks": N, + + "Shared Written Blocks": N, + + "Local Hit Blocks": N, + + "Local Read Blocks": N, + + "Local Dirtied Blocks": N, + + "Local Written Blocks": N, + + "Temp Read Blocks": N, + + "Temp Written Blocks": N, + + "I/O Read Time": N.N, + + "I/O Write Time": N.N, + + "Temp I/O Read Time": N.N, + + "Temp I/O Write Time": N.N + + }, + + "Planning": { + + "Shared Hit Blocks": N, + + "Shared Read Blocks": N, + + "Shared Dirtied Blocks": N, + + "Shared Written Blocks": N, + + "Local Hit Blocks": N, + + "Local Read Blocks": N, + + "Local Dirtied Blocks": N, + + "Local Written Blocks": N, + + "Temp Read Blocks": N, + + "Temp Written Blocks": N, + + "I/O Read Time": N.N, + + "I/O Write Time": N.N, + + "Temp I/O Read Time": N.N, + + "Temp I/O Write Time": N.N + + }, + + "Planning Time": N.N, + + "Triggers": [ + + ], + + "Execution Time": N.N + + } + + ] +(1 row) + +set track_io_timing = off; +-- SETTINGS option +-- We have to ignore other settings that might be imposed by the environment, +-- so printing the whole Settings field unfortunately won't do. +begin; +set local plan_cache_mode = force_generic_plan; +select true as "OK" + from explain_filter('explain (settings) select * from int8_tbl i8') ln + where ln ~ '^ *Settings: .*plan_cache_mode = ''force_generic_plan'''; + OK +---- + t +(1 row) + +select explain_filter_to_json('explain (settings, format json) select * from int8_tbl i8') #> '{0,Settings,plan_cache_mode}'; + ?column? 
+---------------------- + "force_generic_plan" +(1 row) + +rollback; +-- GENERIC_PLAN option +select explain_filter('explain (generic_plan) select unique1 from tenk1 where thousand = $1'); + explain_filter +--------------------------------------------------------------------------------- + Bitmap Heap Scan on tenk1 (cost=N.N..N.N rows=N width=N) + Recheck Cond: (thousand = $N) + -> Bitmap Index Scan on tenk1_thous_tenthous (cost=N.N..N.N rows=N width=N) + Index Cond: (thousand = $N) +(4 rows) + +-- should fail +select explain_filter('explain (analyze, generic_plan) select unique1 from tenk1 where thousand = $1'); +ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together +CONTEXT: PL/pgSQL function explain_filter(text) line 5 at FOR over EXECUTE statement +-- Test EXPLAIN (GENERIC_PLAN) with partition pruning +-- partitions should be pruned at plan time, based on constants, +-- but there should be no pruning based on parameter placeholders +create table gen_part ( + key1 integer not null, + key2 integer not null +) partition by list (key1); +create table gen_part_1 + partition of gen_part for values in (1) + partition by range (key2); +create table gen_part_1_1 + partition of gen_part_1 for values from (1) to (2); +create table gen_part_1_2 + partition of gen_part_1 for values from (2) to (3); +create table gen_part_2 + partition of gen_part for values in (2); +-- should scan gen_part_1_1 and gen_part_1_2, but not gen_part_2 +select explain_filter('explain (generic_plan) select key1, key2 from gen_part where key1 = 1 and key2 = $1'); + explain_filter +--------------------------------------------------------------------------- + Append (cost=N.N..N.N rows=N width=N) + -> Seq Scan on gen_part_1_1 gen_part_1 (cost=N.N..N.N rows=N width=N) + Filter: ((key1 = N) AND (key2 = $N)) + -> Seq Scan on gen_part_1_2 gen_part_2 (cost=N.N..N.N rows=N width=N) + Filter: ((key1 = N) AND (key2 = $N)) +(5 rows) + +drop table gen_part; +-- +-- Test production of per-worker data +-- +-- Unfortunately, because we don't know how many worker processes we'll +-- actually get (maybe none at all), we can't examine the "Workers" output +-- in any detail. We can check that it parses correctly as JSON, and then +-- remove it from the displayed results. 
+begin; +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; +select jsonb_pretty( + explain_filter_to_json('explain (analyze, verbose, buffers, format json) + select * from tenk1 order by tenthous') + -- remove "Workers" node of the Seq Scan plan node + #- '{0,Plan,Plans,0,Plans,0,Workers}' + -- remove "Workers" node of the Sort plan node + #- '{0,Plan,Plans,0,Workers}' + -- Also remove its sort-type fields, as those aren't 100% stable + #- '{0,Plan,Plans,0,Sort Method}' + #- '{0,Plan,Plans,0,Sort Space Type}' +); + jsonb_pretty +------------------------------------------------------------- + [ + + { + + "Plan": { + + "Plans": [ + + { + + "Plans": [ + + { + + "Alias": "tenk1", + + "Output": [ + + "unique1", + + "unique2", + + "two", + + "four", + + "ten", + + "twenty", + + "hundred", + + "thousand", + + "twothousand", + + "fivethous", + + "tenthous", + + "odd", + + "even", + + "stringu1", + + "stringu2", + + "string4" + + ], + + "Schema": "public", + + "Node Type": "Seq Scan", + + "Plan Rows": 0, + + "Plan Width": 0, + + "Total Cost": 0.0, + + "Actual Rows": 0, + + "Actual Loops": 0, + + "Startup Cost": 0.0, + + "Async Capable": false, + + "Relation Name": "tenk1", + + "Parallel Aware": true, + + "Local Hit Blocks": 0, + + "Temp Read Blocks": 0, + + "Actual Total Time": 0.0, + + "Local Read Blocks": 0, + + "Shared Hit Blocks": 0, + + "Shared Read Blocks": 0, + + "Actual Startup Time": 0.0, + + "Parent Relationship": "Outer",+ + "Temp Written Blocks": 0, + + "Local Dirtied Blocks": 0, + + "Local Written Blocks": 0, + + "Shared Dirtied Blocks": 0, + + "Shared Written Blocks": 0 + + } + + ], + + "Output": [ + + "unique1", + + "unique2", + + "two", + + "four", + + "ten", + + "twenty", + + "hundred", + + "thousand", + + "twothousand", + + "fivethous", + + "tenthous", + + "odd", + + "even", + + "stringu1", + + "stringu2", + + "string4" + + ], + + "Sort Key": [ + + "tenk1.tenthous" + + ], + + "Node Type": "Sort", + + "Plan Rows": 0, + + "Plan Width": 0, + + "Total Cost": 0.0, + + "Actual Rows": 0, + + "Actual Loops": 0, + + "Startup Cost": 0.0, + + "Async Capable": false, + + "Parallel Aware": false, + + "Sort Space Used": 0, + + "Local Hit Blocks": 0, + + "Temp Read Blocks": 0, + + "Actual Total Time": 0.0, + + "Local Read Blocks": 0, + + "Shared Hit Blocks": 0, + + "Shared Read Blocks": 0, + + "Actual Startup Time": 0.0, + + "Parent Relationship": "Outer", + + "Temp Written Blocks": 0, + + "Local Dirtied Blocks": 0, + + "Local Written Blocks": 0, + + "Shared Dirtied Blocks": 0, + + "Shared Written Blocks": 0 + + } + + ], + + "Output": [ + + "unique1", + + "unique2", + + "two", + + "four", + + "ten", + + "twenty", + + "hundred", + + "thousand", + + "twothousand", + + "fivethous", + + "tenthous", + + "odd", + + "even", + + "stringu1", + + "stringu2", + + "string4" + + ], + + "Node Type": "Gather Merge", + + "Plan Rows": 0, + + "Plan Width": 0, + + "Total Cost": 0.0, + + "Actual Rows": 0, + + "Actual Loops": 0, + + "Startup Cost": 0.0, + + "Async Capable": false, + + "Parallel Aware": false, + + "Workers Planned": 0, + + "Local Hit Blocks": 0, + + "Temp Read Blocks": 0, + + "Workers Launched": 0, + + "Actual Total Time": 0.0, + + "Local Read Blocks": 0, + + "Shared Hit Blocks": 0, + + "Shared Read Blocks": 0, + + "Actual Startup Time": 0.0, + + "Temp Written Blocks": 0, + + "Local Dirtied Blocks": 0, + + "Local Written Blocks": 0, + + "Shared Dirtied Blocks": 0, + + "Shared Written 
Blocks": 0 + + }, + + "Planning": { + + "Local Hit Blocks": 0, + + "Temp Read Blocks": 0, + + "Local Read Blocks": 0, + + "Shared Hit Blocks": 0, + + "Shared Read Blocks": 0, + + "Temp Written Blocks": 0, + + "Local Dirtied Blocks": 0, + + "Local Written Blocks": 0, + + "Shared Dirtied Blocks": 0, + + "Shared Written Blocks": 0 + + }, + + "Triggers": [ + + ], + + "Planning Time": 0.0, + + "Execution Time": 0.0 + + } + + ] +(1 row) + +rollback; +-- Test display of temporary objects +create temp table t1(f1 float8); +create function pg_temp.mysin(float8) returns float8 language plpgsql +as 'begin return sin($1); end'; +select explain_filter('explain (verbose) select * from t1 where pg_temp.mysin(f1) < 0.5'); + explain_filter +------------------------------------------------------------ + Seq Scan on pg_temp.t1 (cost=N.N..N.N rows=N width=N) + Output: f1 + Filter: (pg_temp.mysin(t1.f1) < 'N.N'::double precision) +(3 rows) + +-- Test compute_query_id +set compute_query_id = on; +select explain_filter('explain (verbose) select * from int8_tbl i8'); + explain_filter +---------------------------------------------------------------- + Seq Scan on public.int8_tbl i8 (cost=N.N..N.N rows=N width=N) + Output: q1, q2 + Query Identifier: N +(3 rows) + diff --git a/src/test/regress/expected/expressions.out b/src/test/regress/expected/expressions.out new file mode 100644 index 0000000..caeeb19 --- /dev/null +++ b/src/test/regress/expected/expressions.out @@ -0,0 +1,423 @@ +-- +-- expression evaluation tests that don't fit into a more specific file +-- +-- +-- Tests for SQLValueFunction +-- +-- current_date (always matches because of transactional behaviour) +SELECT date(now())::text = current_date::text; + ?column? +---------- + t +(1 row) + +-- current_time / localtime +SELECT now()::timetz::text = current_time::text; + ?column? +---------- + t +(1 row) + +SELECT now()::timetz(4)::text = current_time(4)::text; + ?column? +---------- + t +(1 row) + +SELECT now()::time::text = localtime::text; + ?column? +---------- + t +(1 row) + +SELECT now()::time(3)::text = localtime(3)::text; + ?column? +---------- + t +(1 row) + +-- current_time[stamp]/ localtime[stamp] (always matches because of transactional behaviour) +SELECT current_timestamp = NOW(); + ?column? +---------- + t +(1 row) + +-- precision +SELECT length(current_timestamp::text) >= length(current_timestamp(0)::text); + ?column? +---------- + t +(1 row) + +-- localtimestamp +SELECT now()::timestamp::text = localtimestamp::text; + ?column? +---------- + t +(1 row) + +-- precision overflow +SELECT current_time = current_time(7); +WARNING: TIME(7) WITH TIME ZONE precision reduced to maximum allowed, 6 + ?column? +---------- + t +(1 row) + +SELECT current_timestamp = current_timestamp(7); +WARNING: TIMESTAMP(7) WITH TIME ZONE precision reduced to maximum allowed, 6 + ?column? +---------- + t +(1 row) + +SELECT localtime = localtime(7); +WARNING: TIME(7) precision reduced to maximum allowed, 6 + ?column? +---------- + t +(1 row) + +SELECT localtimestamp = localtimestamp(7); +WARNING: TIMESTAMP(7) precision reduced to maximum allowed, 6 + ?column? +---------- + t +(1 row) + +-- current_role/user/user is tested in rolenames.sql +-- current database / catalog +SELECT current_catalog = current_database(); + ?column? 
+---------- + t +(1 row) + +-- current_schema +SELECT current_schema; + current_schema +---------------- + public +(1 row) + +SET search_path = 'notme'; +SELECT current_schema; + current_schema +---------------- + +(1 row) + +SET search_path = 'pg_catalog'; +SELECT current_schema; + current_schema +---------------- + pg_catalog +(1 row) + +RESET search_path; +-- +-- Test parsing of a no-op cast to a type with unspecified typmod +-- +begin; +create table numeric_tbl (f1 numeric(18,3), f2 numeric); +create view numeric_view as + select + f1, f1::numeric(16,4) as f1164, f1::numeric as f1n, + f2, f2::numeric(16,4) as f2164, f2::numeric as f2n + from numeric_tbl; +\d+ numeric_view + View "public.numeric_view" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------------+-----------+----------+---------+---------+------------- + f1 | numeric(18,3) | | | | main | + f1164 | numeric(16,4) | | | | main | + f1n | numeric | | | | main | + f2 | numeric | | | | main | + f2164 | numeric(16,4) | | | | main | + f2n | numeric | | | | main | +View definition: + SELECT f1, + f1::numeric(16,4) AS f1164, + f1::numeric AS f1n, + f2, + f2::numeric(16,4) AS f2164, + f2 AS f2n + FROM numeric_tbl; + +explain (verbose, costs off) select * from numeric_view; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Seq Scan on public.numeric_tbl + Output: numeric_tbl.f1, (numeric_tbl.f1)::numeric(16,4), (numeric_tbl.f1)::numeric, numeric_tbl.f2, (numeric_tbl.f2)::numeric(16,4), numeric_tbl.f2 +(2 rows) + +-- bpchar, lacking planner support for its length coercion function, +-- could behave differently +create table bpchar_tbl (f1 character(16) unique, f2 bpchar); +create view bpchar_view as + select + f1, f1::character(14) as f114, f1::bpchar as f1n, + f2, f2::character(14) as f214, f2::bpchar as f2n + from bpchar_tbl; +\d+ bpchar_view + View "public.bpchar_view" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------------+-----------+----------+---------+----------+------------- + f1 | character(16) | | | | extended | + f114 | character(14) | | | | extended | + f1n | bpchar | | | | extended | + f2 | bpchar | | | | extended | + f214 | character(14) | | | | extended | + f2n | bpchar | | | | extended | +View definition: + SELECT f1, + f1::character(14) AS f114, + f1::bpchar AS f1n, + f2, + f2::character(14) AS f214, + f2 AS f2n + FROM bpchar_tbl; + +explain (verbose, costs off) select * from bpchar_view + where f1::bpchar = 'foo'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------ + Index Scan using bpchar_tbl_f1_key on public.bpchar_tbl + Output: bpchar_tbl.f1, (bpchar_tbl.f1)::character(14), (bpchar_tbl.f1)::bpchar, bpchar_tbl.f2, (bpchar_tbl.f2)::character(14), bpchar_tbl.f2 + Index Cond: ((bpchar_tbl.f1)::bpchar = 'foo'::bpchar) +(3 rows) + +rollback; +-- +-- Ordinarily, IN/NOT IN can be converted to a ScalarArrayOpExpr +-- with a suitably-chosen array type. 
+-- +explain (verbose, costs off) +select random() IN (1, 4, 8.0); + QUERY PLAN +------------------------------------------------------------ + Result + Output: (random() = ANY ('{1,4,8}'::double precision[])) +(2 rows) + +explain (verbose, costs off) +select random()::int IN (1, 4, 8.0); + QUERY PLAN +--------------------------------------------------------------------------- + Result + Output: (((random())::integer)::numeric = ANY ('{1,4,8.0}'::numeric[])) +(2 rows) + +-- However, if there's not a common supertype for the IN elements, +-- we should instead try to produce "x = v1 OR x = v2 OR ...". +-- In most cases that'll fail for lack of all the requisite = operators, +-- but it can succeed sometimes. So this should complain about lack of +-- an = operator, not about cast failure. +select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0)); +ERROR: operator does not exist: point = box +LINE 1: select '(0,0)'::point in ('(0,0,0,0)'::box, point(0,0)); + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +-- +-- Tests for ScalarArrayOpExpr with a hashfn +-- +-- create a stable function so that the tests below are not +-- evaluated using the planner's constant folding. +begin; +create function return_int_input(int) returns int as $$ +begin + return $1; +end; +$$ language plpgsql stable; +create function return_text_input(text) returns text as $$ +begin + return $1; +end; +$$ language plpgsql stable; +select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1); + ?column? +---------- + t +(1 row) + +select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, null); + ?column? +---------- + +(1 row) + +select return_int_input(1) in (null, null, null, null, null, null, null, null, null, null, null); + ?column? +---------- + +(1 row) + +select return_int_input(1) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1, null); + ?column? +---------- + t +(1 row) + +select return_int_input(null::int) in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1); + ?column? +---------- + +(1 row) + +select return_int_input(null::int) in (10, 9, 2, 8, 3, 7, 4, 6, 5, null); + ?column? +---------- + +(1 row) + +select return_text_input('a') in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'); + ?column? +---------- + t +(1 row) + +-- NOT IN +select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1); + ?column? +---------- + f +(1 row) + +select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 0); + ?column? +---------- + t +(1 row) + +select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 2, null); + ?column? +---------- + +(1 row) + +select return_int_input(1) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1, null); + ?column? +---------- + f +(1 row) + +select return_int_input(1) not in (null, null, null, null, null, null, null, null, null, null, null); + ?column? +---------- + +(1 row) + +select return_int_input(null::int) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, 1); + ?column? +---------- + +(1 row) + +select return_int_input(null::int) not in (10, 9, 2, 8, 3, 7, 4, 6, 5, null); + ?column? +---------- + +(1 row) + +select return_text_input('a') not in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'); + ?column? +---------- + f +(1 row) + +rollback; +-- Test with non-strict equality function. +-- We need to create our own type for this. 
+begin; +create type myint; +create function myintin(cstring) returns myint strict immutable language + internal as 'int4in'; +NOTICE: return type myint is only a shell +create function myintout(myint) returns cstring strict immutable language + internal as 'int4out'; +NOTICE: argument type myint is only a shell +create function myinthash(myint) returns integer strict immutable language + internal as 'hashint4'; +NOTICE: argument type myint is only a shell +create type myint (input = myintin, output = myintout, like = int4); +create cast (int4 as myint) without function; +create cast (myint as int4) without function; +create function myinteq(myint, myint) returns bool as $$ +begin + if $1 is null and $2 is null then + return true; + else + return $1::int = $2::int; + end if; +end; +$$ language plpgsql immutable; +create function myintne(myint, myint) returns bool as $$ +begin + return not myinteq($1, $2); +end; +$$ language plpgsql immutable; +create operator = ( + leftarg = myint, + rightarg = myint, + commutator = =, + negator = <>, + procedure = myinteq, + restrict = eqsel, + join = eqjoinsel, + merges +); +create operator <> ( + leftarg = myint, + rightarg = myint, + commutator = <>, + negator = =, + procedure = myintne, + restrict = eqsel, + join = eqjoinsel, + merges +); +create operator class myint_ops +default for type myint using hash as + operator 1 = (myint, myint), + function 1 myinthash(myint); +create table inttest (a myint); +insert into inttest values(1::myint),(null); +-- try an array with enough elements to cause hashing +select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null); + a +--- + 1 + +(2 rows) + +select * from inttest where a not in (1::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null); + a +--- +(0 rows) + +select * from inttest where a not in (0::myint,2::myint,3::myint,4::myint,5::myint,6::myint,7::myint,8::myint,9::myint, null); + a +--- +(0 rows) + +-- ensure the result matched with the non-hashed version. We simply remove +-- some array elements so that we don't reach the hashing threshold. 
+select * from inttest where a in (1::myint,2::myint,3::myint,4::myint,5::myint, null); + a +--- + 1 + +(2 rows) + +select * from inttest where a not in (1::myint,2::myint,3::myint,4::myint,5::myint, null); + a +--- +(0 rows) + +select * from inttest where a not in (0::myint,2::myint,3::myint,4::myint,5::myint, null); + a +--- +(0 rows) + +rollback; diff --git a/src/test/regress/expected/fast_default.out b/src/test/regress/expected/fast_default.out new file mode 100644 index 0000000..59365da --- /dev/null +++ b/src/test/regress/expected/fast_default.out @@ -0,0 +1,861 @@ +-- +-- ALTER TABLE ADD COLUMN DEFAULT test +-- +SET search_path = fast_default; +CREATE SCHEMA fast_default; +CREATE TABLE m(id OID); +INSERT INTO m VALUES (NULL::OID); +CREATE FUNCTION set(tabname name) RETURNS VOID +AS $$ +BEGIN + UPDATE m + SET id = (SELECT c.relfilenode + FROM pg_class AS c, pg_namespace AS s + WHERE c.relname = tabname + AND c.relnamespace = s.oid + AND s.nspname = 'fast_default'); +END; +$$ LANGUAGE 'plpgsql'; +CREATE FUNCTION comp() RETURNS TEXT +AS $$ +BEGIN + RETURN (SELECT CASE + WHEN m.id = c.relfilenode THEN 'Unchanged' + ELSE 'Rewritten' + END + FROM m, pg_class AS c, pg_namespace AS s + WHERE c.relname = 't' + AND c.relnamespace = s.oid + AND s.nspname = 'fast_default'); +END; +$$ LANGUAGE 'plpgsql'; +CREATE FUNCTION log_rewrite() RETURNS event_trigger +LANGUAGE plpgsql as +$func$ + +declare + this_schema text; +begin + select into this_schema relnamespace::regnamespace::text + from pg_class + where oid = pg_event_trigger_table_rewrite_oid(); + if this_schema = 'fast_default' + then + RAISE NOTICE 'rewriting table % for reason %', + pg_event_trigger_table_rewrite_oid()::regclass, + pg_event_trigger_table_rewrite_reason(); + end if; +end; +$func$; +CREATE TABLE has_volatile AS +SELECT * FROM generate_series(1,10) id; +CREATE EVENT TRIGGER has_volatile_rewrite + ON table_rewrite + EXECUTE PROCEDURE log_rewrite(); +-- only the last of these should trigger a rewrite +ALTER TABLE has_volatile ADD col1 int; +ALTER TABLE has_volatile ADD col2 int DEFAULT 1; +ALTER TABLE has_volatile ADD col3 timestamptz DEFAULT current_timestamp; +ALTER TABLE has_volatile ADD col4 int DEFAULT (random() * 10000)::int; +NOTICE: rewriting table has_volatile for reason 2 +-- Test a large sample of different datatypes +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT 1); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T VALUES (1), (2); +ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT 'hello', + ALTER COLUMN c_int SET DEFAULT 2; +INSERT INTO T VALUES (3), (4); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'world', + ALTER COLUMN c_bpchar SET DEFAULT 'dog'; +INSERT INTO T VALUES (5), (6); +ALTER TABLE T ADD COLUMN c_date DATE DEFAULT '2016-06-02', + ALTER COLUMN c_text SET DEFAULT 'cat'; +INSERT INTO T VALUES (7), (8); +ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP DEFAULT '2016-09-01 12:00:00', + ADD COLUMN c_timestamp_null TIMESTAMP, + ALTER COLUMN c_date SET DEFAULT '2010-01-01'; +INSERT INTO T VALUES (9), (10); +ALTER TABLE T ADD COLUMN c_array TEXT[] + DEFAULT '{"This", "is", "the", "real", "world"}', + ALTER COLUMN c_timestamp SET DEFAULT '1970-12-31 11:12:13', + ALTER COLUMN c_timestamp_null SET DEFAULT '2016-09-29 12:00:00'; +INSERT INTO T VALUES (11), (12); +ALTER TABLE T ADD COLUMN c_small SMALLINT DEFAULT -5, + ADD COLUMN c_small_null SMALLINT, + ALTER COLUMN c_array + SET DEFAULT '{"This", "is", "no", "fantasy"}'; +INSERT INTO T VALUES (13), (14); +ALTER TABLE T ADD COLUMN c_big 
BIGINT DEFAULT 180000000000018, + ALTER COLUMN c_small SET DEFAULT 9, + ALTER COLUMN c_small_null SET DEFAULT 13; +INSERT INTO T VALUES (15), (16); +ALTER TABLE T ADD COLUMN c_num NUMERIC DEFAULT 1.00000000001, + ALTER COLUMN c_big SET DEFAULT -9999999999999999; +INSERT INTO T VALUES (17), (18); +ALTER TABLE T ADD COLUMN c_time TIME DEFAULT '12:00:00', + ALTER COLUMN c_num SET DEFAULT 2.000000000000002; +INSERT INTO T VALUES (19), (20); +ALTER TABLE T ADD COLUMN c_interval INTERVAL DEFAULT '1 day', + ALTER COLUMN c_time SET DEFAULT '23:59:59'; +INSERT INTO T VALUES (21), (22); +ALTER TABLE T ADD COLUMN c_hugetext TEXT DEFAULT repeat('abcdefg',1000), + ALTER COLUMN c_interval SET DEFAULT '3 hours'; +INSERT INTO T VALUES (23), (24); +ALTER TABLE T ALTER COLUMN c_interval DROP DEFAULT, + ALTER COLUMN c_hugetext SET DEFAULT repeat('poiuyt', 1000); +INSERT INTO T VALUES (25), (26); +ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, + ALTER COLUMN c_date DROP DEFAULT, + ALTER COLUMN c_text DROP DEFAULT, + ALTER COLUMN c_timestamp DROP DEFAULT, + ALTER COLUMN c_array DROP DEFAULT, + ALTER COLUMN c_small DROP DEFAULT, + ALTER COLUMN c_big DROP DEFAULT, + ALTER COLUMN c_num DROP DEFAULT, + ALTER COLUMN c_time DROP DEFAULT, + ALTER COLUMN c_hugetext DROP DEFAULT; +INSERT INTO T VALUES (27), (28); +SELECT pk, c_int, c_bpchar, c_text, c_date, c_timestamp, + c_timestamp_null, c_array, c_small, c_small_null, + c_big, c_num, c_time, c_interval, + c_hugetext = repeat('abcdefg',1000) as c_hugetext_origdef, + c_hugetext = repeat('poiuyt', 1000) as c_hugetext_newdef +FROM T ORDER BY pk; + pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_timestamp_null | c_array | c_small | c_small_null | c_big | c_num | c_time | c_interval | c_hugetext_origdef | c_hugetext_newdef +----+-------+----------+--------+------------+--------------------------+--------------------------+--------------------------+---------+--------------+-------------------+-------------------+----------+------------+--------------------+------------------- + 1 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 2 | 1 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 3 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 4 | 2 | hello | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 5 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 6 | 2 | dog | world | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 7 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 8 | 2 | dog | cat | 06-02-2016 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 9 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 | | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 10 | 2 | dog | cat | 01-01-2010 | Thu Sep 01 12:00:00 2016 
| | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 11 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 12 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,the,real,world} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 13 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 14 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | -5 | | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 15 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 16 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | 180000000000018 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 17 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 18 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 1.00000000001 | 12:00:00 | @ 1 day | t | f + 19 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f + 20 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 12:00:00 | @ 1 day | t | f + 21 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f + 22 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 1 day | t | f + 23 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f + 24 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | @ 3 hours | t | f + 25 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t + 26 | 2 | dog | cat | 01-01-2010 | Thu Dec 31 11:12:13 1970 | Thu Sep 29 12:00:00 2016 | {This,is,no,fantasy} | 9 | 13 | -9999999999999999 | 2.000000000000002 | 23:59:59 | | f | t + 27 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | | + 28 | 2 | | | | | Thu Sep 29 12:00:00 2016 | | | 13 | | | | | | +(28 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +DROP TABLE T; +-- Test expressions in the defaults +CREATE OR REPLACE FUNCTION foo(a INT) RETURNS TEXT AS $$ +DECLARE res TEXT := ''; + i INT; +BEGIN + i := 0; + WHILE (i < a) LOOP + res := res || chr(ascii('a') + i); + i := i + 1; + END LOOP; + RETURN res; +END; $$ LANGUAGE PLPGSQL 
STABLE; +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY, c_int INT DEFAULT LENGTH(foo(6))); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T VALUES (1), (2); +ALTER TABLE T ADD COLUMN c_bpchar BPCHAR(5) DEFAULT foo(4), + ALTER COLUMN c_int SET DEFAULT LENGTH(foo(8)); +INSERT INTO T VALUES (3), (4); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT foo(6), + ALTER COLUMN c_bpchar SET DEFAULT foo(3); +INSERT INTO T VALUES (5), (6); +ALTER TABLE T ADD COLUMN c_date DATE + DEFAULT '2016-06-02'::DATE + LENGTH(foo(10)), + ALTER COLUMN c_text SET DEFAULT foo(12); +INSERT INTO T VALUES (7), (8); +ALTER TABLE T ADD COLUMN c_timestamp TIMESTAMP + DEFAULT '2016-09-01'::DATE + LENGTH(foo(10)), + ALTER COLUMN c_date + SET DEFAULT '2010-01-01'::DATE - LENGTH(foo(4)); +INSERT INTO T VALUES (9), (10); +ALTER TABLE T ADD COLUMN c_array TEXT[] + DEFAULT ('{"This", "is", "' || foo(4) || + '","the", "real", "world"}')::TEXT[], + ALTER COLUMN c_timestamp + SET DEFAULT '1970-12-31'::DATE + LENGTH(foo(30)); +INSERT INTO T VALUES (11), (12); +ALTER TABLE T ALTER COLUMN c_int DROP DEFAULT, + ALTER COLUMN c_array + SET DEFAULT ('{"This", "is", "' || foo(1) || + '", "fantasy"}')::text[]; +INSERT INTO T VALUES (13), (14); +ALTER TABLE T ALTER COLUMN c_bpchar DROP DEFAULT, + ALTER COLUMN c_date DROP DEFAULT, + ALTER COLUMN c_text DROP DEFAULT, + ALTER COLUMN c_timestamp DROP DEFAULT, + ALTER COLUMN c_array DROP DEFAULT; +INSERT INTO T VALUES (15), (16); +SELECT * FROM T; + pk | c_int | c_bpchar | c_text | c_date | c_timestamp | c_array +----+-------+----------+--------------+------------+--------------------------+------------------------------- + 1 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 2 | 6 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 3 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 4 | 8 | abcd | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 5 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 6 | 8 | abc | abcdef | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 7 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 8 | 8 | abc | abcdefghijkl | 06-12-2016 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 9 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 10 | 8 | abc | abcdefghijkl | 12-28-2009 | Sun Sep 11 00:00:00 2016 | {This,is,abcd,the,real,world} + 11 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world} + 12 | 8 | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,abcd,the,real,world} + 13 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy} + 14 | | abc | abcdefghijkl | 12-28-2009 | Sat Jan 30 00:00:00 1971 | {This,is,a,fantasy} + 15 | | | | | | + 16 | | | | | | +(16 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +DROP TABLE T; +DROP FUNCTION foo(INT); +-- Fall back to full rewrite for volatile expressions +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); +INSERT INTO T VALUES (1); +SELECT set('t'); + set +----- + +(1 row) + +-- now() is stable, because it returns the transaction timestamp +ALTER TABLE T ADD COLUMN c1 TIMESTAMP DEFAULT now(); +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +-- clock_timestamp() is volatile +ALTER TABLE 
T ADD COLUMN c2 TIMESTAMP DEFAULT clock_timestamp(); +NOTICE: rewriting table t for reason 2 +SELECT comp(); + comp +----------- + Rewritten +(1 row) + +-- check that we notice insertion of a volatile default argument +CREATE FUNCTION foolme(timestamptz DEFAULT clock_timestamp()) + RETURNS timestamptz + IMMUTABLE AS 'select $1' LANGUAGE sql; +ALTER TABLE T ADD COLUMN c3 timestamptz DEFAULT foolme(); +NOTICE: rewriting table t for reason 2 +SELECT attname, atthasmissing, attmissingval FROM pg_attribute + WHERE attrelid = 't'::regclass AND attnum > 0 + ORDER BY attnum; + attname | atthasmissing | attmissingval +---------+---------------+--------------- + pk | f | + c1 | f | + c2 | f | + c3 | f | +(4 rows) + +DROP TABLE T; +DROP FUNCTION foolme(timestamptz); +-- Simple querie +CREATE TABLE T (pk INT NOT NULL PRIMARY KEY); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T SELECT * FROM generate_series(1, 10) a; +ALTER TABLE T ADD COLUMN c_bigint BIGINT NOT NULL DEFAULT -1; +INSERT INTO T SELECT b, b - 10 FROM generate_series(11, 20) a(b); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'hello'; +INSERT INTO T SELECT b, b - 10, (b + 10)::text FROM generate_series(21, 30) a(b); +-- WHERE clause +SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; + c_bigint | c_text +----------+-------- + -1 | hello +(1 row) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT c_bigint, c_text FROM T WHERE c_bigint = -1 LIMIT 1; + QUERY PLAN +---------------------------------------------- + Limit + Output: c_bigint, c_text + -> Seq Scan on fast_default.t + Output: c_bigint, c_text + Filter: (t.c_bigint = '-1'::integer) +(5 rows) + +SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; + c_bigint | c_text +----------+-------- + -1 | hello +(1 row) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) SELECT c_bigint, c_text FROM T WHERE c_text = 'hello' LIMIT 1; + QUERY PLAN +-------------------------------------------- + Limit + Output: c_bigint, c_text + -> Seq Scan on fast_default.t + Output: c_bigint, c_text + Filter: (t.c_text = 'hello'::text) +(5 rows) + +-- COALESCE +SELECT COALESCE(c_bigint, pk), COALESCE(c_text, pk::text) +FROM T +ORDER BY pk LIMIT 10; + coalesce | coalesce +----------+---------- + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello + -1 | hello +(10 rows) + +-- Aggregate function +SELECT SUM(c_bigint), MAX(c_text COLLATE "C" ), MIN(c_text COLLATE "C") FROM T; + sum | max | min +-----+-------+----- + 200 | hello | 31 +(1 row) + +-- ORDER BY +SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; + pk | c_bigint | c_text +----+----------+-------- + 1 | -1 | hello + 2 | -1 | hello + 3 | -1 | hello + 4 | -1 | hello + 5 | -1 | hello + 6 | -1 | hello + 7 | -1 | hello + 8 | -1 | hello + 9 | -1 | hello + 10 | -1 | hello +(10 rows) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +SELECT * FROM T ORDER BY c_bigint, c_text, pk LIMIT 10; + QUERY PLAN +---------------------------------------------- + Limit + Output: pk, c_bigint, c_text + -> Sort + Output: pk, c_bigint, c_text + Sort Key: t.c_bigint, t.c_text, t.pk + -> Seq Scan on fast_default.t + Output: pk, c_bigint, c_text +(7 rows) + +-- LIMIT +SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; + pk | c_bigint | c_text +----+----------+-------- + 11 | 1 | hello + 12 | 2 | hello + 13 | 3 | hello + 14 | 4 | hello + 15 | 5 | hello + 16 | 6 | hello + 17 | 7 | hello + 18 | 8 | hello + 19 | 9 | hello + 20 | 10 | hello +(10 rows) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) 
+SELECT * FROM T WHERE c_bigint > -1 ORDER BY c_bigint, c_text, pk LIMIT 10; + QUERY PLAN +---------------------------------------------------- + Limit + Output: pk, c_bigint, c_text + -> Sort + Output: pk, c_bigint, c_text + Sort Key: t.c_bigint, t.c_text, t.pk + -> Seq Scan on fast_default.t + Output: pk, c_bigint, c_text + Filter: (t.c_bigint > '-1'::integer) +(8 rows) + +-- DELETE with RETURNING +DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; + pk | c_bigint | c_text +----+----------+-------- + 10 | -1 | hello + 11 | 1 | hello + 12 | 2 | hello + 13 | 3 | hello + 14 | 4 | hello + 15 | 5 | hello + 16 | 6 | hello + 17 | 7 | hello + 18 | 8 | hello + 19 | 9 | hello + 20 | 10 | hello +(11 rows) + +EXPLAIN (VERBOSE TRUE, COSTS FALSE) +DELETE FROM T WHERE pk BETWEEN 10 AND 20 RETURNING *; + QUERY PLAN +----------------------------------------------------------- + Delete on fast_default.t + Output: pk, c_bigint, c_text + -> Bitmap Heap Scan on fast_default.t + Output: ctid + Recheck Cond: ((t.pk >= 10) AND (t.pk <= 20)) + -> Bitmap Index Scan on t_pkey + Index Cond: ((t.pk >= 10) AND (t.pk <= 20)) +(7 rows) + +-- UPDATE +UPDATE T SET c_text = '"' || c_text || '"' WHERE pk < 10; +SELECT * FROM T WHERE c_text LIKE '"%"' ORDER BY PK; + pk | c_bigint | c_text +----+----------+--------- + 1 | -1 | "hello" + 2 | -1 | "hello" + 3 | -1 | "hello" + 4 | -1 | "hello" + 5 | -1 | "hello" + 6 | -1 | "hello" + 7 | -1 | "hello" + 8 | -1 | "hello" + 9 | -1 | "hello" +(9 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +DROP TABLE T; +-- Combine with other DDL +CREATE TABLE T(pk INT NOT NULL PRIMARY KEY); +SELECT set('t'); + set +----- + +(1 row) + +INSERT INTO T VALUES (1), (2); +ALTER TABLE T ADD COLUMN c_int INT NOT NULL DEFAULT -1; +INSERT INTO T VALUES (3), (4); +ALTER TABLE T ADD COLUMN c_text TEXT DEFAULT 'Hello'; +INSERT INTO T VALUES (5), (6); +ALTER TABLE T ALTER COLUMN c_text SET DEFAULT 'world', + ALTER COLUMN c_int SET DEFAULT 1; +INSERT INTO T VALUES (7), (8); +SELECT * FROM T ORDER BY pk; + pk | c_int | c_text +----+-------+-------- + 1 | -1 | Hello + 2 | -1 | Hello + 3 | -1 | Hello + 4 | -1 | Hello + 5 | -1 | Hello + 6 | -1 | Hello + 7 | 1 | world + 8 | 1 | world +(8 rows) + +-- Add an index +CREATE INDEX i ON T(c_int, c_text); +SELECT c_text FROM T WHERE c_int = -1; + c_text +-------- + Hello + Hello + Hello + Hello + Hello + Hello +(6 rows) + +SELECT comp(); + comp +----------- + Unchanged +(1 row) + +-- query to exercise expand_tuple function +CREATE TABLE t1 AS +SELECT 1::int AS a , 2::int AS b +FROM generate_series(1,20) q; +ALTER TABLE t1 ADD COLUMN c text; +SELECT a, + stddev(cast((SELECT sum(1) FROM generate_series(1,20) x) AS float4)) + OVER (PARTITION BY a,b,c ORDER BY b) + AS z +FROM t1; + a | z +---+--- + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 + 1 | 0 +(20 rows) + +DROP TABLE T; +-- test that we account for missing columns without defaults correctly +-- in expand_tuple, and that rows are correctly expanded for triggers +CREATE FUNCTION test_trigger() +RETURNS trigger +LANGUAGE plpgsql +AS $$ + +begin + raise notice 'old tuple: %', to_json(OLD)::text; + if TG_OP = 'DELETE' + then + return OLD; + else + return NEW; + end if; +end; + +$$; +-- 2 new columns, both have defaults +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y 
int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, first has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":4,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, second has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, neither has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,3); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":3,"x":null,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | 3 | | 2 +(1 row) + +DROP TABLE t; +-- same as last 4 tests but here the last original column has a NULL value +-- 2 new columns, both have defaults +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, first has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int NOT NULL DEFAULT 4; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":4,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | 4 | 2 +(1 row) + +DROP TABLE t; +-- 2 
new columns, second has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int NOT NULL DEFAULT 5; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | 5 +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":5} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | 2 +(1 row) + +DROP TABLE t; +-- 2 new columns, neither has default +CREATE TABLE t (id serial PRIMARY KEY, a int, b int, c int); +INSERT INTO t (a,b,c) VALUES (1,2,NULL); +ALTER TABLE t ADD COLUMN x int; +ALTER TABLE t ADD COLUMN y int; +CREATE TRIGGER a BEFORE UPDATE ON t FOR EACH ROW EXECUTE PROCEDURE test_trigger(); +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | +(1 row) + +UPDATE t SET y = 2; +NOTICE: old tuple: {"id":1,"a":1,"b":2,"c":null,"x":null,"y":null} +SELECT * FROM t; + id | a | b | c | x | y +----+---+---+---+---+--- + 1 | 1 | 2 | | | 2 +(1 row) + +DROP TABLE t; +-- make sure expanded tuple has correct self pointer +-- it will be required by the RI trigger doing the cascading delete +CREATE TABLE leader (a int PRIMARY KEY, b int); +CREATE TABLE follower (a int REFERENCES leader ON DELETE CASCADE, b int); +INSERT INTO leader VALUES (1, 1), (2, 2); +ALTER TABLE leader ADD c int; +ALTER TABLE leader DROP c; +DELETE FROM leader; +-- check that ALTER TABLE ... ALTER TYPE does the right thing +CREATE TABLE vtype( a integer); +INSERT INTO vtype VALUES (1); +ALTER TABLE vtype ADD COLUMN b DOUBLE PRECISION DEFAULT 0.2; +ALTER TABLE vtype ADD COLUMN c BOOLEAN DEFAULT true; +SELECT * FROM vtype; + a | b | c +---+-----+--- + 1 | 0.2 | t +(1 row) + +ALTER TABLE vtype + ALTER b TYPE text USING b::text, + ALTER c TYPE text USING c::text; +NOTICE: rewriting table vtype for reason 4 +SELECT * FROM vtype; + a | b | c +---+-----+------ + 1 | 0.2 | true +(1 row) + +-- also check the case that doesn't rewrite the table +CREATE TABLE vtype2 (a int); +INSERT INTO vtype2 VALUES (1); +ALTER TABLE vtype2 ADD COLUMN b varchar(10) DEFAULT 'xxx'; +ALTER TABLE vtype2 ALTER COLUMN b SET DEFAULT 'yyy'; +INSERT INTO vtype2 VALUES (2); +ALTER TABLE vtype2 ALTER COLUMN b TYPE varchar(20) USING b::varchar(20); +SELECT * FROM vtype2; + a | b +---+----- + 1 | xxx + 2 | yyy +(2 rows) + +-- Ensure that defaults are checked when evaluating whether HOT update +-- is possible, this was broken for a while: +-- https://postgr.es/m/20190202133521.ylauh3ckqa7colzj%40alap3.anarazel.de +BEGIN; +CREATE TABLE t(); +INSERT INTO t DEFAULT VALUES; +ALTER TABLE t ADD COLUMN a int DEFAULT 1; +CREATE INDEX ON t(a); +-- set column with a default 1 to NULL, due to a bug that wasn't +-- noticed has heap_getattr buggily returned NULL for default columns +UPDATE t SET a = NULL; +-- verify that index and non-index scans show the same result +SET LOCAL enable_seqscan = true; +SELECT * FROM t WHERE a IS NULL; + a +--- + +(1 row) + +SET LOCAL enable_seqscan = false; +SELECT * FROM t WHERE a IS NULL; + a +--- + +(1 row) + +ROLLBACK; +-- verify that a default set on a non-plain table doesn't set a missing +-- value on the attribute +CREATE FOREIGN DATA WRAPPER dummy; +CREATE SERVER s0 FOREIGN DATA WRAPPER dummy; +CREATE FOREIGN TABLE ft1 (c1 integer NOT NULL) SERVER s0; +ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer DEFAULT 0; +ALTER 
FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10); +SELECT count(*) + FROM pg_attribute + WHERE attrelid = 'ft1'::regclass AND + (attmissingval IS NOT NULL OR atthasmissing); + count +------- + 0 +(1 row) + +-- cleanup +DROP FOREIGN TABLE ft1; +DROP SERVER s0; +DROP FOREIGN DATA WRAPPER dummy; +DROP TABLE vtype; +DROP TABLE vtype2; +DROP TABLE follower; +DROP TABLE leader; +DROP FUNCTION test_trigger(); +DROP TABLE t1; +DROP FUNCTION set(name); +DROP FUNCTION comp(); +DROP TABLE m; +DROP TABLE has_volatile; +DROP EVENT TRIGGER has_volatile_rewrite; +DROP FUNCTION log_rewrite; +DROP SCHEMA fast_default; +-- Leave a table with an active fast default in place, for pg_upgrade testing +set search_path = public; +create table has_fast_default(f1 int); +insert into has_fast_default values(1); +alter table has_fast_default add column f2 int default 42; +table has_fast_default; + f1 | f2 +----+---- + 1 | 42 +(1 row) + diff --git a/src/test/regress/expected/float4-misrounded-input.out b/src/test/regress/expected/float4-misrounded-input.out new file mode 100644 index 0000000..a427231 --- /dev/null +++ b/src/test/regress/expected/float4-misrounded-input.out @@ -0,0 +1,986 @@ +-- +-- FLOAT4 +-- +CREATE TABLE FLOAT4_TBL (f1 float4); +INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0'); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 '); +INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); +-- test for over and under flow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); +ERROR: "10e70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); +ERROR: "-10e70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); +ERROR: "10e-70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); +ERROR: "-10e-70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'::float8); +ERROR: value out of range: overflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'::float8); +ERROR: value out of range: overflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'::float8); +ERROR: value out of range: underflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'::float8); +ERROR: value out of range: underflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400'); +ERROR: "10e400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400'); +ERROR: "-10e400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400'); +ERROR: "10e-400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400'); +ERROR: "-10e-400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400'); + ^ +-- bad input +INSERT INTO FLOAT4_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type real: "" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (''); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type real: " " +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' '); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); +ERROR: invalid input syntax for type real: "xyz" +LINE 1: INSERT INTO 
FLOAT4_TBL(f1) VALUES ('xyz'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); +ERROR: invalid input syntax for type real: "5.0.0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); +ERROR: invalid input syntax for type real: "5 . 0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0'); +ERROR: invalid input syntax for type real: "5. 0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); +ERROR: invalid input syntax for type real: " - 3.0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for type real: "123 5" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34.5', 'float4'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('xyz', 'float4'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('1e400', 'float4'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1e400', 'float4'); + message | detail | hint | sql_error_code +---------------------------------------+--------+------+---------------- + "1e400" is out of range for type real | | | 22003 +(1 row) + +-- special inputs +SELECT 'NaN'::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'nan'::float4; + float4 +-------- + NaN +(1 row) + +SELECT ' NAN '::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'infinity'::float4; + float4 +---------- + Infinity +(1 row) + +SELECT ' -INFINiTY '::float4; + float4 +----------- + -Infinity +(1 row) + +-- bad special inputs +SELECT 'N A N'::float4; +ERROR: invalid input syntax for type real: "N A N" +LINE 1: SELECT 'N A N'::float4; + ^ +SELECT 'NaN x'::float4; +ERROR: invalid input syntax for type real: "NaN x" +LINE 1: SELECT 'NaN x'::float4; + ^ +SELECT ' INFINITY x'::float4; +ERROR: invalid input syntax for type real: " INFINITY x" +LINE 1: SELECT ' INFINITY x'::float4; + ^ +SELECT 'Infinity'::float4 + 100.0; + ?column? +---------- + Infinity +(1 row) + +SELECT 'Infinity'::float4 / 'Infinity'::float4; + ?column? +---------- + NaN +(1 row) + +SELECT '42'::float4 / 'Infinity'::float4; + ?column? +---------- + 0 +(1 row) + +SELECT 'nan'::float4 / 'nan'::float4; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::float4 / '0'::float4; + ?column? 
+---------- + NaN +(1 row) + +SELECT 'nan'::numeric::float4; + float4 +-------- + NaN +(1 row) + +SELECT * FROM FLOAT4_TBL; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e+20 + 1.2345679e-20 +(5 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3'; + f1 +--------------- + 0 + -34.84 + 1.2345679e+20 + 1.2345679e-20 +(4 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3'; + f1 +-------- + 1004.3 +(1 row) + +SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1; + f1 +--------------- + 0 + -34.84 + 1.2345679e-20 +(3 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3'; + f1 +--------------- + 0 + -34.84 + 1.2345679e-20 +(3 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e-20 +(4 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3'; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e-20 +(4 rows) + +SELECT f.f1, f.f1 * '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+---------------- + 1004.3 | -10043 + 1.2345679e+20 | -1.2345678e+21 + 1.2345679e-20 | -1.2345678e-19 +(3 rows) + +SELECT f.f1, f.f1 + '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+--------------- + 1004.3 | 994.3 + 1.2345679e+20 | 1.2345679e+20 + 1.2345679e-20 | -10 +(3 rows) + +SELECT f.f1, f.f1 / '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+---------------- + 1004.3 | -100.43 + 1.2345679e+20 | -1.2345679e+19 + 1.2345679e-20 | -1.2345679e-21 +(3 rows) + +SELECT f.f1, f.f1 - '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+--------------- + 1004.3 | 1014.3 + 1.2345679e+20 | 1.2345679e+20 + 1.2345679e-20 | 10 +(3 rows) + +-- test divide by zero +SELECT f.f1 / '0.0' from FLOAT4_TBL f; +ERROR: division by zero +SELECT * FROM FLOAT4_TBL; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e+20 + 1.2345679e-20 +(5 rows) + +-- test the unary float4abs operator +SELECT f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f; + f1 | abs_f1 +---------------+--------------- + 0 | 0 + 1004.3 | 1004.3 + -34.84 | 34.84 + 1.2345679e+20 | 1.2345679e+20 + 1.2345679e-20 | 1.2345679e-20 +(5 rows) + +UPDATE FLOAT4_TBL + SET f1 = FLOAT4_TBL.f1 * '-1' + WHERE FLOAT4_TBL.f1 > '0.0'; +SELECT * FROM FLOAT4_TBL; + f1 +---------------- + 0 + -34.84 + -1004.3 + -1.2345679e+20 + -1.2345679e-20 +(5 rows) + +-- test edge-case coercions to integer +SELECT '32767.4'::float4::int2; + int2 +------- + 32767 +(1 row) + +SELECT '32767.6'::float4::int2; +ERROR: smallint out of range +SELECT '-32768.4'::float4::int2; + int2 +-------- + -32768 +(1 row) + +SELECT '-32768.6'::float4::int2; +ERROR: smallint out of range +SELECT '2147483520'::float4::int4; + int4 +------------ + 2147483520 +(1 row) + +SELECT '2147483647'::float4::int4; +ERROR: integer out of range +SELECT '-2147483648.5'::float4::int4; + int4 +------------- + -2147483648 +(1 row) + +SELECT '-2147483900'::float4::int4; +ERROR: integer out of range +SELECT '9223369837831520256'::float4::int8; + int8 +--------------------- + 9223369837831520256 +(1 row) + +SELECT '9223372036854775807'::float4::int8; +ERROR: bigint out of range +SELECT '-9223372036854775808.5'::float4::int8; + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT '-9223380000000000000'::float4::int8; +ERROR: bigint out of range +-- Test for correct input rounding in edge cases. +-- These lists are from Paxson 1991, excluding subnormals and +-- inputs of over 9 sig. digits. 
+SELECT float4send('5e-20'::float4); + float4send +------------ + \x1f6c1e4a +(1 row) + +SELECT float4send('67e14'::float4); + float4send +------------ + \x59be6cea +(1 row) + +SELECT float4send('985e15'::float4); + float4send +------------ + \x5d5ab6c4 +(1 row) + +SELECT float4send('55895e-16'::float4); + float4send +------------ + \x2cc4a9bd +(1 row) + +SELECT float4send('7038531e-32'::float4); + float4send +------------ + \x15ae43fe +(1 row) + +SELECT float4send('702990899e-20'::float4); + float4send +------------ + \x2cf757ca +(1 row) + +SELECT float4send('3e-23'::float4); + float4send +------------ + \x1a111234 +(1 row) + +SELECT float4send('57e18'::float4); + float4send +------------ + \x6045c22c +(1 row) + +SELECT float4send('789e-35'::float4); + float4send +------------ + \x0a23de70 +(1 row) + +SELECT float4send('2539e-18'::float4); + float4send +------------ + \x2736f449 +(1 row) + +SELECT float4send('76173e28'::float4); + float4send +------------ + \x7616398a +(1 row) + +SELECT float4send('887745e-11'::float4); + float4send +------------ + \x3714f05c +(1 row) + +SELECT float4send('5382571e-37'::float4); + float4send +------------ + \x0d2eaca7 +(1 row) + +SELECT float4send('82381273e-35'::float4); + float4send +------------ + \x128289d0 +(1 row) + +SELECT float4send('750486563e-38'::float4); + float4send +------------ + \x0f18377e +(1 row) + +-- Test that the smallest possible normalized input value inputs +-- correctly, either in 9-significant-digit or shortest-decimal +-- format. +-- +-- exact val is 1.1754943508... +-- shortest val is 1.1754944000 +-- midpoint to next val is 1.1754944208... +SELECT float4send('1.17549435e-38'::float4); + float4send +------------ + \x00800000 +(1 row) + +SELECT float4send('1.1754944e-38'::float4); + float4send +------------ + \x00800000 +(1 row) + +-- test output (and round-trip safety) of various values. +-- To ensure we're testing what we think we're testing, start with +-- float values specified by bit patterns (as a useful side effect, +-- this means we'll fail on non-IEEE platforms). +create type xfloat4; +create function xfloat4in(cstring) returns xfloat4 immutable strict + language internal as 'int4in'; +NOTICE: return type xfloat4 is only a shell +create function xfloat4out(xfloat4) returns cstring immutable strict + language internal as 'int4out'; +NOTICE: argument type xfloat4 is only a shell +create type xfloat4 (input = xfloat4in, output = xfloat4out, like = float4); +create cast (xfloat4 as float4) without function; +create cast (float4 as xfloat4) without function; +create cast (xfloat4 as integer) without function; +create cast (integer as xfloat4) without function; +-- float4: seeeeeee emmmmmmm mmmmmmmm mmmmmmmm +-- we don't care to assume the platform's strtod() handles subnormals +-- correctly; those are "use at your own risk". However we do test +-- subnormal outputs, since those are under our control. 
+with testdata(bits) as (values + -- small subnormals + (x'00000001'), + (x'00000002'), (x'00000003'), + (x'00000010'), (x'00000011'), (x'00000100'), (x'00000101'), + (x'00004000'), (x'00004001'), (x'00080000'), (x'00080001'), + -- stress values + (x'0053c4f4'), -- 7693e-42 + (x'006c85c4'), -- 996622e-44 + (x'0041ca76'), -- 60419369e-46 + (x'004b7678'), -- 6930161142e-48 + -- taken from upstream testsuite + (x'00000007'), + (x'00424fe2'), + -- borderline between subnormal and normal + (x'007ffff0'), (x'007ffff1'), (x'007ffffe'), (x'007fffff')) +select float4send(flt) as ibits, + flt + from (select bits::integer::xfloat4::float4 as flt + from testdata + offset 0) s; + ibits | flt +------------+--------------- + \x00000001 | 1e-45 + \x00000002 | 3e-45 + \x00000003 | 4e-45 + \x00000010 | 2.2e-44 + \x00000011 | 2.4e-44 + \x00000100 | 3.59e-43 + \x00000101 | 3.6e-43 + \x00004000 | 2.2959e-41 + \x00004001 | 2.296e-41 + \x00080000 | 7.34684e-40 + \x00080001 | 7.34685e-40 + \x0053c4f4 | 7.693e-39 + \x006c85c4 | 9.96622e-39 + \x0041ca76 | 6.041937e-39 + \x004b7678 | 6.930161e-39 + \x00000007 | 1e-44 + \x00424fe2 | 6.0898e-39 + \x007ffff0 | 1.1754921e-38 + \x007ffff1 | 1.1754922e-38 + \x007ffffe | 1.1754941e-38 + \x007fffff | 1.1754942e-38 +(21 rows) + +with testdata(bits) as (values + (x'00000000'), + -- smallest normal values + (x'00800000'), (x'00800001'), (x'00800004'), (x'00800005'), + (x'00800006'), + -- small normal values chosen for short vs. long output + (x'008002f1'), (x'008002f2'), (x'008002f3'), + (x'00800e17'), (x'00800e18'), (x'00800e19'), + -- assorted values (random mantissae) + (x'01000001'), (x'01102843'), (x'01a52c98'), + (x'0219c229'), (x'02e4464d'), (x'037343c1'), (x'03a91b36'), + (x'047ada65'), (x'0496fe87'), (x'0550844f'), (x'05999da3'), + (x'060ea5e2'), (x'06e63c45'), (x'07f1e548'), (x'0fc5282b'), + (x'1f850283'), (x'2874a9d6'), + -- values around 5e-08 + (x'3356bf94'), (x'3356bf95'), (x'3356bf96'), + -- around 1e-07 + (x'33d6bf94'), (x'33d6bf95'), (x'33d6bf96'), + -- around 3e-07 .. 
1e-04 + (x'34a10faf'), (x'34a10fb0'), (x'34a10fb1'), + (x'350637bc'), (x'350637bd'), (x'350637be'), + (x'35719786'), (x'35719787'), (x'35719788'), + (x'358637bc'), (x'358637bd'), (x'358637be'), + (x'36a7c5ab'), (x'36a7c5ac'), (x'36a7c5ad'), + (x'3727c5ab'), (x'3727c5ac'), (x'3727c5ad'), + -- format crossover at 1e-04 + (x'38d1b714'), (x'38d1b715'), (x'38d1b716'), + (x'38d1b717'), (x'38d1b718'), (x'38d1b719'), + (x'38d1b71a'), (x'38d1b71b'), (x'38d1b71c'), + (x'38d1b71d'), + -- + (x'38dffffe'), (x'38dfffff'), (x'38e00000'), + (x'38efffff'), (x'38f00000'), (x'38f00001'), + (x'3a83126e'), (x'3a83126f'), (x'3a831270'), + (x'3c23d709'), (x'3c23d70a'), (x'3c23d70b'), + (x'3dcccccc'), (x'3dcccccd'), (x'3dccccce'), + -- chosen to need 9 digits for 3dcccd70 + (x'3dcccd6f'), (x'3dcccd70'), (x'3dcccd71'), + -- + (x'3effffff'), (x'3f000000'), (x'3f000001'), + (x'3f333332'), (x'3f333333'), (x'3f333334'), + -- approach 1.0 with increasing numbers of 9s + (x'3f666665'), (x'3f666666'), (x'3f666667'), + (x'3f7d70a3'), (x'3f7d70a4'), (x'3f7d70a5'), + (x'3f7fbe76'), (x'3f7fbe77'), (x'3f7fbe78'), + (x'3f7ff971'), (x'3f7ff972'), (x'3f7ff973'), + (x'3f7fff57'), (x'3f7fff58'), (x'3f7fff59'), + (x'3f7fffee'), (x'3f7fffef'), + -- values very close to 1 + (x'3f7ffff0'), (x'3f7ffff1'), (x'3f7ffff2'), + (x'3f7ffff3'), (x'3f7ffff4'), (x'3f7ffff5'), + (x'3f7ffff6'), (x'3f7ffff7'), (x'3f7ffff8'), + (x'3f7ffff9'), (x'3f7ffffa'), (x'3f7ffffb'), + (x'3f7ffffc'), (x'3f7ffffd'), (x'3f7ffffe'), + (x'3f7fffff'), + (x'3f800000'), + (x'3f800001'), (x'3f800002'), (x'3f800003'), + (x'3f800004'), (x'3f800005'), (x'3f800006'), + (x'3f800007'), (x'3f800008'), (x'3f800009'), + -- values 1 to 1.1 + (x'3f80000f'), (x'3f800010'), (x'3f800011'), + (x'3f800012'), (x'3f800013'), (x'3f800014'), + (x'3f800017'), (x'3f800018'), (x'3f800019'), + (x'3f80001a'), (x'3f80001b'), (x'3f80001c'), + (x'3f800029'), (x'3f80002a'), (x'3f80002b'), + (x'3f800053'), (x'3f800054'), (x'3f800055'), + (x'3f800346'), (x'3f800347'), (x'3f800348'), + (x'3f8020c4'), (x'3f8020c5'), (x'3f8020c6'), + (x'3f8147ad'), (x'3f8147ae'), (x'3f8147af'), + (x'3f8ccccc'), (x'3f8ccccd'), (x'3f8cccce'), + -- + (x'3fc90fdb'), -- pi/2 + (x'402df854'), -- e + (x'40490fdb'), -- pi + -- + (x'409fffff'), (x'40a00000'), (x'40a00001'), + (x'40afffff'), (x'40b00000'), (x'40b00001'), + (x'411fffff'), (x'41200000'), (x'41200001'), + (x'42c7ffff'), (x'42c80000'), (x'42c80001'), + (x'4479ffff'), (x'447a0000'), (x'447a0001'), + (x'461c3fff'), (x'461c4000'), (x'461c4001'), + (x'47c34fff'), (x'47c35000'), (x'47c35001'), + (x'497423ff'), (x'49742400'), (x'49742401'), + (x'4b18967f'), (x'4b189680'), (x'4b189681'), + (x'4cbebc1f'), (x'4cbebc20'), (x'4cbebc21'), + (x'4e6e6b27'), (x'4e6e6b28'), (x'4e6e6b29'), + (x'501502f8'), (x'501502f9'), (x'501502fa'), + (x'51ba43b6'), (x'51ba43b7'), (x'51ba43b8'), + -- stress values + (x'1f6c1e4a'), -- 5e-20 + (x'59be6cea'), -- 67e14 + (x'5d5ab6c4'), -- 985e15 + (x'2cc4a9bd'), -- 55895e-16 + (x'15ae43fd'), -- 7038531e-32 + (x'2cf757ca'), -- 702990899e-20 + (x'665ba998'), -- 25933168707e13 + (x'743c3324'), -- 596428896559e20 + -- exercise fixed-point memmoves + (x'47f1205a'), + (x'4640e6ae'), + (x'449a5225'), + (x'42f6e9d5'), + (x'414587dd'), + (x'3f9e064b'), + -- these cases come from the upstream's testsuite + -- BoundaryRoundEven + (x'4c000004'), + (x'50061c46'), + (x'510006a8'), + -- ExactValueRoundEven + (x'48951f84'), + (x'45fd1840'), + -- LotsOfTrailingZeros + (x'39800000'), + (x'3b200000'), + (x'3b900000'), + (x'3bd00000'), + -- Regression + (x'63800000'), + 
(x'4b000000'), + (x'4b800000'), + (x'4c000001'), + (x'4c800b0d'), + (x'00d24584'), + (x'00d90b88'), + (x'45803f34'), + (x'4f9f24f7'), + (x'3a8722c3'), + (x'5c800041'), + (x'15ae43fd'), + (x'5d4cccfb'), + (x'4c800001'), + (x'57800ed8'), + (x'5f000000'), + (x'700000f0'), + (x'5f23e9ac'), + (x'5e9502f9'), + (x'5e8012b1'), + (x'3c000028'), + (x'60cde861'), + (x'03aa2a50'), + (x'43480000'), + (x'4c000000'), + -- LooksLikePow5 + (x'5D1502F9'), + (x'5D9502F9'), + (x'5E1502F9'), + -- OutputLength + (x'3f99999a'), + (x'3f9d70a4'), + (x'3f9df3b6'), + (x'3f9e0419'), + (x'3f9e0610'), + (x'3f9e064b'), + (x'3f9e0651'), + (x'03d20cfe') +) +select float4send(flt) as ibits, + flt, + flt::text::float4 as r_flt, + float4send(flt::text::float4) as obits, + float4send(flt::text::float4) = float4send(flt) as correct + from (select bits::integer::xfloat4::float4 as flt + from testdata + offset 0) s; + ibits | flt | r_flt | obits | correct +------------+----------------+----------------+------------+--------- + \x00000000 | 0 | 0 | \x00000000 | t + \x00800000 | 1.1754944e-38 | 1.1754944e-38 | \x00800000 | t + \x00800001 | 1.1754945e-38 | 1.1754945e-38 | \x00800001 | t + \x00800004 | 1.1754949e-38 | 1.1754949e-38 | \x00800004 | t + \x00800005 | 1.175495e-38 | 1.175495e-38 | \x00800005 | t + \x00800006 | 1.1754952e-38 | 1.1754952e-38 | \x00800006 | t + \x008002f1 | 1.1755999e-38 | 1.1755999e-38 | \x008002f1 | t + \x008002f2 | 1.1756e-38 | 1.1756e-38 | \x008002f2 | t + \x008002f3 | 1.1756001e-38 | 1.1756001e-38 | \x008002f3 | t + \x00800e17 | 1.1759998e-38 | 1.1759998e-38 | \x00800e17 | t + \x00800e18 | 1.176e-38 | 1.176e-38 | \x00800e18 | t + \x00800e19 | 1.1760001e-38 | 1.1760001e-38 | \x00800e19 | t + \x01000001 | 2.350989e-38 | 2.350989e-38 | \x01000001 | t + \x01102843 | 2.647751e-38 | 2.647751e-38 | \x01102843 | t + \x01a52c98 | 6.0675416e-38 | 6.0675416e-38 | \x01a52c98 | t + \x0219c229 | 1.1296386e-37 | 1.1296386e-37 | \x0219c229 | t + \x02e4464d | 3.354194e-37 | 3.354194e-37 | \x02e4464d | t + \x037343c1 | 7.148906e-37 | 7.148906e-37 | \x037343c1 | t + \x03a91b36 | 9.939175e-37 | 9.939175e-37 | \x03a91b36 | t + \x047ada65 | 2.948764e-36 | 2.948764e-36 | \x047ada65 | t + \x0496fe87 | 3.5498577e-36 | 3.5498577e-36 | \x0496fe87 | t + \x0550844f | 9.804414e-36 | 9.804414e-36 | \x0550844f | t + \x05999da3 | 1.4445957e-35 | 1.4445957e-35 | \x05999da3 | t + \x060ea5e2 | 2.6829103e-35 | 2.6829103e-35 | \x060ea5e2 | t + \x06e63c45 | 8.660494e-35 | 8.660494e-35 | \x06e63c45 | t + \x07f1e548 | 3.639641e-34 | 3.639641e-34 | \x07f1e548 | t + \x0fc5282b | 1.9441172e-29 | 1.9441172e-29 | \x0fc5282b | t + \x1f850283 | 5.6331846e-20 | 5.6331846e-20 | \x1f850283 | t + \x2874a9d6 | 1.3581548e-14 | 1.3581548e-14 | \x2874a9d6 | t + \x3356bf94 | 4.9999997e-08 | 4.9999997e-08 | \x3356bf94 | t + \x3356bf95 | 5e-08 | 5e-08 | \x3356bf95 | t + \x3356bf96 | 5.0000004e-08 | 5.0000004e-08 | \x3356bf96 | t + \x33d6bf94 | 9.9999994e-08 | 9.9999994e-08 | \x33d6bf94 | t + \x33d6bf95 | 1e-07 | 1e-07 | \x33d6bf95 | t + \x33d6bf96 | 1.0000001e-07 | 1.0000001e-07 | \x33d6bf96 | t + \x34a10faf | 2.9999998e-07 | 2.9999998e-07 | \x34a10faf | t + \x34a10fb0 | 3e-07 | 3e-07 | \x34a10fb0 | t + \x34a10fb1 | 3.0000004e-07 | 3.0000004e-07 | \x34a10fb1 | t + \x350637bc | 4.9999994e-07 | 4.9999994e-07 | \x350637bc | t + \x350637bd | 5e-07 | 5e-07 | \x350637bd | t + \x350637be | 5.0000006e-07 | 5.0000006e-07 | \x350637be | t + \x35719786 | 8.999999e-07 | 8.999999e-07 | \x35719786 | t + \x35719787 | 9e-07 | 9e-07 | \x35719787 | t + \x35719788 | 
9.0000003e-07 | 9.0000003e-07 | \x35719788 | t + \x358637bc | 9.999999e-07 | 9.999999e-07 | \x358637bc | t + \x358637bd | 1e-06 | 1e-06 | \x358637bd | t + \x358637be | 1.0000001e-06 | 1.0000001e-06 | \x358637be | t + \x36a7c5ab | 4.9999994e-06 | 4.9999994e-06 | \x36a7c5ab | t + \x36a7c5ac | 5e-06 | 5e-06 | \x36a7c5ac | t + \x36a7c5ad | 5.0000003e-06 | 5.0000003e-06 | \x36a7c5ad | t + \x3727c5ab | 9.999999e-06 | 9.999999e-06 | \x3727c5ab | t + \x3727c5ac | 1e-05 | 1e-05 | \x3727c5ac | t + \x3727c5ad | 1.0000001e-05 | 1.0000001e-05 | \x3727c5ad | t + \x38d1b714 | 9.9999976e-05 | 9.9999976e-05 | \x38d1b714 | t + \x38d1b715 | 9.999998e-05 | 9.999998e-05 | \x38d1b715 | t + \x38d1b716 | 9.999999e-05 | 9.999999e-05 | \x38d1b716 | t + \x38d1b717 | 0.0001 | 0.0001 | \x38d1b717 | t + \x38d1b718 | 0.000100000005 | 0.000100000005 | \x38d1b718 | t + \x38d1b719 | 0.00010000001 | 0.00010000001 | \x38d1b719 | t + \x38d1b71a | 0.00010000002 | 0.00010000002 | \x38d1b71a | t + \x38d1b71b | 0.00010000003 | 0.00010000003 | \x38d1b71b | t + \x38d1b71c | 0.000100000034 | 0.000100000034 | \x38d1b71c | t + \x38d1b71d | 0.00010000004 | 0.00010000004 | \x38d1b71d | t + \x38dffffe | 0.00010681151 | 0.00010681151 | \x38dffffe | t + \x38dfffff | 0.000106811516 | 0.000106811516 | \x38dfffff | t + \x38e00000 | 0.00010681152 | 0.00010681152 | \x38e00000 | t + \x38efffff | 0.00011444091 | 0.00011444091 | \x38efffff | t + \x38f00000 | 0.00011444092 | 0.00011444092 | \x38f00000 | t + \x38f00001 | 0.000114440925 | 0.000114440925 | \x38f00001 | t + \x3a83126e | 0.0009999999 | 0.0009999999 | \x3a83126e | t + \x3a83126f | 0.001 | 0.001 | \x3a83126f | t + \x3a831270 | 0.0010000002 | 0.0010000002 | \x3a831270 | t + \x3c23d709 | 0.009999999 | 0.009999999 | \x3c23d709 | t + \x3c23d70a | 0.01 | 0.01 | \x3c23d70a | t + \x3c23d70b | 0.010000001 | 0.010000001 | \x3c23d70b | t + \x3dcccccc | 0.099999994 | 0.099999994 | \x3dcccccc | t + \x3dcccccd | 0.1 | 0.1 | \x3dcccccd | t + \x3dccccce | 0.10000001 | 0.10000001 | \x3dccccce | t + \x3dcccd6f | 0.10000121 | 0.10000121 | \x3dcccd6f | t + \x3dcccd70 | 0.100001216 | 0.100001216 | \x3dcccd70 | t + \x3dcccd71 | 0.10000122 | 0.10000122 | \x3dcccd71 | t + \x3effffff | 0.49999997 | 0.49999997 | \x3effffff | t + \x3f000000 | 0.5 | 0.5 | \x3f000000 | t + \x3f000001 | 0.50000006 | 0.50000006 | \x3f000001 | t + \x3f333332 | 0.6999999 | 0.6999999 | \x3f333332 | t + \x3f333333 | 0.7 | 0.7 | \x3f333333 | t + \x3f333334 | 0.70000005 | 0.70000005 | \x3f333334 | t + \x3f666665 | 0.8999999 | 0.8999999 | \x3f666665 | t + \x3f666666 | 0.9 | 0.9 | \x3f666666 | t + \x3f666667 | 0.90000004 | 0.90000004 | \x3f666667 | t + \x3f7d70a3 | 0.98999995 | 0.98999995 | \x3f7d70a3 | t + \x3f7d70a4 | 0.99 | 0.99 | \x3f7d70a4 | t + \x3f7d70a5 | 0.99000007 | 0.99000007 | \x3f7d70a5 | t + \x3f7fbe76 | 0.99899995 | 0.99899995 | \x3f7fbe76 | t + \x3f7fbe77 | 0.999 | 0.999 | \x3f7fbe77 | t + \x3f7fbe78 | 0.9990001 | 0.9990001 | \x3f7fbe78 | t + \x3f7ff971 | 0.9998999 | 0.9998999 | \x3f7ff971 | t + \x3f7ff972 | 0.9999 | 0.9999 | \x3f7ff972 | t + \x3f7ff973 | 0.99990004 | 0.99990004 | \x3f7ff973 | t + \x3f7fff57 | 0.9999899 | 0.9999899 | \x3f7fff57 | t + \x3f7fff58 | 0.99999 | 0.99999 | \x3f7fff58 | t + \x3f7fff59 | 0.99999005 | 0.99999005 | \x3f7fff59 | t + \x3f7fffee | 0.9999989 | 0.9999989 | \x3f7fffee | t + \x3f7fffef | 0.999999 | 0.999999 | \x3f7fffef | t + \x3f7ffff0 | 0.99999905 | 0.99999905 | \x3f7ffff0 | t + \x3f7ffff1 | 0.9999991 | 0.9999991 | \x3f7ffff1 | t + \x3f7ffff2 | 0.99999917 | 0.99999917 | \x3f7ffff2 | t + 
\x3f7ffff3 | 0.9999992 | 0.9999992 | \x3f7ffff3 | t + \x3f7ffff4 | 0.9999993 | 0.9999993 | \x3f7ffff4 | t + \x3f7ffff5 | 0.99999934 | 0.99999934 | \x3f7ffff5 | t + \x3f7ffff6 | 0.9999994 | 0.9999994 | \x3f7ffff6 | t + \x3f7ffff7 | 0.99999946 | 0.99999946 | \x3f7ffff7 | t + \x3f7ffff8 | 0.9999995 | 0.9999995 | \x3f7ffff8 | t + \x3f7ffff9 | 0.9999996 | 0.9999996 | \x3f7ffff9 | t + \x3f7ffffa | 0.99999964 | 0.99999964 | \x3f7ffffa | t + \x3f7ffffb | 0.9999997 | 0.9999997 | \x3f7ffffb | t + \x3f7ffffc | 0.99999976 | 0.99999976 | \x3f7ffffc | t + \x3f7ffffd | 0.9999998 | 0.9999998 | \x3f7ffffd | t + \x3f7ffffe | 0.9999999 | 0.9999999 | \x3f7ffffe | t + \x3f7fffff | 0.99999994 | 0.99999994 | \x3f7fffff | t + \x3f800000 | 1 | 1 | \x3f800000 | t + \x3f800001 | 1.0000001 | 1.0000001 | \x3f800001 | t + \x3f800002 | 1.0000002 | 1.0000002 | \x3f800002 | t + \x3f800003 | 1.0000004 | 1.0000004 | \x3f800003 | t + \x3f800004 | 1.0000005 | 1.0000005 | \x3f800004 | t + \x3f800005 | 1.0000006 | 1.0000006 | \x3f800005 | t + \x3f800006 | 1.0000007 | 1.0000007 | \x3f800006 | t + \x3f800007 | 1.0000008 | 1.0000008 | \x3f800007 | t + \x3f800008 | 1.000001 | 1.000001 | \x3f800008 | t + \x3f800009 | 1.0000011 | 1.0000011 | \x3f800009 | t + \x3f80000f | 1.0000018 | 1.0000018 | \x3f80000f | t + \x3f800010 | 1.0000019 | 1.0000019 | \x3f800010 | t + \x3f800011 | 1.000002 | 1.000002 | \x3f800011 | t + \x3f800012 | 1.0000021 | 1.0000021 | \x3f800012 | t + \x3f800013 | 1.0000023 | 1.0000023 | \x3f800013 | t + \x3f800014 | 1.0000024 | 1.0000024 | \x3f800014 | t + \x3f800017 | 1.0000027 | 1.0000027 | \x3f800017 | t + \x3f800018 | 1.0000029 | 1.0000029 | \x3f800018 | t + \x3f800019 | 1.000003 | 1.000003 | \x3f800019 | t + \x3f80001a | 1.0000031 | 1.0000031 | \x3f80001a | t + \x3f80001b | 1.0000032 | 1.0000032 | \x3f80001b | t + \x3f80001c | 1.0000033 | 1.0000033 | \x3f80001c | t + \x3f800029 | 1.0000049 | 1.0000049 | \x3f800029 | t + \x3f80002a | 1.000005 | 1.000005 | \x3f80002a | t + \x3f80002b | 1.0000051 | 1.0000051 | \x3f80002b | t + \x3f800053 | 1.0000099 | 1.0000099 | \x3f800053 | t + \x3f800054 | 1.00001 | 1.00001 | \x3f800054 | t + \x3f800055 | 1.0000101 | 1.0000101 | \x3f800055 | t + \x3f800346 | 1.0000999 | 1.0000999 | \x3f800346 | t + \x3f800347 | 1.0001 | 1.0001 | \x3f800347 | t + \x3f800348 | 1.0001001 | 1.0001001 | \x3f800348 | t + \x3f8020c4 | 1.0009999 | 1.0009999 | \x3f8020c4 | t + \x3f8020c5 | 1.001 | 1.001 | \x3f8020c5 | t + \x3f8020c6 | 1.0010002 | 1.0010002 | \x3f8020c6 | t + \x3f8147ad | 1.0099999 | 1.0099999 | \x3f8147ad | t + \x3f8147ae | 1.01 | 1.01 | \x3f8147ae | t + \x3f8147af | 1.0100001 | 1.0100001 | \x3f8147af | t + \x3f8ccccc | 1.0999999 | 1.0999999 | \x3f8ccccc | t + \x3f8ccccd | 1.1 | 1.1 | \x3f8ccccd | t + \x3f8cccce | 1.1000001 | 1.1000001 | \x3f8cccce | t + \x3fc90fdb | 1.5707964 | 1.5707964 | \x3fc90fdb | t + \x402df854 | 2.7182817 | 2.7182817 | \x402df854 | t + \x40490fdb | 3.1415927 | 3.1415927 | \x40490fdb | t + \x409fffff | 4.9999995 | 4.9999995 | \x409fffff | t + \x40a00000 | 5 | 5 | \x40a00000 | t + \x40a00001 | 5.0000005 | 5.0000005 | \x40a00001 | t + \x40afffff | 5.4999995 | 5.4999995 | \x40afffff | t + \x40b00000 | 5.5 | 5.5 | \x40b00000 | t + \x40b00001 | 5.5000005 | 5.5000005 | \x40b00001 | t + \x411fffff | 9.999999 | 9.999999 | \x411fffff | t + \x41200000 | 10 | 10 | \x41200000 | t + \x41200001 | 10.000001 | 10.000001 | \x41200001 | t + \x42c7ffff | 99.99999 | 99.99999 | \x42c7ffff | t + \x42c80000 | 100 | 100 | \x42c80000 | t + \x42c80001 | 100.00001 | 100.00001 | \x42c80001 
| t + \x4479ffff | 999.99994 | 999.99994 | \x4479ffff | t + \x447a0000 | 1000 | 1000 | \x447a0000 | t + \x447a0001 | 1000.00006 | 1000.00006 | \x447a0001 | t + \x461c3fff | 9999.999 | 9999.999 | \x461c3fff | t + \x461c4000 | 10000 | 10000 | \x461c4000 | t + \x461c4001 | 10000.001 | 10000.001 | \x461c4001 | t + \x47c34fff | 99999.99 | 99999.99 | \x47c34fff | t + \x47c35000 | 100000 | 100000 | \x47c35000 | t + \x47c35001 | 100000.01 | 100000.01 | \x47c35001 | t + \x497423ff | 999999.94 | 999999.94 | \x497423ff | t + \x49742400 | 1e+06 | 1e+06 | \x49742400 | t + \x49742401 | 1.00000006e+06 | 1.00000006e+06 | \x49742401 | t + \x4b18967f | 9.999999e+06 | 9.999999e+06 | \x4b18967f | t + \x4b189680 | 1e+07 | 1e+07 | \x4b189680 | t + \x4b189681 | 1.0000001e+07 | 1.0000001e+07 | \x4b189681 | t + \x4cbebc1f | 9.999999e+07 | 9.999999e+07 | \x4cbebc1f | t + \x4cbebc20 | 1e+08 | 1e+08 | \x4cbebc20 | t + \x4cbebc21 | 1.0000001e+08 | 1.0000001e+08 | \x4cbebc21 | t + \x4e6e6b27 | 9.9999994e+08 | 9.9999994e+08 | \x4e6e6b27 | t + \x4e6e6b28 | 1e+09 | 1e+09 | \x4e6e6b28 | t + \x4e6e6b29 | 1.00000006e+09 | 1.00000006e+09 | \x4e6e6b29 | t + \x501502f8 | 9.999999e+09 | 9.999999e+09 | \x501502f8 | t + \x501502f9 | 1e+10 | 1e+10 | \x501502f9 | t + \x501502fa | 1.0000001e+10 | 1.0000001e+10 | \x501502fa | t + \x51ba43b6 | 9.999999e+10 | 9.999999e+10 | \x51ba43b6 | t + \x51ba43b7 | 1e+11 | 1e+11 | \x51ba43b7 | t + \x51ba43b8 | 1.0000001e+11 | 1.0000001e+11 | \x51ba43b8 | t + \x1f6c1e4a | 5e-20 | 5e-20 | \x1f6c1e4a | t + \x59be6cea | 6.7e+15 | 6.7e+15 | \x59be6cea | t + \x5d5ab6c4 | 9.85e+17 | 9.85e+17 | \x5d5ab6c4 | t + \x2cc4a9bd | 5.5895e-12 | 5.5895e-12 | \x2cc4a9bd | t + \x15ae43fd | 7.038531e-26 | 7.0385313e-26 | \x15ae43fe | f + \x2cf757ca | 7.0299088e-12 | 7.0299088e-12 | \x2cf757ca | t + \x665ba998 | 2.5933168e+23 | 2.5933168e+23 | \x665ba998 | t + \x743c3324 | 5.9642887e+31 | 5.9642887e+31 | \x743c3324 | t + \x47f1205a | 123456.7 | 123456.7 | \x47f1205a | t + \x4640e6ae | 12345.67 | 12345.67 | \x4640e6ae | t + \x449a5225 | 1234.567 | 1234.567 | \x449a5225 | t + \x42f6e9d5 | 123.4567 | 123.4567 | \x42f6e9d5 | t + \x414587dd | 12.34567 | 12.34567 | \x414587dd | t + \x3f9e064b | 1.234567 | 1.234567 | \x3f9e064b | t + \x4c000004 | 3.3554448e+07 | 3.3554448e+07 | \x4c000004 | t + \x50061c46 | 8.999999e+09 | 8.999999e+09 | \x50061c46 | t + \x510006a8 | 3.4366718e+10 | 3.4366718e+10 | \x510006a8 | t + \x48951f84 | 305404.12 | 305404.12 | \x48951f84 | t + \x45fd1840 | 8099.0312 | 8099.0312 | \x45fd1840 | t + \x39800000 | 0.00024414062 | 0.00024414062 | \x39800000 | t + \x3b200000 | 0.0024414062 | 0.0024414062 | \x3b200000 | t + \x3b900000 | 0.0043945312 | 0.0043945312 | \x3b900000 | t + \x3bd00000 | 0.0063476562 | 0.0063476562 | \x3bd00000 | t + \x63800000 | 4.7223665e+21 | 4.7223665e+21 | \x63800000 | t + \x4b000000 | 8.388608e+06 | 8.388608e+06 | \x4b000000 | t + \x4b800000 | 1.6777216e+07 | 1.6777216e+07 | \x4b800000 | t + \x4c000001 | 3.3554436e+07 | 3.3554436e+07 | \x4c000001 | t + \x4c800b0d | 6.7131496e+07 | 6.7131496e+07 | \x4c800b0d | t + \x00d24584 | 1.9310392e-38 | 1.9310392e-38 | \x00d24584 | t + \x00d90b88 | 1.993244e-38 | 1.993244e-38 | \x00d90b88 | t + \x45803f34 | 4103.9004 | 4103.9004 | \x45803f34 | t + \x4f9f24f7 | 5.3399997e+09 | 5.3399997e+09 | \x4f9f24f7 | t + \x3a8722c3 | 0.0010310042 | 0.0010310042 | \x3a8722c3 | t + \x5c800041 | 2.882326e+17 | 2.882326e+17 | \x5c800041 | t + \x15ae43fd | 7.038531e-26 | 7.0385313e-26 | \x15ae43fe | f + \x5d4cccfb | 9.223404e+17 | 9.223404e+17 | \x5d4cccfb | 
t + \x4c800001 | 6.710887e+07 | 6.710887e+07 | \x4c800001 | t + \x57800ed8 | 2.816025e+14 | 2.816025e+14 | \x57800ed8 | t + \x5f000000 | 9.223372e+18 | 9.223372e+18 | \x5f000000 | t + \x700000f0 | 1.5846086e+29 | 1.5846086e+29 | \x700000f0 | t + \x5f23e9ac | 1.1811161e+19 | 1.1811161e+19 | \x5f23e9ac | t + \x5e9502f9 | 5.368709e+18 | 5.368709e+18 | \x5e9502f9 | t + \x5e8012b1 | 4.6143166e+18 | 4.6143166e+18 | \x5e8012b1 | t + \x3c000028 | 0.007812537 | 0.007812537 | \x3c000028 | t + \x60cde861 | 1.18697725e+20 | 1.18697725e+20 | \x60cde861 | t + \x03aa2a50 | 1.00014165e-36 | 1.00014165e-36 | \x03aa2a50 | t + \x43480000 | 200 | 200 | \x43480000 | t + \x4c000000 | 3.3554432e+07 | 3.3554432e+07 | \x4c000000 | t + \x5d1502f9 | 6.7108864e+17 | 6.7108864e+17 | \x5d1502f9 | t + \x5d9502f9 | 1.3421773e+18 | 1.3421773e+18 | \x5d9502f9 | t + \x5e1502f9 | 2.6843546e+18 | 2.6843546e+18 | \x5e1502f9 | t + \x3f99999a | 1.2 | 1.2 | \x3f99999a | t + \x3f9d70a4 | 1.23 | 1.23 | \x3f9d70a4 | t + \x3f9df3b6 | 1.234 | 1.234 | \x3f9df3b6 | t + \x3f9e0419 | 1.2345 | 1.2345 | \x3f9e0419 | t + \x3f9e0610 | 1.23456 | 1.23456 | \x3f9e0610 | t + \x3f9e064b | 1.234567 | 1.234567 | \x3f9e064b | t + \x3f9e0651 | 1.2345678 | 1.2345678 | \x3f9e0651 | t + \x03d20cfe | 1.23456735e-36 | 1.23456735e-36 | \x03d20cfe | t +(261 rows) + +-- clean up, lest opr_sanity complain +drop type xfloat4 cascade; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to function xfloat4in(cstring) +drop cascades to function xfloat4out(xfloat4) +drop cascades to cast from xfloat4 to real +drop cascades to cast from real to xfloat4 +drop cascades to cast from xfloat4 to integer +drop cascades to cast from integer to xfloat4 diff --git a/src/test/regress/expected/float4.out b/src/test/regress/expected/float4.out new file mode 100644 index 0000000..65ee82c --- /dev/null +++ b/src/test/regress/expected/float4.out @@ -0,0 +1,986 @@ +-- +-- FLOAT4 +-- +CREATE TABLE FLOAT4_TBL (f1 float4); +INSERT INTO FLOAT4_TBL(f1) VALUES (' 0.0'); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1004.30 '); +INSERT INTO FLOAT4_TBL(f1) VALUES (' -34.84 '); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e+20'); +INSERT INTO FLOAT4_TBL(f1) VALUES ('1.2345678901234e-20'); +-- test for over and under flow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); +ERROR: "10e70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); +ERROR: "-10e70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); +ERROR: "10e-70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); +ERROR: "-10e-70" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e70'::float8); +ERROR: value out of range: overflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e70'::float8); +ERROR: value out of range: overflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-70'::float8); +ERROR: value out of range: underflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-70'::float8); +ERROR: value out of range: underflow +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400'); +ERROR: "10e400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e400'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e400'); +ERROR: "-10e400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES 
('-10e400'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400'); +ERROR: "10e-400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('10e-400'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400'); +ERROR: "-10e-400" is out of range for type real +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('-10e-400'); + ^ +-- bad input +INSERT INTO FLOAT4_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type real: "" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (''); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type real: " " +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' '); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); +ERROR: invalid input syntax for type real: "xyz" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('xyz'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); +ERROR: invalid input syntax for type real: "5.0.0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5.0.0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); +ERROR: invalid input syntax for type real: "5 . 0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5 . 0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0'); +ERROR: invalid input syntax for type real: "5. 0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('5. 0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); +ERROR: invalid input syntax for type real: " - 3.0" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES (' - 3.0'); + ^ +INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for type real: "123 5" +LINE 1: INSERT INTO FLOAT4_TBL(f1) VALUES ('123 5'); + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34.5', 'float4'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('xyz', 'float4'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('1e400', 'float4'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1e400', 'float4'); + message | detail | hint | sql_error_code +---------------------------------------+--------+------+---------------- + "1e400" is out of range for type real | | | 22003 +(1 row) + +-- special inputs +SELECT 'NaN'::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'nan'::float4; + float4 +-------- + NaN +(1 row) + +SELECT ' NAN '::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'infinity'::float4; + float4 +---------- + Infinity +(1 row) + +SELECT ' -INFINiTY '::float4; + float4 +----------- + -Infinity +(1 row) + +-- bad special inputs +SELECT 'N A N'::float4; +ERROR: invalid input syntax for type real: "N A N" +LINE 1: SELECT 'N A N'::float4; + ^ +SELECT 'NaN x'::float4; +ERROR: invalid input syntax for type real: "NaN x" +LINE 1: SELECT 'NaN x'::float4; + ^ +SELECT ' INFINITY x'::float4; +ERROR: invalid input syntax for type real: " INFINITY x" +LINE 1: SELECT ' INFINITY x'::float4; + ^ +SELECT 'Infinity'::float4 + 100.0; + ?column? +---------- + Infinity +(1 row) + +SELECT 'Infinity'::float4 / 'Infinity'::float4; + ?column? +---------- + NaN +(1 row) + +SELECT '42'::float4 / 'Infinity'::float4; + ?column? +---------- + 0 +(1 row) + +SELECT 'nan'::float4 / 'nan'::float4; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::float4 / '0'::float4; + ?column? 
+---------- + NaN +(1 row) + +SELECT 'nan'::numeric::float4; + float4 +-------- + NaN +(1 row) + +SELECT * FROM FLOAT4_TBL; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e+20 + 1.2345679e-20 +(5 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <> '1004.3'; + f1 +--------------- + 0 + -34.84 + 1.2345679e+20 + 1.2345679e-20 +(4 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 = '1004.3'; + f1 +-------- + 1004.3 +(1 row) + +SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' > f.f1; + f1 +--------------- + 0 + -34.84 + 1.2345679e-20 +(3 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 < '1004.3'; + f1 +--------------- + 0 + -34.84 + 1.2345679e-20 +(3 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE '1004.3' >= f.f1; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e-20 +(4 rows) + +SELECT f.* FROM FLOAT4_TBL f WHERE f.f1 <= '1004.3'; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e-20 +(4 rows) + +SELECT f.f1, f.f1 * '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+---------------- + 1004.3 | -10043 + 1.2345679e+20 | -1.2345678e+21 + 1.2345679e-20 | -1.2345678e-19 +(3 rows) + +SELECT f.f1, f.f1 + '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+--------------- + 1004.3 | 994.3 + 1.2345679e+20 | 1.2345679e+20 + 1.2345679e-20 | -10 +(3 rows) + +SELECT f.f1, f.f1 / '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+---------------- + 1004.3 | -100.43 + 1.2345679e+20 | -1.2345679e+19 + 1.2345679e-20 | -1.2345679e-21 +(3 rows) + +SELECT f.f1, f.f1 - '-10' AS x FROM FLOAT4_TBL f + WHERE f.f1 > '0.0'; + f1 | x +---------------+--------------- + 1004.3 | 1014.3 + 1.2345679e+20 | 1.2345679e+20 + 1.2345679e-20 | 10 +(3 rows) + +-- test divide by zero +SELECT f.f1 / '0.0' from FLOAT4_TBL f; +ERROR: division by zero +SELECT * FROM FLOAT4_TBL; + f1 +--------------- + 0 + 1004.3 + -34.84 + 1.2345679e+20 + 1.2345679e-20 +(5 rows) + +-- test the unary float4abs operator +SELECT f.f1, @f.f1 AS abs_f1 FROM FLOAT4_TBL f; + f1 | abs_f1 +---------------+--------------- + 0 | 0 + 1004.3 | 1004.3 + -34.84 | 34.84 + 1.2345679e+20 | 1.2345679e+20 + 1.2345679e-20 | 1.2345679e-20 +(5 rows) + +UPDATE FLOAT4_TBL + SET f1 = FLOAT4_TBL.f1 * '-1' + WHERE FLOAT4_TBL.f1 > '0.0'; +SELECT * FROM FLOAT4_TBL; + f1 +---------------- + 0 + -34.84 + -1004.3 + -1.2345679e+20 + -1.2345679e-20 +(5 rows) + +-- test edge-case coercions to integer +SELECT '32767.4'::float4::int2; + int2 +------- + 32767 +(1 row) + +SELECT '32767.6'::float4::int2; +ERROR: smallint out of range +SELECT '-32768.4'::float4::int2; + int2 +-------- + -32768 +(1 row) + +SELECT '-32768.6'::float4::int2; +ERROR: smallint out of range +SELECT '2147483520'::float4::int4; + int4 +------------ + 2147483520 +(1 row) + +SELECT '2147483647'::float4::int4; +ERROR: integer out of range +SELECT '-2147483648.5'::float4::int4; + int4 +------------- + -2147483648 +(1 row) + +SELECT '-2147483900'::float4::int4; +ERROR: integer out of range +SELECT '9223369837831520256'::float4::int8; + int8 +--------------------- + 9223369837831520256 +(1 row) + +SELECT '9223372036854775807'::float4::int8; +ERROR: bigint out of range +SELECT '-9223372036854775808.5'::float4::int8; + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT '-9223380000000000000'::float4::int8; +ERROR: bigint out of range +-- Test for correct input rounding in edge cases. +-- These lists are from Paxson 1991, excluding subnormals and +-- inputs of over 9 sig. digits. 
+SELECT float4send('5e-20'::float4); + float4send +------------ + \x1f6c1e4a +(1 row) + +SELECT float4send('67e14'::float4); + float4send +------------ + \x59be6cea +(1 row) + +SELECT float4send('985e15'::float4); + float4send +------------ + \x5d5ab6c4 +(1 row) + +SELECT float4send('55895e-16'::float4); + float4send +------------ + \x2cc4a9bd +(1 row) + +SELECT float4send('7038531e-32'::float4); + float4send +------------ + \x15ae43fd +(1 row) + +SELECT float4send('702990899e-20'::float4); + float4send +------------ + \x2cf757ca +(1 row) + +SELECT float4send('3e-23'::float4); + float4send +------------ + \x1a111234 +(1 row) + +SELECT float4send('57e18'::float4); + float4send +------------ + \x6045c22c +(1 row) + +SELECT float4send('789e-35'::float4); + float4send +------------ + \x0a23de70 +(1 row) + +SELECT float4send('2539e-18'::float4); + float4send +------------ + \x2736f449 +(1 row) + +SELECT float4send('76173e28'::float4); + float4send +------------ + \x7616398a +(1 row) + +SELECT float4send('887745e-11'::float4); + float4send +------------ + \x3714f05c +(1 row) + +SELECT float4send('5382571e-37'::float4); + float4send +------------ + \x0d2eaca7 +(1 row) + +SELECT float4send('82381273e-35'::float4); + float4send +------------ + \x128289d1 +(1 row) + +SELECT float4send('750486563e-38'::float4); + float4send +------------ + \x0f18377e +(1 row) + +-- Test that the smallest possible normalized input value inputs +-- correctly, either in 9-significant-digit or shortest-decimal +-- format. +-- +-- exact val is 1.1754943508... +-- shortest val is 1.1754944000 +-- midpoint to next val is 1.1754944208... +SELECT float4send('1.17549435e-38'::float4); + float4send +------------ + \x00800000 +(1 row) + +SELECT float4send('1.1754944e-38'::float4); + float4send +------------ + \x00800000 +(1 row) + +-- test output (and round-trip safety) of various values. +-- To ensure we're testing what we think we're testing, start with +-- float values specified by bit patterns (as a useful side effect, +-- this means we'll fail on non-IEEE platforms). +create type xfloat4; +create function xfloat4in(cstring) returns xfloat4 immutable strict + language internal as 'int4in'; +NOTICE: return type xfloat4 is only a shell +create function xfloat4out(xfloat4) returns cstring immutable strict + language internal as 'int4out'; +NOTICE: argument type xfloat4 is only a shell +create type xfloat4 (input = xfloat4in, output = xfloat4out, like = float4); +create cast (xfloat4 as float4) without function; +create cast (float4 as xfloat4) without function; +create cast (xfloat4 as integer) without function; +create cast (integer as xfloat4) without function; +-- float4: seeeeeee emmmmmmm mmmmmmmm mmmmmmmm +-- we don't care to assume the platform's strtod() handles subnormals +-- correctly; those are "use at your own risk". However we do test +-- subnormal outputs, since those are under our control. 
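A minimal Python sketch of the same round-trip property these bit-pattern tests verify, assuming an IEEE-754 platform: the `struct`-based reinterpretation below stands in for the `bits::integer::xfloat4::float4` cast chain, and Python's `repr` (shortest decimal for double precision) only approximates float4's shortest-decimal output rules, so this is an illustration rather than the server-side algorithm.

```python
import struct

def float4_roundtrip_ok(bits: int) -> bool:
    """Check that a 32-bit IEEE-754 pattern survives
    float4 -> decimal text -> float4, as the xfloat4 tests do."""
    # Reinterpret the raw bit pattern as a single-precision float,
    # analogous to the bits::integer::xfloat4::float4 cast chain.
    flt = struct.unpack('>f', struct.pack('>I', bits))[0]
    # repr() emits the shortest decimal that round-trips for float64;
    # PostgreSQL emits the shortest decimal for float32, so the text
    # here may be longer than float4's output, but the round trip is
    # the same idea: format, re-parse, round back to single precision.
    text = repr(flt)
    back_bits = struct.unpack('>I', struct.pack('>f', float(text)))[0]
    return back_bits == bits

# e.g. the "stress value" x'15ae43fd' (7038531e-32) from the test data
print(float4_roundtrip_ok(0x15AE43FD))   # expected: True
```

In the regression test itself the comparison is done entirely in SQL, by checking `float4send(flt::text::float4) = float4send(flt)` for each bit pattern in the `testdata` CTE below.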
+with testdata(bits) as (values + -- small subnormals + (x'00000001'), + (x'00000002'), (x'00000003'), + (x'00000010'), (x'00000011'), (x'00000100'), (x'00000101'), + (x'00004000'), (x'00004001'), (x'00080000'), (x'00080001'), + -- stress values + (x'0053c4f4'), -- 7693e-42 + (x'006c85c4'), -- 996622e-44 + (x'0041ca76'), -- 60419369e-46 + (x'004b7678'), -- 6930161142e-48 + -- taken from upstream testsuite + (x'00000007'), + (x'00424fe2'), + -- borderline between subnormal and normal + (x'007ffff0'), (x'007ffff1'), (x'007ffffe'), (x'007fffff')) +select float4send(flt) as ibits, + flt + from (select bits::integer::xfloat4::float4 as flt + from testdata + offset 0) s; + ibits | flt +------------+--------------- + \x00000001 | 1e-45 + \x00000002 | 3e-45 + \x00000003 | 4e-45 + \x00000010 | 2.2e-44 + \x00000011 | 2.4e-44 + \x00000100 | 3.59e-43 + \x00000101 | 3.6e-43 + \x00004000 | 2.2959e-41 + \x00004001 | 2.296e-41 + \x00080000 | 7.34684e-40 + \x00080001 | 7.34685e-40 + \x0053c4f4 | 7.693e-39 + \x006c85c4 | 9.96622e-39 + \x0041ca76 | 6.041937e-39 + \x004b7678 | 6.930161e-39 + \x00000007 | 1e-44 + \x00424fe2 | 6.0898e-39 + \x007ffff0 | 1.1754921e-38 + \x007ffff1 | 1.1754922e-38 + \x007ffffe | 1.1754941e-38 + \x007fffff | 1.1754942e-38 +(21 rows) + +with testdata(bits) as (values + (x'00000000'), + -- smallest normal values + (x'00800000'), (x'00800001'), (x'00800004'), (x'00800005'), + (x'00800006'), + -- small normal values chosen for short vs. long output + (x'008002f1'), (x'008002f2'), (x'008002f3'), + (x'00800e17'), (x'00800e18'), (x'00800e19'), + -- assorted values (random mantissae) + (x'01000001'), (x'01102843'), (x'01a52c98'), + (x'0219c229'), (x'02e4464d'), (x'037343c1'), (x'03a91b36'), + (x'047ada65'), (x'0496fe87'), (x'0550844f'), (x'05999da3'), + (x'060ea5e2'), (x'06e63c45'), (x'07f1e548'), (x'0fc5282b'), + (x'1f850283'), (x'2874a9d6'), + -- values around 5e-08 + (x'3356bf94'), (x'3356bf95'), (x'3356bf96'), + -- around 1e-07 + (x'33d6bf94'), (x'33d6bf95'), (x'33d6bf96'), + -- around 3e-07 .. 
1e-04 + (x'34a10faf'), (x'34a10fb0'), (x'34a10fb1'), + (x'350637bc'), (x'350637bd'), (x'350637be'), + (x'35719786'), (x'35719787'), (x'35719788'), + (x'358637bc'), (x'358637bd'), (x'358637be'), + (x'36a7c5ab'), (x'36a7c5ac'), (x'36a7c5ad'), + (x'3727c5ab'), (x'3727c5ac'), (x'3727c5ad'), + -- format crossover at 1e-04 + (x'38d1b714'), (x'38d1b715'), (x'38d1b716'), + (x'38d1b717'), (x'38d1b718'), (x'38d1b719'), + (x'38d1b71a'), (x'38d1b71b'), (x'38d1b71c'), + (x'38d1b71d'), + -- + (x'38dffffe'), (x'38dfffff'), (x'38e00000'), + (x'38efffff'), (x'38f00000'), (x'38f00001'), + (x'3a83126e'), (x'3a83126f'), (x'3a831270'), + (x'3c23d709'), (x'3c23d70a'), (x'3c23d70b'), + (x'3dcccccc'), (x'3dcccccd'), (x'3dccccce'), + -- chosen to need 9 digits for 3dcccd70 + (x'3dcccd6f'), (x'3dcccd70'), (x'3dcccd71'), + -- + (x'3effffff'), (x'3f000000'), (x'3f000001'), + (x'3f333332'), (x'3f333333'), (x'3f333334'), + -- approach 1.0 with increasing numbers of 9s + (x'3f666665'), (x'3f666666'), (x'3f666667'), + (x'3f7d70a3'), (x'3f7d70a4'), (x'3f7d70a5'), + (x'3f7fbe76'), (x'3f7fbe77'), (x'3f7fbe78'), + (x'3f7ff971'), (x'3f7ff972'), (x'3f7ff973'), + (x'3f7fff57'), (x'3f7fff58'), (x'3f7fff59'), + (x'3f7fffee'), (x'3f7fffef'), + -- values very close to 1 + (x'3f7ffff0'), (x'3f7ffff1'), (x'3f7ffff2'), + (x'3f7ffff3'), (x'3f7ffff4'), (x'3f7ffff5'), + (x'3f7ffff6'), (x'3f7ffff7'), (x'3f7ffff8'), + (x'3f7ffff9'), (x'3f7ffffa'), (x'3f7ffffb'), + (x'3f7ffffc'), (x'3f7ffffd'), (x'3f7ffffe'), + (x'3f7fffff'), + (x'3f800000'), + (x'3f800001'), (x'3f800002'), (x'3f800003'), + (x'3f800004'), (x'3f800005'), (x'3f800006'), + (x'3f800007'), (x'3f800008'), (x'3f800009'), + -- values 1 to 1.1 + (x'3f80000f'), (x'3f800010'), (x'3f800011'), + (x'3f800012'), (x'3f800013'), (x'3f800014'), + (x'3f800017'), (x'3f800018'), (x'3f800019'), + (x'3f80001a'), (x'3f80001b'), (x'3f80001c'), + (x'3f800029'), (x'3f80002a'), (x'3f80002b'), + (x'3f800053'), (x'3f800054'), (x'3f800055'), + (x'3f800346'), (x'3f800347'), (x'3f800348'), + (x'3f8020c4'), (x'3f8020c5'), (x'3f8020c6'), + (x'3f8147ad'), (x'3f8147ae'), (x'3f8147af'), + (x'3f8ccccc'), (x'3f8ccccd'), (x'3f8cccce'), + -- + (x'3fc90fdb'), -- pi/2 + (x'402df854'), -- e + (x'40490fdb'), -- pi + -- + (x'409fffff'), (x'40a00000'), (x'40a00001'), + (x'40afffff'), (x'40b00000'), (x'40b00001'), + (x'411fffff'), (x'41200000'), (x'41200001'), + (x'42c7ffff'), (x'42c80000'), (x'42c80001'), + (x'4479ffff'), (x'447a0000'), (x'447a0001'), + (x'461c3fff'), (x'461c4000'), (x'461c4001'), + (x'47c34fff'), (x'47c35000'), (x'47c35001'), + (x'497423ff'), (x'49742400'), (x'49742401'), + (x'4b18967f'), (x'4b189680'), (x'4b189681'), + (x'4cbebc1f'), (x'4cbebc20'), (x'4cbebc21'), + (x'4e6e6b27'), (x'4e6e6b28'), (x'4e6e6b29'), + (x'501502f8'), (x'501502f9'), (x'501502fa'), + (x'51ba43b6'), (x'51ba43b7'), (x'51ba43b8'), + -- stress values + (x'1f6c1e4a'), -- 5e-20 + (x'59be6cea'), -- 67e14 + (x'5d5ab6c4'), -- 985e15 + (x'2cc4a9bd'), -- 55895e-16 + (x'15ae43fd'), -- 7038531e-32 + (x'2cf757ca'), -- 702990899e-20 + (x'665ba998'), -- 25933168707e13 + (x'743c3324'), -- 596428896559e20 + -- exercise fixed-point memmoves + (x'47f1205a'), + (x'4640e6ae'), + (x'449a5225'), + (x'42f6e9d5'), + (x'414587dd'), + (x'3f9e064b'), + -- these cases come from the upstream's testsuite + -- BoundaryRoundEven + (x'4c000004'), + (x'50061c46'), + (x'510006a8'), + -- ExactValueRoundEven + (x'48951f84'), + (x'45fd1840'), + -- LotsOfTrailingZeros + (x'39800000'), + (x'3b200000'), + (x'3b900000'), + (x'3bd00000'), + -- Regression + (x'63800000'), + 
(x'4b000000'), + (x'4b800000'), + (x'4c000001'), + (x'4c800b0d'), + (x'00d24584'), + (x'00d90b88'), + (x'45803f34'), + (x'4f9f24f7'), + (x'3a8722c3'), + (x'5c800041'), + (x'15ae43fd'), + (x'5d4cccfb'), + (x'4c800001'), + (x'57800ed8'), + (x'5f000000'), + (x'700000f0'), + (x'5f23e9ac'), + (x'5e9502f9'), + (x'5e8012b1'), + (x'3c000028'), + (x'60cde861'), + (x'03aa2a50'), + (x'43480000'), + (x'4c000000'), + -- LooksLikePow5 + (x'5D1502F9'), + (x'5D9502F9'), + (x'5E1502F9'), + -- OutputLength + (x'3f99999a'), + (x'3f9d70a4'), + (x'3f9df3b6'), + (x'3f9e0419'), + (x'3f9e0610'), + (x'3f9e064b'), + (x'3f9e0651'), + (x'03d20cfe') +) +select float4send(flt) as ibits, + flt, + flt::text::float4 as r_flt, + float4send(flt::text::float4) as obits, + float4send(flt::text::float4) = float4send(flt) as correct + from (select bits::integer::xfloat4::float4 as flt + from testdata + offset 0) s; + ibits | flt | r_flt | obits | correct +------------+----------------+----------------+------------+--------- + \x00000000 | 0 | 0 | \x00000000 | t + \x00800000 | 1.1754944e-38 | 1.1754944e-38 | \x00800000 | t + \x00800001 | 1.1754945e-38 | 1.1754945e-38 | \x00800001 | t + \x00800004 | 1.1754949e-38 | 1.1754949e-38 | \x00800004 | t + \x00800005 | 1.175495e-38 | 1.175495e-38 | \x00800005 | t + \x00800006 | 1.1754952e-38 | 1.1754952e-38 | \x00800006 | t + \x008002f1 | 1.1755999e-38 | 1.1755999e-38 | \x008002f1 | t + \x008002f2 | 1.1756e-38 | 1.1756e-38 | \x008002f2 | t + \x008002f3 | 1.1756001e-38 | 1.1756001e-38 | \x008002f3 | t + \x00800e17 | 1.1759998e-38 | 1.1759998e-38 | \x00800e17 | t + \x00800e18 | 1.176e-38 | 1.176e-38 | \x00800e18 | t + \x00800e19 | 1.1760001e-38 | 1.1760001e-38 | \x00800e19 | t + \x01000001 | 2.350989e-38 | 2.350989e-38 | \x01000001 | t + \x01102843 | 2.647751e-38 | 2.647751e-38 | \x01102843 | t + \x01a52c98 | 6.0675416e-38 | 6.0675416e-38 | \x01a52c98 | t + \x0219c229 | 1.1296386e-37 | 1.1296386e-37 | \x0219c229 | t + \x02e4464d | 3.354194e-37 | 3.354194e-37 | \x02e4464d | t + \x037343c1 | 7.148906e-37 | 7.148906e-37 | \x037343c1 | t + \x03a91b36 | 9.939175e-37 | 9.939175e-37 | \x03a91b36 | t + \x047ada65 | 2.948764e-36 | 2.948764e-36 | \x047ada65 | t + \x0496fe87 | 3.5498577e-36 | 3.5498577e-36 | \x0496fe87 | t + \x0550844f | 9.804414e-36 | 9.804414e-36 | \x0550844f | t + \x05999da3 | 1.4445957e-35 | 1.4445957e-35 | \x05999da3 | t + \x060ea5e2 | 2.6829103e-35 | 2.6829103e-35 | \x060ea5e2 | t + \x06e63c45 | 8.660494e-35 | 8.660494e-35 | \x06e63c45 | t + \x07f1e548 | 3.639641e-34 | 3.639641e-34 | \x07f1e548 | t + \x0fc5282b | 1.9441172e-29 | 1.9441172e-29 | \x0fc5282b | t + \x1f850283 | 5.6331846e-20 | 5.6331846e-20 | \x1f850283 | t + \x2874a9d6 | 1.3581548e-14 | 1.3581548e-14 | \x2874a9d6 | t + \x3356bf94 | 4.9999997e-08 | 4.9999997e-08 | \x3356bf94 | t + \x3356bf95 | 5e-08 | 5e-08 | \x3356bf95 | t + \x3356bf96 | 5.0000004e-08 | 5.0000004e-08 | \x3356bf96 | t + \x33d6bf94 | 9.9999994e-08 | 9.9999994e-08 | \x33d6bf94 | t + \x33d6bf95 | 1e-07 | 1e-07 | \x33d6bf95 | t + \x33d6bf96 | 1.0000001e-07 | 1.0000001e-07 | \x33d6bf96 | t + \x34a10faf | 2.9999998e-07 | 2.9999998e-07 | \x34a10faf | t + \x34a10fb0 | 3e-07 | 3e-07 | \x34a10fb0 | t + \x34a10fb1 | 3.0000004e-07 | 3.0000004e-07 | \x34a10fb1 | t + \x350637bc | 4.9999994e-07 | 4.9999994e-07 | \x350637bc | t + \x350637bd | 5e-07 | 5e-07 | \x350637bd | t + \x350637be | 5.0000006e-07 | 5.0000006e-07 | \x350637be | t + \x35719786 | 8.999999e-07 | 8.999999e-07 | \x35719786 | t + \x35719787 | 9e-07 | 9e-07 | \x35719787 | t + \x35719788 | 
9.0000003e-07 | 9.0000003e-07 | \x35719788 | t + \x358637bc | 9.999999e-07 | 9.999999e-07 | \x358637bc | t + \x358637bd | 1e-06 | 1e-06 | \x358637bd | t + \x358637be | 1.0000001e-06 | 1.0000001e-06 | \x358637be | t + \x36a7c5ab | 4.9999994e-06 | 4.9999994e-06 | \x36a7c5ab | t + \x36a7c5ac | 5e-06 | 5e-06 | \x36a7c5ac | t + \x36a7c5ad | 5.0000003e-06 | 5.0000003e-06 | \x36a7c5ad | t + \x3727c5ab | 9.999999e-06 | 9.999999e-06 | \x3727c5ab | t + \x3727c5ac | 1e-05 | 1e-05 | \x3727c5ac | t + \x3727c5ad | 1.0000001e-05 | 1.0000001e-05 | \x3727c5ad | t + \x38d1b714 | 9.9999976e-05 | 9.9999976e-05 | \x38d1b714 | t + \x38d1b715 | 9.999998e-05 | 9.999998e-05 | \x38d1b715 | t + \x38d1b716 | 9.999999e-05 | 9.999999e-05 | \x38d1b716 | t + \x38d1b717 | 0.0001 | 0.0001 | \x38d1b717 | t + \x38d1b718 | 0.000100000005 | 0.000100000005 | \x38d1b718 | t + \x38d1b719 | 0.00010000001 | 0.00010000001 | \x38d1b719 | t + \x38d1b71a | 0.00010000002 | 0.00010000002 | \x38d1b71a | t + \x38d1b71b | 0.00010000003 | 0.00010000003 | \x38d1b71b | t + \x38d1b71c | 0.000100000034 | 0.000100000034 | \x38d1b71c | t + \x38d1b71d | 0.00010000004 | 0.00010000004 | \x38d1b71d | t + \x38dffffe | 0.00010681151 | 0.00010681151 | \x38dffffe | t + \x38dfffff | 0.000106811516 | 0.000106811516 | \x38dfffff | t + \x38e00000 | 0.00010681152 | 0.00010681152 | \x38e00000 | t + \x38efffff | 0.00011444091 | 0.00011444091 | \x38efffff | t + \x38f00000 | 0.00011444092 | 0.00011444092 | \x38f00000 | t + \x38f00001 | 0.000114440925 | 0.000114440925 | \x38f00001 | t + \x3a83126e | 0.0009999999 | 0.0009999999 | \x3a83126e | t + \x3a83126f | 0.001 | 0.001 | \x3a83126f | t + \x3a831270 | 0.0010000002 | 0.0010000002 | \x3a831270 | t + \x3c23d709 | 0.009999999 | 0.009999999 | \x3c23d709 | t + \x3c23d70a | 0.01 | 0.01 | \x3c23d70a | t + \x3c23d70b | 0.010000001 | 0.010000001 | \x3c23d70b | t + \x3dcccccc | 0.099999994 | 0.099999994 | \x3dcccccc | t + \x3dcccccd | 0.1 | 0.1 | \x3dcccccd | t + \x3dccccce | 0.10000001 | 0.10000001 | \x3dccccce | t + \x3dcccd6f | 0.10000121 | 0.10000121 | \x3dcccd6f | t + \x3dcccd70 | 0.100001216 | 0.100001216 | \x3dcccd70 | t + \x3dcccd71 | 0.10000122 | 0.10000122 | \x3dcccd71 | t + \x3effffff | 0.49999997 | 0.49999997 | \x3effffff | t + \x3f000000 | 0.5 | 0.5 | \x3f000000 | t + \x3f000001 | 0.50000006 | 0.50000006 | \x3f000001 | t + \x3f333332 | 0.6999999 | 0.6999999 | \x3f333332 | t + \x3f333333 | 0.7 | 0.7 | \x3f333333 | t + \x3f333334 | 0.70000005 | 0.70000005 | \x3f333334 | t + \x3f666665 | 0.8999999 | 0.8999999 | \x3f666665 | t + \x3f666666 | 0.9 | 0.9 | \x3f666666 | t + \x3f666667 | 0.90000004 | 0.90000004 | \x3f666667 | t + \x3f7d70a3 | 0.98999995 | 0.98999995 | \x3f7d70a3 | t + \x3f7d70a4 | 0.99 | 0.99 | \x3f7d70a4 | t + \x3f7d70a5 | 0.99000007 | 0.99000007 | \x3f7d70a5 | t + \x3f7fbe76 | 0.99899995 | 0.99899995 | \x3f7fbe76 | t + \x3f7fbe77 | 0.999 | 0.999 | \x3f7fbe77 | t + \x3f7fbe78 | 0.9990001 | 0.9990001 | \x3f7fbe78 | t + \x3f7ff971 | 0.9998999 | 0.9998999 | \x3f7ff971 | t + \x3f7ff972 | 0.9999 | 0.9999 | \x3f7ff972 | t + \x3f7ff973 | 0.99990004 | 0.99990004 | \x3f7ff973 | t + \x3f7fff57 | 0.9999899 | 0.9999899 | \x3f7fff57 | t + \x3f7fff58 | 0.99999 | 0.99999 | \x3f7fff58 | t + \x3f7fff59 | 0.99999005 | 0.99999005 | \x3f7fff59 | t + \x3f7fffee | 0.9999989 | 0.9999989 | \x3f7fffee | t + \x3f7fffef | 0.999999 | 0.999999 | \x3f7fffef | t + \x3f7ffff0 | 0.99999905 | 0.99999905 | \x3f7ffff0 | t + \x3f7ffff1 | 0.9999991 | 0.9999991 | \x3f7ffff1 | t + \x3f7ffff2 | 0.99999917 | 0.99999917 | \x3f7ffff2 | t + 
\x3f7ffff3 | 0.9999992 | 0.9999992 | \x3f7ffff3 | t + \x3f7ffff4 | 0.9999993 | 0.9999993 | \x3f7ffff4 | t + \x3f7ffff5 | 0.99999934 | 0.99999934 | \x3f7ffff5 | t + \x3f7ffff6 | 0.9999994 | 0.9999994 | \x3f7ffff6 | t + \x3f7ffff7 | 0.99999946 | 0.99999946 | \x3f7ffff7 | t + \x3f7ffff8 | 0.9999995 | 0.9999995 | \x3f7ffff8 | t + \x3f7ffff9 | 0.9999996 | 0.9999996 | \x3f7ffff9 | t + \x3f7ffffa | 0.99999964 | 0.99999964 | \x3f7ffffa | t + \x3f7ffffb | 0.9999997 | 0.9999997 | \x3f7ffffb | t + \x3f7ffffc | 0.99999976 | 0.99999976 | \x3f7ffffc | t + \x3f7ffffd | 0.9999998 | 0.9999998 | \x3f7ffffd | t + \x3f7ffffe | 0.9999999 | 0.9999999 | \x3f7ffffe | t + \x3f7fffff | 0.99999994 | 0.99999994 | \x3f7fffff | t + \x3f800000 | 1 | 1 | \x3f800000 | t + \x3f800001 | 1.0000001 | 1.0000001 | \x3f800001 | t + \x3f800002 | 1.0000002 | 1.0000002 | \x3f800002 | t + \x3f800003 | 1.0000004 | 1.0000004 | \x3f800003 | t + \x3f800004 | 1.0000005 | 1.0000005 | \x3f800004 | t + \x3f800005 | 1.0000006 | 1.0000006 | \x3f800005 | t + \x3f800006 | 1.0000007 | 1.0000007 | \x3f800006 | t + \x3f800007 | 1.0000008 | 1.0000008 | \x3f800007 | t + \x3f800008 | 1.000001 | 1.000001 | \x3f800008 | t + \x3f800009 | 1.0000011 | 1.0000011 | \x3f800009 | t + \x3f80000f | 1.0000018 | 1.0000018 | \x3f80000f | t + \x3f800010 | 1.0000019 | 1.0000019 | \x3f800010 | t + \x3f800011 | 1.000002 | 1.000002 | \x3f800011 | t + \x3f800012 | 1.0000021 | 1.0000021 | \x3f800012 | t + \x3f800013 | 1.0000023 | 1.0000023 | \x3f800013 | t + \x3f800014 | 1.0000024 | 1.0000024 | \x3f800014 | t + \x3f800017 | 1.0000027 | 1.0000027 | \x3f800017 | t + \x3f800018 | 1.0000029 | 1.0000029 | \x3f800018 | t + \x3f800019 | 1.000003 | 1.000003 | \x3f800019 | t + \x3f80001a | 1.0000031 | 1.0000031 | \x3f80001a | t + \x3f80001b | 1.0000032 | 1.0000032 | \x3f80001b | t + \x3f80001c | 1.0000033 | 1.0000033 | \x3f80001c | t + \x3f800029 | 1.0000049 | 1.0000049 | \x3f800029 | t + \x3f80002a | 1.000005 | 1.000005 | \x3f80002a | t + \x3f80002b | 1.0000051 | 1.0000051 | \x3f80002b | t + \x3f800053 | 1.0000099 | 1.0000099 | \x3f800053 | t + \x3f800054 | 1.00001 | 1.00001 | \x3f800054 | t + \x3f800055 | 1.0000101 | 1.0000101 | \x3f800055 | t + \x3f800346 | 1.0000999 | 1.0000999 | \x3f800346 | t + \x3f800347 | 1.0001 | 1.0001 | \x3f800347 | t + \x3f800348 | 1.0001001 | 1.0001001 | \x3f800348 | t + \x3f8020c4 | 1.0009999 | 1.0009999 | \x3f8020c4 | t + \x3f8020c5 | 1.001 | 1.001 | \x3f8020c5 | t + \x3f8020c6 | 1.0010002 | 1.0010002 | \x3f8020c6 | t + \x3f8147ad | 1.0099999 | 1.0099999 | \x3f8147ad | t + \x3f8147ae | 1.01 | 1.01 | \x3f8147ae | t + \x3f8147af | 1.0100001 | 1.0100001 | \x3f8147af | t + \x3f8ccccc | 1.0999999 | 1.0999999 | \x3f8ccccc | t + \x3f8ccccd | 1.1 | 1.1 | \x3f8ccccd | t + \x3f8cccce | 1.1000001 | 1.1000001 | \x3f8cccce | t + \x3fc90fdb | 1.5707964 | 1.5707964 | \x3fc90fdb | t + \x402df854 | 2.7182817 | 2.7182817 | \x402df854 | t + \x40490fdb | 3.1415927 | 3.1415927 | \x40490fdb | t + \x409fffff | 4.9999995 | 4.9999995 | \x409fffff | t + \x40a00000 | 5 | 5 | \x40a00000 | t + \x40a00001 | 5.0000005 | 5.0000005 | \x40a00001 | t + \x40afffff | 5.4999995 | 5.4999995 | \x40afffff | t + \x40b00000 | 5.5 | 5.5 | \x40b00000 | t + \x40b00001 | 5.5000005 | 5.5000005 | \x40b00001 | t + \x411fffff | 9.999999 | 9.999999 | \x411fffff | t + \x41200000 | 10 | 10 | \x41200000 | t + \x41200001 | 10.000001 | 10.000001 | \x41200001 | t + \x42c7ffff | 99.99999 | 99.99999 | \x42c7ffff | t + \x42c80000 | 100 | 100 | \x42c80000 | t + \x42c80001 | 100.00001 | 100.00001 | \x42c80001 
| t + \x4479ffff | 999.99994 | 999.99994 | \x4479ffff | t + \x447a0000 | 1000 | 1000 | \x447a0000 | t + \x447a0001 | 1000.00006 | 1000.00006 | \x447a0001 | t + \x461c3fff | 9999.999 | 9999.999 | \x461c3fff | t + \x461c4000 | 10000 | 10000 | \x461c4000 | t + \x461c4001 | 10000.001 | 10000.001 | \x461c4001 | t + \x47c34fff | 99999.99 | 99999.99 | \x47c34fff | t + \x47c35000 | 100000 | 100000 | \x47c35000 | t + \x47c35001 | 100000.01 | 100000.01 | \x47c35001 | t + \x497423ff | 999999.94 | 999999.94 | \x497423ff | t + \x49742400 | 1e+06 | 1e+06 | \x49742400 | t + \x49742401 | 1.00000006e+06 | 1.00000006e+06 | \x49742401 | t + \x4b18967f | 9.999999e+06 | 9.999999e+06 | \x4b18967f | t + \x4b189680 | 1e+07 | 1e+07 | \x4b189680 | t + \x4b189681 | 1.0000001e+07 | 1.0000001e+07 | \x4b189681 | t + \x4cbebc1f | 9.999999e+07 | 9.999999e+07 | \x4cbebc1f | t + \x4cbebc20 | 1e+08 | 1e+08 | \x4cbebc20 | t + \x4cbebc21 | 1.0000001e+08 | 1.0000001e+08 | \x4cbebc21 | t + \x4e6e6b27 | 9.9999994e+08 | 9.9999994e+08 | \x4e6e6b27 | t + \x4e6e6b28 | 1e+09 | 1e+09 | \x4e6e6b28 | t + \x4e6e6b29 | 1.00000006e+09 | 1.00000006e+09 | \x4e6e6b29 | t + \x501502f8 | 9.999999e+09 | 9.999999e+09 | \x501502f8 | t + \x501502f9 | 1e+10 | 1e+10 | \x501502f9 | t + \x501502fa | 1.0000001e+10 | 1.0000001e+10 | \x501502fa | t + \x51ba43b6 | 9.999999e+10 | 9.999999e+10 | \x51ba43b6 | t + \x51ba43b7 | 1e+11 | 1e+11 | \x51ba43b7 | t + \x51ba43b8 | 1.0000001e+11 | 1.0000001e+11 | \x51ba43b8 | t + \x1f6c1e4a | 5e-20 | 5e-20 | \x1f6c1e4a | t + \x59be6cea | 6.7e+15 | 6.7e+15 | \x59be6cea | t + \x5d5ab6c4 | 9.85e+17 | 9.85e+17 | \x5d5ab6c4 | t + \x2cc4a9bd | 5.5895e-12 | 5.5895e-12 | \x2cc4a9bd | t + \x15ae43fd | 7.038531e-26 | 7.038531e-26 | \x15ae43fd | t + \x2cf757ca | 7.0299088e-12 | 7.0299088e-12 | \x2cf757ca | t + \x665ba998 | 2.5933168e+23 | 2.5933168e+23 | \x665ba998 | t + \x743c3324 | 5.9642887e+31 | 5.9642887e+31 | \x743c3324 | t + \x47f1205a | 123456.7 | 123456.7 | \x47f1205a | t + \x4640e6ae | 12345.67 | 12345.67 | \x4640e6ae | t + \x449a5225 | 1234.567 | 1234.567 | \x449a5225 | t + \x42f6e9d5 | 123.4567 | 123.4567 | \x42f6e9d5 | t + \x414587dd | 12.34567 | 12.34567 | \x414587dd | t + \x3f9e064b | 1.234567 | 1.234567 | \x3f9e064b | t + \x4c000004 | 3.3554448e+07 | 3.3554448e+07 | \x4c000004 | t + \x50061c46 | 8.999999e+09 | 8.999999e+09 | \x50061c46 | t + \x510006a8 | 3.4366718e+10 | 3.4366718e+10 | \x510006a8 | t + \x48951f84 | 305404.12 | 305404.12 | \x48951f84 | t + \x45fd1840 | 8099.0312 | 8099.0312 | \x45fd1840 | t + \x39800000 | 0.00024414062 | 0.00024414062 | \x39800000 | t + \x3b200000 | 0.0024414062 | 0.0024414062 | \x3b200000 | t + \x3b900000 | 0.0043945312 | 0.0043945312 | \x3b900000 | t + \x3bd00000 | 0.0063476562 | 0.0063476562 | \x3bd00000 | t + \x63800000 | 4.7223665e+21 | 4.7223665e+21 | \x63800000 | t + \x4b000000 | 8.388608e+06 | 8.388608e+06 | \x4b000000 | t + \x4b800000 | 1.6777216e+07 | 1.6777216e+07 | \x4b800000 | t + \x4c000001 | 3.3554436e+07 | 3.3554436e+07 | \x4c000001 | t + \x4c800b0d | 6.7131496e+07 | 6.7131496e+07 | \x4c800b0d | t + \x00d24584 | 1.9310392e-38 | 1.9310392e-38 | \x00d24584 | t + \x00d90b88 | 1.993244e-38 | 1.993244e-38 | \x00d90b88 | t + \x45803f34 | 4103.9004 | 4103.9004 | \x45803f34 | t + \x4f9f24f7 | 5.3399997e+09 | 5.3399997e+09 | \x4f9f24f7 | t + \x3a8722c3 | 0.0010310042 | 0.0010310042 | \x3a8722c3 | t + \x5c800041 | 2.882326e+17 | 2.882326e+17 | \x5c800041 | t + \x15ae43fd | 7.038531e-26 | 7.038531e-26 | \x15ae43fd | t + \x5d4cccfb | 9.223404e+17 | 9.223404e+17 | \x5d4cccfb | t 
+ \x4c800001 | 6.710887e+07 | 6.710887e+07 | \x4c800001 | t + \x57800ed8 | 2.816025e+14 | 2.816025e+14 | \x57800ed8 | t + \x5f000000 | 9.223372e+18 | 9.223372e+18 | \x5f000000 | t + \x700000f0 | 1.5846086e+29 | 1.5846086e+29 | \x700000f0 | t + \x5f23e9ac | 1.1811161e+19 | 1.1811161e+19 | \x5f23e9ac | t + \x5e9502f9 | 5.368709e+18 | 5.368709e+18 | \x5e9502f9 | t + \x5e8012b1 | 4.6143166e+18 | 4.6143166e+18 | \x5e8012b1 | t + \x3c000028 | 0.007812537 | 0.007812537 | \x3c000028 | t + \x60cde861 | 1.18697725e+20 | 1.18697725e+20 | \x60cde861 | t + \x03aa2a50 | 1.00014165e-36 | 1.00014165e-36 | \x03aa2a50 | t + \x43480000 | 200 | 200 | \x43480000 | t + \x4c000000 | 3.3554432e+07 | 3.3554432e+07 | \x4c000000 | t + \x5d1502f9 | 6.7108864e+17 | 6.7108864e+17 | \x5d1502f9 | t + \x5d9502f9 | 1.3421773e+18 | 1.3421773e+18 | \x5d9502f9 | t + \x5e1502f9 | 2.6843546e+18 | 2.6843546e+18 | \x5e1502f9 | t + \x3f99999a | 1.2 | 1.2 | \x3f99999a | t + \x3f9d70a4 | 1.23 | 1.23 | \x3f9d70a4 | t + \x3f9df3b6 | 1.234 | 1.234 | \x3f9df3b6 | t + \x3f9e0419 | 1.2345 | 1.2345 | \x3f9e0419 | t + \x3f9e0610 | 1.23456 | 1.23456 | \x3f9e0610 | t + \x3f9e064b | 1.234567 | 1.234567 | \x3f9e064b | t + \x3f9e0651 | 1.2345678 | 1.2345678 | \x3f9e0651 | t + \x03d20cfe | 1.23456735e-36 | 1.23456735e-36 | \x03d20cfe | t +(261 rows) + +-- clean up, lest opr_sanity complain +drop type xfloat4 cascade; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to function xfloat4in(cstring) +drop cascades to function xfloat4out(xfloat4) +drop cascades to cast from xfloat4 to real +drop cascades to cast from real to xfloat4 +drop cascades to cast from xfloat4 to integer +drop cascades to cast from integer to xfloat4 diff --git a/src/test/regress/expected/float8.out b/src/test/regress/expected/float8.out new file mode 100644 index 0000000..344d6b7 --- /dev/null +++ b/src/test/regress/expected/float8.out @@ -0,0 +1,1444 @@ +-- +-- FLOAT8 +-- +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE FLOAT8_TBL(f1 float8); +INSERT INTO FLOAT8_TBL(f1) VALUES (' 0.0 '); +INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30 '); +INSERT INTO FLOAT8_TBL(f1) VALUES (' -34.84'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e+200'); +INSERT INTO FLOAT8_TBL(f1) VALUES ('1.2345678901234e-200'); +-- test for underflow and overflow handling +SELECT '10e400'::float8; +ERROR: "10e400" is out of range for type double precision +LINE 1: SELECT '10e400'::float8; + ^ +SELECT '-10e400'::float8; +ERROR: "-10e400" is out of range for type double precision +LINE 1: SELECT '-10e400'::float8; + ^ +SELECT '10e-400'::float8; +ERROR: "10e-400" is out of range for type double precision +LINE 1: SELECT '10e-400'::float8; + ^ +SELECT '-10e-400'::float8; +ERROR: "-10e-400" is out of range for type double precision +LINE 1: SELECT '-10e-400'::float8; + ^ +-- test smallest normalized input +SELECT float8send('2.2250738585072014E-308'::float8); + float8send +-------------------- + \x0010000000000000 +(1 row) + +-- bad input +INSERT INTO FLOAT8_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type double precision: "" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (''); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type double precision: " " +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' '); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); +ERROR: invalid input syntax for type double precision: "xyz" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('xyz'); + ^ +INSERT INTO 
FLOAT8_TBL(f1) VALUES ('5.0.0'); +ERROR: invalid input syntax for type double precision: "5.0.0" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5.0.0'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); +ERROR: invalid input syntax for type double precision: "5 . 0" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5 . 0'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0'); +ERROR: invalid input syntax for type double precision: "5. 0" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('5. 0'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); +ERROR: invalid input syntax for type double precision: " - 3" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES (' - 3'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for type double precision: "123 5" +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('123 5'); + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34.5', 'float8'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('xyz', 'float8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('1e4000', 'float8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1e4000', 'float8'); + message | detail | hint | sql_error_code +----------------------------------------------------+--------+------+---------------- + "1e4000" is out of range for type double precision | | | 22003 +(1 row) + +-- special inputs +SELECT 'NaN'::float8; + float8 +-------- + NaN +(1 row) + +SELECT 'nan'::float8; + float8 +-------- + NaN +(1 row) + +SELECT ' NAN '::float8; + float8 +-------- + NaN +(1 row) + +SELECT 'infinity'::float8; + float8 +---------- + Infinity +(1 row) + +SELECT ' -INFINiTY '::float8; + float8 +----------- + -Infinity +(1 row) + +-- bad special inputs +SELECT 'N A N'::float8; +ERROR: invalid input syntax for type double precision: "N A N" +LINE 1: SELECT 'N A N'::float8; + ^ +SELECT 'NaN x'::float8; +ERROR: invalid input syntax for type double precision: "NaN x" +LINE 1: SELECT 'NaN x'::float8; + ^ +SELECT ' INFINITY x'::float8; +ERROR: invalid input syntax for type double precision: " INFINITY x" +LINE 1: SELECT ' INFINITY x'::float8; + ^ +SELECT 'Infinity'::float8 + 100.0; + ?column? +---------- + Infinity +(1 row) + +SELECT 'Infinity'::float8 / 'Infinity'::float8; + ?column? +---------- + NaN +(1 row) + +SELECT '42'::float8 / 'Infinity'::float8; + ?column? +---------- + 0 +(1 row) + +SELECT 'nan'::float8 / 'nan'::float8; + ?column? +---------- + NaN +(1 row) + +SELECT 'nan'::float8 / '0'::float8; + ?column? 
+---------- + NaN +(1 row) + +SELECT 'nan'::numeric::float8; + float8 +-------- + NaN +(1 row) + +SELECT * FROM FLOAT8_TBL; + f1 +---------------------- + 0 + 1004.3 + -34.84 + 1.2345678901234e+200 + 1.2345678901234e-200 +(5 rows) + +SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <> '1004.3'; + f1 +---------------------- + 0 + -34.84 + 1.2345678901234e+200 + 1.2345678901234e-200 +(4 rows) + +SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 = '1004.3'; + f1 +-------- + 1004.3 +(1 row) + +SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' > f.f1; + f1 +---------------------- + 0 + -34.84 + 1.2345678901234e-200 +(3 rows) + +SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 < '1004.3'; + f1 +---------------------- + 0 + -34.84 + 1.2345678901234e-200 +(3 rows) + +SELECT f.* FROM FLOAT8_TBL f WHERE '1004.3' >= f.f1; + f1 +---------------------- + 0 + 1004.3 + -34.84 + 1.2345678901234e-200 +(4 rows) + +SELECT f.* FROM FLOAT8_TBL f WHERE f.f1 <= '1004.3'; + f1 +---------------------- + 0 + 1004.3 + -34.84 + 1.2345678901234e-200 +(4 rows) + +SELECT f.f1, f.f1 * '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0'; + f1 | x +----------------------+----------------------- + 1004.3 | -10043 + 1.2345678901234e+200 | -1.2345678901234e+201 + 1.2345678901234e-200 | -1.2345678901234e-199 +(3 rows) + +SELECT f.f1, f.f1 + '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0'; + f1 | x +----------------------+---------------------- + 1004.3 | 994.3 + 1.2345678901234e+200 | 1.2345678901234e+200 + 1.2345678901234e-200 | -10 +(3 rows) + +SELECT f.f1, f.f1 / '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0'; + f1 | x +----------------------+----------------------- + 1004.3 | -100.42999999999999 + 1.2345678901234e+200 | -1.2345678901234e+199 + 1.2345678901234e-200 | -1.2345678901234e-201 +(3 rows) + +SELECT f.f1, f.f1 - '-10' AS x + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0'; + f1 | x +----------------------+---------------------- + 1004.3 | 1014.3 + 1.2345678901234e+200 | 1.2345678901234e+200 + 1.2345678901234e-200 | 10 +(3 rows) + +SELECT f.f1 ^ '2.0' AS square_f1 + FROM FLOAT8_TBL f where f.f1 = '1004.3'; + square_f1 +-------------------- + 1008618.4899999999 +(1 row) + +-- absolute value +SELECT f.f1, @f.f1 AS abs_f1 + FROM FLOAT8_TBL f; + f1 | abs_f1 +----------------------+---------------------- + 0 | 0 + 1004.3 | 1004.3 + -34.84 | 34.84 + 1.2345678901234e+200 | 1.2345678901234e+200 + 1.2345678901234e-200 | 1.2345678901234e-200 +(5 rows) + +-- truncate +SELECT f.f1, trunc(f.f1) AS trunc_f1 + FROM FLOAT8_TBL f; + f1 | trunc_f1 +----------------------+---------------------- + 0 | 0 + 1004.3 | 1004 + -34.84 | -34 + 1.2345678901234e+200 | 1.2345678901234e+200 + 1.2345678901234e-200 | 0 +(5 rows) + +-- round +SELECT f.f1, round(f.f1) AS round_f1 + FROM FLOAT8_TBL f; + f1 | round_f1 +----------------------+---------------------- + 0 | 0 + 1004.3 | 1004 + -34.84 | -35 + 1.2345678901234e+200 | 1.2345678901234e+200 + 1.2345678901234e-200 | 0 +(5 rows) + +-- ceil / ceiling +select ceil(f1) as ceil_f1 from float8_tbl f; + ceil_f1 +---------------------- + 0 + 1005 + -34 + 1.2345678901234e+200 + 1 +(5 rows) + +select ceiling(f1) as ceiling_f1 from float8_tbl f; + ceiling_f1 +---------------------- + 0 + 1005 + -34 + 1.2345678901234e+200 + 1 +(5 rows) + +-- floor +select floor(f1) as floor_f1 from float8_tbl f; + floor_f1 +---------------------- + 0 + 1004 + -35 + 1.2345678901234e+200 + 0 +(5 rows) + +-- sign +select sign(f1) as sign_f1 from float8_tbl f; + sign_f1 +--------- + 0 + 1 + -1 + 1 + 1 +(5 rows) + +-- avoid bit-exact output here because operations 
may not be bit-exact. +SET extra_float_digits = 0; +-- square root +SELECT sqrt(float8 '64') AS eight; + eight +------- + 8 +(1 row) + +SELECT |/ float8 '64' AS eight; + eight +------- + 8 +(1 row) + +SELECT f.f1, |/f.f1 AS sqrt_f1 + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0'; + f1 | sqrt_f1 +----------------------+----------------------- + 1004.3 | 31.6906926399535 + 1.2345678901234e+200 | 1.11111110611109e+100 + 1.2345678901234e-200 | 1.11111110611109e-100 +(3 rows) + +-- power +SELECT power(float8 '144', float8 '0.5'); + power +------- + 12 +(1 row) + +SELECT power(float8 'NaN', float8 '0.5'); + power +------- + NaN +(1 row) + +SELECT power(float8 '144', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 'NaN', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 '-1', float8 'NaN'); + power +------- + NaN +(1 row) + +SELECT power(float8 '1', float8 'NaN'); + power +------- + 1 +(1 row) + +SELECT power(float8 'NaN', float8 '0'); + power +------- + 1 +(1 row) + +SELECT power(float8 'inf', float8 '0'); + power +------- + 1 +(1 row) + +SELECT power(float8 '-inf', float8 '0'); + power +------- + 1 +(1 row) + +SELECT power(float8 '0', float8 'inf'); + power +------- + 0 +(1 row) + +SELECT power(float8 '0', float8 '-inf'); +ERROR: zero raised to a negative power is undefined +SELECT power(float8 '1', float8 'inf'); + power +------- + 1 +(1 row) + +SELECT power(float8 '1', float8 '-inf'); + power +------- + 1 +(1 row) + +SELECT power(float8 '-1', float8 'inf'); + power +------- + 1 +(1 row) + +SELECT power(float8 '-1', float8 '-inf'); + power +------- + 1 +(1 row) + +SELECT power(float8 '0.1', float8 'inf'); + power +------- + 0 +(1 row) + +SELECT power(float8 '-0.1', float8 'inf'); + power +------- + 0 +(1 row) + +SELECT power(float8 '1.1', float8 'inf'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 '-1.1', float8 'inf'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 '0.1', float8 '-inf'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 '-0.1', float8 '-inf'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 '1.1', float8 '-inf'); + power +------- + 0 +(1 row) + +SELECT power(float8 '-1.1', float8 '-inf'); + power +------- + 0 +(1 row) + +SELECT power(float8 'inf', float8 '-2'); + power +------- + 0 +(1 row) + +SELECT power(float8 'inf', float8 '2'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 'inf', float8 'inf'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 'inf', float8 '-inf'); + power +------- + 0 +(1 row) + +-- Intel's icc misoptimizes the code that controls the sign of this result, +-- even with -mp1. Pending a fix for that, only test for "is it zero". +SELECT power(float8 '-inf', float8 '-2') = '0'; + ?column? 
+---------- + t +(1 row) + +SELECT power(float8 '-inf', float8 '-3'); + power +------- + -0 +(1 row) + +SELECT power(float8 '-inf', float8 '2'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 '-inf', float8 '3'); + power +----------- + -Infinity +(1 row) + +SELECT power(float8 '-inf', float8 '3.5'); +ERROR: a negative number raised to a non-integer power yields a complex result +SELECT power(float8 '-inf', float8 'inf'); + power +---------- + Infinity +(1 row) + +SELECT power(float8 '-inf', float8 '-inf'); + power +------- + 0 +(1 row) + +-- take exp of ln(f.f1) +SELECT f.f1, exp(ln(f.f1)) AS exp_ln_f1 + FROM FLOAT8_TBL f + WHERE f.f1 > '0.0'; + f1 | exp_ln_f1 +----------------------+----------------------- + 1004.3 | 1004.3 + 1.2345678901234e+200 | 1.23456789012338e+200 + 1.2345678901234e-200 | 1.23456789012339e-200 +(3 rows) + +-- check edge cases for exp +SELECT exp('inf'::float8), exp('-inf'::float8), exp('nan'::float8); + exp | exp | exp +----------+-----+----- + Infinity | 0 | NaN +(1 row) + +-- cube root +SELECT ||/ float8 '27' AS three; + three +------- + 3 +(1 row) + +SELECT f.f1, ||/f.f1 AS cbrt_f1 FROM FLOAT8_TBL f; + f1 | cbrt_f1 +----------------------+---------------------- + 0 | 0 + 1004.3 | 10.014312837827 + -34.84 | -3.26607421344208 + 1.2345678901234e+200 | 4.97933859234765e+66 + 1.2345678901234e-200 | 2.3112042409018e-67 +(5 rows) + +SELECT * FROM FLOAT8_TBL; + f1 +---------------------- + 0 + 1004.3 + -34.84 + 1.2345678901234e+200 + 1.2345678901234e-200 +(5 rows) + +UPDATE FLOAT8_TBL + SET f1 = FLOAT8_TBL.f1 * '-1' + WHERE FLOAT8_TBL.f1 > '0.0'; +SELECT f.f1 * '1e200' from FLOAT8_TBL f; +ERROR: value out of range: overflow +SELECT f.f1 ^ '1e200' from FLOAT8_TBL f; +ERROR: value out of range: overflow +SELECT 0 ^ 0 + 0 ^ 1 + 0 ^ 0.0 + 0 ^ 0.5; + ?column? +---------- + 2 +(1 row) + +SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 = '0.0' ; +ERROR: cannot take logarithm of zero +SELECT ln(f.f1) from FLOAT8_TBL f where f.f1 < '0.0' ; +ERROR: cannot take logarithm of a negative number +SELECT exp(f.f1) from FLOAT8_TBL f; +ERROR: value out of range: underflow +SELECT f.f1 / '0.0' from FLOAT8_TBL f; +ERROR: division by zero +SELECT * FROM FLOAT8_TBL; + f1 +----------------------- + 0 + -34.84 + -1004.3 + -1.2345678901234e+200 + -1.2345678901234e-200 +(5 rows) + +-- hyperbolic functions +-- we run these with extra_float_digits = 0 too, since different platforms +-- tend to produce results that vary in the last place. 
+SELECT sinh(float8 '1'); + sinh +----------------- + 1.1752011936438 +(1 row) + +SELECT cosh(float8 '1'); + cosh +------------------ + 1.54308063481524 +(1 row) + +SELECT tanh(float8 '1'); + tanh +------------------- + 0.761594155955765 +(1 row) + +SELECT asinh(float8 '1'); + asinh +------------------- + 0.881373587019543 +(1 row) + +SELECT acosh(float8 '2'); + acosh +------------------ + 1.31695789692482 +(1 row) + +SELECT atanh(float8 '0.5'); + atanh +------------------- + 0.549306144334055 +(1 row) + +-- test Inf/NaN cases for hyperbolic functions +SELECT sinh(float8 'infinity'); + sinh +---------- + Infinity +(1 row) + +SELECT sinh(float8 '-infinity'); + sinh +----------- + -Infinity +(1 row) + +SELECT sinh(float8 'nan'); + sinh +------ + NaN +(1 row) + +SELECT cosh(float8 'infinity'); + cosh +---------- + Infinity +(1 row) + +SELECT cosh(float8 '-infinity'); + cosh +---------- + Infinity +(1 row) + +SELECT cosh(float8 'nan'); + cosh +------ + NaN +(1 row) + +SELECT tanh(float8 'infinity'); + tanh +------ + 1 +(1 row) + +SELECT tanh(float8 '-infinity'); + tanh +------ + -1 +(1 row) + +SELECT tanh(float8 'nan'); + tanh +------ + NaN +(1 row) + +SELECT asinh(float8 'infinity'); + asinh +---------- + Infinity +(1 row) + +SELECT asinh(float8 '-infinity'); + asinh +----------- + -Infinity +(1 row) + +SELECT asinh(float8 'nan'); + asinh +------- + NaN +(1 row) + +-- acosh(Inf) should be Inf, but some mingw versions produce NaN, so skip test +-- SELECT acosh(float8 'infinity'); +SELECT acosh(float8 '-infinity'); +ERROR: input is out of range +SELECT acosh(float8 'nan'); + acosh +------- + NaN +(1 row) + +SELECT atanh(float8 'infinity'); +ERROR: input is out of range +SELECT atanh(float8 '-infinity'); +ERROR: input is out of range +SELECT atanh(float8 'nan'); + atanh +------- + NaN +(1 row) + +-- error functions +-- we run these with extra_float_digits = -1, to get consistently rounded +-- results on all platforms. 
+SET extra_float_digits = -1; +SELECT x, + erf(x), + erfc(x) +FROM (VALUES (float8 '-infinity'), + (-28), (-6), (-3.4), (-2.1), (-1.1), (-0.45), + (-1.2e-9), (-2.3e-13), (-1.2e-17), (0), + (1.2e-17), (2.3e-13), (1.2e-9), + (0.45), (1.1), (2.1), (3.4), (6), (28), + (float8 'infinity'), (float8 'nan')) AS t(x); + x | erf | erfc +-----------+----------------------+--------------------- + -Infinity | -1 | 2 + -28 | -1 | 2 + -6 | -1 | 2 + -3.4 | -0.99999847800664 | 1.9999984780066 + -2.1 | -0.99702053334367 | 1.9970205333437 + -1.1 | -0.88020506957408 | 1.8802050695741 + -0.45 | -0.47548171978692 | 1.4754817197869 + -1.2e-09 | -1.3540550005146e-09 | 1.0000000013541 + -2.3e-13 | -2.5952720843197e-13 | 1.0000000000003 + -1.2e-17 | -1.3540550005146e-17 | 1 + 0 | 0 | 1 + 1.2e-17 | 1.3540550005146e-17 | 1 + 2.3e-13 | 2.5952720843197e-13 | 0.99999999999974 + 1.2e-09 | 1.3540550005146e-09 | 0.99999999864595 + 0.45 | 0.47548171978692 | 0.52451828021308 + 1.1 | 0.88020506957408 | 0.11979493042592 + 2.1 | 0.99702053334367 | 0.002979466656333 + 3.4 | 0.99999847800664 | 1.5219933628623e-06 + 6 | 1 | 2.1519736712499e-17 + 28 | 1 | 0 + Infinity | 1 | 0 + NaN | NaN | NaN +(22 rows) + +RESET extra_float_digits; +-- test for over- and underflow +INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); +ERROR: "10e400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e400'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); +ERROR: "-10e400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e400'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); +ERROR: "10e-400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400'); + ^ +INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); +ERROR: "-10e-400" is out of range for type double precision +LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400'); + ^ +DROP TABLE FLOAT8_TBL; +-- Check the float8 values exported for use by other tests +SELECT * FROM FLOAT8_TBL; + f1 +----------------------- + 0 + -34.84 + -1004.3 + -1.2345678901234e+200 + -1.2345678901234e-200 +(5 rows) + +-- test edge-case coercions to integer +SELECT '32767.4'::float8::int2; + int2 +------- + 32767 +(1 row) + +SELECT '32767.6'::float8::int2; +ERROR: smallint out of range +SELECT '-32768.4'::float8::int2; + int2 +-------- + -32768 +(1 row) + +SELECT '-32768.6'::float8::int2; +ERROR: smallint out of range +SELECT '2147483647.4'::float8::int4; + int4 +------------ + 2147483647 +(1 row) + +SELECT '2147483647.6'::float8::int4; +ERROR: integer out of range +SELECT '-2147483648.4'::float8::int4; + int4 +------------- + -2147483648 +(1 row) + +SELECT '-2147483648.6'::float8::int4; +ERROR: integer out of range +SELECT '9223372036854773760'::float8::int8; + int8 +--------------------- + 9223372036854773760 +(1 row) + +SELECT '9223372036854775807'::float8::int8; +ERROR: bigint out of range +SELECT '-9223372036854775808.5'::float8::int8; + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT '-9223372036854780000'::float8::int8; +ERROR: bigint out of range +-- test exact cases for trigonometric functions in degrees +SELECT x, + sind(x), + sind(x) IN (-1,-0.5,0,0.5,1) AS sind_exact +FROM (VALUES (0), (30), (90), (150), (180), + (210), (270), (330), (360)) AS t(x); + x | sind | sind_exact +-----+------+------------ + 0 | 0 | t + 30 | 0.5 | t + 90 | 1 | t + 150 | 0.5 | t + 180 | 0 | t + 210 | -0.5 | t + 270 | -1 | t + 330 | -0.5 | t + 360 | 0 | t +(9 rows) + +SELECT x, + cosd(x), + 
cosd(x) IN (-1,-0.5,0,0.5,1) AS cosd_exact +FROM (VALUES (0), (60), (90), (120), (180), + (240), (270), (300), (360)) AS t(x); + x | cosd | cosd_exact +-----+------+------------ + 0 | 1 | t + 60 | 0.5 | t + 90 | 0 | t + 120 | -0.5 | t + 180 | -1 | t + 240 | -0.5 | t + 270 | 0 | t + 300 | 0.5 | t + 360 | 1 | t +(9 rows) + +SELECT x, + tand(x), + tand(x) IN ('-Infinity'::float8,-1,0, + 1,'Infinity'::float8) AS tand_exact, + cotd(x), + cotd(x) IN ('-Infinity'::float8,-1,0, + 1,'Infinity'::float8) AS cotd_exact +FROM (VALUES (0), (45), (90), (135), (180), + (225), (270), (315), (360)) AS t(x); + x | tand | tand_exact | cotd | cotd_exact +-----+-----------+------------+-----------+------------ + 0 | 0 | t | Infinity | t + 45 | 1 | t | 1 | t + 90 | Infinity | t | 0 | t + 135 | -1 | t | -1 | t + 180 | 0 | t | -Infinity | t + 225 | 1 | t | 1 | t + 270 | -Infinity | t | 0 | t + 315 | -1 | t | -1 | t + 360 | 0 | t | Infinity | t +(9 rows) + +SELECT x, + asind(x), + asind(x) IN (-90,-30,0,30,90) AS asind_exact, + acosd(x), + acosd(x) IN (0,60,90,120,180) AS acosd_exact +FROM (VALUES (-1), (-0.5), (0), (0.5), (1)) AS t(x); + x | asind | asind_exact | acosd | acosd_exact +------+-------+-------------+-------+------------- + -1 | -90 | t | 180 | t + -0.5 | -30 | t | 120 | t + 0 | 0 | t | 90 | t + 0.5 | 30 | t | 60 | t + 1 | 90 | t | 0 | t +(5 rows) + +SELECT x, + atand(x), + atand(x) IN (-90,-45,0,45,90) AS atand_exact +FROM (VALUES ('-Infinity'::float8), (-1), (0), (1), + ('Infinity'::float8)) AS t(x); + x | atand | atand_exact +-----------+-------+------------- + -Infinity | -90 | t + -1 | -45 | t + 0 | 0 | t + 1 | 45 | t + Infinity | 90 | t +(5 rows) + +SELECT x, y, + atan2d(y, x), + atan2d(y, x) IN (-90,0,90,180) AS atan2d_exact +FROM (SELECT 10*cosd(a), 10*sind(a) + FROM generate_series(0, 360, 90) AS t(a)) AS t(x,y); + x | y | atan2d | atan2d_exact +-----+-----+--------+-------------- + 10 | 0 | 0 | t + 0 | 10 | 90 | t + -10 | 0 | 180 | t + 0 | -10 | -90 | t + 10 | 0 | 0 | t +(5 rows) + +-- +-- test output (and round-trip safety) of various values. +-- To ensure we're testing what we think we're testing, start with +-- float values specified by bit patterns (as a useful side effect, +-- this means we'll fail on non-IEEE platforms). +create type xfloat8; +create function xfloat8in(cstring) returns xfloat8 immutable strict + language internal as 'int8in'; +NOTICE: return type xfloat8 is only a shell +create function xfloat8out(xfloat8) returns cstring immutable strict + language internal as 'int8out'; +NOTICE: argument type xfloat8 is only a shell +create type xfloat8 (input = xfloat8in, output = xfloat8out, like = float8); +create cast (xfloat8 as float8) without function; +create cast (float8 as xfloat8) without function; +create cast (xfloat8 as bigint) without function; +create cast (bigint as xfloat8) without function; +-- float8: seeeeeee eeeeeeee eeeeeeee mmmmmmmm mmmmmmmm(x4) +-- we don't care to assume the platform's strtod() handles subnormals +-- correctly; those are "use at your own risk". However we do test +-- subnormal outputs, since those are under our control. 
+with testdata(bits) as (values + -- small subnormals + (x'0000000000000001'), + (x'0000000000000002'), (x'0000000000000003'), + (x'0000000000001000'), (x'0000000100000000'), + (x'0000010000000000'), (x'0000010100000000'), + (x'0000400000000000'), (x'0000400100000000'), + (x'0000800000000000'), (x'0000800000000001'), + -- these values taken from upstream testsuite + (x'00000000000f4240'), + (x'00000000016e3600'), + (x'0000008cdcdea440'), + -- borderline between subnormal and normal + (x'000ffffffffffff0'), (x'000ffffffffffff1'), + (x'000ffffffffffffe'), (x'000fffffffffffff')) +select float8send(flt) as ibits, + flt + from (select bits::bigint::xfloat8::float8 as flt + from testdata + offset 0) s; + ibits | flt +--------------------+------------------------- + \x0000000000000001 | 5e-324 + \x0000000000000002 | 1e-323 + \x0000000000000003 | 1.5e-323 + \x0000000000001000 | 2.0237e-320 + \x0000000100000000 | 2.121995791e-314 + \x0000010000000000 | 5.43230922487e-312 + \x0000010100000000 | 5.45352918278e-312 + \x0000400000000000 | 3.4766779039175e-310 + \x0000400100000000 | 3.4768901034966e-310 + \x0000800000000000 | 6.953355807835e-310 + \x0000800000000001 | 6.95335580783505e-310 + \x00000000000f4240 | 4.940656e-318 + \x00000000016e3600 | 1.18575755e-316 + \x0000008cdcdea440 | 2.989102097996e-312 + \x000ffffffffffff0 | 2.2250738585071935e-308 + \x000ffffffffffff1 | 2.225073858507194e-308 + \x000ffffffffffffe | 2.2250738585072004e-308 + \x000fffffffffffff | 2.225073858507201e-308 +(18 rows) + +-- round-trip tests +with testdata(bits) as (values + (x'0000000000000000'), + -- smallest normal values + (x'0010000000000000'), (x'0010000000000001'), + (x'0010000000000002'), (x'0018000000000000'), + -- + (x'3ddb7cdfd9d7bdba'), (x'3ddb7cdfd9d7bdbb'), (x'3ddb7cdfd9d7bdbc'), + (x'3e112e0be826d694'), (x'3e112e0be826d695'), (x'3e112e0be826d696'), + (x'3e45798ee2308c39'), (x'3e45798ee2308c3a'), (x'3e45798ee2308c3b'), + (x'3e7ad7f29abcaf47'), (x'3e7ad7f29abcaf48'), (x'3e7ad7f29abcaf49'), + (x'3eb0c6f7a0b5ed8c'), (x'3eb0c6f7a0b5ed8d'), (x'3eb0c6f7a0b5ed8e'), + (x'3ee4f8b588e368ef'), (x'3ee4f8b588e368f0'), (x'3ee4f8b588e368f1'), + (x'3f1a36e2eb1c432c'), (x'3f1a36e2eb1c432d'), (x'3f1a36e2eb1c432e'), + (x'3f50624dd2f1a9fb'), (x'3f50624dd2f1a9fc'), (x'3f50624dd2f1a9fd'), + (x'3f847ae147ae147a'), (x'3f847ae147ae147b'), (x'3f847ae147ae147c'), + (x'3fb9999999999999'), (x'3fb999999999999a'), (x'3fb999999999999b'), + -- values very close to 1 + (x'3feffffffffffff0'), (x'3feffffffffffff1'), (x'3feffffffffffff2'), + (x'3feffffffffffff3'), (x'3feffffffffffff4'), (x'3feffffffffffff5'), + (x'3feffffffffffff6'), (x'3feffffffffffff7'), (x'3feffffffffffff8'), + (x'3feffffffffffff9'), (x'3feffffffffffffa'), (x'3feffffffffffffb'), + (x'3feffffffffffffc'), (x'3feffffffffffffd'), (x'3feffffffffffffe'), + (x'3fefffffffffffff'), + (x'3ff0000000000000'), + (x'3ff0000000000001'), (x'3ff0000000000002'), (x'3ff0000000000003'), + (x'3ff0000000000004'), (x'3ff0000000000005'), (x'3ff0000000000006'), + (x'3ff0000000000007'), (x'3ff0000000000008'), (x'3ff0000000000009'), + -- + (x'3ff921fb54442d18'), + (x'4005bf0a8b14576a'), + (x'400921fb54442d18'), + -- + (x'4023ffffffffffff'), (x'4024000000000000'), (x'4024000000000001'), + (x'4058ffffffffffff'), (x'4059000000000000'), (x'4059000000000001'), + (x'408f3fffffffffff'), (x'408f400000000000'), (x'408f400000000001'), + (x'40c387ffffffffff'), (x'40c3880000000000'), (x'40c3880000000001'), + (x'40f869ffffffffff'), (x'40f86a0000000000'), (x'40f86a0000000001'), + (x'412e847fffffffff'), 
(x'412e848000000000'), (x'412e848000000001'), + (x'416312cfffffffff'), (x'416312d000000000'), (x'416312d000000001'), + (x'4197d783ffffffff'), (x'4197d78400000000'), (x'4197d78400000001'), + (x'41cdcd64ffffffff'), (x'41cdcd6500000000'), (x'41cdcd6500000001'), + (x'4202a05f1fffffff'), (x'4202a05f20000000'), (x'4202a05f20000001'), + (x'42374876e7ffffff'), (x'42374876e8000000'), (x'42374876e8000001'), + (x'426d1a94a1ffffff'), (x'426d1a94a2000000'), (x'426d1a94a2000001'), + (x'42a2309ce53fffff'), (x'42a2309ce5400000'), (x'42a2309ce5400001'), + (x'42d6bcc41e8fffff'), (x'42d6bcc41e900000'), (x'42d6bcc41e900001'), + (x'430c6bf52633ffff'), (x'430c6bf526340000'), (x'430c6bf526340001'), + (x'4341c37937e07fff'), (x'4341c37937e08000'), (x'4341c37937e08001'), + (x'4376345785d89fff'), (x'4376345785d8a000'), (x'4376345785d8a001'), + (x'43abc16d674ec7ff'), (x'43abc16d674ec800'), (x'43abc16d674ec801'), + (x'43e158e460913cff'), (x'43e158e460913d00'), (x'43e158e460913d01'), + (x'4415af1d78b58c3f'), (x'4415af1d78b58c40'), (x'4415af1d78b58c41'), + (x'444b1ae4d6e2ef4f'), (x'444b1ae4d6e2ef50'), (x'444b1ae4d6e2ef51'), + (x'4480f0cf064dd591'), (x'4480f0cf064dd592'), (x'4480f0cf064dd593'), + (x'44b52d02c7e14af5'), (x'44b52d02c7e14af6'), (x'44b52d02c7e14af7'), + (x'44ea784379d99db3'), (x'44ea784379d99db4'), (x'44ea784379d99db5'), + (x'45208b2a2c280290'), (x'45208b2a2c280291'), (x'45208b2a2c280292'), + -- + (x'7feffffffffffffe'), (x'7fefffffffffffff'), + -- round to even tests (+ve) + (x'4350000000000002'), + (x'4350000000002e06'), + (x'4352000000000003'), + (x'4352000000000004'), + (x'4358000000000003'), + (x'4358000000000004'), + (x'435f000000000020'), + -- round to even tests (-ve) + (x'c350000000000002'), + (x'c350000000002e06'), + (x'c352000000000003'), + (x'c352000000000004'), + (x'c358000000000003'), + (x'c358000000000004'), + (x'c35f000000000020'), + -- exercise fixed-point memmoves + (x'42dc12218377de66'), + (x'42a674e79c5fe51f'), + (x'4271f71fb04cb74c'), + (x'423cbe991a145879'), + (x'4206fee0e1a9e061'), + (x'41d26580b487e6b4'), + (x'419d6f34540ca453'), + (x'41678c29dcd6e9dc'), + (x'4132d687e3df217d'), + (x'40fe240c9fcb68c8'), + (x'40c81cd6e63c53d3'), + (x'40934a4584fd0fdc'), + (x'405edd3c07fb4c93'), + (x'4028b0fcd32f7076'), + (x'3ff3c0ca428c59f8'), + -- these cases come from the upstream's testsuite + -- LotsOfTrailingZeros) + (x'3e60000000000000'), + -- Regression + (x'c352bd2668e077c4'), + (x'434018601510c000'), + (x'43d055dc36f24000'), + (x'43e052961c6f8000'), + (x'3ff3c0ca2a5b1d5d'), + -- LooksLikePow5 + (x'4830f0cf064dd592'), + (x'4840f0cf064dd592'), + (x'4850f0cf064dd592'), + -- OutputLength + (x'3ff3333333333333'), + (x'3ff3ae147ae147ae'), + (x'3ff3be76c8b43958'), + (x'3ff3c083126e978d'), + (x'3ff3c0c1fc8f3238'), + (x'3ff3c0c9539b8887'), + (x'3ff3c0ca2a5b1d5d'), + (x'3ff3c0ca4283de1b'), + (x'3ff3c0ca43db770a'), + (x'3ff3c0ca428abd53'), + (x'3ff3c0ca428c1d2b'), + (x'3ff3c0ca428c51f2'), + (x'3ff3c0ca428c58fc'), + (x'3ff3c0ca428c59dd'), + (x'3ff3c0ca428c59f8'), + (x'3ff3c0ca428c59fb'), + -- 32-bit chunking + (x'40112e0be8047a7d'), + (x'40112e0be815a889'), + (x'40112e0be826d695'), + (x'40112e0be83804a1'), + (x'40112e0be84932ad'), + -- MinMaxShift + (x'0040000000000000'), + (x'007fffffffffffff'), + (x'0290000000000000'), + (x'029fffffffffffff'), + (x'4350000000000000'), + (x'435fffffffffffff'), + (x'1330000000000000'), + (x'133fffffffffffff'), + (x'3a6fa7161a4d6e0c') +) +select float8send(flt) as ibits, + flt, + flt::text::float8 as r_flt, + float8send(flt::text::float8) as obits, + 
float8send(flt::text::float8) = float8send(flt) as correct + from (select bits::bigint::xfloat8::float8 as flt + from testdata + offset 0) s; + ibits | flt | r_flt | obits | correct +--------------------+-------------------------+-------------------------+--------------------+--------- + \x0000000000000000 | 0 | 0 | \x0000000000000000 | t + \x0010000000000000 | 2.2250738585072014e-308 | 2.2250738585072014e-308 | \x0010000000000000 | t + \x0010000000000001 | 2.225073858507202e-308 | 2.225073858507202e-308 | \x0010000000000001 | t + \x0010000000000002 | 2.2250738585072024e-308 | 2.2250738585072024e-308 | \x0010000000000002 | t + \x0018000000000000 | 3.337610787760802e-308 | 3.337610787760802e-308 | \x0018000000000000 | t + \x3ddb7cdfd9d7bdba | 9.999999999999999e-11 | 9.999999999999999e-11 | \x3ddb7cdfd9d7bdba | t + \x3ddb7cdfd9d7bdbb | 1e-10 | 1e-10 | \x3ddb7cdfd9d7bdbb | t + \x3ddb7cdfd9d7bdbc | 1.0000000000000002e-10 | 1.0000000000000002e-10 | \x3ddb7cdfd9d7bdbc | t + \x3e112e0be826d694 | 9.999999999999999e-10 | 9.999999999999999e-10 | \x3e112e0be826d694 | t + \x3e112e0be826d695 | 1e-09 | 1e-09 | \x3e112e0be826d695 | t + \x3e112e0be826d696 | 1.0000000000000003e-09 | 1.0000000000000003e-09 | \x3e112e0be826d696 | t + \x3e45798ee2308c39 | 9.999999999999999e-09 | 9.999999999999999e-09 | \x3e45798ee2308c39 | t + \x3e45798ee2308c3a | 1e-08 | 1e-08 | \x3e45798ee2308c3a | t + \x3e45798ee2308c3b | 1.0000000000000002e-08 | 1.0000000000000002e-08 | \x3e45798ee2308c3b | t + \x3e7ad7f29abcaf47 | 9.999999999999998e-08 | 9.999999999999998e-08 | \x3e7ad7f29abcaf47 | t + \x3e7ad7f29abcaf48 | 1e-07 | 1e-07 | \x3e7ad7f29abcaf48 | t + \x3e7ad7f29abcaf49 | 1.0000000000000001e-07 | 1.0000000000000001e-07 | \x3e7ad7f29abcaf49 | t + \x3eb0c6f7a0b5ed8c | 9.999999999999997e-07 | 9.999999999999997e-07 | \x3eb0c6f7a0b5ed8c | t + \x3eb0c6f7a0b5ed8d | 1e-06 | 1e-06 | \x3eb0c6f7a0b5ed8d | t + \x3eb0c6f7a0b5ed8e | 1.0000000000000002e-06 | 1.0000000000000002e-06 | \x3eb0c6f7a0b5ed8e | t + \x3ee4f8b588e368ef | 9.999999999999997e-06 | 9.999999999999997e-06 | \x3ee4f8b588e368ef | t + \x3ee4f8b588e368f0 | 9.999999999999999e-06 | 9.999999999999999e-06 | \x3ee4f8b588e368f0 | t + \x3ee4f8b588e368f1 | 1e-05 | 1e-05 | \x3ee4f8b588e368f1 | t + \x3f1a36e2eb1c432c | 9.999999999999999e-05 | 9.999999999999999e-05 | \x3f1a36e2eb1c432c | t + \x3f1a36e2eb1c432d | 0.0001 | 0.0001 | \x3f1a36e2eb1c432d | t + \x3f1a36e2eb1c432e | 0.00010000000000000002 | 0.00010000000000000002 | \x3f1a36e2eb1c432e | t + \x3f50624dd2f1a9fb | 0.0009999999999999998 | 0.0009999999999999998 | \x3f50624dd2f1a9fb | t + \x3f50624dd2f1a9fc | 0.001 | 0.001 | \x3f50624dd2f1a9fc | t + \x3f50624dd2f1a9fd | 0.0010000000000000002 | 0.0010000000000000002 | \x3f50624dd2f1a9fd | t + \x3f847ae147ae147a | 0.009999999999999998 | 0.009999999999999998 | \x3f847ae147ae147a | t + \x3f847ae147ae147b | 0.01 | 0.01 | \x3f847ae147ae147b | t + \x3f847ae147ae147c | 0.010000000000000002 | 0.010000000000000002 | \x3f847ae147ae147c | t + \x3fb9999999999999 | 0.09999999999999999 | 0.09999999999999999 | \x3fb9999999999999 | t + \x3fb999999999999a | 0.1 | 0.1 | \x3fb999999999999a | t + \x3fb999999999999b | 0.10000000000000002 | 0.10000000000000002 | \x3fb999999999999b | t + \x3feffffffffffff0 | 0.9999999999999982 | 0.9999999999999982 | \x3feffffffffffff0 | t + \x3feffffffffffff1 | 0.9999999999999983 | 0.9999999999999983 | \x3feffffffffffff1 | t + \x3feffffffffffff2 | 0.9999999999999984 | 0.9999999999999984 | \x3feffffffffffff2 | t + \x3feffffffffffff3 | 0.9999999999999986 | 0.9999999999999986 | 
\x3feffffffffffff3 | t + \x3feffffffffffff4 | 0.9999999999999987 | 0.9999999999999987 | \x3feffffffffffff4 | t + \x3feffffffffffff5 | 0.9999999999999988 | 0.9999999999999988 | \x3feffffffffffff5 | t + \x3feffffffffffff6 | 0.9999999999999989 | 0.9999999999999989 | \x3feffffffffffff6 | t + \x3feffffffffffff7 | 0.999999999999999 | 0.999999999999999 | \x3feffffffffffff7 | t + \x3feffffffffffff8 | 0.9999999999999991 | 0.9999999999999991 | \x3feffffffffffff8 | t + \x3feffffffffffff9 | 0.9999999999999992 | 0.9999999999999992 | \x3feffffffffffff9 | t + \x3feffffffffffffa | 0.9999999999999993 | 0.9999999999999993 | \x3feffffffffffffa | t + \x3feffffffffffffb | 0.9999999999999994 | 0.9999999999999994 | \x3feffffffffffffb | t + \x3feffffffffffffc | 0.9999999999999996 | 0.9999999999999996 | \x3feffffffffffffc | t + \x3feffffffffffffd | 0.9999999999999997 | 0.9999999999999997 | \x3feffffffffffffd | t + \x3feffffffffffffe | 0.9999999999999998 | 0.9999999999999998 | \x3feffffffffffffe | t + \x3fefffffffffffff | 0.9999999999999999 | 0.9999999999999999 | \x3fefffffffffffff | t + \x3ff0000000000000 | 1 | 1 | \x3ff0000000000000 | t + \x3ff0000000000001 | 1.0000000000000002 | 1.0000000000000002 | \x3ff0000000000001 | t + \x3ff0000000000002 | 1.0000000000000004 | 1.0000000000000004 | \x3ff0000000000002 | t + \x3ff0000000000003 | 1.0000000000000007 | 1.0000000000000007 | \x3ff0000000000003 | t + \x3ff0000000000004 | 1.0000000000000009 | 1.0000000000000009 | \x3ff0000000000004 | t + \x3ff0000000000005 | 1.000000000000001 | 1.000000000000001 | \x3ff0000000000005 | t + \x3ff0000000000006 | 1.0000000000000013 | 1.0000000000000013 | \x3ff0000000000006 | t + \x3ff0000000000007 | 1.0000000000000016 | 1.0000000000000016 | \x3ff0000000000007 | t + \x3ff0000000000008 | 1.0000000000000018 | 1.0000000000000018 | \x3ff0000000000008 | t + \x3ff0000000000009 | 1.000000000000002 | 1.000000000000002 | \x3ff0000000000009 | t + \x3ff921fb54442d18 | 1.5707963267948966 | 1.5707963267948966 | \x3ff921fb54442d18 | t + \x4005bf0a8b14576a | 2.7182818284590455 | 2.7182818284590455 | \x4005bf0a8b14576a | t + \x400921fb54442d18 | 3.141592653589793 | 3.141592653589793 | \x400921fb54442d18 | t + \x4023ffffffffffff | 9.999999999999998 | 9.999999999999998 | \x4023ffffffffffff | t + \x4024000000000000 | 10 | 10 | \x4024000000000000 | t + \x4024000000000001 | 10.000000000000002 | 10.000000000000002 | \x4024000000000001 | t + \x4058ffffffffffff | 99.99999999999999 | 99.99999999999999 | \x4058ffffffffffff | t + \x4059000000000000 | 100 | 100 | \x4059000000000000 | t + \x4059000000000001 | 100.00000000000001 | 100.00000000000001 | \x4059000000000001 | t + \x408f3fffffffffff | 999.9999999999999 | 999.9999999999999 | \x408f3fffffffffff | t + \x408f400000000000 | 1000 | 1000 | \x408f400000000000 | t + \x408f400000000001 | 1000.0000000000001 | 1000.0000000000001 | \x408f400000000001 | t + \x40c387ffffffffff | 9999.999999999998 | 9999.999999999998 | \x40c387ffffffffff | t + \x40c3880000000000 | 10000 | 10000 | \x40c3880000000000 | t + \x40c3880000000001 | 10000.000000000002 | 10000.000000000002 | \x40c3880000000001 | t + \x40f869ffffffffff | 99999.99999999999 | 99999.99999999999 | \x40f869ffffffffff | t + \x40f86a0000000000 | 100000 | 100000 | \x40f86a0000000000 | t + \x40f86a0000000001 | 100000.00000000001 | 100000.00000000001 | \x40f86a0000000001 | t + \x412e847fffffffff | 999999.9999999999 | 999999.9999999999 | \x412e847fffffffff | t + \x412e848000000000 | 1000000 | 1000000 | \x412e848000000000 | t + \x412e848000000001 | 1000000.0000000001 | 
1000000.0000000001 | \x412e848000000001 | t + \x416312cfffffffff | 9999999.999999998 | 9999999.999999998 | \x416312cfffffffff | t + \x416312d000000000 | 10000000 | 10000000 | \x416312d000000000 | t + \x416312d000000001 | 10000000.000000002 | 10000000.000000002 | \x416312d000000001 | t + \x4197d783ffffffff | 99999999.99999999 | 99999999.99999999 | \x4197d783ffffffff | t + \x4197d78400000000 | 100000000 | 100000000 | \x4197d78400000000 | t + \x4197d78400000001 | 100000000.00000001 | 100000000.00000001 | \x4197d78400000001 | t + \x41cdcd64ffffffff | 999999999.9999999 | 999999999.9999999 | \x41cdcd64ffffffff | t + \x41cdcd6500000000 | 1000000000 | 1000000000 | \x41cdcd6500000000 | t + \x41cdcd6500000001 | 1000000000.0000001 | 1000000000.0000001 | \x41cdcd6500000001 | t + \x4202a05f1fffffff | 9999999999.999998 | 9999999999.999998 | \x4202a05f1fffffff | t + \x4202a05f20000000 | 10000000000 | 10000000000 | \x4202a05f20000000 | t + \x4202a05f20000001 | 10000000000.000002 | 10000000000.000002 | \x4202a05f20000001 | t + \x42374876e7ffffff | 99999999999.99998 | 99999999999.99998 | \x42374876e7ffffff | t + \x42374876e8000000 | 100000000000 | 100000000000 | \x42374876e8000000 | t + \x42374876e8000001 | 100000000000.00002 | 100000000000.00002 | \x42374876e8000001 | t + \x426d1a94a1ffffff | 999999999999.9999 | 999999999999.9999 | \x426d1a94a1ffffff | t + \x426d1a94a2000000 | 1000000000000 | 1000000000000 | \x426d1a94a2000000 | t + \x426d1a94a2000001 | 1000000000000.0001 | 1000000000000.0001 | \x426d1a94a2000001 | t + \x42a2309ce53fffff | 9999999999999.998 | 9999999999999.998 | \x42a2309ce53fffff | t + \x42a2309ce5400000 | 10000000000000 | 10000000000000 | \x42a2309ce5400000 | t + \x42a2309ce5400001 | 10000000000000.002 | 10000000000000.002 | \x42a2309ce5400001 | t + \x42d6bcc41e8fffff | 99999999999999.98 | 99999999999999.98 | \x42d6bcc41e8fffff | t + \x42d6bcc41e900000 | 100000000000000 | 100000000000000 | \x42d6bcc41e900000 | t + \x42d6bcc41e900001 | 100000000000000.02 | 100000000000000.02 | \x42d6bcc41e900001 | t + \x430c6bf52633ffff | 999999999999999.9 | 999999999999999.9 | \x430c6bf52633ffff | t + \x430c6bf526340000 | 1e+15 | 1e+15 | \x430c6bf526340000 | t + \x430c6bf526340001 | 1.0000000000000001e+15 | 1.0000000000000001e+15 | \x430c6bf526340001 | t + \x4341c37937e07fff | 9.999999999999998e+15 | 9.999999999999998e+15 | \x4341c37937e07fff | t + \x4341c37937e08000 | 1e+16 | 1e+16 | \x4341c37937e08000 | t + \x4341c37937e08001 | 1.0000000000000002e+16 | 1.0000000000000002e+16 | \x4341c37937e08001 | t + \x4376345785d89fff | 9.999999999999998e+16 | 9.999999999999998e+16 | \x4376345785d89fff | t + \x4376345785d8a000 | 1e+17 | 1e+17 | \x4376345785d8a000 | t + \x4376345785d8a001 | 1.0000000000000002e+17 | 1.0000000000000002e+17 | \x4376345785d8a001 | t + \x43abc16d674ec7ff | 9.999999999999999e+17 | 9.999999999999999e+17 | \x43abc16d674ec7ff | t + \x43abc16d674ec800 | 1e+18 | 1e+18 | \x43abc16d674ec800 | t + \x43abc16d674ec801 | 1.0000000000000001e+18 | 1.0000000000000001e+18 | \x43abc16d674ec801 | t + \x43e158e460913cff | 9.999999999999998e+18 | 9.999999999999998e+18 | \x43e158e460913cff | t + \x43e158e460913d00 | 1e+19 | 1e+19 | \x43e158e460913d00 | t + \x43e158e460913d01 | 1.0000000000000002e+19 | 1.0000000000000002e+19 | \x43e158e460913d01 | t + \x4415af1d78b58c3f | 9.999999999999998e+19 | 9.999999999999998e+19 | \x4415af1d78b58c3f | t + \x4415af1d78b58c40 | 1e+20 | 1e+20 | \x4415af1d78b58c40 | t + \x4415af1d78b58c41 | 1.0000000000000002e+20 | 1.0000000000000002e+20 | \x4415af1d78b58c41 | t + 
\x444b1ae4d6e2ef4f | 9.999999999999999e+20 | 9.999999999999999e+20 | \x444b1ae4d6e2ef4f | t + \x444b1ae4d6e2ef50 | 1e+21 | 1e+21 | \x444b1ae4d6e2ef50 | t + \x444b1ae4d6e2ef51 | 1.0000000000000001e+21 | 1.0000000000000001e+21 | \x444b1ae4d6e2ef51 | t + \x4480f0cf064dd591 | 9.999999999999998e+21 | 9.999999999999998e+21 | \x4480f0cf064dd591 | t + \x4480f0cf064dd592 | 1e+22 | 1e+22 | \x4480f0cf064dd592 | t + \x4480f0cf064dd593 | 1.0000000000000002e+22 | 1.0000000000000002e+22 | \x4480f0cf064dd593 | t + \x44b52d02c7e14af5 | 9.999999999999997e+22 | 9.999999999999997e+22 | \x44b52d02c7e14af5 | t + \x44b52d02c7e14af6 | 9.999999999999999e+22 | 9.999999999999999e+22 | \x44b52d02c7e14af6 | t + \x44b52d02c7e14af7 | 1.0000000000000001e+23 | 1.0000000000000001e+23 | \x44b52d02c7e14af7 | t + \x44ea784379d99db3 | 9.999999999999998e+23 | 9.999999999999998e+23 | \x44ea784379d99db3 | t + \x44ea784379d99db4 | 1e+24 | 1e+24 | \x44ea784379d99db4 | t + \x44ea784379d99db5 | 1.0000000000000001e+24 | 1.0000000000000001e+24 | \x44ea784379d99db5 | t + \x45208b2a2c280290 | 9.999999999999999e+24 | 9.999999999999999e+24 | \x45208b2a2c280290 | t + \x45208b2a2c280291 | 1e+25 | 1e+25 | \x45208b2a2c280291 | t + \x45208b2a2c280292 | 1.0000000000000003e+25 | 1.0000000000000003e+25 | \x45208b2a2c280292 | t + \x7feffffffffffffe | 1.7976931348623155e+308 | 1.7976931348623155e+308 | \x7feffffffffffffe | t + \x7fefffffffffffff | 1.7976931348623157e+308 | 1.7976931348623157e+308 | \x7fefffffffffffff | t + \x4350000000000002 | 1.8014398509481992e+16 | 1.8014398509481992e+16 | \x4350000000000002 | t + \x4350000000002e06 | 1.8014398509529112e+16 | 1.8014398509529112e+16 | \x4350000000002e06 | t + \x4352000000000003 | 2.0266198323167244e+16 | 2.0266198323167244e+16 | \x4352000000000003 | t + \x4352000000000004 | 2.0266198323167248e+16 | 2.0266198323167248e+16 | \x4352000000000004 | t + \x4358000000000003 | 2.7021597764222988e+16 | 2.7021597764222988e+16 | \x4358000000000003 | t + \x4358000000000004 | 2.7021597764222992e+16 | 2.7021597764222992e+16 | \x4358000000000004 | t + \x435f000000000020 | 3.4902897112121472e+16 | 3.4902897112121472e+16 | \x435f000000000020 | t + \xc350000000000002 | -1.8014398509481992e+16 | -1.8014398509481992e+16 | \xc350000000000002 | t + \xc350000000002e06 | -1.8014398509529112e+16 | -1.8014398509529112e+16 | \xc350000000002e06 | t + \xc352000000000003 | -2.0266198323167244e+16 | -2.0266198323167244e+16 | \xc352000000000003 | t + \xc352000000000004 | -2.0266198323167248e+16 | -2.0266198323167248e+16 | \xc352000000000004 | t + \xc358000000000003 | -2.7021597764222988e+16 | -2.7021597764222988e+16 | \xc358000000000003 | t + \xc358000000000004 | -2.7021597764222992e+16 | -2.7021597764222992e+16 | \xc358000000000004 | t + \xc35f000000000020 | -3.4902897112121472e+16 | -3.4902897112121472e+16 | \xc35f000000000020 | t + \x42dc12218377de66 | 123456789012345.6 | 123456789012345.6 | \x42dc12218377de66 | t + \x42a674e79c5fe51f | 12345678901234.56 | 12345678901234.56 | \x42a674e79c5fe51f | t + \x4271f71fb04cb74c | 1234567890123.456 | 1234567890123.456 | \x4271f71fb04cb74c | t + \x423cbe991a145879 | 123456789012.3456 | 123456789012.3456 | \x423cbe991a145879 | t + \x4206fee0e1a9e061 | 12345678901.23456 | 12345678901.23456 | \x4206fee0e1a9e061 | t + \x41d26580b487e6b4 | 1234567890.123456 | 1234567890.123456 | \x41d26580b487e6b4 | t + \x419d6f34540ca453 | 123456789.0123456 | 123456789.0123456 | \x419d6f34540ca453 | t + \x41678c29dcd6e9dc | 12345678.90123456 | 12345678.90123456 | \x41678c29dcd6e9dc | t + \x4132d687e3df217d | 
1234567.890123456 | 1234567.890123456 | \x4132d687e3df217d | t + \x40fe240c9fcb68c8 | 123456.7890123456 | 123456.7890123456 | \x40fe240c9fcb68c8 | t + \x40c81cd6e63c53d3 | 12345.67890123456 | 12345.67890123456 | \x40c81cd6e63c53d3 | t + \x40934a4584fd0fdc | 1234.567890123456 | 1234.567890123456 | \x40934a4584fd0fdc | t + \x405edd3c07fb4c93 | 123.4567890123456 | 123.4567890123456 | \x405edd3c07fb4c93 | t + \x4028b0fcd32f7076 | 12.34567890123456 | 12.34567890123456 | \x4028b0fcd32f7076 | t + \x3ff3c0ca428c59f8 | 1.234567890123456 | 1.234567890123456 | \x3ff3c0ca428c59f8 | t + \x3e60000000000000 | 2.9802322387695312e-08 | 2.9802322387695312e-08 | \x3e60000000000000 | t + \xc352bd2668e077c4 | -2.1098088986959632e+16 | -2.1098088986959632e+16 | \xc352bd2668e077c4 | t + \x434018601510c000 | 9.0608011534336e+15 | 9.0608011534336e+15 | \x434018601510c000 | t + \x43d055dc36f24000 | 4.708356024711512e+18 | 4.708356024711512e+18 | \x43d055dc36f24000 | t + \x43e052961c6f8000 | 9.409340012568248e+18 | 9.409340012568248e+18 | \x43e052961c6f8000 | t + \x3ff3c0ca2a5b1d5d | 1.2345678 | 1.2345678 | \x3ff3c0ca2a5b1d5d | t + \x4830f0cf064dd592 | 5.764607523034235e+39 | 5.764607523034235e+39 | \x4830f0cf064dd592 | t + \x4840f0cf064dd592 | 1.152921504606847e+40 | 1.152921504606847e+40 | \x4840f0cf064dd592 | t + \x4850f0cf064dd592 | 2.305843009213694e+40 | 2.305843009213694e+40 | \x4850f0cf064dd592 | t + \x3ff3333333333333 | 1.2 | 1.2 | \x3ff3333333333333 | t + \x3ff3ae147ae147ae | 1.23 | 1.23 | \x3ff3ae147ae147ae | t + \x3ff3be76c8b43958 | 1.234 | 1.234 | \x3ff3be76c8b43958 | t + \x3ff3c083126e978d | 1.2345 | 1.2345 | \x3ff3c083126e978d | t + \x3ff3c0c1fc8f3238 | 1.23456 | 1.23456 | \x3ff3c0c1fc8f3238 | t + \x3ff3c0c9539b8887 | 1.234567 | 1.234567 | \x3ff3c0c9539b8887 | t + \x3ff3c0ca2a5b1d5d | 1.2345678 | 1.2345678 | \x3ff3c0ca2a5b1d5d | t + \x3ff3c0ca4283de1b | 1.23456789 | 1.23456789 | \x3ff3c0ca4283de1b | t + \x3ff3c0ca43db770a | 1.234567895 | 1.234567895 | \x3ff3c0ca43db770a | t + \x3ff3c0ca428abd53 | 1.2345678901 | 1.2345678901 | \x3ff3c0ca428abd53 | t + \x3ff3c0ca428c1d2b | 1.23456789012 | 1.23456789012 | \x3ff3c0ca428c1d2b | t + \x3ff3c0ca428c51f2 | 1.234567890123 | 1.234567890123 | \x3ff3c0ca428c51f2 | t + \x3ff3c0ca428c58fc | 1.2345678901234 | 1.2345678901234 | \x3ff3c0ca428c58fc | t + \x3ff3c0ca428c59dd | 1.23456789012345 | 1.23456789012345 | \x3ff3c0ca428c59dd | t + \x3ff3c0ca428c59f8 | 1.234567890123456 | 1.234567890123456 | \x3ff3c0ca428c59f8 | t + \x3ff3c0ca428c59fb | 1.2345678901234567 | 1.2345678901234567 | \x3ff3c0ca428c59fb | t + \x40112e0be8047a7d | 4.294967294 | 4.294967294 | \x40112e0be8047a7d | t + \x40112e0be815a889 | 4.294967295 | 4.294967295 | \x40112e0be815a889 | t + \x40112e0be826d695 | 4.294967296 | 4.294967296 | \x40112e0be826d695 | t + \x40112e0be83804a1 | 4.294967297 | 4.294967297 | \x40112e0be83804a1 | t + \x40112e0be84932ad | 4.294967298 | 4.294967298 | \x40112e0be84932ad | t + \x0040000000000000 | 1.7800590868057611e-307 | 1.7800590868057611e-307 | \x0040000000000000 | t + \x007fffffffffffff | 2.8480945388892175e-306 | 2.8480945388892175e-306 | \x007fffffffffffff | t + \x0290000000000000 | 2.446494580089078e-296 | 2.446494580089078e-296 | \x0290000000000000 | t + \x029fffffffffffff | 4.8929891601781557e-296 | 4.8929891601781557e-296 | \x029fffffffffffff | t + \x4350000000000000 | 1.8014398509481984e+16 | 1.8014398509481984e+16 | \x4350000000000000 | t + \x435fffffffffffff | 3.6028797018963964e+16 | 3.6028797018963964e+16 | \x435fffffffffffff | t + \x1330000000000000 | 
2.900835519859558e-216 | 2.900835519859558e-216 | \x1330000000000000 | t + \x133fffffffffffff | 5.801671039719115e-216 | 5.801671039719115e-216 | \x133fffffffffffff | t + \x3a6fa7161a4d6e0c | 3.196104012172126e-27 | 3.196104012172126e-27 | \x3a6fa7161a4d6e0c | t +(209 rows) + +-- clean up, lest opr_sanity complain +drop type xfloat8 cascade; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to function xfloat8in(cstring) +drop cascades to function xfloat8out(xfloat8) +drop cascades to cast from xfloat8 to double precision +drop cascades to cast from double precision to xfloat8 +drop cascades to cast from xfloat8 to bigint +drop cascades to cast from bigint to xfloat8 diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out new file mode 100644 index 0000000..5b30ee4 --- /dev/null +++ b/src/test/regress/expected/foreign_data.out @@ -0,0 +1,2105 @@ +-- +-- Test foreign-data wrapper and server management. +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION test_fdw_handler() + RETURNS fdw_handler + AS :'regresslib', 'test_fdw_handler' + LANGUAGE C; +-- Clean up in case a prior regression run failed +-- Suppress NOTICE messages when roles don't exist +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_foreign_data_user, regress_test_role, regress_test_role2, regress_test_role_super, regress_test_indirect, regress_unprivileged_role; +RESET client_min_messages; +CREATE ROLE regress_foreign_data_user LOGIN SUPERUSER; +SET SESSION AUTHORIZATION 'regress_foreign_data_user'; +CREATE ROLE regress_test_role; +CREATE ROLE regress_test_role2; +CREATE ROLE regress_test_role_super SUPERUSER; +CREATE ROLE regress_test_indirect; +CREATE ROLE regress_unprivileged_role; +CREATE FOREIGN DATA WRAPPER dummy; +COMMENT ON FOREIGN DATA WRAPPER dummy IS 'useless'; +CREATE FOREIGN DATA WRAPPER postgresql VALIDATOR postgresql_fdw_validator; +-- At this point we should have 2 built-in wrappers and no servers. 
+SELECT fdwname, fdwhandler::regproc, fdwvalidator::regproc, fdwoptions FROM pg_foreign_data_wrapper ORDER BY 1, 2, 3; + fdwname | fdwhandler | fdwvalidator | fdwoptions +------------+------------+--------------------------+------------ + dummy | - | - | + postgresql | - | postgresql_fdw_validator | +(2 rows) + +SELECT srvname, srvoptions FROM pg_foreign_server; + srvname | srvoptions +---------+------------ +(0 rows) + +SELECT * FROM pg_user_mapping; + oid | umuser | umserver | umoptions +-----+--------+----------+----------- +(0 rows) + +-- CREATE FOREIGN DATA WRAPPER +CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR +ERROR: function bar(text[], oid) does not exist +CREATE FOREIGN DATA WRAPPER foo; +\dew + List of foreign-data wrappers + Name | Owner | Handler | Validator +------------+---------------------------+---------+-------------------------- + dummy | regress_foreign_data_user | - | - + foo | regress_foreign_data_user | - | - + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator +(3 rows) + +CREATE FOREIGN DATA WRAPPER foo; -- duplicate +ERROR: foreign-data wrapper "foo" already exists +DROP FOREIGN DATA WRAPPER foo; +CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1'); +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+---------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | (testing '1') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +DROP FOREIGN DATA WRAPPER foo; +CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR +ERROR: option "testing" provided more than once +CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2'); +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+----------------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | (testing '1', another '2') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +DROP FOREIGN DATA WRAPPER foo; +SET ROLE regress_test_role; +CREATE FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: permission denied to create foreign-data wrapper "foo" +HINT: Must be superuser to create a foreign-data wrapper. 
+RESET ROLE; +CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | postgresql_fdw_validator | | | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +-- HANDLER related checks +CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;'; +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR +ERROR: function invalid_fdw_handler must return type fdw_handler +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR +ERROR: conflicting or redundant options +LINE 1: ...GN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER in... + ^ +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler; +DROP FOREIGN DATA WRAPPER test_fdw; +-- ALTER FOREIGN DATA WRAPPER +ALTER FOREIGN DATA WRAPPER foo OPTIONS (nonexistent 'fdw'); -- ERROR +ERROR: invalid option "nonexistent" +HINT: There are no valid options in this context. +ALTER FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: syntax error at or near ";" +LINE 1: ALTER FOREIGN DATA WRAPPER foo; + ^ +ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR +ERROR: function bar(text[], oid) does not exist +ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR; +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2'); +ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR +ERROR: option "c" not found +ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR +ERROR: option "c" not found +ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x); +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+----------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | (a '1', b '2') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4'); +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+----------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | (b '3', c '4') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2'); +ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR +ERROR: option "b" provided more than once +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | 
Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+-----------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +SET ROLE regress_test_role; +ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR +ERROR: permission denied to alter foreign-data wrapper "foo" +HINT: Must be superuser to alter a foreign-data wrapper. +SET ROLE regress_test_role_super; +ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | (b '3', c '4', a '2', d '5') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR +ERROR: permission denied to change owner of foreign-data wrapper "foo" +HINT: The owner of a foreign-data wrapper must be a superuser. +ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role_super; +ALTER ROLE regress_test_role_super NOSUPERUSER; +SET ROLE regress_test_role_super; +ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD e '6'); -- ERROR +ERROR: permission denied to alter foreign-data wrapper "foo" +HINT: Must be superuser to alter a foreign-data wrapper. +RESET ROLE; +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1; +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+------------------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo1 | regress_test_role_super | - | - | | (b '3', c '4', a '2', d '5') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo; +-- HANDLER related checks +ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR +ERROR: function invalid_fdw_handler must return type fdw_handler +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR +ERROR: conflicting or redundant options +LINE 1: ...FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER an... 
+ ^ +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler; +WARNING: changing the foreign-data wrapper handler can change behavior of existing foreign tables +DROP FUNCTION invalid_fdw_handler(); +-- DROP FOREIGN DATA WRAPPER +DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR +ERROR: foreign-data wrapper "nonexistent" does not exist +DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent; +NOTICE: foreign-data wrapper "nonexistent" does not exist, skipping +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+------------------+--------------------------+-------------------+------------------------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_test_role_super | test_fdw_handler | - | | (b '3', c '4', a '2', d '5') | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +DROP ROLE regress_test_role_super; -- ERROR +ERROR: role "regress_test_role_super" cannot be dropped because some objects depend on it +DETAIL: owner of foreign-data wrapper foo +SET ROLE regress_test_role_super; +DROP FOREIGN DATA WRAPPER foo; +RESET ROLE; +DROP ROLE regress_test_role_super; +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(2 rows) + +CREATE FOREIGN DATA WRAPPER foo; +CREATE SERVER s1 FOREIGN DATA WRAPPER foo; +COMMENT ON SERVER s1 IS 'foreign server'; +CREATE USER MAPPING FOR current_user SERVER s1; +CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR +ERROR: user mapping for "regress_foreign_data_user" already exists for server "s1" +CREATE USER MAPPING IF NOT EXISTS FOR current_user SERVER s1; -- NOTICE +NOTICE: user mapping for "regress_foreign_data_user" already exists for server "s1", skipping +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + foo | regress_foreign_data_user | - | - | | | + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(3 rows) + +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +------+---------------------------+----------------------+-------------------+------+---------+-------------+---------------- + s1 | regress_foreign_data_user | foo | | | | | foreign server +(1 row) + +\deu+ + List of user mappings + Server | User name | FDW options +--------+---------------------------+------------- + s1 | regress_foreign_data_user | +(1 row) + +DROP FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: cannot drop foreign-data wrapper foo because other objects depend on it +DETAIL: server s1 depends on foreign-data wrapper foo +user mapping for regress_foreign_data_user on server s1 depends on server s1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+SET ROLE regress_test_role; +DROP FOREIGN DATA WRAPPER foo CASCADE; -- ERROR +ERROR: must be owner of foreign-data wrapper foo +RESET ROLE; +DROP FOREIGN DATA WRAPPER foo CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to server s1 +drop cascades to user mapping for regress_foreign_data_user on server s1 +\dew+ + List of foreign-data wrappers + Name | Owner | Handler | Validator | Access privileges | FDW options | Description +------------+---------------------------+---------+--------------------------+-------------------+-------------+------------- + dummy | regress_foreign_data_user | - | - | | | useless + postgresql | regress_foreign_data_user | - | postgresql_fdw_validator | | | +(2 rows) + +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +------+-------+----------------------+-------------------+------+---------+-------------+------------- +(0 rows) + +\deu+ + List of user mappings + Server | User name | FDW options +--------+-----------+------------- +(0 rows) + +-- exercise CREATE SERVER +CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: foreign-data wrapper "foo" does not exist +CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true'); +CREATE SERVER s1 FOREIGN DATA WRAPPER foo; +CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: server "s1" already exists +CREATE SERVER IF NOT EXISTS s1 FOREIGN DATA WRAPPER foo; -- No ERROR, just NOTICE +NOTICE: server "s1" already exists, skipping +CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); +CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo; +CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); +CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo; +CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); +CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); +CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR +ERROR: invalid option "foo" +CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db'); +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+------------- + s1 | regress_foreign_data_user | foo | | | | | + s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') | + s3 | regress_foreign_data_user | foo | | oracle | | | + s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | + s5 | regress_foreign_data_user | foo | | | 15.0 | | + s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') | + s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | + s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | +(8 rows) + +SET ROLE regress_test_role; +CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW +ERROR: permission denied for foreign-data wrapper foo +RESET ROLE; +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; +SET ROLE regress_test_role; +CREATE SERVER t1 FOREIGN DATA WRAPPER foo; +RESET ROLE; +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description 
+------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+------------- + s1 | regress_foreign_data_user | foo | | | | | + s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') | + s3 | regress_foreign_data_user | foo | | oracle | | | + s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | + s5 | regress_foreign_data_user | foo | | | 15.0 | | + s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') | + s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | + s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | + t1 | regress_test_role | foo | | | | | +(9 rows) + +REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role; +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect; +SET ROLE regress_test_role; +CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: permission denied for foreign-data wrapper foo +RESET ROLE; +GRANT regress_test_indirect TO regress_test_role; +SET ROLE regress_test_role; +CREATE SERVER t2 FOREIGN DATA WRAPPER foo; +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +------+---------------------------+----------------------+-------------------+--------+---------+-----------------------------------+------------- + s1 | regress_foreign_data_user | foo | | | | | + s2 | regress_foreign_data_user | foo | | | | (host 'a', dbname 'b') | + s3 | regress_foreign_data_user | foo | | oracle | | | + s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | + s5 | regress_foreign_data_user | foo | | | 15.0 | | + s6 | regress_foreign_data_user | foo | | | 16.0 | (host 'a', dbname 'b') | + s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | + s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | + t1 | regress_test_role | foo | | | | | + t2 | regress_test_role | foo | | | | | +(10 rows) + +RESET ROLE; +REVOKE regress_test_indirect FROM regress_test_role; +-- ALTER SERVER +ALTER SERVER s0; -- ERROR +ERROR: syntax error at or near ";" +LINE 1: ALTER SERVER s0; + ^ +ALTER SERVER s0 OPTIONS (a '1'); -- ERROR +ERROR: server "s0" does not exist +ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1'); +ALTER SERVER s2 VERSION '1.1'; +ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521'); +GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role; +GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION; +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+-----------------------------------+------------- + s1 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 1.0 | (servername 's1') | + | | | regress_test_role=U/regress_foreign_data_user | | | | + s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') | + s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') | + s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | + s5 | regress_foreign_data_user | foo | | | 15.0 | | + s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | 
(host 'a', dbname 'b') | + | | | regress_test_role2=U*/regress_foreign_data_user | | | | + s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | + s8 | regress_foreign_data_user | postgresql | | | | (host 'localhost', dbname 's8db') | + t1 | regress_test_role | foo | | | | | + t2 | regress_test_role | foo | | | | | +(10 rows) + +SET ROLE regress_test_role; +ALTER SERVER s1 VERSION '1.1'; -- ERROR +ERROR: must be owner of foreign server s1 +ALTER SERVER s1 OWNER TO regress_test_role; -- ERROR +ERROR: must be owner of foreign server s1 +RESET ROLE; +ALTER SERVER s1 OWNER TO regress_test_role; +GRANT regress_test_role2 TO regress_test_role; +SET ROLE regress_test_role; +ALTER SERVER s1 VERSION '1.1'; +ALTER SERVER s1 OWNER TO regress_test_role2; -- ERROR +ERROR: permission denied for foreign-data wrapper foo +RESET ROLE; +ALTER SERVER s8 OPTIONS (foo '1'); -- ERROR option validation +ERROR: invalid option "foo" +ALTER SERVER s8 OPTIONS (connect_timeout '30', SET dbname 'db1', DROP host); +SET ROLE regress_test_role; +ALTER SERVER s1 OWNER TO regress_test_indirect; -- ERROR +ERROR: must be able to SET ROLE "regress_test_indirect" +RESET ROLE; +GRANT regress_test_indirect TO regress_test_role; +SET ROLE regress_test_role; +ALTER SERVER s1 OWNER TO regress_test_indirect; +RESET ROLE; +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect; +SET ROLE regress_test_role; +ALTER SERVER s1 OWNER TO regress_test_indirect; +RESET ROLE; +DROP ROLE regress_test_indirect; -- ERROR +ERROR: role "regress_test_indirect" cannot be dropped because some objects depend on it +DETAIL: privileges for foreign-data wrapper foo +owner of server s1 +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+------------- + s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') | + s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') | + s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') | + s4 | regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | + s5 | regress_foreign_data_user | foo | | | 15.0 | | + s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') | + | | | regress_test_role2=U*/regress_foreign_data_user | | | | + s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | + s8 | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') | + t1 | regress_test_role | foo | | | | | + t2 | regress_test_role | foo | | | | | +(10 rows) + +ALTER SERVER s8 RENAME to s8new; +\des+ + List of foreign servers + Name | Owner | Foreign-data wrapper | Access privileges | Type | Version | FDW options | Description +-------+---------------------------+----------------------+-------------------------------------------------------+--------+---------+--------------------------------------+------------- + s1 | regress_test_indirect | foo | regress_test_indirect=U/regress_test_indirect | | 1.1 | (servername 's1') | + s2 | regress_foreign_data_user | foo | | | 1.1 | (host 'a', dbname 'b') | + s3 | regress_foreign_data_user | foo | | oracle | | ("tns name" 'orcl', port '1521') | + s4 | 
regress_foreign_data_user | foo | | oracle | | (host 'a', dbname 'b') | + s5 | regress_foreign_data_user | foo | | | 15.0 | | + s6 | regress_foreign_data_user | foo | regress_foreign_data_user=U/regress_foreign_data_user+| | 16.0 | (host 'a', dbname 'b') | + | | | regress_test_role2=U*/regress_foreign_data_user | | | | + s7 | regress_foreign_data_user | foo | | oracle | 17.0 | (host 'a', dbname 'b') | + s8new | regress_foreign_data_user | postgresql | | | | (dbname 'db1', connect_timeout '30') | + t1 | regress_test_role | foo | | | | | + t2 | regress_test_role | foo | | | | | +(10 rows) + +ALTER SERVER s8new RENAME to s8; +-- DROP SERVER +DROP SERVER nonexistent; -- ERROR +ERROR: server "nonexistent" does not exist +DROP SERVER IF EXISTS nonexistent; +NOTICE: server "nonexistent" does not exist, skipping +\des + List of foreign servers + Name | Owner | Foreign-data wrapper +------+---------------------------+---------------------- + s1 | regress_test_indirect | foo + s2 | regress_foreign_data_user | foo + s3 | regress_foreign_data_user | foo + s4 | regress_foreign_data_user | foo + s5 | regress_foreign_data_user | foo + s6 | regress_foreign_data_user | foo + s7 | regress_foreign_data_user | foo + s8 | regress_foreign_data_user | postgresql + t1 | regress_test_role | foo + t2 | regress_test_role | foo +(10 rows) + +SET ROLE regress_test_role; +DROP SERVER s2; -- ERROR +ERROR: must be owner of foreign server s2 +DROP SERVER s1; +RESET ROLE; +\des + List of foreign servers + Name | Owner | Foreign-data wrapper +------+---------------------------+---------------------- + s2 | regress_foreign_data_user | foo + s3 | regress_foreign_data_user | foo + s4 | regress_foreign_data_user | foo + s5 | regress_foreign_data_user | foo + s6 | regress_foreign_data_user | foo + s7 | regress_foreign_data_user | foo + s8 | regress_foreign_data_user | postgresql + t1 | regress_test_role | foo + t2 | regress_test_role | foo +(9 rows) + +ALTER SERVER s2 OWNER TO regress_test_role; +SET ROLE regress_test_role; +DROP SERVER s2; +RESET ROLE; +\des + List of foreign servers + Name | Owner | Foreign-data wrapper +------+---------------------------+---------------------- + s3 | regress_foreign_data_user | foo + s4 | regress_foreign_data_user | foo + s5 | regress_foreign_data_user | foo + s6 | regress_foreign_data_user | foo + s7 | regress_foreign_data_user | foo + s8 | regress_foreign_data_user | postgresql + t1 | regress_test_role | foo + t2 | regress_test_role | foo +(8 rows) + +CREATE USER MAPPING FOR current_user SERVER s3; +\deu + List of user mappings + Server | User name +--------+--------------------------- + s3 | regress_foreign_data_user +(1 row) + +DROP SERVER s3; -- ERROR +ERROR: cannot drop server s3 because other objects depend on it +DETAIL: user mapping for regress_foreign_data_user on server s3 depends on server s3 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+DROP SERVER s3 CASCADE; +NOTICE: drop cascades to user mapping for regress_foreign_data_user on server s3 +\des + List of foreign servers + Name | Owner | Foreign-data wrapper +------+---------------------------+---------------------- + s4 | regress_foreign_data_user | foo + s5 | regress_foreign_data_user | foo + s6 | regress_foreign_data_user | foo + s7 | regress_foreign_data_user | foo + s8 | regress_foreign_data_user | postgresql + t1 | regress_test_role | foo + t2 | regress_test_role | foo +(7 rows) + +\deu +List of user mappings + Server | User name +--------+----------- +(0 rows) + +-- CREATE USER MAPPING +CREATE USER MAPPING FOR regress_test_missing_role SERVER s1; -- ERROR +ERROR: role "regress_test_missing_role" does not exist +CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR +ERROR: server "s1" does not exist +CREATE USER MAPPING FOR current_user SERVER s4; +CREATE USER MAPPING FOR user SERVER s4; -- ERROR duplicate +ERROR: user mapping for "regress_foreign_data_user" already exists for server "s4" +CREATE USER MAPPING FOR public SERVER s4 OPTIONS ("this mapping" 'is public'); +CREATE USER MAPPING FOR user SERVER s8 OPTIONS (username 'test', password 'secret'); -- ERROR +ERROR: invalid option "username" +HINT: Perhaps you meant the option "user". +CREATE USER MAPPING FOR user SERVER s8 OPTIONS (user 'test', password 'secret'); +ALTER SERVER s5 OWNER TO regress_test_role; +ALTER SERVER s6 OWNER TO regress_test_indirect; +SET ROLE regress_test_role; +CREATE USER MAPPING FOR current_user SERVER s5; +CREATE USER MAPPING FOR current_user SERVER s6 OPTIONS (username 'test'); +CREATE USER MAPPING FOR current_user SERVER s7; -- ERROR +ERROR: permission denied for foreign server s7 +CREATE USER MAPPING FOR public SERVER s8; -- ERROR +ERROR: must be owner of foreign server s8 +RESET ROLE; +ALTER SERVER t1 OWNER TO regress_test_indirect; +SET ROLE regress_test_role; +CREATE USER MAPPING FOR current_user SERVER t1 OPTIONS (username 'bob', password 'boo'); +CREATE USER MAPPING FOR public SERVER t1; +RESET ROLE; +\deu + List of user mappings + Server | User name +--------+--------------------------- + s4 | public + s4 | regress_foreign_data_user + s5 | regress_test_role + s6 | regress_test_role + s8 | regress_foreign_data_user + t1 | public + t1 | regress_test_role +(7 rows) + +-- ALTER USER MAPPING +ALTER USER MAPPING FOR regress_test_missing_role SERVER s4 OPTIONS (gotcha 'true'); -- ERROR +ERROR: role "regress_test_missing_role" does not exist +ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR +ERROR: server "ss4" does not exist +ALTER USER MAPPING FOR public SERVER s5 OPTIONS (gotcha 'true'); -- ERROR +ERROR: user mapping for "public" does not exist for server "s5" +ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (username 'test'); -- ERROR +ERROR: invalid option "username" +HINT: Perhaps you meant the option "user". 
+ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (DROP user, SET password 'public'); +SET ROLE regress_test_role; +ALTER USER MAPPING FOR current_user SERVER s5 OPTIONS (ADD modified '1'); +ALTER USER MAPPING FOR public SERVER s4 OPTIONS (ADD modified '1'); -- ERROR +ERROR: must be owner of foreign server s4 +ALTER USER MAPPING FOR public SERVER t1 OPTIONS (ADD modified '1'); +RESET ROLE; +\deu+ + List of user mappings + Server | User name | FDW options +--------+---------------------------+---------------------------------- + s4 | public | ("this mapping" 'is public') + s4 | regress_foreign_data_user | + s5 | regress_test_role | (modified '1') + s6 | regress_test_role | (username 'test') + s8 | regress_foreign_data_user | (password 'public') + t1 | public | (modified '1') + t1 | regress_test_role | (username 'bob', password 'boo') +(7 rows) + +-- DROP USER MAPPING +DROP USER MAPPING FOR regress_test_missing_role SERVER s4; -- ERROR +ERROR: role "regress_test_missing_role" does not exist +DROP USER MAPPING FOR user SERVER ss4; +ERROR: server "ss4" does not exist +DROP USER MAPPING FOR public SERVER s7; -- ERROR +ERROR: user mapping for "public" does not exist for server "s7" +DROP USER MAPPING IF EXISTS FOR regress_test_missing_role SERVER s4; +NOTICE: role "regress_test_missing_role" does not exist, skipping +DROP USER MAPPING IF EXISTS FOR user SERVER ss4; +NOTICE: server "ss4" does not exist, skipping +DROP USER MAPPING IF EXISTS FOR public SERVER s7; +NOTICE: user mapping for "public" does not exist for server "s7", skipping +CREATE USER MAPPING FOR public SERVER s8; +SET ROLE regress_test_role; +DROP USER MAPPING FOR public SERVER s8; -- ERROR +ERROR: must be owner of foreign server s8 +RESET ROLE; +DROP SERVER s7; +\deu + List of user mappings + Server | User name +--------+--------------------------- + s4 | public + s4 | regress_foreign_data_user + s5 | regress_test_role + s6 | regress_test_role + s8 | public + s8 | regress_foreign_data_user + t1 | public + t1 | regress_test_role +(8 rows) + +-- CREATE FOREIGN TABLE +CREATE SCHEMA foreign_schema; +CREATE SERVER s0 FOREIGN DATA WRAPPER dummy; +CREATE FOREIGN TABLE ft1 (); -- ERROR +ERROR: syntax error at or near ";" +LINE 1: CREATE FOREIGN TABLE ft1 (); + ^ +CREATE FOREIGN TABLE ft1 () SERVER no_server; -- ERROR +ERROR: server "no_server" does not exist +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY, + c2 text OPTIONS (param2 'val2', param3 'val3'), + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR +ERROR: primary key constraints are not supported on foreign tables +LINE 2: c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY, + ^ +CREATE TABLE ref_table (id integer PRIMARY KEY); +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table (id), + c2 text OPTIONS (param2 'val2', param3 'val3'), + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR +ERROR: foreign key constraints are not supported on foreign tables +LINE 2: c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table ... 
+ ^ +DROP TABLE ref_table; +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') NOT NULL, + c2 text OPTIONS (param2 'val2', param3 'val3'), + c3 date, + UNIQUE (c3) +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR +ERROR: unique constraints are not supported on foreign tables +LINE 5: UNIQUE (c3) + ^ +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') NOT NULL, + c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''), + c3 date, + CHECK (c3 BETWEEN '1994-01-01'::date AND '1994-01-31'::date) +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +COMMENT ON FOREIGN TABLE ft1 IS 'ft1'; +COMMENT ON COLUMN ft1.c1 IS 'ft1.c1'; +\d+ ft1 + Foreign table "public.ft1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+--------------------------------+----------+--------------+------------- + c1 | integer | | not null | | ("param 1" 'val1') | plain | | ft1.c1 + c2 | text | | | | (param2 'val2', param3 'val3') | extended | | + c3 | date | | | | | plain | | +Check constraints: + "ft1_c2_check" CHECK (c2 <> ''::text) + "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +\det+ + List of foreign tables + Schema | Table | Server | FDW options | Description +--------+-------+--------+-------------------------------------------------+------------- + public | ft1 | s0 | (delimiter ',', quote '"', "be quoted" 'value') | ft1 +(1 row) + +CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR +ERROR: cannot create index on relation "ft1" +DETAIL: This operation is not supported for foreign tables. +SELECT * FROM ft1; -- ERROR +ERROR: foreign-data wrapper "dummy" has no handler +EXPLAIN SELECT * FROM ft1; -- ERROR +ERROR: foreign-data wrapper "dummy" has no handler +CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); +CREATE FOREIGN TABLE ft_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; +CREATE INDEX ON lt1 (a); -- skips partition +CREATE UNIQUE INDEX ON lt1 (a); -- ERROR +ERROR: cannot create unique index on partitioned table "lt1" +DETAIL: Table "lt1" contains partitions that are foreign tables. +ALTER TABLE lt1 ADD PRIMARY KEY (a); -- ERROR +ERROR: cannot create unique index on partitioned table "lt1" +DETAIL: Table "lt1" contains partitions that are foreign tables. +DROP TABLE lt1; +CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); +CREATE INDEX ON lt1 (a); +CREATE FOREIGN TABLE ft_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; +CREATE FOREIGN TABLE ft_part2 (a INT) SERVER s0; +ALTER TABLE lt1 ATTACH PARTITION ft_part2 FOR VALUES FROM (1000) TO (2000); +DROP FOREIGN TABLE ft_part1, ft_part2; +CREATE UNIQUE INDEX ON lt1 (a); +ALTER TABLE lt1 ADD PRIMARY KEY (a); +CREATE FOREIGN TABLE ft_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -- ERROR +ERROR: cannot create foreign partition of partitioned table "lt1" +DETAIL: Table "lt1" contains indexes that are unique. +CREATE FOREIGN TABLE ft_part2 (a INT NOT NULL) SERVER s0; +ALTER TABLE lt1 ATTACH PARTITION ft_part2 + FOR VALUES FROM (1000) TO (2000); -- ERROR +ERROR: cannot attach foreign table "ft_part2" as partition of partitioned table "lt1" +DETAIL: Partitioned table "lt1" contains unique indexes. 
+DROP TABLE lt1; +DROP FOREIGN TABLE ft_part2; +CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); +CREATE INDEX ON lt1 (a); +CREATE TABLE lt1_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) + PARTITION BY RANGE (a); +CREATE FOREIGN TABLE ft_part_1_1 + PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0; +CREATE FOREIGN TABLE ft_part_1_2 (a INT) SERVER s0; +ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200); +CREATE UNIQUE INDEX ON lt1 (a); +ERROR: cannot create unique index on partitioned table "lt1" +DETAIL: Table "lt1" contains partitions that are foreign tables. +ALTER TABLE lt1 ADD PRIMARY KEY (a); +ERROR: cannot create unique index on partitioned table "lt1_part1" +DETAIL: Table "lt1_part1" contains partitions that are foreign tables. +DROP FOREIGN TABLE ft_part_1_1, ft_part_1_2; +CREATE UNIQUE INDEX ON lt1 (a); +ALTER TABLE lt1 ADD PRIMARY KEY (a); +CREATE FOREIGN TABLE ft_part_1_1 + PARTITION OF lt1_part1 FOR VALUES FROM (0) TO (100) SERVER s0; +ERROR: cannot create foreign partition of partitioned table "lt1_part1" +DETAIL: Table "lt1_part1" contains indexes that are unique. +CREATE FOREIGN TABLE ft_part_1_2 (a INT NOT NULL) SERVER s0; +ALTER TABLE lt1_part1 ATTACH PARTITION ft_part_1_2 FOR VALUES FROM (100) TO (200); +ERROR: cannot attach foreign table "ft_part_1_2" as partition of partitioned table "lt1_part1" +DETAIL: Partitioned table "lt1_part1" contains unique indexes. +DROP TABLE lt1; +DROP FOREIGN TABLE ft_part_1_2; +-- ALTER FOREIGN TABLE +COMMENT ON FOREIGN TABLE ft1 IS 'foreign table'; +COMMENT ON FOREIGN TABLE ft1 IS NULL; +COMMENT ON COLUMN ft1.c1 IS 'foreign column'; +COMMENT ON COLUMN ft1.c1 IS NULL; +ALTER FOREIGN TABLE ft1 ADD COLUMN c4 integer; +ALTER FOREIGN TABLE ft1 ADD COLUMN c5 integer DEFAULT 0; +ALTER FOREIGN TABLE ft1 ADD COLUMN c6 integer; +ALTER FOREIGN TABLE ft1 ADD COLUMN c7 integer NOT NULL; +ALTER FOREIGN TABLE ft1 ADD COLUMN c8 integer; +ALTER FOREIGN TABLE ft1 ADD COLUMN c9 integer; +ALTER FOREIGN TABLE ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1'); +ALTER FOREIGN TABLE ft1 ALTER COLUMN c4 SET DEFAULT 0; +ALTER FOREIGN TABLE ft1 ALTER COLUMN c5 DROP DEFAULT; +ALTER FOREIGN TABLE ft1 ALTER COLUMN c6 SET NOT NULL; +ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 DROP NOT NULL; +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR +ERROR: "ft1" is not a table +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10); +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE text; +ALTER FOREIGN TABLE ft1 ALTER COLUMN xmin OPTIONS (ADD p1 'v1'); -- ERROR +ERROR: cannot alter system column "xmin" +ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'), + ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2'); +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1); +ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000; +ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100); +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1; +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STORAGE PLAIN; +\d+ ft1 + Foreign table "public.ft1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+--------------------------------+----------+--------------+------------- + c1 | integer | | not null | | ("param 1" 'val1') | plain | 10000 | + c2 | text | | | | (param2 'val2', param3 'val3') | extended | | + c3 | date | | | | | plain | | + c4 | integer | | | 0 | | plain | | 
+ c5 | integer | | | | | plain | | + c6 | integer | | not null | | | plain | | + c7 | integer | | | | (p1 'v1', p2 'v2') | plain | | + c8 | text | | | | (p2 'V2') | plain | | + c9 | integer | | | | | plain | | + c10 | integer | | | | (p1 'v1') | plain | | +Check constraints: + "ft1_c2_check" CHECK (c2 <> ''::text) + "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +-- can't change the column type if it's used elsewhere +CREATE TABLE use_ft1_column_type (x ft1); +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE integer; -- ERROR +ERROR: cannot alter foreign table "ft1" because column "use_ft1_column_type.x" uses its row type +DROP TABLE use_ft1_column_type; +ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); -- ERROR +ERROR: primary key constraints are not supported on foreign tables +LINE 1: ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); + ^ +ALTER FOREIGN TABLE ft1 ADD CONSTRAINT ft1_c9_check CHECK (c9 < 0) NOT VALID; +ALTER FOREIGN TABLE ft1 ALTER CONSTRAINT ft1_c9_check DEFERRABLE; -- ERROR +ERROR: ALTER action ALTER CONSTRAINT cannot be performed on relation "ft1" +DETAIL: This operation is not supported for foreign tables. +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c9_check; +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT no_const; -- ERROR +ERROR: constraint "no_const" of relation "ft1" does not exist +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT IF EXISTS no_const; +NOTICE: constraint "no_const" of relation "ft1" does not exist, skipping +ALTER FOREIGN TABLE ft1 OWNER TO regress_test_role; +ALTER FOREIGN TABLE ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@'); +ALTER FOREIGN TABLE ft1 DROP COLUMN no_column; -- ERROR +ERROR: column "no_column" of relation "ft1" does not exist +ALTER FOREIGN TABLE ft1 DROP COLUMN IF EXISTS no_column; +NOTICE: column "no_column" of relation "ft1" does not exist, skipping +ALTER FOREIGN TABLE ft1 DROP COLUMN c9; +ALTER FOREIGN TABLE ft1 SET SCHEMA foreign_schema; +ALTER FOREIGN TABLE ft1 SET TABLESPACE ts; -- ERROR +ERROR: relation "ft1" does not exist +ALTER FOREIGN TABLE foreign_schema.ft1 RENAME c1 TO foreign_column_1; +ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1; +\d foreign_schema.foreign_table_1 + Foreign table "foreign_schema.foreign_table_1" + Column | Type | Collation | Nullable | Default | FDW options +------------------+---------+-----------+----------+---------+-------------------------------- + foreign_column_1 | integer | | not null | | ("param 1" 'val1') + c2 | text | | | | (param2 'val2', param3 'val3') + c3 | date | | | | + c4 | integer | | | 0 | + c5 | integer | | | | + c6 | integer | | not null | | + c7 | integer | | | | (p1 'v1', p2 'v2') + c8 | text | | | | (p2 'V2') + c10 | integer | | | | (p1 'v1') +Check constraints: + "ft1_c2_check" CHECK (c2 <> ''::text) + "ft1_c3_check" CHECK (c3 >= '01-01-1994'::date AND c3 <= '01-31-1994'::date) +Server: s0 +FDW options: (quote '~', "be quoted" 'value', escape '@') + +-- alter noexisting table +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c6 integer; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c7 integer NOT NULL; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c8 integer; 
+NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c9 integer; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c10 integer OPTIONS (p1 'v1'); +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c6 SET NOT NULL; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 DROP NOT NULL; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 TYPE char(10); +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 SET DATA TYPE text; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'), + ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2'); +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1); +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT IF EXISTS no_const; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP CONSTRAINT ft1_c1_check; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OWNER TO regress_test_role; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@'); +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN IF EXISTS no_column; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 DROP COLUMN c9; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 SET SCHEMA foreign_schema; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME c1 TO foreign_column_1; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 RENAME TO foreign_table_1; +NOTICE: relation "doesnt_exist_ft1" does not exist, skipping +-- Information schema +SELECT * FROM information_schema.foreign_data_wrappers ORDER BY 1, 2; + foreign_data_wrapper_catalog | foreign_data_wrapper_name | authorization_identifier | library_name | foreign_data_wrapper_language +------------------------------+---------------------------+---------------------------+--------------+------------------------------- + regression | dummy | regress_foreign_data_user | | c + regression | foo | regress_foreign_data_user | | c + regression | postgresql | regress_foreign_data_user | | c +(3 rows) + +SELECT * FROM information_schema.foreign_data_wrapper_options ORDER BY 1, 2, 3; + foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value +------------------------------+---------------------------+--------------+-------------- + regression | foo | test wrapper | true +(1 row) + +SELECT * FROM information_schema.foreign_servers ORDER BY 1, 2; + foreign_server_catalog | foreign_server_name | 
foreign_data_wrapper_catalog | foreign_data_wrapper_name | foreign_server_type | foreign_server_version | authorization_identifier +------------------------+---------------------+------------------------------+---------------------------+---------------------+------------------------+--------------------------- + regression | s0 | regression | dummy | | | regress_foreign_data_user + regression | s4 | regression | foo | oracle | | regress_foreign_data_user + regression | s5 | regression | foo | | 15.0 | regress_test_role + regression | s6 | regression | foo | | 16.0 | regress_test_indirect + regression | s8 | regression | postgresql | | | regress_foreign_data_user + regression | t1 | regression | foo | | | regress_test_indirect + regression | t2 | regression | foo | | | regress_test_role +(7 rows) + +SELECT * FROM information_schema.foreign_server_options ORDER BY 1, 2, 3; + foreign_server_catalog | foreign_server_name | option_name | option_value +------------------------+---------------------+-----------------+-------------- + regression | s4 | dbname | b + regression | s4 | host | a + regression | s6 | dbname | b + regression | s6 | host | a + regression | s8 | connect_timeout | 30 + regression | s8 | dbname | db1 +(6 rows) + +SELECT * FROM information_schema.user_mappings ORDER BY lower(authorization_identifier), 2, 3; + authorization_identifier | foreign_server_catalog | foreign_server_name +---------------------------+------------------------+--------------------- + PUBLIC | regression | s4 + PUBLIC | regression | s8 + PUBLIC | regression | t1 + regress_foreign_data_user | regression | s4 + regress_foreign_data_user | regression | s8 + regress_test_role | regression | s5 + regress_test_role | regression | s6 + regress_test_role | regression | t1 +(8 rows) + +SELECT * FROM information_schema.user_mapping_options ORDER BY lower(authorization_identifier), 2, 3, 4; + authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value +---------------------------+------------------------+---------------------+--------------+-------------- + PUBLIC | regression | s4 | this mapping | is public + PUBLIC | regression | t1 | modified | 1 + regress_foreign_data_user | regression | s8 | password | public + regress_test_role | regression | s5 | modified | 1 + regress_test_role | regression | s6 | username | test + regress_test_role | regression | t1 | password | boo + regress_test_role | regression | t1 | username | bob +(7 rows) + +SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; + grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable +---------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+-------------- + regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES + regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO + regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES + regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES +(4 rows) + +SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; + grantor | grantee | object_catalog | object_schema | object_name 
| object_type | privilege_type | is_grantable +---------------------------+---------------------------+----------------+---------------+-------------+----------------------+----------------+-------------- + regress_foreign_data_user | regress_foreign_data_user | regression | | foo | FOREIGN DATA WRAPPER | USAGE | YES + regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO + regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES + regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES +(4 rows) + +SELECT * FROM information_schema.foreign_tables ORDER BY 1, 2, 3; + foreign_table_catalog | foreign_table_schema | foreign_table_name | foreign_server_catalog | foreign_server_name +-----------------------+----------------------+--------------------+------------------------+--------------------- + regression | foreign_schema | foreign_table_1 | regression | s0 +(1 row) + +SELECT * FROM information_schema.foreign_table_options ORDER BY 1, 2, 3, 4; + foreign_table_catalog | foreign_table_schema | foreign_table_name | option_name | option_value +-----------------------+----------------------+--------------------+-------------+-------------- + regression | foreign_schema | foreign_table_1 | be quoted | value + regression | foreign_schema | foreign_table_1 | escape | @ + regression | foreign_schema | foreign_table_1 | quote | ~ +(3 rows) + +SET ROLE regress_test_role; +SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4; + authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value +--------------------------+------------------------+---------------------+-------------+-------------- + PUBLIC | regression | t1 | modified | 1 + regress_test_role | regression | s5 | modified | 1 + regress_test_role | regression | s6 | username | test + regress_test_role | regression | t1 | password | boo + regress_test_role | regression | t1 | username | bob +(5 rows) + +SELECT * FROM information_schema.usage_privileges WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; + grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable +---------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+-------------- + regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO + regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES + regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES +(3 rows) + +SELECT * FROM information_schema.role_usage_grants WHERE object_type LIKE 'FOREIGN%' AND object_name IN ('s6', 'foo') ORDER BY 1, 2, 3, 4, 5; + grantor | grantee | object_catalog | object_schema | object_name | object_type | privilege_type | is_grantable +---------------------------+-----------------------+----------------+---------------+-------------+----------------------+----------------+-------------- + regress_foreign_data_user | regress_test_indirect | regression | | foo | FOREIGN DATA WRAPPER | USAGE | NO + regress_test_indirect | regress_test_indirect | regression | | s6 | FOREIGN SERVER | USAGE | YES + regress_test_indirect | regress_test_role2 | regression | | s6 | FOREIGN SERVER | USAGE | YES +(3 rows) + +DROP USER MAPPING FOR 
current_user SERVER t1; +SET ROLE regress_test_role2; +SELECT * FROM information_schema.user_mapping_options ORDER BY 1, 2, 3, 4; + authorization_identifier | foreign_server_catalog | foreign_server_name | option_name | option_value +--------------------------+------------------------+---------------------+-------------+-------------- + regress_test_role | regression | s6 | username | +(1 row) + +RESET ROLE; +-- has_foreign_data_wrapper_privilege +SELECT has_foreign_data_wrapper_privilege('regress_test_role', + (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +SELECT has_foreign_data_wrapper_privilege( + (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), + (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +SELECT has_foreign_data_wrapper_privilege( + (SELECT oid FROM pg_foreign_data_wrapper WHERE fdwname='foo'), 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +SELECT has_foreign_data_wrapper_privilege( + (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 'foo', 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +SELECT has_foreign_data_wrapper_privilege('foo', 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; +SELECT has_foreign_data_wrapper_privilege('regress_test_role', 'foo', 'USAGE'); + has_foreign_data_wrapper_privilege +------------------------------------ + t +(1 row) + +-- has_server_privilege +SELECT has_server_privilege('regress_test_role', + (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE'); + has_server_privilege +---------------------- + f +(1 row) + +SELECT has_server_privilege('regress_test_role', 's8', 'USAGE'); + has_server_privilege +---------------------- + f +(1 row) + +SELECT has_server_privilege( + (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), + (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE'); + has_server_privilege +---------------------- + f +(1 row) + +SELECT has_server_privilege( + (SELECT oid FROM pg_foreign_server WHERE srvname='s8'), 'USAGE'); + has_server_privilege +---------------------- + t +(1 row) + +SELECT has_server_privilege( + (SELECT oid FROM pg_roles WHERE rolname='regress_test_role'), 's8', 'USAGE'); + has_server_privilege +---------------------- + f +(1 row) + +SELECT has_server_privilege('s8', 'USAGE'); + has_server_privilege +---------------------- + t +(1 row) + +GRANT USAGE ON FOREIGN SERVER s8 TO regress_test_role; +SELECT has_server_privilege('regress_test_role', 's8', 'USAGE'); + has_server_privilege +---------------------- + t +(1 row) + +REVOKE USAGE ON FOREIGN SERVER s8 FROM regress_test_role; +GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; +DROP USER MAPPING FOR public SERVER s4; +ALTER SERVER s6 OPTIONS (DROP host, DROP dbname); +ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (DROP username); +ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; +WARNING: changing the foreign-data wrapper validator can cause the options for dependent 
objects to become invalid +-- Privileges +SET ROLE regress_unprivileged_role; +CREATE FOREIGN DATA WRAPPER foobar; -- ERROR +ERROR: permission denied to create foreign-data wrapper "foobar" +HINT: Must be superuser to create a foreign-data wrapper. +ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR +ERROR: permission denied to alter foreign-data wrapper "foo" +HINT: Must be superuser to alter a foreign-data wrapper. +ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_unprivileged_role; -- ERROR +ERROR: permission denied to change owner of foreign-data wrapper "foo" +HINT: Must be superuser to change owner of a foreign-data wrapper. +DROP FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: must be owner of foreign-data wrapper foo +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR +ERROR: permission denied for foreign-data wrapper foo +CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: permission denied for foreign-data wrapper foo +ALTER SERVER s4 VERSION '0.5'; -- ERROR +ERROR: must be owner of foreign server s4 +ALTER SERVER s4 OWNER TO regress_unprivileged_role; -- ERROR +ERROR: must be owner of foreign server s4 +DROP SERVER s4; -- ERROR +ERROR: must be owner of foreign server s4 +GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -- ERROR +ERROR: permission denied for foreign server s4 +CREATE USER MAPPING FOR public SERVER s4; -- ERROR +ERROR: must be owner of foreign server s4 +ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR +ERROR: must be owner of foreign server s6 +DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR +ERROR: must be owner of foreign server s6 +RESET ROLE; +GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_unprivileged_role; +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_unprivileged_role WITH GRANT OPTION; +SET ROLE regress_unprivileged_role; +CREATE FOREIGN DATA WRAPPER foobar; -- ERROR +ERROR: permission denied to create foreign-data wrapper "foobar" +HINT: Must be superuser to create a foreign-data wrapper. +ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR +ERROR: permission denied to alter foreign-data wrapper "foo" +HINT: Must be superuser to alter a foreign-data wrapper. +DROP FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: must be owner of foreign-data wrapper foo +GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_test_role; -- WARNING +WARNING: no privileges were granted for "postgresql" +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; +CREATE SERVER s9 FOREIGN DATA WRAPPER postgresql; +ALTER SERVER s6 VERSION '0.5'; -- ERROR +ERROR: must be owner of foreign server s6 +DROP SERVER s6; -- ERROR +ERROR: must be owner of foreign server s6 +GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role; -- ERROR +ERROR: permission denied for foreign server s6 +GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; +CREATE USER MAPPING FOR public SERVER s6; -- ERROR +ERROR: must be owner of foreign server s6 +CREATE USER MAPPING FOR public SERVER s9; +ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR +ERROR: must be owner of foreign server s6 +DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR +ERROR: must be owner of foreign server s6 +RESET ROLE; +REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role; -- ERROR +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. 
+REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role CASCADE; +SET ROLE regress_unprivileged_role; +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR +ERROR: permission denied for foreign-data wrapper foo +CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -- ERROR +ERROR: permission denied for foreign-data wrapper foo +ALTER SERVER s9 VERSION '1.1'; +GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; +CREATE USER MAPPING FOR current_user SERVER s9; +DROP SERVER s9 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to user mapping for public on server s9 +drop cascades to user mapping for regress_unprivileged_role on server s9 +RESET ROLE; +CREATE SERVER s9 FOREIGN DATA WRAPPER foo; +GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role; +SET ROLE regress_unprivileged_role; +ALTER SERVER s9 VERSION '1.2'; -- ERROR +ERROR: must be owner of foreign server s9 +GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -- WARNING +WARNING: no privileges were granted for "s9" +CREATE USER MAPPING FOR current_user SERVER s9; +DROP SERVER s9 CASCADE; -- ERROR +ERROR: must be owner of foreign server s9 +-- Check visibility of user mapping data +SET ROLE regress_test_role; +CREATE SERVER s10 FOREIGN DATA WRAPPER foo; +CREATE USER MAPPING FOR public SERVER s10 OPTIONS (user 'secret'); +CREATE USER MAPPING FOR regress_unprivileged_role SERVER s10 OPTIONS (user 'secret'); +-- owner of server can see some option fields +\deu+ + List of user mappings + Server | User name | FDW options +--------+---------------------------+------------------- + s10 | public | ("user" 'secret') + s10 | regress_unprivileged_role | + s4 | regress_foreign_data_user | + s5 | regress_test_role | (modified '1') + s6 | regress_test_role | + s8 | public | + s8 | regress_foreign_data_user | + s9 | regress_unprivileged_role | + t1 | public | (modified '1') +(9 rows) + +RESET ROLE; +-- superuser can see all option fields +\deu+ + List of user mappings + Server | User name | FDW options +--------+---------------------------+--------------------- + s10 | public | ("user" 'secret') + s10 | regress_unprivileged_role | ("user" 'secret') + s4 | regress_foreign_data_user | + s5 | regress_test_role | (modified '1') + s6 | regress_test_role | + s8 | public | + s8 | regress_foreign_data_user | (password 'public') + s9 | regress_unprivileged_role | + t1 | public | (modified '1') +(9 rows) + +-- unprivileged user cannot see any option field +SET ROLE regress_unprivileged_role; +\deu+ + List of user mappings + Server | User name | FDW options +--------+---------------------------+------------- + s10 | public | + s10 | regress_unprivileged_role | + s4 | regress_foreign_data_user | + s5 | regress_test_role | + s6 | regress_test_role | + s8 | public | + s8 | regress_foreign_data_user | + s9 | regress_unprivileged_role | + t1 | public | +(9 rows) + +RESET ROLE; +DROP SERVER s10 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to user mapping for public on server s10 +drop cascades to user mapping for regress_unprivileged_role on server s10 +-- Triggers +CREATE FUNCTION dummy_trigger() RETURNS TRIGGER AS $$ + BEGIN + RETURN NULL; + END +$$ language plpgsql; +CREATE TRIGGER trigtest_before_stmt BEFORE INSERT OR UPDATE OR DELETE +ON foreign_schema.foreign_table_1 +FOR EACH STATEMENT +EXECUTE PROCEDURE dummy_trigger(); +CREATE TRIGGER trigtest_after_stmt AFTER INSERT OR UPDATE OR DELETE +ON foreign_schema.foreign_table_1 +FOR EACH STATEMENT +EXECUTE PROCEDURE 
dummy_trigger(); +CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR +ON foreign_schema.foreign_table_1 +REFERENCING NEW TABLE AS new_table +FOR EACH STATEMENT +EXECUTE PROCEDURE dummy_trigger(); +ERROR: "foreign_table_1" is a foreign table +DETAIL: Triggers on foreign tables cannot have transition tables. +CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE +ON foreign_schema.foreign_table_1 +FOR EACH ROW +EXECUTE PROCEDURE dummy_trigger(); +CREATE TRIGGER trigtest_after_row AFTER INSERT OR UPDATE OR DELETE +ON foreign_schema.foreign_table_1 +FOR EACH ROW +EXECUTE PROCEDURE dummy_trigger(); +CREATE CONSTRAINT TRIGGER trigtest_constraint AFTER INSERT OR UPDATE OR DELETE +ON foreign_schema.foreign_table_1 +FOR EACH ROW +EXECUTE PROCEDURE dummy_trigger(); +ERROR: "foreign_table_1" is a foreign table +DETAIL: Foreign tables cannot have constraint triggers. +ALTER FOREIGN TABLE foreign_schema.foreign_table_1 + DISABLE TRIGGER trigtest_before_stmt; +ALTER FOREIGN TABLE foreign_schema.foreign_table_1 + ENABLE TRIGGER trigtest_before_stmt; +DROP TRIGGER trigtest_before_stmt ON foreign_schema.foreign_table_1; +DROP TRIGGER trigtest_before_row ON foreign_schema.foreign_table_1; +DROP TRIGGER trigtest_after_stmt ON foreign_schema.foreign_table_1; +DROP TRIGGER trigtest_after_row ON foreign_schema.foreign_table_1; +DROP FUNCTION dummy_trigger(); +-- Table inheritance +CREATE TABLE fd_pt1 ( + c1 integer NOT NULL, + c2 text, + c3 date +); +CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1) + SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 + +DROP FOREIGN TABLE ft2; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | + +CREATE FOREIGN TABLE ft2 ( + c1 integer NOT NULL, + c2 text, + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 + +CREATE TABLE ct3() INHERITS(ft2); +CREATE FOREIGN TABLE ft3 ( + c1 integer NOT NULL, + c2 text, + c3 date +) INHERITS(ft2) + SERVER s0; +NOTICE: merging column "c1" with inherited definition +NOTICE: merging column "c2" with inherited definition +NOTICE: merging column "c3" with inherited definition +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 +Child tables: ct3, + ft3, FOREIGN + +\d+ ct3 + Table "public.ct3" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Inherits: ft2 + +\d+ ft3 + Foreign table "public.ft3" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +Inherits: ft2 + +-- add attributes recursively +ALTER TABLE fd_pt1 ADD COLUMN c4 integer; +ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0; +ALTER TABLE fd_pt1 ADD COLUMN c6 integer; +ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL; +ALTER TABLE fd_pt1 ADD COLUMN c8 integer; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | + c4 | integer | | | | plain | | + c5 | integer | | | 0 | plain | | + c6 | integer | | | | plain | | + c7 | integer | | not null | | plain | | + c8 | integer | | | | plain | | +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | + c4 | integer | | | | | plain | | + c5 | integer | | | 0 | | plain | | + c6 | integer | | | | | plain | | + c7 | integer | | not null | | | plain | | + c8 | integer | | | | | plain | | +Server: s0 +FDW options: 
(delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 +Child tables: ct3, + ft3, FOREIGN + +\d+ ct3 + Table "public.ct3" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | + c4 | integer | | | | plain | | + c5 | integer | | | 0 | plain | | + c6 | integer | | | | plain | | + c7 | integer | | not null | | plain | | + c8 | integer | | | | plain | | +Inherits: ft2 + +\d+ ft3 + Foreign table "public.ft3" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | + c4 | integer | | | | | plain | | + c5 | integer | | | 0 | | plain | | + c6 | integer | | | | | plain | | + c7 | integer | | not null | | | plain | | + c8 | integer | | | | | plain | | +Server: s0 +Inherits: ft2 + +-- alter attributes recursively +ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0; +ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT; +ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL; +ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL; +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR +ERROR: "ft2" is not a table +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10); +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text; +ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000; +ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100); +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1; +ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | 10000 | + c2 | text | | | | extended | | + c3 | date | | | | plain | | + c4 | integer | | | 0 | plain | | + c5 | integer | | | | plain | | + c6 | integer | | not null | | plain | | + c7 | integer | | | | plain | | + c8 | text | | | | external | | +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | 10000 | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | + c4 | integer | | | 0 | | plain | | + c5 | integer | | | | | plain | | + c6 | integer | | not null | | | plain | | + c7 | integer | | | | | plain | | + c8 | text | | | | | external | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 +Child tables: ct3, + ft3, FOREIGN + +-- drop attributes recursively +ALTER TABLE fd_pt1 DROP COLUMN c4; +ALTER TABLE fd_pt1 DROP COLUMN c5; +ALTER TABLE fd_pt1 DROP COLUMN c6; +ALTER TABLE fd_pt1 DROP COLUMN c7; +ALTER TABLE fd_pt1 DROP COLUMN c8; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | 10000 | + c2 | 
text | | | | extended | | + c3 | date | | | | plain | | +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | 10000 | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 +Child tables: ct3, + ft3, FOREIGN + +-- add constraints recursively +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT; +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); +-- connoinherit should be true for NO INHERIT constraint +SELECT relname, conname, contype, conislocal, coninhcount, connoinherit + FROM pg_class AS pc JOIN pg_constraint AS pgc ON (conrelid = pc.oid) + WHERE pc.relname = 'fd_pt1' + ORDER BY 1,2; + relname | conname | contype | conislocal | coninhcount | connoinherit +---------+------------+---------+------------+-------------+-------------- + fd_pt1 | fd_pt1chk1 | c | t | 0 | t + fd_pt1 | fd_pt1chk2 | c | t | 0 | f +(2 rows) + +-- child does not inherit NO INHERIT constraints +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | 10000 | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Check constraints: + "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT + "fd_pt1chk2" CHECK (c2 <> ''::text) +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | 10000 | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Check constraints: + "fd_pt1chk2" CHECK (c2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 +Child tables: ct3, + ft3, FOREIGN + +DROP FOREIGN TABLE ft2; -- ERROR +ERROR: cannot drop foreign table ft2 because other objects depend on it +DETAIL: table ct3 depends on foreign table ft2 +foreign table ft3 depends on foreign table ft2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+DROP FOREIGN TABLE ft2 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table ct3 +drop cascades to foreign table ft3 +CREATE FOREIGN TABLE ft2 ( + c1 integer NOT NULL, + c2 text, + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +-- child must have parent's INHERIT constraints +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR +ERROR: child table is missing constraint "fd_pt1chk2" +ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; +-- child does not inherit NO INHERIT constraints +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | 10000 | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Check constraints: + "fd_pt1chk1" CHECK (c1 > 0) NO INHERIT + "fd_pt1chk2" CHECK (c2 <> ''::text) +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Check constraints: + "fd_pt1chk2" CHECK (c2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 + +-- drop constraints recursively +ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE; +ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE; +-- NOT VALID case +INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date); +ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | 10000 | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Check constraints: + "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Check constraints: + "fd_pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) NOT VALID +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 + +-- VALIDATE CONSTRAINT need do nothing on foreign tables +ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | 10000 | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Check constraints: + "fd_pt1chk3" CHECK (c2 <> ''::text) +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description 
+--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Check constraints: + "fd_pt1chk2" CHECK (c2 <> ''::text) + "fd_pt1chk3" CHECK (c2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 + +-- changes name of an attribute recursively +ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; +ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; +ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3; +-- changes name of a constraint recursively +ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; +\d+ fd_pt1 + Table "public.fd_pt1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + f1 | integer | | not null | | plain | 10000 | + f2 | text | | | | extended | | + f3 | date | | | | plain | | +Check constraints: + "f2_check" CHECK (f2 <> ''::text) +Child tables: ft2, FOREIGN + +\d+ ft2 + Foreign table "public.ft2" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + f1 | integer | | not null | | | plain | | + f2 | text | | | | | extended | | + f3 | date | | | | | plain | | +Check constraints: + "f2_check" CHECK (f2 <> ''::text) + "fd_pt1chk2" CHECK (f2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') +Inherits: fd_pt1 + +DROP TABLE fd_pt1 CASCADE; +NOTICE: drop cascades to foreign table ft2 +-- IMPORT FOREIGN SCHEMA +IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR +ERROR: foreign-data wrapper "foo" has no handler +IMPORT FOREIGN SCHEMA s1 LIMIT TO (t1) FROM SERVER s9 INTO public; --ERROR +ERROR: foreign-data wrapper "foo" has no handler +IMPORT FOREIGN SCHEMA s1 EXCEPT (t1) FROM SERVER s9 INTO public; -- ERROR +ERROR: foreign-data wrapper "foo" has no handler +IMPORT FOREIGN SCHEMA s1 EXCEPT (t1, t2) FROM SERVER s9 INTO public +OPTIONS (option1 'value1', option2 'value2'); -- ERROR +ERROR: foreign-data wrapper "foo" has no handler +-- DROP FOREIGN TABLE +DROP FOREIGN TABLE no_table; -- ERROR +ERROR: foreign table "no_table" does not exist +DROP FOREIGN TABLE IF EXISTS no_table; +NOTICE: foreign table "no_table" does not exist, skipping +DROP FOREIGN TABLE foreign_schema.foreign_table_1; +-- REASSIGN OWNED/DROP OWNED of foreign objects +REASSIGN OWNED BY regress_test_role TO regress_test_role2; +DROP OWNED BY regress_test_role2; +ERROR: cannot drop desired object(s) because other objects depend on them +DETAIL: user mapping for regress_test_role on server s5 depends on server s5 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
+DROP OWNED BY regress_test_role2 CASCADE; +NOTICE: drop cascades to user mapping for regress_test_role on server s5 +-- Foreign partition DDL stuff +CREATE TABLE fd_pt2 ( + c1 integer NOT NULL, + c2 text, + c3 date +) PARTITION BY LIST (c1); +CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1) + SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +\d+ fd_pt2 + Partitioned table "public.fd_pt2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Partition key: LIST (c1) +Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN + +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Partition of: fd_pt2 FOR VALUES IN (1) +Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +-- partition cannot have additional columns +DROP FOREIGN TABLE fd_pt2_1; +CREATE FOREIGN TABLE fd_pt2_1 ( + c1 integer NOT NULL, + c2 text, + c3 date, + c4 char +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+--------------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | + c4 | character(1) | | | | | extended | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ERROR: table "fd_pt2_1" contains column "c4" not found in parent "fd_pt2" +DETAIL: The new partition may contain only the columns present in parent. 
+DROP FOREIGN TABLE fd_pt2_1; +\d+ fd_pt2 + Partitioned table "public.fd_pt2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Partition key: LIST (c1) +Number of partitions: 0 + +CREATE FOREIGN TABLE fd_pt2_1 ( + c1 integer NOT NULL, + c2 text, + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +-- no attach partition validation occurs for foreign tables +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +\d+ fd_pt2 + Partitioned table "public.fd_pt2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Partition key: LIST (c1) +Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN + +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | | | | plain | | +Partition of: fd_pt2 FOR VALUES IN (1) +Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +-- cannot add column to a partition +ALTER TABLE fd_pt2_1 ADD c4 char; +ERROR: cannot add column to a partition +-- ok to have a partition's own constraints though +ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL; +ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); +\d+ fd_pt2 + Partitioned table "public.fd_pt2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | | | extended | | + c3 | date | | | | plain | | +Partition key: LIST (c1) +Partitions: fd_pt2_1 FOR VALUES IN (1), FOREIGN + +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | not null | | | plain | | +Partition of: fd_pt2 FOR VALUES IN (1) +Partition constraint: ((c1 IS NOT NULL) AND (c1 = 1)) +Check constraints: + "p21chk" CHECK (c2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +-- cannot drop inherited NOT NULL constraint from a partition +ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL; +ERROR: column "c1" is marked NOT NULL in parent table +-- partition must 
have parent's constraints +ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; +ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL; +\d+ fd_pt2 + Partitioned table "public.fd_pt2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | not null | | extended | | + c3 | date | | | | plain | | +Partition key: LIST (c1) +Number of partitions: 0 + +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | | | | extended | | + c3 | date | | not null | | | plain | | +Check constraints: + "p21chk" CHECK (c2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ERROR: column "c2" in child table must be marked NOT NULL +ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL; +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; +ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); +\d+ fd_pt2 + Partitioned table "public.fd_pt2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + c1 | integer | | not null | | plain | | + c2 | text | | not null | | extended | | + c3 | date | | | | plain | | +Partition key: LIST (c1) +Check constraints: + "fd_pt2chk1" CHECK (c1 > 0) +Number of partitions: 0 + +\d+ fd_pt2_1 + Foreign table "public.fd_pt2_1" + Column | Type | Collation | Nullable | Default | FDW options | Storage | Stats target | Description +--------+---------+-----------+----------+---------+-------------+----------+--------------+------------- + c1 | integer | | not null | | | plain | | + c2 | text | | not null | | | extended | | + c3 | date | | not null | | | plain | | +Check constraints: + "p21chk" CHECK (c2 <> ''::text) +Server: s0 +FDW options: (delimiter ',', quote '"', "be quoted" 'value') + +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +ERROR: child table is missing constraint "fd_pt2chk1" +ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +DROP FOREIGN TABLE fd_pt2_1; +DROP TABLE fd_pt2; +-- foreign table cannot be part of partition tree made of temporary +-- relations. 
+CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); +CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT + SERVER s0; -- ERROR +ERROR: cannot create a permanent relation as partition of temporary relation "temp_parted" +CREATE FOREIGN TABLE foreign_part (a int) SERVER s0; +ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR +ERROR: cannot attach a permanent relation as partition of temporary relation "temp_parted" +DROP FOREIGN TABLE foreign_part; +DROP TABLE temp_parted; +-- Cleanup +DROP SCHEMA foreign_schema CASCADE; +DROP ROLE regress_test_role; -- ERROR +ERROR: role "regress_test_role" cannot be dropped because some objects depend on it +DETAIL: privileges for foreign-data wrapper foo +privileges for server s4 +owner of user mapping for regress_test_role on server s6 +DROP SERVER t1 CASCADE; +NOTICE: drop cascades to user mapping for public on server t1 +DROP USER MAPPING FOR regress_test_role SERVER s6; +DROP FOREIGN DATA WRAPPER foo CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to server s4 +drop cascades to user mapping for regress_foreign_data_user on server s4 +drop cascades to server s6 +drop cascades to server s9 +drop cascades to user mapping for regress_unprivileged_role on server s9 +DROP SERVER s8 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to user mapping for regress_foreign_data_user on server s8 +drop cascades to user mapping for public on server s8 +DROP ROLE regress_test_indirect; +DROP ROLE regress_test_role; +DROP ROLE regress_unprivileged_role; -- ERROR +ERROR: role "regress_unprivileged_role" cannot be dropped because some objects depend on it +DETAIL: privileges for foreign-data wrapper postgresql +REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role; +DROP ROLE regress_unprivileged_role; +DROP ROLE regress_test_role2; +DROP FOREIGN DATA WRAPPER postgresql CASCADE; +DROP FOREIGN DATA WRAPPER dummy CASCADE; +NOTICE: drop cascades to server s0 +\c +DROP ROLE regress_foreign_data_user; +-- At this point we should have no wrappers, no servers, and no mappings. 
+SELECT fdwname, fdwhandler, fdwvalidator, fdwoptions FROM pg_foreign_data_wrapper; + fdwname | fdwhandler | fdwvalidator | fdwoptions +---------+------------+--------------+------------ +(0 rows) + +SELECT srvname, srvoptions FROM pg_foreign_server; + srvname | srvoptions +---------+------------ +(0 rows) + +SELECT * FROM pg_user_mapping; + oid | umuser | umserver | umoptions +-----+--------+----------+----------- +(0 rows) + diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out new file mode 100644 index 0000000..12e523c --- /dev/null +++ b/src/test/regress/expected/foreign_key.out @@ -0,0 +1,2919 @@ +-- +-- FOREIGN KEY +-- +-- MATCH FULL +-- +-- First test, check and cascade +-- +CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text ); +CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE, ftest2 int ); +-- Insert test data into PKTABLE +INSERT INTO PKTABLE VALUES (1, 'Test1'); +INSERT INTO PKTABLE VALUES (2, 'Test2'); +INSERT INTO PKTABLE VALUES (3, 'Test3'); +INSERT INTO PKTABLE VALUES (4, 'Test4'); +INSERT INTO PKTABLE VALUES (5, 'Test5'); +-- Insert successful rows into FK TABLE +INSERT INTO FKTABLE VALUES (1, 2); +INSERT INTO FKTABLE VALUES (2, 3); +INSERT INTO FKTABLE VALUES (3, 4); +INSERT INTO FKTABLE VALUES (NULL, 1); +-- Insert a failed row into FK TABLE +INSERT INTO FKTABLE VALUES (100, 2); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(100) is not present in table "pktable". +-- Check FKTABLE +SELECT * FROM FKTABLE; + ftest1 | ftest2 +--------+-------- + 1 | 2 + 2 | 3 + 3 | 4 + | 1 +(4 rows) + +-- Delete a row from PK TABLE +DELETE FROM PKTABLE WHERE ptest1=1; +-- Check FKTABLE for removal of matched row +SELECT * FROM FKTABLE; + ftest1 | ftest2 +--------+-------- + 2 | 3 + 3 | 4 + | 1 +(3 rows) + +-- Update a row from PK TABLE +UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2; +-- Check FKTABLE for update of matched row +SELECT * FROM FKTABLE; + ftest1 | ftest2 +--------+-------- + 3 | 4 + | 1 + 1 | 3 +(3 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- +-- check set NULL and table constraint on multiple columns +-- +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2) + REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL); +-- Test comments +COMMENT ON CONSTRAINT constrname_wrong ON FKTABLE IS 'fk constraint comment'; +ERROR: constraint "constrname_wrong" for table "fktable" does not exist +COMMENT ON CONSTRAINT constrname ON FKTABLE IS 'fk constraint comment'; +COMMENT ON CONSTRAINT constrname ON FKTABLE IS NULL; +-- Insert test data into PKTABLE +INSERT INTO PKTABLE VALUES (1, 2, 'Test1'); +INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2'); +INSERT INTO PKTABLE VALUES (2, 4, 'Test2'); +INSERT INTO PKTABLE VALUES (3, 6, 'Test3'); +INSERT INTO PKTABLE VALUES (4, 8, 'Test4'); +INSERT INTO PKTABLE VALUES (5, 10, 'Test5'); +-- Insert successful rows into FK TABLE +INSERT INTO FKTABLE VALUES (1, 2, 4); +INSERT INTO FKTABLE VALUES (1, 3, 5); +INSERT INTO FKTABLE VALUES (2, 4, 8); +INSERT INTO FKTABLE VALUES (3, 6, 12); +INSERT INTO FKTABLE VALUES (NULL, NULL, 0); +-- Insert failed rows into FK TABLE +INSERT INTO FKTABLE VALUES (100, 2, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" +DETAIL: Key (ftest1, 
ftest2)=(100, 2) is not present in table "pktable". +INSERT INTO FKTABLE VALUES (2, 2, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" +DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable". +INSERT INTO FKTABLE VALUES (NULL, 2, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" +DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. +INSERT INTO FKTABLE VALUES (1, NULL, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" +DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. +-- Check FKTABLE +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 2 | 4 + 1 | 3 | 5 + 2 | 4 | 8 + 3 | 6 | 12 + | | 0 +(5 rows) + +-- Delete a row from PK TABLE +DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2; +-- Check FKTABLE for removal of matched row +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 3 | 5 + 2 | 4 | 8 + 3 | 6 | 12 + | | 0 + | | 4 +(5 rows) + +-- Delete another row from PK TABLE +DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10; +-- Check FKTABLE (should be no change) +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 3 | 5 + 2 | 4 | 8 + 3 | 6 | 12 + | | 0 + | | 4 +(5 rows) + +-- Update a row from PK TABLE +UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2; +-- Check FKTABLE for update of matched row +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 3 | 5 + 3 | 6 | 12 + | | 0 + | | 4 + | | 8 +(5 rows) + +-- Check update with part of key null +UPDATE FKTABLE SET ftest1 = NULL WHERE ftest1 = 1; +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname" +DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
+-- Check update with old and new key values equal +UPDATE FKTABLE SET ftest1 = 1 WHERE ftest1 = 1; +-- Try altering the column type where foreign keys are involved +ALTER TABLE PKTABLE ALTER COLUMN ptest1 TYPE bigint; +ALTER TABLE FKTABLE ALTER COLUMN ftest1 TYPE bigint; +SELECT * FROM PKTABLE; + ptest1 | ptest2 | ptest3 +--------+--------+--------- + 1 | 3 | Test1-2 + 3 | 6 | Test3 + 4 | 8 | Test4 + 1 | 4 | Test2 +(4 rows) + +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 3 | 6 | 12 + | | 0 + | | 4 + | | 8 + 1 | 3 | 5 +(5 rows) + +DROP TABLE PKTABLE CASCADE; +NOTICE: drop cascades to constraint constrname on table fktable +DROP TABLE FKTABLE; +-- +-- check set default and table constraint on multiple columns +-- +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) ); +CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2) + REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT); +-- Insert a value in PKTABLE for default +INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!'); +-- Insert test data into PKTABLE +INSERT INTO PKTABLE VALUES (1, 2, 'Test1'); +INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2'); +INSERT INTO PKTABLE VALUES (2, 4, 'Test2'); +INSERT INTO PKTABLE VALUES (3, 6, 'Test3'); +INSERT INTO PKTABLE VALUES (4, 8, 'Test4'); +INSERT INTO PKTABLE VALUES (5, 10, 'Test5'); +-- Insert successful rows into FK TABLE +INSERT INTO FKTABLE VALUES (1, 2, 4); +INSERT INTO FKTABLE VALUES (1, 3, 5); +INSERT INTO FKTABLE VALUES (2, 4, 8); +INSERT INTO FKTABLE VALUES (3, 6, 12); +INSERT INTO FKTABLE VALUES (NULL, NULL, 0); +-- Insert failed rows into FK TABLE +INSERT INTO FKTABLE VALUES (100, 2, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2" +DETAIL: Key (ftest1, ftest2)=(100, 2) is not present in table "pktable". +INSERT INTO FKTABLE VALUES (2, 2, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2" +DETAIL: Key (ftest1, ftest2)=(2, 2) is not present in table "pktable". +INSERT INTO FKTABLE VALUES (NULL, 2, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2" +DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. +INSERT INTO FKTABLE VALUES (1, NULL, 4); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname2" +DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. 
+-- Check FKTABLE +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 2 | 4 + 1 | 3 | 5 + 2 | 4 | 8 + 3 | 6 | 12 + | | 0 +(5 rows) + +-- Delete a row from PK TABLE +DELETE FROM PKTABLE WHERE ptest1=1 and ptest2=2; +-- Check FKTABLE to check for removal +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 3 | 5 + 2 | 4 | 8 + 3 | 6 | 12 + | | 0 + -1 | -2 | 4 +(5 rows) + +-- Delete another row from PK TABLE +DELETE FROM PKTABLE WHERE ptest1=5 and ptest2=10; +-- Check FKTABLE (should be no change) +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 3 | 5 + 2 | 4 | 8 + 3 | 6 | 12 + | | 0 + -1 | -2 | 4 +(5 rows) + +-- Update a row from PK TABLE +UPDATE PKTABLE SET ptest1=1 WHERE ptest1=2; +-- Check FKTABLE for update of matched row +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 +--------+--------+-------- + 1 | 3 | 5 + 3 | 6 | 12 + | | 0 + -1 | -2 | 4 + -1 | -2 | 8 +(5 rows) + +-- this should fail for lack of CASCADE +DROP TABLE PKTABLE; +ERROR: cannot drop table pktable because other objects depend on it +DETAIL: constraint constrname2 on table fktable depends on table pktable +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE PKTABLE CASCADE; +NOTICE: drop cascades to constraint constrname2 on table fktable +DROP TABLE FKTABLE; +-- +-- First test, check with no on delete or on update +-- +CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text ); +CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL, ftest2 int ); +-- Insert test data into PKTABLE +INSERT INTO PKTABLE VALUES (1, 'Test1'); +INSERT INTO PKTABLE VALUES (2, 'Test2'); +INSERT INTO PKTABLE VALUES (3, 'Test3'); +INSERT INTO PKTABLE VALUES (4, 'Test4'); +INSERT INTO PKTABLE VALUES (5, 'Test5'); +-- Insert successful rows into FK TABLE +INSERT INTO FKTABLE VALUES (1, 2); +INSERT INTO FKTABLE VALUES (2, 3); +INSERT INTO FKTABLE VALUES (3, 4); +INSERT INTO FKTABLE VALUES (NULL, 1); +-- Insert a failed row into FK TABLE +INSERT INTO FKTABLE VALUES (100, 2); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(100) is not present in table "pktable". +-- Check FKTABLE +SELECT * FROM FKTABLE; + ftest1 | ftest2 +--------+-------- + 1 | 2 + 2 | 3 + 3 | 4 + | 1 +(4 rows) + +-- Check PKTABLE +SELECT * FROM PKTABLE; + ptest1 | ptest2 +--------+-------- + 1 | Test1 + 2 | Test2 + 3 | Test3 + 4 | Test4 + 5 | Test5 +(5 rows) + +-- Delete a row from PK TABLE (should fail) +DELETE FROM PKTABLE WHERE ptest1=1; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable" +DETAIL: Key (ptest1)=(1) is still referenced from table "fktable". +-- Delete a row from PK TABLE (should succeed) +DELETE FROM PKTABLE WHERE ptest1=5; +-- Check PKTABLE for deletes +SELECT * FROM PKTABLE; + ptest1 | ptest2 +--------+-------- + 1 | Test1 + 2 | Test2 + 3 | Test3 + 4 | Test4 +(4 rows) + +-- Update a row from PK TABLE (should fail) +UPDATE PKTABLE SET ptest1=0 WHERE ptest1=2; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable" +DETAIL: Key (ptest1)=(2) is still referenced from table "fktable". 
+-- Update a row from PK TABLE (should succeed) +UPDATE PKTABLE SET ptest1=0 WHERE ptest1=4; +-- Check PKTABLE for updates +SELECT * FROM PKTABLE; + ptest1 | ptest2 +--------+-------- + 1 | Test1 + 2 | Test2 + 3 | Test3 + 0 | Test4 +(4 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- +-- Check initial check upon ALTER TABLE +-- +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, PRIMARY KEY(ptest1, ptest2) ); +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int ); +INSERT INTO PKTABLE VALUES (1, 2); +INSERT INTO FKTABLE VALUES (1, NULL); +ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) REFERENCES PKTABLE MATCH FULL; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" +DETAIL: MATCH FULL does not allow mixing of null and nonnull key values. +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- MATCH SIMPLE +-- Base test restricting update/delete +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) ); +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3 + FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE); +-- Insert Primary Key values +INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1'); +INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); +INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); +INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); +-- Insert Foreign Key values +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); +INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); +INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); +INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5); +-- Insert a failed values +INSERT INTO FKTABLE VALUES (1, 2, 7, 6); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3" +DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable". +-- Show FKTABLE +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 1 | 2 | 3 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(5 rows) + +-- Try to update something that should fail +UPDATE PKTABLE set ptest2=5 where ptest2=2; +ERROR: update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable" +DETAIL: Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable". +-- Try to update something that should succeed +UPDATE PKTABLE set ptest1=1 WHERE ptest2=3; +-- Try to delete something that should fail +DELETE FROM PKTABLE where ptest1=1 and ptest2=2 and ptest3=3; +ERROR: update or delete on table "pktable" violates foreign key constraint "constrname3" on table "fktable" +DETAIL: Key (ptest1, ptest2, ptest3)=(1, 2, 3) is still referenced from table "fktable". 
+-- Try to delete something that should work +DELETE FROM PKTABLE where ptest1=2; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 1 | 2 | 3 | test1 + 1 | 3 | 3 | test2 + 1 | 3 | 4 | test3 +(3 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 1 | 2 | 3 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(5 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- restrict with null values +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, UNIQUE(ptest1, ptest2, ptest3) ); +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3 + FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE (ptest1, ptest2, ptest3)); +INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1'); +INSERT INTO PKTABLE VALUES (1, 3, NULL, 'test2'); +INSERT INTO PKTABLE VALUES (2, NULL, 4, 'test3'); +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +DELETE FROM PKTABLE WHERE ptest1 = 2; +SELECT * FROM PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 1 | 2 | 3 | test1 + 1 | 3 | | test2 +(2 rows) + +SELECT * FROM FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 1 | 2 | 3 | 1 +(1 row) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- cascade update/delete +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) ); +CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3 + FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE + ON DELETE CASCADE ON UPDATE CASCADE); +-- Insert Primary Key values +INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1'); +INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); +INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); +INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); +-- Insert Foreign Key values +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); +INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); +INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); +INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5); +-- Insert a failed values +INSERT INTO FKTABLE VALUES (1, 2, 7, 6); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3" +DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable". 
+-- Show FKTABLE +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 1 | 2 | 3 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(5 rows) + +-- Try to update something that will cascade +UPDATE PKTABLE set ptest2=5 where ptest2=2; +-- Try to update something that should not cascade +UPDATE PKTABLE set ptest1=1 WHERE ptest2=3; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | 4 | 5 | test4 + 1 | 5 | 3 | test1 + 1 | 3 | 3 | test2 + 1 | 3 | 4 | test3 +(4 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + 1 | 5 | 3 | 1 +(5 rows) + +-- Try to delete something that should cascade +DELETE FROM PKTABLE where ptest1=1 and ptest2=5 and ptest3=3; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | 4 | 5 | test4 + 1 | 3 | 3 | test2 + 1 | 3 | 4 | test3 +(3 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(4 rows) + +-- Try to delete something that should not have a cascade +DELETE FROM PKTABLE where ptest1=2; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 1 | 3 | 3 | test2 + 1 | 3 | 4 | test3 +(2 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(4 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- set null update / set default delete +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) ); +CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3 + FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE + ON DELETE SET DEFAULT ON UPDATE SET NULL); +-- Insert Primary Key values +INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1'); +INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); +INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); +INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); +-- Insert Foreign Key values +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (2, 3, 4, 1); +INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); +INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); +INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); +INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5); +-- Insert a failed values +INSERT INTO FKTABLE VALUES (1, 2, 7, 6); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3" +DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable". 
+-- Show FKTABLE +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 1 | 2 | 3 | 1 + 2 | 3 | 4 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(6 rows) + +-- Try to update something that will set null +UPDATE PKTABLE set ptest2=5 where ptest2=2; +-- Try to update something that should not set null +UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | 3 | 4 | test3 + 2 | 4 | 5 | test4 + 1 | 5 | 3 | test1 + 1 | 2 | 3 | test2 +(4 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 2 | 3 | 4 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + | | | 1 +(6 rows) + +-- Try to delete something that should set default +DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | 4 | 5 | test4 + 1 | 5 | 3 | test1 + 1 | 2 | 3 | test2 +(3 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + | | | 1 + 0 | | | 1 +(6 rows) + +-- Try to delete something that should not set default +DELETE FROM PKTABLE where ptest2=5; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | 4 | 5 | test4 + 1 | 2 | 3 | test2 +(2 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + | | | 1 + 0 | | | 1 +(6 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- set default update / set null delete +CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) ); +CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int DEFAULT -1, ftest3 int DEFAULT -2, ftest4 int, CONSTRAINT constrname3 + FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE + ON DELETE SET NULL ON UPDATE SET DEFAULT); +-- Insert Primary Key values +INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1'); +INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2'); +INSERT INTO PKTABLE VALUES (2, 3, 4, 'test3'); +INSERT INTO PKTABLE VALUES (2, 4, 5, 'test4'); +INSERT INTO PKTABLE VALUES (2, -1, 5, 'test5'); +-- Insert Foreign Key values +INSERT INTO FKTABLE VALUES (1, 2, 3, 1); +INSERT INTO FKTABLE VALUES (2, 3, 4, 1); +INSERT INTO FKTABLE VALUES (2, 4, 5, 1); +INSERT INTO FKTABLE VALUES (NULL, 2, 3, 2); +INSERT INTO FKTABLE VALUES (2, NULL, 3, 3); +INSERT INTO FKTABLE VALUES (NULL, 2, 7, 4); +INSERT INTO FKTABLE VALUES (NULL, 3, 4, 5); +-- Insert a failed values +INSERT INTO FKTABLE VALUES (1, 2, 7, 6); +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3" +DETAIL: Key (ftest1, ftest2, ftest3)=(1, 2, 7) is not present in table "pktable". +-- Show FKTABLE +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 1 | 2 | 3 | 1 + 2 | 3 | 4 | 1 + 2 | 4 | 5 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 +(7 rows) + +-- Try to update something that will fail +UPDATE PKTABLE set ptest2=5 where ptest2=2; +ERROR: insert or update on table "fktable" violates foreign key constraint "constrname3" +DETAIL: Key (ftest1, ftest2, ftest3)=(0, -1, -2) is not present in table "pktable". 
+-- Try to update something that will set default +UPDATE PKTABLE set ptest1=0, ptest2=-1, ptest3=-2 where ptest2=2; +UPDATE PKTABLE set ptest2=10 where ptest2=4; +-- Try to update something that should not set default +UPDATE PKTABLE set ptest2=2 WHERE ptest2=3 and ptest1=1; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | 3 | 4 | test3 + 2 | -1 | 5 | test5 + 0 | -1 | -2 | test1 + 2 | 10 | 5 | test4 + 1 | 2 | 3 | test2 +(5 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + 2 | 3 | 4 | 1 + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + 0 | -1 | -2 | 1 + 0 | -1 | -2 | 1 +(7 rows) + +-- Try to delete something that should set null +DELETE FROM PKTABLE where ptest1=2 and ptest2=3 and ptest3=4; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 2 | -1 | 5 | test5 + 0 | -1 | -2 | test1 + 2 | 10 | 5 | test4 + 1 | 2 | 3 | test2 +(4 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + 0 | -1 | -2 | 1 + 0 | -1 | -2 | 1 + | | | 1 +(7 rows) + +-- Try to delete something that should not set null +DELETE FROM PKTABLE where ptest2=-1 and ptest3=5; +-- Show PKTABLE and FKTABLE +SELECT * from PKTABLE; + ptest1 | ptest2 | ptest3 | ptest4 +--------+--------+--------+-------- + 0 | -1 | -2 | test1 + 2 | 10 | 5 | test4 + 1 | 2 | 3 | test2 +(3 rows) + +SELECT * from FKTABLE; + ftest1 | ftest2 | ftest3 | ftest4 +--------+--------+--------+-------- + | 2 | 3 | 2 + 2 | | 3 | 3 + | 2 | 7 | 4 + | 3 | 4 | 5 + 0 | -1 | -2 | 1 + 0 | -1 | -2 | 1 + | | | 1 +(7 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- Test for ON DELETE SET NULL/DEFAULT (column_list); +CREATE TABLE PKTABLE (tid int, id int, PRIMARY KEY (tid, id)); +CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (bar)); +ERROR: column "bar" referenced in foreign key constraint does not exist +CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, id) REFERENCES PKTABLE ON DELETE SET NULL (foo)); +ERROR: column "foo" referenced in ON DELETE SET action must be part of foreign key +CREATE TABLE FKTABLE (tid int, id int, foo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE SET NULL (foo)); +ERROR: a column list with SET NULL is only supported for ON DELETE actions +LINE 1: ...oo int, FOREIGN KEY (tid, foo) REFERENCES PKTABLE ON UPDATE ... 
+ ^ +CREATE TABLE FKTABLE ( + tid int, id int, + fk_id_del_set_null int, + fk_id_del_set_default int DEFAULT 0, + FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES PKTABLE ON DELETE SET NULL (fk_id_del_set_null), + FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES PKTABLE ON DELETE SET DEFAULT (fk_id_del_set_default) +); +SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conrelid = 'fktable'::regclass::oid ORDER BY oid; + pg_get_constraintdef +-------------------------------------------------------------------------------------------------------------------- + FOREIGN KEY (tid, fk_id_del_set_null) REFERENCES pktable(tid, id) ON DELETE SET NULL (fk_id_del_set_null) + FOREIGN KEY (tid, fk_id_del_set_default) REFERENCES pktable(tid, id) ON DELETE SET DEFAULT (fk_id_del_set_default) +(2 rows) + +INSERT INTO PKTABLE VALUES (1, 0), (1, 1), (1, 2); +INSERT INTO FKTABLE VALUES + (1, 1, 1, NULL), + (1, 2, NULL, 2); +DELETE FROM PKTABLE WHERE id = 1 OR id = 2; +SELECT * FROM FKTABLE ORDER BY id; + tid | id | fk_id_del_set_null | fk_id_del_set_default +-----+----+--------------------+----------------------- + 1 | 1 | | + 1 | 2 | | 0 +(2 rows) + +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- Test some invalid FK definitions +CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY, someoid oid); +CREATE TABLE FKTABLE_FAIL1 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest2) REFERENCES PKTABLE); +ERROR: column "ftest2" referenced in foreign key constraint does not exist +CREATE TABLE FKTABLE_FAIL2 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(ptest2)); +ERROR: column "ptest2" referenced in foreign key constraint does not exist +CREATE TABLE FKTABLE_FAIL3 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (tableoid) REFERENCES PKTABLE(someoid)); +ERROR: system columns cannot be used in foreign keys +CREATE TABLE FKTABLE_FAIL4 ( ftest1 oid, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(tableoid)); +ERROR: system columns cannot be used in foreign keys +DROP TABLE PKTABLE; +-- Test for referencing column number smaller than referenced constraint +CREATE TABLE PKTABLE (ptest1 int, ptest2 int, UNIQUE(ptest1, ptest2)); +CREATE TABLE FKTABLE_FAIL1 (ftest1 int REFERENCES pktable(ptest1)); +ERROR: there is no unique constraint matching given keys for referenced table "pktable" +DROP TABLE FKTABLE_FAIL1; +ERROR: table "fktable_fail1" does not exist +DROP TABLE PKTABLE; +-- +-- Tests for mismatched types +-- +-- Basic one column, two table setup +CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY); +INSERT INTO PKTABLE VALUES(42); +-- This next should fail, because int=inet does not exist +CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable); +ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer. +-- This should also fail for the same reason, but here we +-- give the column name +CREATE TABLE FKTABLE (ftest1 inet REFERENCES pktable(ptest1)); +ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: inet and integer. 
+-- This should succeed, even though they are different types, +-- because int=int8 exists and is a member of the integer opfamily +CREATE TABLE FKTABLE (ftest1 int8 REFERENCES pktable); +-- Check it actually works +INSERT INTO FKTABLE VALUES(42); -- should succeed +INSERT INTO FKTABLE VALUES(43); -- should fail +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(43) is not present in table "pktable". +UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed +UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(43) is not present in table "pktable". +DROP TABLE FKTABLE; +-- This should fail, because we'd have to cast numeric to int which is +-- not an implicit coercion (or use numeric=numeric, but that's not part +-- of the integer opfamily) +CREATE TABLE FKTABLE (ftest1 numeric REFERENCES pktable); +ERROR: foreign key constraint "fktable_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: numeric and integer. +DROP TABLE PKTABLE; +-- On the other hand, this should work because int implicitly promotes to +-- numeric, and we allow promotion on the FK side +CREATE TABLE PKTABLE (ptest1 numeric PRIMARY KEY); +INSERT INTO PKTABLE VALUES(42); +CREATE TABLE FKTABLE (ftest1 int REFERENCES pktable); +-- Check it actually works +INSERT INTO FKTABLE VALUES(42); -- should succeed +INSERT INTO FKTABLE VALUES(43); -- should fail +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(43) is not present in table "pktable". +UPDATE FKTABLE SET ftest1 = ftest1; -- should succeed +UPDATE FKTABLE SET ftest1 = ftest1 + 1; -- should fail +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(43) is not present in table "pktable". +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- Two columns, two tables +CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, PRIMARY KEY(ptest1, ptest2)); +-- This should fail, because we just chose really odd types +CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer. +-- Again, so should this... +CREATE TABLE FKTABLE (ftest1 cidr, ftest2 timestamp, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2)); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: cidr and integer. +-- This fails because we mixed up the column ordering +CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable); +ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest2" and "ptest1" are of incompatible types: inet and integer. +-- As does this... +CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest1, ptest2)); +ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest2" and "ptest1" are of incompatible types: inet and integer. +-- And again.. 
+CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest2, ptest1)); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest2" are of incompatible types: integer and inet. +-- This works... +CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest2, ptest1)); +DROP TABLE FKTABLE; +-- As does this +CREATE TABLE FKTABLE (ftest1 int, ftest2 inet, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2)); +DROP TABLE FKTABLE; +DROP TABLE PKTABLE; +-- Two columns, same table +-- Make sure this still works... +CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3, +ptest4) REFERENCES pktable(ptest1, ptest2)); +DROP TABLE PKTABLE; +-- And this, +CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3, +ptest4) REFERENCES pktable); +DROP TABLE PKTABLE; +-- This shouldn't (mixed up columns) +CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3, +ptest4) REFERENCES pktable(ptest2, ptest1)); +ERROR: foreign key constraint "pktable_ptest3_ptest4_fkey" cannot be implemented +DETAIL: Key columns "ptest3" and "ptest2" are of incompatible types: integer and inet. +-- Nor should this... (same reason, we have 4,3 referencing 1,2 which mismatches types +CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4, +ptest3) REFERENCES pktable(ptest1, ptest2)); +ERROR: foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented +DETAIL: Key columns "ptest4" and "ptest1" are of incompatible types: inet and integer. +-- Not this one either... Same as the last one except we didn't defined the columns being referenced. +CREATE TABLE PKTABLE (ptest1 int, ptest2 inet, ptest3 int, ptest4 inet, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4, +ptest3) REFERENCES pktable); +ERROR: foreign key constraint "pktable_ptest4_ptest3_fkey" cannot be implemented +DETAIL: Key columns "ptest4" and "ptest1" are of incompatible types: inet and integer. +-- +-- Now some cases with inheritance +-- Basic 2 table case: 1 column of matching types. +create table pktable_base (base1 int not null); +create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base); +create table fktable (ftest1 int references pktable(base1)); +-- now some ins, upd, del +insert into pktable(base1) values (1); +insert into pktable(base1) values (2); +-- let's insert a non-existent fktable value +insert into fktable(ftest1) values (3); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_fkey" +DETAIL: Key (ftest1)=(3) is not present in table "pktable". +-- let's make a valid row for that +insert into pktable(base1) values (3); +insert into fktable(ftest1) values (3); +-- let's try removing a row that should fail from pktable +delete from pktable where base1>2; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable" +DETAIL: Key (base1)=(3) is still referenced from table "fktable". +-- okay, let's try updating all of the base1 values to *4 +-- which should fail. 
+update pktable set base1=base1*4; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_fkey" on table "fktable" +DETAIL: Key (base1)=(3) is still referenced from table "fktable". +-- okay, let's try an update that should work. +update pktable set base1=base1*4 where base1<3; +-- and a delete that should work +delete from pktable where base1>3; +-- cleanup +drop table fktable; +delete from pktable; +-- Now 2 columns 2 tables, matching types +create table fktable (ftest1 int, ftest2 int, foreign key(ftest1, ftest2) references pktable(base1, ptest1)); +-- now some ins, upd, del +insert into pktable(base1, ptest1) values (1, 1); +insert into pktable(base1, ptest1) values (2, 2); +-- let's insert a non-existent fktable value +insert into fktable(ftest1, ftest2) values (3, 1); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" +DETAIL: Key (ftest1, ftest2)=(3, 1) is not present in table "pktable". +-- let's make a valid row for that +insert into pktable(base1,ptest1) values (3, 1); +insert into fktable(ftest1, ftest2) values (3, 1); +-- let's try removing a row that should fail from pktable +delete from pktable where base1>2; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable" +DETAIL: Key (base1, ptest1)=(3, 1) is still referenced from table "fktable". +-- okay, let's try updating all of the base1 values to *4 +-- which should fail. +update pktable set base1=base1*4; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_ftest1_ftest2_fkey" on table "fktable" +DETAIL: Key (base1, ptest1)=(3, 1) is still referenced from table "fktable". +-- okay, let's try an update that should work. +update pktable set base1=base1*4 where base1<3; +-- and a delete that should work +delete from pktable where base1>3; +-- cleanup +drop table fktable; +drop table pktable; +drop table pktable_base; +-- Now we'll do one all in 1 table with 2 columns of matching types +create table pktable_base(base1 int not null, base2 int); +create table pktable(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references + pktable(base1, ptest1)) inherits (pktable_base); +insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1); +insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1); +insert into pktable (base1, ptest1, base2, ptest2) values (2, 2, 2, 1); +insert into pktable (base1, ptest1, base2, ptest2) values (1, 3, 2, 2); +-- fails (3,2) isn't in base1, ptest1 +insert into pktable (base1, ptest1, base2, ptest2) values (2, 3, 3, 2); +ERROR: insert or update on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" +DETAIL: Key (base2, ptest2)=(3, 2) is not present in table "pktable". +-- fails (2,2) is being referenced +delete from pktable where base1=2; +ERROR: update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable" +DETAIL: Key (base1, ptest1)=(2, 2) is still referenced from table "pktable". +-- fails (1,1) is being referenced (twice) +update pktable set base1=3 where base1=1; +ERROR: update or delete on table "pktable" violates foreign key constraint "pktable_base2_ptest2_fkey" on table "pktable" +DETAIL: Key (base1, ptest1)=(1, 1) is still referenced from table "pktable". 
+-- this sequence of two deletes will work, since after the first there will be no (2,*) references +delete from pktable where base2=2; +delete from pktable where base1=2; +drop table pktable; +drop table pktable_base; +-- 2 columns (2 tables), mismatched types +create table pktable_base(base1 int not null); +create table pktable(ptest1 inet, primary key(base1, ptest1)) inherits (pktable_base); +-- just generally bad types (with and without column references on the referenced table) +create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "base1" are of incompatible types: cidr and integer. +create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1)); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "base1" are of incompatible types: cidr and integer. +-- let's mix up which columns reference which +create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable); +ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest2" and "base1" are of incompatible types: inet and integer. +create table fktable(ftest1 int, ftest2 inet, foreign key(ftest2, ftest1) references pktable(base1, ptest1)); +ERROR: foreign key constraint "fktable_ftest2_ftest1_fkey" cannot be implemented +DETAIL: Key columns "ftest2" and "base1" are of incompatible types: inet and integer. +create table fktable(ftest1 int, ftest2 inet, foreign key(ftest1, ftest2) references pktable(ptest1, base1)); +ERROR: foreign key constraint "fktable_ftest1_ftest2_fkey" cannot be implemented +DETAIL: Key columns "ftest1" and "ptest1" are of incompatible types: integer and inet. +drop table pktable; +drop table pktable_base; +-- 2 columns (1 table), mismatched types +create table pktable_base(base1 int not null, base2 int); +create table pktable(ptest1 inet, ptest2 inet[], primary key(base1, ptest1), foreign key(base2, ptest2) references + pktable(base1, ptest1)) inherits (pktable_base); +ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented +DETAIL: Key columns "ptest2" and "ptest1" are of incompatible types: inet[] and inet. +create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(base2, ptest2) references + pktable(ptest1, base1)) inherits (pktable_base); +ERROR: foreign key constraint "pktable_base2_ptest2_fkey" cannot be implemented +DETAIL: Key columns "base2" and "ptest1" are of incompatible types: integer and inet. +create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references + pktable(base1, ptest1)) inherits (pktable_base); +ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented +DETAIL: Key columns "ptest2" and "base1" are of incompatible types: inet and integer. +create table pktable(ptest1 inet, ptest2 inet, primary key(base1, ptest1), foreign key(ptest2, base2) references + pktable(base1, ptest1)) inherits (pktable_base); +ERROR: foreign key constraint "pktable_ptest2_base2_fkey" cannot be implemented +DETAIL: Key columns "ptest2" and "base1" are of incompatible types: inet and integer. 
+drop table pktable; +ERROR: table "pktable" does not exist +drop table pktable_base; +-- +-- Deferrable constraints +-- +-- deferrable, explicitly deferred +CREATE TABLE pktable ( + id INT4 PRIMARY KEY, + other INT4 +); +CREATE TABLE fktable ( + id INT4 PRIMARY KEY, + fk INT4 REFERENCES pktable DEFERRABLE +); +-- default to immediate: should fail +INSERT INTO fktable VALUES (5, 10); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(10) is not present in table "pktable". +-- explicitly defer the constraint +BEGIN; +SET CONSTRAINTS ALL DEFERRED; +INSERT INTO fktable VALUES (10, 15); +INSERT INTO pktable VALUES (15, 0); -- make the FK insert valid +COMMIT; +DROP TABLE fktable, pktable; +-- deferrable, initially deferred +CREATE TABLE pktable ( + id INT4 PRIMARY KEY, + other INT4 +); +CREATE TABLE fktable ( + id INT4 PRIMARY KEY, + fk INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED +); +-- default to deferred, should succeed +BEGIN; +INSERT INTO fktable VALUES (100, 200); +INSERT INTO pktable VALUES (200, 500); -- make the FK insert valid +COMMIT; +-- default to deferred, explicitly make immediate +BEGIN; +SET CONSTRAINTS ALL IMMEDIATE; +-- should fail +INSERT INTO fktable VALUES (500, 1000); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(1000) is not present in table "pktable". +COMMIT; +DROP TABLE fktable, pktable; +-- tricky behavior: according to SQL99, if a deferred constraint is set +-- to 'immediate' mode, it should be checked for validity *immediately*, +-- not when the current transaction commits (i.e. the mode change applies +-- retroactively) +CREATE TABLE pktable ( + id INT4 PRIMARY KEY, + other INT4 +); +CREATE TABLE fktable ( + id INT4 PRIMARY KEY, + fk INT4 REFERENCES pktable DEFERRABLE +); +BEGIN; +SET CONSTRAINTS ALL DEFERRED; +-- should succeed, for now +INSERT INTO fktable VALUES (1000, 2000); +-- should cause transaction abort, due to preceding error +SET CONSTRAINTS ALL IMMEDIATE; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(2000) is not present in table "pktable". +INSERT INTO pktable VALUES (2000, 3); -- too late +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT; +DROP TABLE fktable, pktable; +-- deferrable, initially deferred +CREATE TABLE pktable ( + id INT4 PRIMARY KEY, + other INT4 +); +CREATE TABLE fktable ( + id INT4 PRIMARY KEY, + fk INT4 REFERENCES pktable DEFERRABLE INITIALLY DEFERRED +); +BEGIN; +-- no error here +INSERT INTO fktable VALUES (100, 200); +-- error here on commit +COMMIT; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(200) is not present in table "pktable". +DROP TABLE pktable, fktable; +-- test notice about expensive referential integrity checks, +-- where the index cannot be used because of type incompatibilities. +CREATE TEMP TABLE pktable ( + id1 INT4 PRIMARY KEY, + id2 VARCHAR(4) UNIQUE, + id3 REAL UNIQUE, + UNIQUE(id1, id2, id3) +); +CREATE TEMP TABLE fktable ( + x1 INT4 REFERENCES pktable(id1), + x2 VARCHAR(4) REFERENCES pktable(id2), + x3 REAL REFERENCES pktable(id3), + x4 TEXT, + x5 INT2 +); +-- check individual constraints with alter table. 
+-- should fail +-- varchar does not promote to real +ALTER TABLE fktable ADD CONSTRAINT fk_2_3 +FOREIGN KEY (x2) REFERENCES pktable(id3); +ERROR: foreign key constraint "fk_2_3" cannot be implemented +DETAIL: Key columns "x2" and "id3" are of incompatible types: character varying and real. +-- nor to int4 +ALTER TABLE fktable ADD CONSTRAINT fk_2_1 +FOREIGN KEY (x2) REFERENCES pktable(id1); +ERROR: foreign key constraint "fk_2_1" cannot be implemented +DETAIL: Key columns "x2" and "id1" are of incompatible types: character varying and integer. +-- real does not promote to int4 +ALTER TABLE fktable ADD CONSTRAINT fk_3_1 +FOREIGN KEY (x3) REFERENCES pktable(id1); +ERROR: foreign key constraint "fk_3_1" cannot be implemented +DETAIL: Key columns "x3" and "id1" are of incompatible types: real and integer. +-- int4 does not promote to text +ALTER TABLE fktable ADD CONSTRAINT fk_1_2 +FOREIGN KEY (x1) REFERENCES pktable(id2); +ERROR: foreign key constraint "fk_1_2" cannot be implemented +DETAIL: Key columns "x1" and "id2" are of incompatible types: integer and character varying. +-- should succeed +-- int4 promotes to real +ALTER TABLE fktable ADD CONSTRAINT fk_1_3 +FOREIGN KEY (x1) REFERENCES pktable(id3); +-- text is compatible with varchar +ALTER TABLE fktable ADD CONSTRAINT fk_4_2 +FOREIGN KEY (x4) REFERENCES pktable(id2); +-- int2 is part of integer opfamily as of 8.0 +ALTER TABLE fktable ADD CONSTRAINT fk_5_1 +FOREIGN KEY (x5) REFERENCES pktable(id1); +-- check multikey cases, especially out-of-order column lists +-- these should work +ALTER TABLE fktable ADD CONSTRAINT fk_123_123 +FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id1,id2,id3); +ALTER TABLE fktable ADD CONSTRAINT fk_213_213 +FOREIGN KEY (x2,x1,x3) REFERENCES pktable(id2,id1,id3); +ALTER TABLE fktable ADD CONSTRAINT fk_253_213 +FOREIGN KEY (x2,x5,x3) REFERENCES pktable(id2,id1,id3); +-- these should fail +ALTER TABLE fktable ADD CONSTRAINT fk_123_231 +FOREIGN KEY (x1,x2,x3) REFERENCES pktable(id2,id3,id1); +ERROR: foreign key constraint "fk_123_231" cannot be implemented +DETAIL: Key columns "x1" and "id2" are of incompatible types: integer and character varying. +ALTER TABLE fktable ADD CONSTRAINT fk_241_132 +FOREIGN KEY (x2,x4,x1) REFERENCES pktable(id1,id3,id2); +ERROR: foreign key constraint "fk_241_132" cannot be implemented +DETAIL: Key columns "x2" and "id1" are of incompatible types: character varying and integer. +DROP TABLE pktable, fktable; +-- test a tricky case: we can elide firing the FK check trigger during +-- an UPDATE if the UPDATE did not change the foreign key +-- field. However, we can't do this if our transaction was the one that +-- created the updated row and the trigger is deferred, since our UPDATE +-- will have invalidated the original newly-inserted tuple, and therefore +-- cause the on-INSERT RI trigger not to be fired. +CREATE TEMP TABLE pktable ( + id int primary key, + other int +); +CREATE TEMP TABLE fktable ( + id int primary key, + fk int references pktable deferrable initially deferred +); +INSERT INTO pktable VALUES (5, 10); +BEGIN; +-- doesn't match PK, but no error yet +INSERT INTO fktable VALUES (0, 20); +-- don't change FK +UPDATE fktable SET id = id + 1; +-- should catch error from initial INSERT +COMMIT; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(20) is not present in table "pktable". 
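+-- An illustrative catalog query (a sketch, not one of the statements this test runs):
+-- the deferrability properties exercised in the cases above are recorded in
+-- pg_catalog.pg_constraint, so they can be confirmed directly for the temp tables
+-- used in this case, e.g.:
+--   SELECT conname, condeferrable, condeferred
+--     FROM pg_constraint
+--    WHERE conrelid = 'fktable'::regclass AND contype = 'f';
+-- condeferrable reflects DEFERRABLE and condeferred reflects INITIALLY DEFERRED.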
+-- check same case when insert is in a different subtransaction than update +BEGIN; +-- doesn't match PK, but no error yet +INSERT INTO fktable VALUES (0, 20); +-- UPDATE will be in a subxact +SAVEPOINT savept1; +-- don't change FK +UPDATE fktable SET id = id + 1; +-- should catch error from initial INSERT +COMMIT; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(20) is not present in table "pktable". +BEGIN; +-- INSERT will be in a subxact +SAVEPOINT savept1; +-- doesn't match PK, but no error yet +INSERT INTO fktable VALUES (0, 20); +RELEASE SAVEPOINT savept1; +-- don't change FK +UPDATE fktable SET id = id + 1; +-- should catch error from initial INSERT +COMMIT; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(20) is not present in table "pktable". +BEGIN; +-- doesn't match PK, but no error yet +INSERT INTO fktable VALUES (0, 20); +-- UPDATE will be in a subxact +SAVEPOINT savept1; +-- don't change FK +UPDATE fktable SET id = id + 1; +-- Roll back the UPDATE +ROLLBACK TO savept1; +-- should catch error from initial INSERT +COMMIT; +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(20) is not present in table "pktable". +-- +-- check ALTER CONSTRAINT +-- +INSERT INTO fktable VALUES (1, 5); +ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey DEFERRABLE INITIALLY IMMEDIATE; +BEGIN; +-- doesn't match FK, should throw error now +UPDATE pktable SET id = 10 WHERE id = 5; +ERROR: update or delete on table "pktable" violates foreign key constraint "fktable_fk_fkey" on table "fktable" +DETAIL: Key (id)=(5) is still referenced from table "fktable". +COMMIT; +BEGIN; +-- doesn't match PK, should throw error now +INSERT INTO fktable VALUES (0, 20); +ERROR: insert or update on table "fktable" violates foreign key constraint "fktable_fk_fkey" +DETAIL: Key (fk)=(20) is not present in table "pktable". +COMMIT; +-- try additional syntax +ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE; +-- illegal option +ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY DEFERRED; +ERROR: constraint declared INITIALLY DEFERRED must be DEFERRABLE +LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ... + ^ +-- test order of firing of FK triggers when several RI-induced changes need to +-- be made to the same row. This was broken by subtransaction-related +-- changes in 8.0. 
+CREATE TEMP TABLE users ( + id INT PRIMARY KEY, + name VARCHAR NOT NULL +); +INSERT INTO users VALUES (1, 'Jozko'); +INSERT INTO users VALUES (2, 'Ferko'); +INSERT INTO users VALUES (3, 'Samko'); +CREATE TEMP TABLE tasks ( + id INT PRIMARY KEY, + owner INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL, + worker INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL, + checked_by INT REFERENCES users ON UPDATE CASCADE ON DELETE SET NULL +); +INSERT INTO tasks VALUES (1,1,NULL,NULL); +INSERT INTO tasks VALUES (2,2,2,NULL); +INSERT INTO tasks VALUES (3,3,3,3); +SELECT * FROM tasks; + id | owner | worker | checked_by +----+-------+--------+------------ + 1 | 1 | | + 2 | 2 | 2 | + 3 | 3 | 3 | 3 +(3 rows) + +UPDATE users SET id = 4 WHERE id = 3; +SELECT * FROM tasks; + id | owner | worker | checked_by +----+-------+--------+------------ + 1 | 1 | | + 2 | 2 | 2 | + 3 | 4 | 4 | 4 +(3 rows) + +DELETE FROM users WHERE id = 4; +SELECT * FROM tasks; + id | owner | worker | checked_by +----+-------+--------+------------ + 1 | 1 | | + 2 | 2 | 2 | + 3 | | | +(3 rows) + +-- could fail with only 2 changes to make, if row was already updated +BEGIN; +UPDATE tasks set id=id WHERE id=2; +SELECT * FROM tasks; + id | owner | worker | checked_by +----+-------+--------+------------ + 1 | 1 | | + 3 | | | + 2 | 2 | 2 | +(3 rows) + +DELETE FROM users WHERE id = 2; +SELECT * FROM tasks; + id | owner | worker | checked_by +----+-------+--------+------------ + 1 | 1 | | + 3 | | | + 2 | | | +(3 rows) + +COMMIT; +-- +-- Test self-referential FK with CASCADE (bug #6268) +-- +create temp table selfref ( + a int primary key, + b int, + foreign key (b) references selfref (a) + on update cascade on delete cascade +); +insert into selfref (a, b) +values + (0, 0), + (1, 1); +begin; + update selfref set a = 123 where a = 0; + select a, b from selfref; + a | b +-----+----- + 1 | 1 + 123 | 123 +(2 rows) + + update selfref set a = 456 where a = 123; + select a, b from selfref; + a | b +-----+----- + 1 | 1 + 456 | 456 +(2 rows) + +commit; +-- +-- Test that SET DEFAULT actions recognize updates to default values +-- +create temp table defp (f1 int primary key); +create temp table defc (f1 int default 0 + references defp on delete set default); +insert into defp values (0), (1), (2); +insert into defc values (2); +select * from defc; + f1 +---- + 2 +(1 row) + +delete from defp where f1 = 2; +select * from defc; + f1 +---- + 0 +(1 row) + +delete from defp where f1 = 0; -- fail +ERROR: update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc" +DETAIL: Key (f1)=(0) is still referenced from table "defc". +alter table defc alter column f1 set default 1; +delete from defp where f1 = 0; +select * from defc; + f1 +---- + 1 +(1 row) + +delete from defp where f1 = 1; -- fail +ERROR: update or delete on table "defp" violates foreign key constraint "defc_f1_fkey" on table "defc" +DETAIL: Key (f1)=(1) is still referenced from table "defc". +-- +-- Test the difference between NO ACTION and RESTRICT +-- +create temp table pp (f1 int primary key); +create temp table cc (f1 int references pp on update no action on delete no action); +insert into pp values(12); +insert into pp values(11); +update pp set f1=f1+1; +insert into cc values(13); +update pp set f1=f1+1; +update pp set f1=f1+1; -- fail +ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc" +DETAIL: Key (f1)=(13) is still referenced from table "cc". 
+delete from pp where f1 = 13; -- fail +ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc" +DETAIL: Key (f1)=(13) is still referenced from table "cc". +drop table pp, cc; +create temp table pp (f1 int primary key); +create temp table cc (f1 int references pp on update restrict on delete restrict); +insert into pp values(12); +insert into pp values(11); +update pp set f1=f1+1; +insert into cc values(13); +update pp set f1=f1+1; -- fail +ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc" +DETAIL: Key (f1)=(13) is still referenced from table "cc". +delete from pp where f1 = 13; -- fail +ERROR: update or delete on table "pp" violates foreign key constraint "cc_f1_fkey" on table "cc" +DETAIL: Key (f1)=(13) is still referenced from table "cc". +drop table pp, cc; +-- +-- Test interaction of foreign-key optimization with rules (bug #14219) +-- +create temp table t1 (a integer primary key, b text); +create temp table t2 (a integer primary key, b integer references t1); +create rule r1 as on delete to t1 do delete from t2 where t2.b = old.a; +explain (costs off) delete from t1 where a = 1; + QUERY PLAN +-------------------------------------------- + Delete on t2 + -> Nested Loop + -> Index Scan using t1_pkey on t1 + Index Cond: (a = 1) + -> Seq Scan on t2 + Filter: (b = 1) + + Delete on t1 + -> Index Scan using t1_pkey on t1 + Index Cond: (a = 1) +(10 rows) + +delete from t1 where a = 1; +-- Test a primary key with attributes located in later attnum positions +-- compared to the fk attributes. +create table pktable2 (a int, b int, c int, d int, e int, primary key (d, e)); +create table fktable2 (d int, e int, foreign key (d, e) references pktable2); +insert into pktable2 values (1, 2, 3, 4, 5); +insert into fktable2 values (4, 5); +delete from pktable2; +ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2" +DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2". +update pktable2 set d = 5; +ERROR: update or delete on table "pktable2" violates foreign key constraint "fktable2_d_e_fkey" on table "fktable2" +DETAIL: Key (d, e)=(4, 5) is still referenced from table "fktable2". 
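+-- Illustrative sketch (not one of the statements this test runs): the column-number
+-- mapping that the "later attnum positions" case above relies on is visible in
+-- pg_constraint, whose conkey/confkey arrays hold the referencing and referenced
+-- attribute numbers:
+--   SELECT conname, conkey, confkey
+--     FROM pg_constraint
+--    WHERE conrelid = 'fktable2'::regclass AND contype = 'f';
+-- Here conkey is {1,2} (fktable2.d, fktable2.e) while confkey is {4,5}
+-- (pktable2.d, pktable2.e), i.e. the referenced attributes sit at later positions.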
+drop table pktable2, fktable2; +-- Test truncation of long foreign key names +create table pktable1 (a int primary key); +create table pktable2 (a int, b int, primary key (a, b)); +create table fktable2 ( + a int, + b int, + very_very_long_column_name_to_exceed_63_characters int, + foreign key (very_very_long_column_name_to_exceed_63_characters) references pktable1, + foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2, + foreign key (a, very_very_long_column_name_to_exceed_63_characters) references pktable2 +); +select conname from pg_constraint where conrelid = 'fktable2'::regclass order by conname; + conname +----------------------------------------------------------------- + fktable2_a_very_very_long_column_name_to_exceed_63_charac_fkey1 + fktable2_a_very_very_long_column_name_to_exceed_63_charact_fkey + fktable2_very_very_long_column_name_to_exceed_63_character_fkey +(3 rows) + +drop table pktable1, pktable2, fktable2; +-- +-- Test deferred FK check on a tuple deleted by a rolled-back subtransaction +-- +create table pktable2(f1 int primary key); +create table fktable2(f1 int references pktable2 deferrable initially deferred); +insert into pktable2 values(1); +begin; +insert into fktable2 values(1); +savepoint x; +delete from fktable2; +rollback to x; +commit; +begin; +insert into fktable2 values(2); +savepoint x; +delete from fktable2; +rollback to x; +commit; -- fail +ERROR: insert or update on table "fktable2" violates foreign key constraint "fktable2_f1_fkey" +DETAIL: Key (f1)=(2) is not present in table "pktable2". +-- +-- Test that we prevent dropping FK constraint with pending trigger events +-- +begin; +insert into fktable2 values(2); +alter table fktable2 drop constraint fktable2_f1_fkey; +ERROR: cannot ALTER TABLE "fktable2" because it has pending trigger events +commit; +begin; +delete from pktable2 where f1 = 1; +alter table fktable2 drop constraint fktable2_f1_fkey; +ERROR: cannot ALTER TABLE "pktable2" because it has pending trigger events +commit; +drop table pktable2, fktable2; +-- +-- Test keys that "look" different but compare as equal +-- +create table pktable2 (a float8, b float8, primary key (a, b)); +create table fktable2 (x float8, y float8, foreign key (x, y) references pktable2 (a, b) on update cascade); +insert into pktable2 values ('-0', '-0'); +insert into fktable2 values ('-0', '-0'); +select * from pktable2; + a | b +----+---- + -0 | -0 +(1 row) + +select * from fktable2; + x | y +----+---- + -0 | -0 +(1 row) + +update pktable2 set a = '0' where a = '-0'; +select * from pktable2; + a | b +---+---- + 0 | -0 +(1 row) + +-- should have updated fktable2.x +select * from fktable2; + x | y +---+---- + 0 | -0 +(1 row) + +drop table pktable2, fktable2; +-- +-- Foreign keys and partitioned tables +-- +-- Creation of a partitioned hierarchy with irregular definitions +CREATE TABLE fk_notpartitioned_pk (fdrop1 int, a int, fdrop2 int, b int, + PRIMARY KEY (a, b)); +ALTER TABLE fk_notpartitioned_pk DROP COLUMN fdrop1, DROP COLUMN fdrop2; +CREATE TABLE fk_partitioned_fk (b int, fdrop1 int, a int) PARTITION BY RANGE (a, b); +ALTER TABLE fk_partitioned_fk DROP COLUMN fdrop1; +CREATE TABLE fk_partitioned_fk_1 (fdrop1 int, fdrop2 int, a int, fdrop3 int, b int); +ALTER TABLE fk_partitioned_fk_1 DROP COLUMN fdrop1, DROP COLUMN fdrop2, DROP COLUMN fdrop3; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_1 FOR VALUES FROM (0,0) TO (1000,1000); +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) REFERENCES 
fk_notpartitioned_pk; +CREATE TABLE fk_partitioned_fk_2 (b int, fdrop1 int, fdrop2 int, a int); +ALTER TABLE fk_partitioned_fk_2 DROP COLUMN fdrop1, DROP COLUMN fdrop2; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES FROM (1000,1000) TO (2000,2000); +CREATE TABLE fk_partitioned_fk_3 (fdrop1 int, fdrop2 int, fdrop3 int, fdrop4 int, b int, a int) + PARTITION BY HASH (a); +ALTER TABLE fk_partitioned_fk_3 DROP COLUMN fdrop1, DROP COLUMN fdrop2, + DROP COLUMN fdrop3, DROP COLUMN fdrop4; +CREATE TABLE fk_partitioned_fk_3_0 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 0); +CREATE TABLE fk_partitioned_fk_3_1 PARTITION OF fk_partitioned_fk_3 FOR VALUES WITH (MODULUS 5, REMAINDER 1); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 + FOR VALUES FROM (2000,2000) TO (3000,3000); +-- Creating a foreign key with ONLY on a partitioned table referencing +-- a non-partitioned table fails. +ALTER TABLE ONLY fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk; +ERROR: cannot use ONLY for foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk" +-- Adding a NOT VALID foreign key on a partitioned table referencing +-- a non-partitioned table fails. +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk NOT VALID; +ERROR: cannot add NOT VALID foreign key on partitioned table "fk_partitioned_fk" referencing relation "fk_notpartitioned_pk" +DETAIL: This feature is not yet supported on partitioned tables. +-- these inserts, targeting both the partition directly as well as the +-- partitioned table, should all fail +INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); +ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_1 (a,b) VALUES (500, 501); +ERROR: insert or update on table "fk_partitioned_fk_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(500, 501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); +ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_2 (a,b) VALUES (1500, 1501); +ERROR: insert or update on table "fk_partitioned_fk_2" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(1500, 1501) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); +ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2500, 2502); +ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(2500, 2502) is not present in table "fk_notpartitioned_pk". +INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); +ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk". 
+INSERT INTO fk_partitioned_fk_3 (a,b) VALUES (2501, 2503); +ERROR: insert or update on table "fk_partitioned_fk_3_0" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(2501, 2503) is not present in table "fk_notpartitioned_pk". +-- but if we insert the values that make them valid, then they work +INSERT INTO fk_notpartitioned_pk VALUES (500, 501), (1500, 1501), + (2500, 2502), (2501, 2503); +INSERT INTO fk_partitioned_fk (a,b) VALUES (500, 501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (1500, 1501); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2500, 2502); +INSERT INTO fk_partitioned_fk (a,b) VALUES (2501, 2503); +-- this update fails because there is no referenced row +UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; +ERROR: insert or update on table "fk_partitioned_fk_3_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk". +-- but we can fix it thusly: +INSERT INTO fk_notpartitioned_pk (a,b) VALUES (2502, 2503); +UPDATE fk_partitioned_fk SET a = a + 1 WHERE a = 2501; +-- these updates would leave lingering rows in the referencing table; disallow +UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a, b)=(500, 501) is still referenced from table "fk_partitioned_fk". +UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a, b)=(1500, 1501) is still referenced from table "fk_partitioned_fk". +UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a, b)=(2500, 2502) is still referenced from table "fk_partitioned_fk". +-- check psql behavior +\d fk_notpartitioned_pk + Table "public.fk_notpartitioned_pk" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | not null | +Indexes: + "fk_notpartitioned_pk_pkey" PRIMARY KEY, btree (a, b) +Referenced by: + TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) + +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; +-- done. +DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk; +-- Altering a type referenced by a foreign key needs to drop/recreate the FK. +-- Ensure that works. +CREATE TABLE fk_notpartitioned_pk (a INT, PRIMARY KEY(a), CHECK (a > 0)); +CREATE TABLE fk_partitioned_fk (a INT REFERENCES fk_notpartitioned_pk(a) PRIMARY KEY) PARTITION BY RANGE(a); +CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES FROM (MINVALUE) TO (MAXVALUE); +INSERT INTO fk_notpartitioned_pk VALUES (1); +INSERT INTO fk_partitioned_fk VALUES (1); +ALTER TABLE fk_notpartitioned_pk ALTER COLUMN a TYPE bigint; +DELETE FROM fk_notpartitioned_pk WHERE a = 1; +ERROR: update or delete on table "fk_notpartitioned_pk" violates foreign key constraint "fk_partitioned_fk_a_fkey" on table "fk_partitioned_fk" +DETAIL: Key (a)=(1) is still referenced from table "fk_partitioned_fk". 
+DROP TABLE fk_notpartitioned_pk, fk_partitioned_fk;
+-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
+-- actions
+CREATE TABLE fk_notpartitioned_pk (a int, b int, primary key (a, b));
+CREATE TABLE fk_partitioned_fk (a int default 2501, b int default 142857) PARTITION BY LIST (a);
+CREATE TABLE fk_partitioned_fk_1 PARTITION OF fk_partitioned_fk FOR VALUES IN (NULL,500,501,502);
+ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b)
+ REFERENCES fk_notpartitioned_pk MATCH SIMPLE
+ ON DELETE SET NULL ON UPDATE SET NULL;
+CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502);
+CREATE TABLE fk_partitioned_fk_3 (a int, b int);
+ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_3 FOR VALUES IN (2500,2501,2502,2503);
+-- this insert fails
+INSERT INTO fk_partitioned_fk (a, b) VALUES (2502, 2503);
+ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
+DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
+INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
+ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
+DETAIL: Key (a, b)=(2502, 2503) is not present in table "fk_notpartitioned_pk".
+-- but since the FK is MATCH SIMPLE, this one doesn't
+INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, NULL);
+-- now create the referenced row ...
+INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503);
+-- and now the same insert works
+INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503);
+-- this always works
+INSERT INTO fk_partitioned_fk (a,b) VALUES (NULL, NULL);
+-- MATCH FULL
+INSERT INTO fk_notpartitioned_pk VALUES (1, 2);
+CREATE TABLE fk_partitioned_fk_full (x int, y int) PARTITION BY RANGE (x);
+CREATE TABLE fk_partitioned_fk_full_1 PARTITION OF fk_partitioned_fk_full DEFAULT;
+INSERT INTO fk_partitioned_fk_full VALUES (1, NULL);
+ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL; -- fails
+ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey"
+DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
+TRUNCATE fk_partitioned_fk_full;
+ALTER TABLE fk_partitioned_fk_full ADD FOREIGN KEY (x, y) REFERENCES fk_notpartitioned_pk MATCH FULL;
+INSERT INTO fk_partitioned_fk_full VALUES (1, NULL); -- fails
+ERROR: insert or update on table "fk_partitioned_fk_full_1" violates foreign key constraint "fk_partitioned_fk_full_x_y_fkey"
+DETAIL: MATCH FULL does not allow mixing of null and nonnull key values.
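+-- Illustrative sketch (not part of the statements this test runs): the match type
+-- chosen for each foreign key is stored in pg_constraint.confmatchtype ('s' for
+-- MATCH SIMPLE, 'f' for MATCH FULL), so the two constraints created above can be
+-- compared with:
+--   SELECT conrelid::regclass, conname, confmatchtype
+--     FROM pg_constraint
+--    WHERE contype = 'f'
+--      AND conrelid IN ('fk_partitioned_fk'::regclass, 'fk_partitioned_fk_full'::regclass);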
+DROP TABLE fk_partitioned_fk_full; +-- ON UPDATE SET NULL +SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a; + tableoid | a | b +---------------------+------+--- + fk_partitioned_fk_3 | 2502 | + fk_partitioned_fk_1 | | +(2 rows) + +UPDATE fk_notpartitioned_pk SET a = a + 1 WHERE a = 2502; +SELECT tableoid::regclass, a, b FROM fk_partitioned_fk WHERE b IS NULL ORDER BY a; + tableoid | a | b +---------------------+------+--- + fk_partitioned_fk_3 | 2502 | + fk_partitioned_fk_1 | | + fk_partitioned_fk_1 | | +(3 rows) + +-- ON DELETE SET NULL +INSERT INTO fk_partitioned_fk VALUES (2503, 2503); +SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL; + count +------- + 2 +(1 row) + +DELETE FROM fk_notpartitioned_pk; +SELECT count(*) FROM fk_partitioned_fk WHERE a IS NULL; + count +------- + 3 +(1 row) + +-- ON UPDATE/DELETE SET DEFAULT +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk + ON DELETE SET DEFAULT ON UPDATE SET DEFAULT; +INSERT INTO fk_notpartitioned_pk VALUES (2502, 2503); +INSERT INTO fk_partitioned_fk_3 (a, b) VALUES (2502, 2503); +-- this fails, because the defaults for the referencing table are not present +-- in the referenced table: +UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502; +ERROR: insert or update on table "fk_partitioned_fk_3" violates foreign key constraint "fk_partitioned_fk_a_b_fkey" +DETAIL: Key (a, b)=(2501, 142857) is not present in table "fk_notpartitioned_pk". +-- but inserting the row we can make it work: +INSERT INTO fk_notpartitioned_pk VALUES (2501, 142857); +UPDATE fk_notpartitioned_pk SET a = 1500 WHERE a = 2502; +SELECT * FROM fk_partitioned_fk WHERE b = 142857; + a | b +------+-------- + 2501 | 142857 +(1 row) + +-- ON DELETE SET NULL column_list +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk + ON DELETE SET NULL (a); +BEGIN; +DELETE FROM fk_notpartitioned_pk WHERE b = 142857; +SELECT * FROM fk_partitioned_fk WHERE a IS NOT NULL OR b IS NOT NULL ORDER BY a NULLS LAST; + a | b +------+-------- + 2502 | + | 142857 +(2 rows) + +ROLLBACK; +-- ON DELETE SET DEFAULT column_list +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk + ON DELETE SET DEFAULT (a); +BEGIN; +DELETE FROM fk_partitioned_fk; +DELETE FROM fk_notpartitioned_pk; +INSERT INTO fk_notpartitioned_pk VALUES (500, 100000), (2501, 100000); +INSERT INTO fk_partitioned_fk VALUES (500, 100000); +DELETE FROM fk_notpartitioned_pk WHERE a = 500; +SELECT * FROM fk_partitioned_fk ORDER BY a; + a | b +------+-------- + 2501 | 100000 +(1 row) + +ROLLBACK; +-- ON UPDATE/DELETE CASCADE +ALTER TABLE fk_partitioned_fk DROP CONSTRAINT fk_partitioned_fk_a_b_fkey; +ALTER TABLE fk_partitioned_fk ADD FOREIGN KEY (a, b) + REFERENCES fk_notpartitioned_pk + ON DELETE CASCADE ON UPDATE CASCADE; +UPDATE fk_notpartitioned_pk SET a = 2502 WHERE a = 2501; +SELECT * FROM fk_partitioned_fk WHERE b = 142857; + a | b +------+-------- + 2502 | 142857 +(1 row) + +-- Now you see it ... +SELECT * FROM fk_partitioned_fk WHERE b = 142857; + a | b +------+-------- + 2502 | 142857 +(1 row) + +DELETE FROM fk_notpartitioned_pk WHERE b = 142857; +-- now you don't. 
+SELECT * FROM fk_partitioned_fk WHERE a = 142857; + a | b +---+--- +(0 rows) + +-- verify that DROP works +DROP TABLE fk_partitioned_fk_2; +-- Test behavior of the constraint together with attaching and detaching +-- partitions. +CREATE TABLE fk_partitioned_fk_2 PARTITION OF fk_partitioned_fk FOR VALUES IN (1500,1502); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2; +BEGIN; +DROP TABLE fk_partitioned_fk; +-- constraint should still be there +\d fk_partitioned_fk_2; + Table "public.fk_partitioned_fk_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | 2501 + b | integer | | | 142857 +Foreign-key constraints: + "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +ROLLBACK; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +DROP TABLE fk_partitioned_fk_2; +CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk ON UPDATE CASCADE ON DELETE CASCADE); +ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +-- should have only one constraint +\d fk_partitioned_fk_2 + Table "public.fk_partitioned_fk_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | integer | | | + a | integer | | | +Partition of: fk_partitioned_fk FOR VALUES IN (1500, 1502) +Foreign-key constraints: + TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +DROP TABLE fk_partitioned_fk_2; +CREATE TABLE fk_partitioned_fk_4 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE) PARTITION BY RANGE (b, a); +CREATE TABLE fk_partitioned_fk_4_1 PARTITION OF fk_partitioned_fk_4 FOR VALUES FROM (1,1) TO (100,100); +CREATE TABLE fk_partitioned_fk_4_2 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL); +ALTER TABLE fk_partitioned_fk_4 ATTACH PARTITION fk_partitioned_fk_4_2 FOR VALUES FROM (100,100) TO (1000,1000); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); +-- should only have one constraint +\d fk_partitioned_fk_4 + Partitioned table "public.fk_partitioned_fk_4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk FOR VALUES IN (3500, 3502) +Partition key: RANGE (b, a) +Foreign-key constraints: + TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE +Number of partitions: 2 (Use \d+ to list them.) 
+ +\d fk_partitioned_fk_4_1 + Table "public.fk_partitioned_fk_4_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk_4 FOR VALUES FROM (1, 1) TO (100, 100) +Foreign-key constraints: + TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +-- this one has an FK with mismatched properties +\d fk_partitioned_fk_4_2 + Table "public.fk_partitioned_fk_4_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk_4 FOR VALUES FROM (100, 100) TO (1000, 1000) +Foreign-key constraints: + "fk_partitioned_fk_4_2_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE SET NULL + TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE + +CREATE TABLE fk_partitioned_fk_5 (a int, b int, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE, + FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE) + PARTITION BY RANGE (a); +CREATE TABLE fk_partitioned_fk_5_1 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk); +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); +ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5; +ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); +-- this one has two constraints, similar but not quite the one in the parent, +-- so it gets a new one +\d fk_partitioned_fk_5 + Partitioned table "public.fk_partitioned_fk_5" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: fk_partitioned_fk FOR VALUES IN (4500) +Partition key: RANGE (a) +Foreign-key constraints: + "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE + "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE + TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE +Number of partitions: 1 (Use \d+ to list them.) 
+
+-- verify that it works to reattach a child with multiple candidate
+-- constraints
+ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1;
+ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10);
+\d fk_partitioned_fk_5_1
+ Table "public.fk_partitioned_fk_5_1"
+ Column | Type | Collation | Nullable | Default
+--------+---------+-----------+----------+---------
+ a | integer | | |
+ b | integer | | |
+Partition of: fk_partitioned_fk_5 FOR VALUES FROM (0) TO (10)
+Foreign-key constraints:
+ "fk_partitioned_fk_5_1_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
+ TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE
+ TABLE "fk_partitioned_fk_5" CONSTRAINT "fk_partitioned_fk_5_a_b_fkey1" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) MATCH FULL ON UPDATE CASCADE ON DELETE CASCADE
+ TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE
+
+-- verify that attaching a table checks that the existing data satisfies the
+-- constraint
+CREATE TABLE fk_partitioned_fk_2 (a int, b int) PARTITION BY RANGE (b);
+CREATE TABLE fk_partitioned_fk_2_1 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (0) TO (1000);
+CREATE TABLE fk_partitioned_fk_2_2 PARTITION OF fk_partitioned_fk_2 FOR VALUES FROM (1000) TO (2000);
+INSERT INTO fk_partitioned_fk_2 VALUES (1600, 601), (1600, 1601);
+ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
+ FOR VALUES IN (1600);
+ERROR: insert or update on table "fk_partitioned_fk_2_1" violates foreign key constraint "fk_partitioned_fk_a_b_fkey"
+DETAIL: Key (a, b)=(1600, 601) is not present in table "fk_notpartitioned_pk".
+INSERT INTO fk_notpartitioned_pk VALUES (1600, 601), (1600, 1601);
+ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2
+ FOR VALUES IN (1600);
+-- leave these tables around intentionally
+-- test the case when the referenced table is owned by a different user
+create role regress_other_partitioned_fk_owner;
+grant references on fk_notpartitioned_pk to regress_other_partitioned_fk_owner;
+set role regress_other_partitioned_fk_owner;
+create table other_partitioned_fk(a int, b int) partition by list (a);
+create table other_partitioned_fk_1 partition of other_partitioned_fk
+ for values in (2048);
+insert into other_partitioned_fk
+ select 2048, x from generate_series(1,10) x;
+-- this should fail
+alter table other_partitioned_fk add foreign key (a, b)
+ references fk_notpartitioned_pk(a, b);
+ERROR: insert or update on table "other_partitioned_fk_1" violates foreign key constraint "other_partitioned_fk_a_b_fkey"
+DETAIL: Key (a, b)=(2048, 1) is not present in table "fk_notpartitioned_pk".
+-- add the missing keys and retry
+reset role;
+insert into fk_notpartitioned_pk (a, b)
+ select 2048, x from generate_series(1,10) x;
+set role regress_other_partitioned_fk_owner;
+alter table other_partitioned_fk add foreign key (a, b)
+ references fk_notpartitioned_pk(a, b);
+-- clean up
+drop table other_partitioned_fk;
+reset role;
+revoke all on fk_notpartitioned_pk from regress_other_partitioned_fk_owner;
+drop role regress_other_partitioned_fk_owner;
+--
+-- Test self-referencing foreign key with partition.
+-- This should create only one fk constraint per partition +-- +CREATE TABLE parted_self_fk ( + id bigint NOT NULL PRIMARY KEY, + id_abc bigint, + FOREIGN KEY (id_abc) REFERENCES parted_self_fk(id) +) +PARTITION BY RANGE (id); +CREATE TABLE part1_self_fk ( + id bigint NOT NULL PRIMARY KEY, + id_abc bigint +); +ALTER TABLE parted_self_fk ATTACH PARTITION part1_self_fk FOR VALUES FROM (0) TO (10); +CREATE TABLE part2_self_fk PARTITION OF parted_self_fk FOR VALUES FROM (10) TO (20); +CREATE TABLE part3_self_fk ( -- a partitioned partition + id bigint NOT NULL PRIMARY KEY, + id_abc bigint +) PARTITION BY RANGE (id); +CREATE TABLE part32_self_fk PARTITION OF part3_self_fk FOR VALUES FROM (20) TO (30); +ALTER TABLE parted_self_fk ATTACH PARTITION part3_self_fk FOR VALUES FROM (20) TO (40); +CREATE TABLE part33_self_fk ( + id bigint NOT NULL PRIMARY KEY, + id_abc bigint +); +ALTER TABLE part3_self_fk ATTACH PARTITION part33_self_fk FOR VALUES FROM (30) TO (40); +SELECT cr.relname, co.conname, co.contype, co.convalidated, + p.conname AS conparent, p.convalidated, cf.relname AS foreignrel +FROM pg_constraint co +JOIN pg_class cr ON cr.oid = co.conrelid +LEFT JOIN pg_class cf ON cf.oid = co.confrelid +LEFT JOIN pg_constraint p ON p.oid = co.conparentid +WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk')) +ORDER BY co.contype, cr.relname, co.conname, p.conname; + relname | conname | contype | convalidated | conparent | convalidated | foreignrel +----------------+----------------------------+---------+--------------+----------------------------+--------------+---------------- + part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk + part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t | + part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t | + part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t | + part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t | + part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t | + parted_self_fk | parted_self_fk_pkey | p | t | | | +(12 rows) + +-- detach and re-attach multiple times just to ensure everything is kosher +ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk; +ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20); +ALTER TABLE parted_self_fk DETACH PARTITION part2_self_fk; +ALTER TABLE parted_self_fk ATTACH PARTITION part2_self_fk FOR VALUES FROM (10) TO (20); +SELECT cr.relname, co.conname, co.contype, co.convalidated, + p.conname AS conparent, p.convalidated, cf.relname AS foreignrel +FROM pg_constraint co +JOIN pg_class cr ON cr.oid = co.conrelid +LEFT JOIN pg_class cf ON cf.oid = co.confrelid +LEFT JOIN pg_constraint p ON p.oid = co.conparentid +WHERE cr.oid IN (SELECT relid FROM pg_partition_tree('parted_self_fk')) +ORDER BY co.contype, cr.relname, co.conname, p.conname; + relname | conname | contype | convalidated | conparent | convalidated | foreignrel 
+----------------+----------------------------+---------+--------------+----------------------------+--------------+---------------- + part1_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part2_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part32_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part33_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + part3_self_fk | parted_self_fk_id_abc_fkey | f | t | parted_self_fk_id_abc_fkey | t | parted_self_fk + parted_self_fk | parted_self_fk_id_abc_fkey | f | t | | | parted_self_fk + part1_self_fk | part1_self_fk_pkey | p | t | parted_self_fk_pkey | t | + part2_self_fk | part2_self_fk_pkey | p | t | parted_self_fk_pkey | t | + part32_self_fk | part32_self_fk_pkey | p | t | part3_self_fk_pkey | t | + part33_self_fk | part33_self_fk_pkey | p | t | part3_self_fk_pkey | t | + part3_self_fk | part3_self_fk_pkey | p | t | parted_self_fk_pkey | t | + parted_self_fk | parted_self_fk_pkey | p | t | | | +(12 rows) + +-- Leave this table around, for pg_upgrade/pg_dump tests +-- Test creating a constraint at the parent that already exists in partitions. +-- There should be no duplicated constraints, and attempts to drop the +-- constraint in partitions should raise appropriate errors. +create schema fkpart0 + create table pkey (a int primary key) + create table fk_part (a int) partition by list (a) + create table fk_part_1 partition of fk_part + (foreign key (a) references fkpart0.pkey) for values in (1) + create table fk_part_23 partition of fk_part + (foreign key (a) references fkpart0.pkey) for values in (2, 3) + partition by list (a) + create table fk_part_23_2 partition of fk_part_23 for values in (2); +alter table fkpart0.fk_part add foreign key (a) references fkpart0.pkey; +\d fkpart0.fk_part_1 \\ -- should have only one FK + Table "fkpart0.fk_part_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: fkpart0.fk_part FOR VALUES IN (1) +Foreign-key constraints: + TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) + +alter table fkpart0.fk_part_1 drop constraint fk_part_1_a_fkey; +ERROR: cannot drop inherited constraint "fk_part_1_a_fkey" of relation "fk_part_1" +\d fkpart0.fk_part_23 \\ -- should have only one FK + Partitioned table "fkpart0.fk_part_23" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: fkpart0.fk_part FOR VALUES IN (2, 3) +Partition key: LIST (a) +Foreign-key constraints: + TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) +Number of partitions: 1 (Use \d+ to list them.) 
+ +\d fkpart0.fk_part_23_2 \\ -- should have only one FK + Table "fkpart0.fk_part_23_2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: fkpart0.fk_part_23 FOR VALUES IN (2) +Foreign-key constraints: + TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) + +alter table fkpart0.fk_part_23 drop constraint fk_part_23_a_fkey; +ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23" +alter table fkpart0.fk_part_23_2 drop constraint fk_part_23_a_fkey; +ERROR: cannot drop inherited constraint "fk_part_23_a_fkey" of relation "fk_part_23_2" +create table fkpart0.fk_part_4 partition of fkpart0.fk_part for values in (4); +\d fkpart0.fk_part_4 + Table "fkpart0.fk_part_4" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: fkpart0.fk_part FOR VALUES IN (4) +Foreign-key constraints: + TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) + +alter table fkpart0.fk_part_4 drop constraint fk_part_a_fkey; +ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_4" +create table fkpart0.fk_part_56 partition of fkpart0.fk_part + for values in (5,6) partition by list (a); +create table fkpart0.fk_part_56_5 partition of fkpart0.fk_part_56 + for values in (5); +\d fkpart0.fk_part_56 + Partitioned table "fkpart0.fk_part_56" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: fkpart0.fk_part FOR VALUES IN (5, 6) +Partition key: LIST (a) +Foreign-key constraints: + TABLE "fkpart0.fk_part" CONSTRAINT "fk_part_a_fkey" FOREIGN KEY (a) REFERENCES fkpart0.pkey(a) +Number of partitions: 1 (Use \d+ to list them.) + +alter table fkpart0.fk_part_56 drop constraint fk_part_a_fkey; +ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56" +alter table fkpart0.fk_part_56_5 drop constraint fk_part_a_fkey; +ERROR: cannot drop inherited constraint "fk_part_a_fkey" of relation "fk_part_56_5" +-- verify that attaching and detaching partitions maintains the right set of +-- triggers +create schema fkpart1 + create table pkey (a int primary key) + create table fk_part (a int) partition by list (a) + create table fk_part_1 partition of fk_part for values in (1) partition by list (a) + create table fk_part_1_1 partition of fk_part_1 for values in (1); +alter table fkpart1.fk_part add foreign key (a) references fkpart1.pkey; +insert into fkpart1.fk_part values (1); -- should fail +ERROR: insert or update on table "fk_part_1_1" violates foreign key constraint "fk_part_a_fkey" +DETAIL: Key (a)=(1) is not present in table "pkey". +insert into fkpart1.pkey values (1); +insert into fkpart1.fk_part values (1); +delete from fkpart1.pkey where a = 1; -- should fail +ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part" +DETAIL: Key (a)=(1) is still referenced from table "fk_part". +alter table fkpart1.fk_part detach partition fkpart1.fk_part_1; +create table fkpart1.fk_part_1_2 partition of fkpart1.fk_part_1 for values in (2); +insert into fkpart1.fk_part_1 values (2); -- should fail +ERROR: insert or update on table "fk_part_1_2" violates foreign key constraint "fk_part_a_fkey" +DETAIL: Key (a)=(2) is not present in table "pkey". 
+delete from fkpart1.pkey where a = 1; +ERROR: update or delete on table "pkey" violates foreign key constraint "fk_part_a_fkey" on table "fk_part_1" +DETAIL: Key (a)=(1) is still referenced from table "fk_part_1". +-- verify that attaching and detaching partitions manipulates the inheritance +-- properties of their FK constraints correctly +create schema fkpart2 + create table pkey (a int primary key) + create table fk_part (a int, constraint fkey foreign key (a) references fkpart2.pkey) partition by list (a) + create table fk_part_1 partition of fkpart2.fk_part for values in (1) partition by list (a) + create table fk_part_1_1 (a int, constraint my_fkey foreign key (a) references fkpart2.pkey); +alter table fkpart2.fk_part_1 attach partition fkpart2.fk_part_1_1 for values in (1); +alter table fkpart2.fk_part_1 drop constraint fkey; -- should fail +ERROR: cannot drop inherited constraint "fkey" of relation "fk_part_1" +alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- should fail +ERROR: cannot drop inherited constraint "my_fkey" of relation "fk_part_1_1" +alter table fkpart2.fk_part detach partition fkpart2.fk_part_1; +alter table fkpart2.fk_part_1 drop constraint fkey; -- ok +alter table fkpart2.fk_part_1_1 drop constraint my_fkey; -- doesn't exist +ERROR: constraint "my_fkey" of relation "fk_part_1_1" does not exist +-- verify constraint deferrability +create schema fkpart3 + create table pkey (a int primary key) + create table fk_part (a int, constraint fkey foreign key (a) references fkpart3.pkey deferrable initially immediate) partition by list (a) + create table fk_part_1 partition of fkpart3.fk_part for values in (1) partition by list (a) + create table fk_part_1_1 partition of fkpart3.fk_part_1 for values in (1) + create table fk_part_2 partition of fkpart3.fk_part for values in (2); +begin; +set constraints fkpart3.fkey deferred; +insert into fkpart3.fk_part values (1); +insert into fkpart3.pkey values (1); +commit; +begin; +set constraints fkpart3.fkey deferred; +delete from fkpart3.pkey; +delete from fkpart3.fk_part; +commit; +drop schema fkpart0, fkpart1, fkpart2, fkpart3 cascade; +NOTICE: drop cascades to 10 other objects +DETAIL: drop cascades to table fkpart3.pkey +drop cascades to table fkpart3.fk_part +drop cascades to table fkpart2.pkey +drop cascades to table fkpart2.fk_part +drop cascades to table fkpart2.fk_part_1 +drop cascades to table fkpart1.pkey +drop cascades to table fkpart1.fk_part +drop cascades to table fkpart1.fk_part_1 +drop cascades to table fkpart0.pkey +drop cascades to table fkpart0.fk_part +-- Test a partitioned table as referenced table. +-- Verify basic functionality with a regular partition creation and a partition +-- with a different column layout, as well as partitions added (created and +-- attached) after creating the foreign key. 
+CREATE SCHEMA fkpart3; +SET search_path TO fkpart3; +CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a); +CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (0) TO (1000); +CREATE TABLE pk2 (b int, a int); +ALTER TABLE pk2 DROP COLUMN b; +ALTER TABLE pk2 ALTER a SET NOT NULL; +ALTER TABLE pk ATTACH PARTITION pk2 FOR VALUES FROM (1000) TO (2000); +CREATE TABLE fk (a int) PARTITION BY RANGE (a); +CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (0) TO (750); +ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk; +CREATE TABLE fk2 (b int, a int) ; +ALTER TABLE fk2 DROP COLUMN b; +ALTER TABLE fk ATTACH PARTITION fk2 FOR VALUES FROM (750) TO (3500); +CREATE TABLE pk3 PARTITION OF pk FOR VALUES FROM (2000) TO (3000); +CREATE TABLE pk4 (LIKE pk); +ALTER TABLE pk ATTACH PARTITION pk4 FOR VALUES FROM (3000) TO (4000); +CREATE TABLE pk5 (c int, b int, a int NOT NULL) PARTITION BY RANGE (a); +ALTER TABLE pk5 DROP COLUMN b, DROP COLUMN c; +CREATE TABLE pk51 PARTITION OF pk5 FOR VALUES FROM (4000) TO (4500); +CREATE TABLE pk52 PARTITION OF pk5 FOR VALUES FROM (4500) TO (5000); +ALTER TABLE pk ATTACH PARTITION pk5 FOR VALUES FROM (4000) TO (5000); +CREATE TABLE fk3 PARTITION OF fk FOR VALUES FROM (3500) TO (5000); +-- these should fail: referenced value not present +INSERT into fk VALUES (1); +ERROR: insert or update on table "fk1" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(1) is not present in table "pk". +INSERT into fk VALUES (1000); +ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(1000) is not present in table "pk". +INSERT into fk VALUES (2000); +ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(2000) is not present in table "pk". +INSERT into fk VALUES (3000); +ERROR: insert or update on table "fk2" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(3000) is not present in table "pk". +INSERT into fk VALUES (4000); +ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(4000) is not present in table "pk". +INSERT into fk VALUES (4500); +ERROR: insert or update on table "fk3" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(4500) is not present in table "pk". +-- insert into the referenced table, now they should work +INSERT into pk VALUES (1), (1000), (2000), (3000), (4000), (4500); +INSERT into fk VALUES (1), (1000), (2000), (3000), (4000), (4500); +-- should fail: referencing value present +DELETE FROM pk WHERE a = 1; +ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk" +DETAIL: Key (a)=(1) is still referenced from table "fk". +DELETE FROM pk WHERE a = 1000; +ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk" +DETAIL: Key (a)=(1000) is still referenced from table "fk". +DELETE FROM pk WHERE a = 2000; +ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk" +DETAIL: Key (a)=(2000) is still referenced from table "fk". +DELETE FROM pk WHERE a = 3000; +ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk" +DETAIL: Key (a)=(3000) is still referenced from table "fk". +DELETE FROM pk WHERE a = 4000; +ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk" +DETAIL: Key (a)=(4000) is still referenced from table "fk". 
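Editor's note: the fkpart3 tests above exercise a foreign key whose referenced table is itself partitioned; a referencing row is valid as long as its key exists in some partition of the referenced table. A condensed sketch with invented names (measurements, readings), not part of the regression output:

-- sketch only
CREATE TABLE measurements (day int PRIMARY KEY) PARTITION BY RANGE (day);
CREATE TABLE measurements_p1 PARTITION OF measurements FOR VALUES FROM (0) TO (100);
CREATE TABLE readings (day int REFERENCES measurements);
INSERT INTO measurements VALUES (10);
INSERT INTO readings VALUES (10);  -- ok: key found in measurements_p1
INSERT INTO readings VALUES (50);  -- should fail: no such key in any partition
DROP TABLE readings, measurements;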
+DELETE FROM pk WHERE a = 4500; +ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk" +DETAIL: Key (a)=(4500) is still referenced from table "fk". +UPDATE pk SET a = 2 WHERE a = 1; +ERROR: update or delete on table "pk1" violates foreign key constraint "fk_a_fkey1" on table "fk" +DETAIL: Key (a)=(1) is still referenced from table "fk". +UPDATE pk SET a = 1002 WHERE a = 1000; +ERROR: update or delete on table "pk2" violates foreign key constraint "fk_a_fkey2" on table "fk" +DETAIL: Key (a)=(1000) is still referenced from table "fk". +UPDATE pk SET a = 2002 WHERE a = 2000; +ERROR: update or delete on table "pk3" violates foreign key constraint "fk_a_fkey3" on table "fk" +DETAIL: Key (a)=(2000) is still referenced from table "fk". +UPDATE pk SET a = 3002 WHERE a = 3000; +ERROR: update or delete on table "pk4" violates foreign key constraint "fk_a_fkey4" on table "fk" +DETAIL: Key (a)=(3000) is still referenced from table "fk". +UPDATE pk SET a = 4002 WHERE a = 4000; +ERROR: update or delete on table "pk51" violates foreign key constraint "fk_a_fkey6" on table "fk" +DETAIL: Key (a)=(4000) is still referenced from table "fk". +UPDATE pk SET a = 4502 WHERE a = 4500; +ERROR: update or delete on table "pk52" violates foreign key constraint "fk_a_fkey7" on table "fk" +DETAIL: Key (a)=(4500) is still referenced from table "fk". +-- now they should work +DELETE FROM fk; +UPDATE pk SET a = 2 WHERE a = 1; +DELETE FROM pk WHERE a = 2; +UPDATE pk SET a = 1002 WHERE a = 1000; +DELETE FROM pk WHERE a = 1002; +UPDATE pk SET a = 2002 WHERE a = 2000; +DELETE FROM pk WHERE a = 2002; +UPDATE pk SET a = 3002 WHERE a = 3000; +DELETE FROM pk WHERE a = 3002; +UPDATE pk SET a = 4002 WHERE a = 4000; +DELETE FROM pk WHERE a = 4002; +UPDATE pk SET a = 4502 WHERE a = 4500; +DELETE FROM pk WHERE a = 4502; +CREATE SCHEMA fkpart4; +SET search_path TO fkpart4; +-- dropping/detaching PARTITIONs is prevented if that would break +-- a foreign key's existing data +CREATE TABLE droppk (a int PRIMARY KEY) PARTITION BY RANGE (a); +CREATE TABLE droppk1 PARTITION OF droppk FOR VALUES FROM (0) TO (1000); +CREATE TABLE droppk_d PARTITION OF droppk DEFAULT; +CREATE TABLE droppk2 PARTITION OF droppk FOR VALUES FROM (1000) TO (2000) + PARTITION BY RANGE (a); +CREATE TABLE droppk21 PARTITION OF droppk2 FOR VALUES FROM (1000) TO (1400); +CREATE TABLE droppk2_d PARTITION OF droppk2 DEFAULT; +INSERT into droppk VALUES (1), (1000), (1500), (2000); +CREATE TABLE dropfk (a int REFERENCES droppk); +INSERT into dropfk VALUES (1), (1000), (1500), (2000); +-- these should all fail +ALTER TABLE droppk DETACH PARTITION droppk_d; +ERROR: removing partition "droppk_d" violates foreign key constraint "dropfk_a_fkey5" +DETAIL: Key (a)=(2000) is still referenced from table "dropfk". +ALTER TABLE droppk2 DETACH PARTITION droppk2_d; +ERROR: removing partition "droppk2_d" violates foreign key constraint "dropfk_a_fkey4" +DETAIL: Key (a)=(1500) is still referenced from table "dropfk". +ALTER TABLE droppk DETACH PARTITION droppk1; +ERROR: removing partition "droppk1" violates foreign key constraint "dropfk_a_fkey1" +DETAIL: Key (a)=(1) is still referenced from table "dropfk". +ALTER TABLE droppk DETACH PARTITION droppk2; +ERROR: removing partition "droppk2" violates foreign key constraint "dropfk_a_fkey2" +DETAIL: Key (a)=(1000) is still referenced from table "dropfk". 
+ALTER TABLE droppk2 DETACH PARTITION droppk21; +ERROR: removing partition "droppk21" violates foreign key constraint "dropfk_a_fkey3" +DETAIL: Key (a)=(1000) is still referenced from table "dropfk". +-- dropping partitions is disallowed +DROP TABLE droppk_d; +ERROR: cannot drop table droppk_d because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE droppk2_d; +ERROR: cannot drop table droppk2_d because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE droppk1; +ERROR: cannot drop table droppk1 because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE droppk2; +ERROR: cannot drop table droppk2 because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE droppk21; +ERROR: cannot drop table droppk21 because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk21 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DELETE FROM dropfk; +-- dropping partitions is disallowed, even when no referencing values +DROP TABLE droppk_d; +ERROR: cannot drop table droppk_d because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk_d +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE droppk2_d; +ERROR: cannot drop table droppk2_d because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2_d +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP TABLE droppk1; +ERROR: cannot drop table droppk1 because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- but DETACH is allowed, and DROP afterwards works +ALTER TABLE droppk2 DETACH PARTITION droppk21; +DROP TABLE droppk2; +ERROR: cannot drop table droppk2 because other objects depend on it +DETAIL: constraint dropfk_a_fkey on table dropfk depends on table droppk2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
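Editor's note: the fkpart4 tests above show that partitions of a referenced table can be neither dropped nor detached while referencing rows depend on them, and that even after the referencing rows are gone a partition must be detached before it can be dropped. A sketch of that detach-then-drop sequence, using hypothetical names rather than the regression-test objects:

-- sketch only; names are invented
CREATE TABLE parent (a int PRIMARY KEY) PARTITION BY RANGE (a);
CREATE TABLE parent_p1 PARTITION OF parent FOR VALUES FROM (0) TO (10);
CREATE TABLE child (a int REFERENCES parent);
-- DROP TABLE parent_p1 here would be rejected because the foreign key
-- constraint on child depends on the partition; detach it first instead:
ALTER TABLE parent DETACH PARTITION parent_p1;
DROP TABLE parent_p1;              -- succeeds once detached
DROP TABLE child, parent;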
+-- Verify that initial constraint creation and cloning behave correctly +CREATE SCHEMA fkpart5; +SET search_path TO fkpart5; +CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a); +CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1) PARTITION BY LIST (a); +CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1); +CREATE TABLE fk (a int) PARTITION BY LIST (a); +CREATE TABLE fk1 PARTITION OF fk FOR VALUES IN (1) PARTITION BY LIST (a); +CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES IN (1); +ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk; +CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (2); +CREATE TABLE pk3 (a int NOT NULL) PARTITION BY LIST (a); +CREATE TABLE pk31 PARTITION OF pk3 FOR VALUES IN (31); +CREATE TABLE pk32 (b int, a int NOT NULL); +ALTER TABLE pk32 DROP COLUMN b; +ALTER TABLE pk3 ATTACH PARTITION pk32 FOR VALUES IN (32); +ALTER TABLE pk ATTACH PARTITION pk3 FOR VALUES IN (31, 32); +CREATE TABLE fk2 PARTITION OF fk FOR VALUES IN (2); +CREATE TABLE fk3 (b int, a int); +ALTER TABLE fk3 DROP COLUMN b; +ALTER TABLE fk ATTACH PARTITION fk3 FOR VALUES IN (3); +SELECT pg_describe_object('pg_constraint'::regclass, oid, 0), confrelid::regclass, + CASE WHEN conparentid <> 0 THEN pg_describe_object('pg_constraint'::regclass, conparentid, 0) ELSE 'TOP' END +FROM pg_catalog.pg_constraint +WHERE conrelid IN (SELECT relid FROM pg_partition_tree('fk')) +ORDER BY conrelid::regclass::text, conname; + pg_describe_object | confrelid | case +------------------------------------+-----------+----------------------------------- + constraint fk_a_fkey on table fk | pk | TOP + constraint fk_a_fkey1 on table fk | pk1 | constraint fk_a_fkey on table fk + constraint fk_a_fkey2 on table fk | pk11 | constraint fk_a_fkey1 on table fk + constraint fk_a_fkey3 on table fk | pk2 | constraint fk_a_fkey on table fk + constraint fk_a_fkey4 on table fk | pk3 | constraint fk_a_fkey on table fk + constraint fk_a_fkey5 on table fk | pk31 | constraint fk_a_fkey4 on table fk + constraint fk_a_fkey6 on table fk | pk32 | constraint fk_a_fkey4 on table fk + constraint fk_a_fkey on table fk1 | pk | constraint fk_a_fkey on table fk + constraint fk_a_fkey on table fk11 | pk | constraint fk_a_fkey on table fk1 + constraint fk_a_fkey on table fk2 | pk | constraint fk_a_fkey on table fk + constraint fk_a_fkey on table fk3 | pk | constraint fk_a_fkey on table fk +(11 rows) + +CREATE TABLE fk4 (LIKE fk); +INSERT INTO fk4 VALUES (50); +ALTER TABLE fk ATTACH PARTITION fk4 FOR VALUES IN (50); +ERROR: insert or update on table "fk4" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(50) is not present in table "pk". +-- Verify constraint deferrability +CREATE SCHEMA fkpart9; +SET search_path TO fkpart9; +CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY LIST (a); +CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a); +CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES IN (1); +CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (3); +CREATE TABLE fk (a int REFERENCES pk DEFERRABLE INITIALLY IMMEDIATE); +INSERT INTO fk VALUES (1); -- should fail +ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(1) is not present in table "pk". +BEGIN; +SET CONSTRAINTS fk_a_fkey DEFERRED; +INSERT INTO fk VALUES (1); +COMMIT; -- should fail +ERROR: insert or update on table "fk" violates foreign key constraint "fk_a_fkey" +DETAIL: Key (a)=(1) is not present in table "pk". 
+BEGIN; +SET CONSTRAINTS fk_a_fkey DEFERRED; +INSERT INTO fk VALUES (1); +INSERT INTO pk VALUES (1); +COMMIT; -- OK +BEGIN; +SET CONSTRAINTS fk_a_fkey DEFERRED; +DELETE FROM pk WHERE a = 1; +DELETE FROM fk WHERE a = 1; +COMMIT; -- OK +-- Verify constraint deferrability when changed by ALTER +-- Partitioned table at referencing end +CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)); +CREATE TABLE ref(f1 int, f2 int, f3 int) + PARTITION BY list(f1); +CREATE TABLE ref1 PARTITION OF ref FOR VALUES IN (1); +CREATE TABLE ref2 PARTITION OF ref FOR VALUES in (2); +ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; +ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey + DEFERRABLE INITIALLY DEFERRED; +INSERT INTO pt VALUES(1,2,3); +INSERT INTO ref VALUES(1,2,3); +BEGIN; +DELETE FROM pt; +DELETE FROM ref; +ABORT; +DROP TABLE pt, ref; +-- Multi-level partitioning at referencing end +CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)); +CREATE TABLE ref(f1 int, f2 int, f3 int) + PARTITION BY list(f1); +CREATE TABLE ref1_2 PARTITION OF ref FOR VALUES IN (1, 2) PARTITION BY list (f2); +CREATE TABLE ref1 PARTITION OF ref1_2 FOR VALUES IN (1); +CREATE TABLE ref2 PARTITION OF ref1_2 FOR VALUES IN (2) PARTITION BY list (f2); +CREATE TABLE ref22 PARTITION OF ref2 FOR VALUES IN (2); +ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; +INSERT INTO pt VALUES(1,2,3); +INSERT INTO ref VALUES(1,2,3); +ALTER TABLE ref22 ALTER CONSTRAINT ref_f1_f2_fkey + DEFERRABLE INITIALLY IMMEDIATE; -- fails +ERROR: cannot alter constraint "ref_f1_f2_fkey" on relation "ref22" +DETAIL: Constraint "ref_f1_f2_fkey" is derived from constraint "ref_f1_f2_fkey" of relation "ref". +HINT: You may alter the constraint it derives from instead. +ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey + DEFERRABLE INITIALLY DEFERRED; +BEGIN; +DELETE FROM pt; +DELETE FROM ref; +ABORT; +DROP TABLE pt, ref; +-- Partitioned table at referenced end +CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)) + PARTITION BY LIST(f1); +CREATE TABLE pt1 PARTITION OF pt FOR VALUES IN (1); +CREATE TABLE pt2 PARTITION OF pt FOR VALUES IN (2); +CREATE TABLE ref(f1 int, f2 int, f3 int); +ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; +ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey + DEFERRABLE INITIALLY DEFERRED; +INSERT INTO pt VALUES(1,2,3); +INSERT INTO ref VALUES(1,2,3); +BEGIN; +DELETE FROM pt; +DELETE FROM ref; +ABORT; +DROP TABLE pt, ref; +-- Multi-level partitioning at referenced end +CREATE TABLE pt(f1 int, f2 int, f3 int, PRIMARY KEY(f1,f2)) + PARTITION BY LIST(f1); +CREATE TABLE pt1_2 PARTITION OF pt FOR VALUES IN (1, 2) PARTITION BY LIST (f1); +CREATE TABLE pt1 PARTITION OF pt1_2 FOR VALUES IN (1); +CREATE TABLE pt2 PARTITION OF pt1_2 FOR VALUES IN (2); +CREATE TABLE ref(f1 int, f2 int, f3 int); +ALTER TABLE ref ADD FOREIGN KEY(f1,f2) REFERENCES pt; +ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey1 + DEFERRABLE INITIALLY DEFERRED; -- fails +ERROR: cannot alter constraint "ref_f1_f2_fkey1" on relation "ref" +DETAIL: Constraint "ref_f1_f2_fkey1" is derived from constraint "ref_f1_f2_fkey" of relation "ref". +HINT: You may alter the constraint it derives from instead. 
+ALTER TABLE ref ALTER CONSTRAINT ref_f1_f2_fkey + DEFERRABLE INITIALLY DEFERRED; +INSERT INTO pt VALUES(1,2,3); +INSERT INTO ref VALUES(1,2,3); +BEGIN; +DELETE FROM pt; +DELETE FROM ref; +ABORT; +DROP TABLE pt, ref; +DROP SCHEMA fkpart9 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table pk +drop cascades to table fk +-- Verify ON UPDATE/DELETE behavior +CREATE SCHEMA fkpart6; +SET search_path TO fkpart6; +CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a); +CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); +CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (1) TO (50); +CREATE TABLE pk12 PARTITION OF pk1 FOR VALUES FROM (50) TO (100); +CREATE TABLE fk (a int) PARTITION BY RANGE (a); +CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); +CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); +CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); +ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE CASCADE ON DELETE CASCADE; +CREATE TABLE fk_d PARTITION OF fk DEFAULT; +INSERT INTO pk VALUES (1); +INSERT INTO fk VALUES (1); +UPDATE pk SET a = 20; +SELECT tableoid::regclass, * FROM fk; + tableoid | a +----------+---- + fk12 | 20 +(1 row) + +DELETE FROM pk WHERE a = 20; +SELECT tableoid::regclass, * FROM fk; + tableoid | a +----------+--- +(0 rows) + +DROP TABLE fk; +TRUNCATE TABLE pk; +INSERT INTO pk VALUES (20), (50); +CREATE TABLE fk (a int) PARTITION BY RANGE (a); +CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); +CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); +CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); +ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET NULL ON DELETE SET NULL; +CREATE TABLE fk_d PARTITION OF fk DEFAULT; +INSERT INTO fk VALUES (20), (50); +UPDATE pk SET a = 21 WHERE a = 20; +DELETE FROM pk WHERE a = 50; +SELECT tableoid::regclass, * FROM fk; + tableoid | a +----------+--- + fk_d | + fk_d | +(2 rows) + +DROP TABLE fk; +TRUNCATE TABLE pk; +INSERT INTO pk VALUES (20), (30), (50); +CREATE TABLE fk (id int, a int DEFAULT 50) PARTITION BY RANGE (a); +CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); +CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); +CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); +ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE SET DEFAULT ON DELETE SET DEFAULT; +CREATE TABLE fk_d PARTITION OF fk DEFAULT; +INSERT INTO fk VALUES (1, 20), (2, 30); +DELETE FROM pk WHERE a = 20 RETURNING *; + a +---- + 20 +(1 row) + +UPDATE pk SET a = 90 WHERE a = 30 RETURNING *; + a +---- + 90 +(1 row) + +SELECT tableoid::regclass, * FROM fk; + tableoid | id | a +----------+----+---- + fk12 | 1 | 50 + fk12 | 2 | 50 +(2 rows) + +DROP TABLE fk; +TRUNCATE TABLE pk; +INSERT INTO pk VALUES (20), (30); +CREATE TABLE fk (a int DEFAULT 50) PARTITION BY RANGE (a); +CREATE TABLE fk1 PARTITION OF fk FOR VALUES FROM (1) TO (100) PARTITION BY RANGE (a); +CREATE TABLE fk11 PARTITION OF fk1 FOR VALUES FROM (1) TO (10); +CREATE TABLE fk12 PARTITION OF fk1 FOR VALUES FROM (10) TO (100); +ALTER TABLE fk ADD FOREIGN KEY (a) REFERENCES pk ON UPDATE RESTRICT ON DELETE RESTRICT; +CREATE TABLE fk_d PARTITION OF fk DEFAULT; +INSERT INTO fk VALUES (20), (30); +DELETE FROM pk WHERE a = 20; +ERROR: update or delete on table "pk11" violates foreign key constraint "fk_a_fkey2" on table "fk" +DETAIL: Key 
(a)=(20) is still referenced from table "fk". +UPDATE pk SET a = 90 WHERE a = 30; +ERROR: update or delete on table "pk" violates foreign key constraint "fk_a_fkey" on table "fk" +DETAIL: Key (a)=(30) is still referenced from table "fk". +SELECT tableoid::regclass, * FROM fk; + tableoid | a +----------+---- + fk12 | 20 + fk12 | 30 +(2 rows) + +DROP TABLE fk; +-- test for reported bug: relispartition not set +-- https://postgr.es/m/CA+HiwqHMsRtRYRWYTWavKJ8x14AFsv7bmAV46mYwnfD3vy8goQ@mail.gmail.com +CREATE SCHEMA fkpart7 + CREATE TABLE pkpart (a int) PARTITION BY LIST (a) + CREATE TABLE pkpart1 PARTITION OF pkpart FOR VALUES IN (1); +ALTER TABLE fkpart7.pkpart1 ADD PRIMARY KEY (a); +ALTER TABLE fkpart7.pkpart ADD PRIMARY KEY (a); +CREATE TABLE fkpart7.fk (a int REFERENCES fkpart7.pkpart); +DROP SCHEMA fkpart7 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table fkpart7.pkpart +drop cascades to table fkpart7.fk +-- ensure we check partitions are "not used" when dropping constraints +CREATE SCHEMA fkpart8 + CREATE TABLE tbl1(f1 int PRIMARY KEY) + CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) PARTITION BY RANGE(f1) + CREATE TABLE tbl2_p1 PARTITION OF tbl2 FOR VALUES FROM (minvalue) TO (maxvalue); +INSERT INTO fkpart8.tbl1 VALUES(1); +BEGIN; +INSERT INTO fkpart8.tbl2 VALUES(1); +ALTER TABLE fkpart8.tbl2 DROP CONSTRAINT tbl2_f1_fkey; +ERROR: cannot ALTER TABLE "tbl2_p1" because it has pending trigger events +COMMIT; +DROP SCHEMA fkpart8 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table fkpart8.tbl1 +drop cascades to table fkpart8.tbl2 +-- ensure FK referencing a multi-level partitioned table are +-- enforce reference to sub-children. +CREATE SCHEMA fkpart9 + CREATE TABLE pk (a INT PRIMARY KEY) PARTITION BY RANGE (a) + CREATE TABLE fk ( + fk_a INT REFERENCES pk(a) ON DELETE CASCADE + ) + CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (30) TO (50) PARTITION BY RANGE (a) + CREATE TABLE pk11 PARTITION OF pk1 FOR VALUES FROM (30) TO (40); +INSERT INTO fkpart9.pk VALUES (35); +INSERT INTO fkpart9.fk VALUES (35); +DELETE FROM fkpart9.pk WHERE a=35; +SELECT * FROM fkpart9.pk; + a +--- +(0 rows) + +SELECT * FROM fkpart9.fk; + fk_a +------ +(0 rows) + +DROP SCHEMA fkpart9 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table fkpart9.pk +drop cascades to table fkpart9.fk +-- test that ri_Check_Pk_Match() scans the correct partition for a deferred +-- ON DELETE/UPDATE NO ACTION constraint +CREATE SCHEMA fkpart10 + CREATE TABLE tbl1(f1 int PRIMARY KEY) PARTITION BY RANGE(f1) + CREATE TABLE tbl1_p1 PARTITION OF tbl1 FOR VALUES FROM (minvalue) TO (1) + CREATE TABLE tbl1_p2 PARTITION OF tbl1 FOR VALUES FROM (1) TO (maxvalue) + CREATE TABLE tbl2(f1 int REFERENCES tbl1 DEFERRABLE INITIALLY DEFERRED) + CREATE TABLE tbl3(f1 int PRIMARY KEY) PARTITION BY RANGE(f1) + CREATE TABLE tbl3_p1 PARTITION OF tbl3 FOR VALUES FROM (minvalue) TO (1) + CREATE TABLE tbl3_p2 PARTITION OF tbl3 FOR VALUES FROM (1) TO (maxvalue) + CREATE TABLE tbl4(f1 int REFERENCES tbl3 DEFERRABLE INITIALLY DEFERRED); +INSERT INTO fkpart10.tbl1 VALUES (0), (1); +INSERT INTO fkpart10.tbl2 VALUES (0), (1); +INSERT INTO fkpart10.tbl3 VALUES (-2), (-1), (0); +INSERT INTO fkpart10.tbl4 VALUES (-2), (-1); +BEGIN; +DELETE FROM fkpart10.tbl1 WHERE f1 = 0; +UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1; +INSERT INTO fkpart10.tbl1 VALUES (0), (1); +COMMIT; +-- test that cross-partition updates correctly enforces the foreign key +-- restriction 
(specifically testing INITIAILLY DEFERRED) +BEGIN; +UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; +UPDATE fkpart10.tbl3 SET f1 = f1 * -1; +INSERT INTO fkpart10.tbl1 VALUES (4); +COMMIT; +ERROR: update or delete on table "tbl1" violates foreign key constraint "tbl2_f1_fkey" on table "tbl2" +DETAIL: Key (f1)=(0) is still referenced from table "tbl2". +BEGIN; +UPDATE fkpart10.tbl3 SET f1 = f1 * -1; +UPDATE fkpart10.tbl3 SET f1 = f1 + 3; +UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; +INSERT INTO fkpart10.tbl1 VALUES (0); +COMMIT; +ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4" +DETAIL: Key (f1)=(-2) is still referenced from table "tbl4". +BEGIN; +UPDATE fkpart10.tbl3 SET f1 = f1 * -1; +UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0; +INSERT INTO fkpart10.tbl1 VALUES (0); +INSERT INTO fkpart10.tbl3 VALUES (-2), (-1); +COMMIT; +-- test where the updated table now has both an IMMEDIATE and a DEFERRED +-- constraint pointing into it +CREATE TABLE fkpart10.tbl5(f1 int REFERENCES fkpart10.tbl3); +INSERT INTO fkpart10.tbl5 VALUES (-2), (-1); +BEGIN; +UPDATE fkpart10.tbl3 SET f1 = f1 * -3; +ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl5_f1_fkey" on table "tbl5" +DETAIL: Key (f1)=(-2) is still referenced from table "tbl5". +COMMIT; +-- Now test where the row referenced from the table with an IMMEDIATE +-- constraint stays in place, while those referenced from the table with a +-- DEFERRED constraint don't. +DELETE FROM fkpart10.tbl5; +INSERT INTO fkpart10.tbl5 VALUES (0); +BEGIN; +UPDATE fkpart10.tbl3 SET f1 = f1 * -3; +COMMIT; +ERROR: update or delete on table "tbl3" violates foreign key constraint "tbl4_f1_fkey" on table "tbl4" +DETAIL: Key (f1)=(-2) is still referenced from table "tbl4". 
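Editor's note: the fkpart10 tests above rely on DEFERRABLE INITIALLY DEFERRED foreign keys, whose checks are postponed until COMMIT, so a transaction may be temporarily inconsistent as long as it is consistent when it commits. A self-contained sketch with invented names (master, detail), not part of the expected output:

-- sketch only
CREATE TABLE master (id int PRIMARY KEY) PARTITION BY RANGE (id);
CREATE TABLE master_p1 PARTITION OF master FOR VALUES FROM (0) TO (100);
CREATE TABLE detail (id int REFERENCES master DEFERRABLE INITIALLY DEFERRED);
BEGIN;
INSERT INTO detail VALUES (1);   -- temporarily violates the foreign key
INSERT INTO master VALUES (1);   -- violation resolved before commit
COMMIT;                          -- the deferred check runs here and should pass
DROP TABLE detail, master;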
+DROP SCHEMA fkpart10 CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table fkpart10.tbl1 +drop cascades to table fkpart10.tbl2 +drop cascades to table fkpart10.tbl3 +drop cascades to table fkpart10.tbl4 +drop cascades to table fkpart10.tbl5 +-- verify foreign keys are enforced during cross-partition updates, +-- especially on the PK side +CREATE SCHEMA fkpart11 + CREATE TABLE pk (a INT PRIMARY KEY, b text) PARTITION BY LIST (a) + CREATE TABLE fk ( + a INT, + CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE + ) + CREATE TABLE fk_parted ( + a INT PRIMARY KEY, + CONSTRAINT fkey FOREIGN KEY (a) REFERENCES pk(a) ON UPDATE CASCADE ON DELETE CASCADE + ) PARTITION BY LIST (a) + CREATE TABLE fk_another ( + a INT, + CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fk_parted (a) ON UPDATE CASCADE ON DELETE CASCADE + ) + CREATE TABLE pk1 PARTITION OF pk FOR VALUES IN (1, 2) PARTITION BY LIST (a) + CREATE TABLE pk2 PARTITION OF pk FOR VALUES IN (3) + CREATE TABLE pk3 PARTITION OF pk FOR VALUES IN (4) + CREATE TABLE fk1 PARTITION OF fk_parted FOR VALUES IN (1, 2) + CREATE TABLE fk2 PARTITION OF fk_parted FOR VALUES IN (3) + CREATE TABLE fk3 PARTITION OF fk_parted FOR VALUES IN (4); +CREATE TABLE fkpart11.pk11 (b text, a int NOT NULL); +ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk11 FOR VALUES IN (1); +CREATE TABLE fkpart11.pk12 (b text, c int, a int NOT NULL); +ALTER TABLE fkpart11.pk12 DROP c; +ALTER TABLE fkpart11.pk1 ATTACH PARTITION fkpart11.pk12 FOR VALUES IN (2); +INSERT INTO fkpart11.pk VALUES (1, 'xxx'), (3, 'yyy'); +INSERT INTO fkpart11.fk VALUES (1), (3); +INSERT INTO fkpart11.fk_parted VALUES (1), (3); +INSERT INTO fkpart11.fk_another VALUES (1), (3); +-- moves 2 rows from one leaf partition to another, with both updates being +-- cascaded to fk and fk_parted. Updates of fk_parted, of which one is +-- cross-partition (3 -> 4), are further cascaded to fk_another. +UPDATE fkpart11.pk SET a = a + 1 RETURNING tableoid::pg_catalog.regclass, *; + tableoid | a | b +---------------+---+----- + fkpart11.pk12 | 2 | xxx + fkpart11.pk3 | 4 | yyy +(2 rows) + +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; + tableoid | a +-------------+--- + fkpart11.fk | 2 + fkpart11.fk | 4 +(2 rows) + +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted; + tableoid | a +--------------+--- + fkpart11.fk1 | 2 + fkpart11.fk3 | 4 +(2 rows) + +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another; + tableoid | a +---------------------+--- + fkpart11.fk_another | 2 + fkpart11.fk_another | 4 +(2 rows) + +-- let's try with the foreign key pointing at tables in the partition tree +-- that are not the same as the query's target table +-- 1. foreign key pointing into a non-root ancestor +-- +-- A cross-partition update on the root table will fail, because we currently +-- can't enforce the foreign keys pointing into a non-leaf partition +ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey; +DELETE FROM fkpart11.fk WHERE a = 4; +ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk1 (a) ON UPDATE CASCADE ON DELETE CASCADE; +UPDATE fkpart11.pk SET a = a - 1; +ERROR: cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key +DETAIL: A foreign key points to ancestor "pk1" but not the root ancestor "pk". +HINT: Consider defining the foreign key on table "pk". 
+-- it's okay though if the non-leaf partition is updated directly +UPDATE fkpart11.pk1 SET a = a - 1; +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.pk; + tableoid | a | b +---------------+---+----- + fkpart11.pk11 | 1 | xxx + fkpart11.pk3 | 4 | yyy +(2 rows) + +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; + tableoid | a +-------------+--- + fkpart11.fk | 1 +(1 row) + +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_parted; + tableoid | a +--------------+--- + fkpart11.fk1 | 1 + fkpart11.fk3 | 4 +(2 rows) + +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk_another; + tableoid | a +---------------------+--- + fkpart11.fk_another | 4 + fkpart11.fk_another | 1 +(2 rows) + +-- 2. foreign key pointing into a single leaf partition +-- +-- A cross-partition update that deletes from the pointed-to leaf partition +-- is allowed to succeed +ALTER TABLE fkpart11.fk DROP CONSTRAINT fkey; +ALTER TABLE fkpart11.fk ADD CONSTRAINT fkey FOREIGN KEY (a) REFERENCES fkpart11.pk11 (a) ON UPDATE CASCADE ON DELETE CASCADE; +-- will delete (1) from p11 which is cascaded to fk +UPDATE fkpart11.pk SET a = a + 1 WHERE a = 1; +SELECT tableoid::pg_catalog.regclass, * FROM fkpart11.fk; + tableoid | a +----------+--- +(0 rows) + +DROP TABLE fkpart11.fk; +-- check that regular and deferrable AR triggers on the PK tables +-- still work as expected +CREATE FUNCTION fkpart11.print_row () RETURNS TRIGGER LANGUAGE plpgsql AS $$ + BEGIN + RAISE NOTICE 'TABLE: %, OP: %, OLD: %, NEW: %', TG_RELNAME, TG_OP, OLD, NEW; + RETURN NULL; + END; +$$; +CREATE TRIGGER trig_upd_pk AFTER UPDATE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +CREATE TRIGGER trig_del_pk AFTER DELETE ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +CREATE TRIGGER trig_ins_pk AFTER INSERT ON fkpart11.pk FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +CREATE CONSTRAINT TRIGGER trig_upd_fk_parted AFTER UPDATE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +CREATE CONSTRAINT TRIGGER trig_del_fk_parted AFTER DELETE ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +CREATE CONSTRAINT TRIGGER trig_ins_fk_parted AFTER INSERT ON fkpart11.fk_parted INITIALLY DEFERRED FOR EACH ROW EXECUTE FUNCTION fkpart11.print_row(); +UPDATE fkpart11.pk SET a = 3 WHERE a = 4; +NOTICE: TABLE: pk3, OP: DELETE, OLD: (4,yyy), NEW: +NOTICE: TABLE: pk2, OP: INSERT, OLD: , NEW: (3,yyy) +NOTICE: TABLE: fk3, OP: DELETE, OLD: (4), NEW: +NOTICE: TABLE: fk2, OP: INSERT, OLD: , NEW: (3) +UPDATE fkpart11.pk SET a = 1 WHERE a = 2; +NOTICE: TABLE: pk12, OP: DELETE, OLD: (xxx,2), NEW: +NOTICE: TABLE: pk11, OP: INSERT, OLD: , NEW: (xxx,1) +NOTICE: TABLE: fk1, OP: UPDATE, OLD: (2), NEW: (1) +DROP SCHEMA fkpart11 CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table fkpart11.pk +drop cascades to table fkpart11.fk_parted +drop cascades to table fkpart11.fk_another +drop cascades to function fkpart11.print_row() diff --git a/src/test/regress/expected/functional_deps.out b/src/test/regress/expected/functional_deps.out new file mode 100644 index 0000000..32381b8 --- /dev/null +++ b/src/test/regress/expected/functional_deps.out @@ -0,0 +1,232 @@ +-- from http://www.depesz.com/index.php/2010/04/19/getting-unique-elements/ +CREATE TEMP TABLE articles ( + id int CONSTRAINT articles_pkey PRIMARY KEY, + keywords text, + title text UNIQUE NOT NULL, + body text UNIQUE, + created date +); +CREATE TEMP TABLE 
articles_in_category ( + article_id int, + category_id int, + changed date, + PRIMARY KEY (article_id, category_id) +); +-- test functional dependencies based on primary keys/unique constraints +-- base tables +-- group by primary key (OK) +SELECT id, keywords, title, body, created +FROM articles +GROUP BY id; + id | keywords | title | body | created +----+----------+-------+------+--------- +(0 rows) + +-- group by unique not null (fail/todo) +SELECT id, keywords, title, body, created +FROM articles +GROUP BY title; +ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT id, keywords, title, body, created + ^ +-- group by unique nullable (fail) +SELECT id, keywords, title, body, created +FROM articles +GROUP BY body; +ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT id, keywords, title, body, created + ^ +-- group by something else (fail) +SELECT id, keywords, title, body, created +FROM articles +GROUP BY keywords; +ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT id, keywords, title, body, created + ^ +-- multiple tables +-- group by primary key (OK) +SELECT a.id, a.keywords, a.title, a.body, a.created +FROM articles AS a, articles_in_category AS aic +WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138) +GROUP BY a.id; + id | keywords | title | body | created +----+----------+-------+------+--------- +(0 rows) + +-- group by something else (fail) +SELECT a.id, a.keywords, a.title, a.body, a.created +FROM articles AS a, articles_in_category AS aic +WHERE a.id = aic.article_id AND aic.category_id in (14,62,70,53,138) +GROUP BY aic.article_id, aic.category_id; +ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created + ^ +-- JOIN syntax +-- group by left table's primary key (OK) +SELECT a.id, a.keywords, a.title, a.body, a.created +FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id +WHERE aic.category_id in (14,62,70,53,138) +GROUP BY a.id; + id | keywords | title | body | created +----+----------+-------+------+--------- +(0 rows) + +-- group by something else (fail) +SELECT a.id, a.keywords, a.title, a.body, a.created +FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id +WHERE aic.category_id in (14,62,70,53,138) +GROUP BY aic.article_id, aic.category_id; +ERROR: column "a.id" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT a.id, a.keywords, a.title, a.body, a.created + ^ +-- group by right table's (composite) primary key (OK) +SELECT aic.changed +FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id +WHERE aic.category_id in (14,62,70,53,138) +GROUP BY aic.category_id, aic.article_id; + changed +--------- +(0 rows) + +-- group by right table's partial primary key (fail) +SELECT aic.changed +FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id +WHERE aic.category_id in (14,62,70,53,138) +GROUP BY aic.article_id; +ERROR: column "aic.changed" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT aic.changed + ^ +-- example from documentation +CREATE TEMP TABLE products (product_id int, name text, price numeric); +CREATE TEMP TABLE sales (product_id int, units int); +-- OK +SELECT product_id, p.name, (sum(s.units) * p.price) 
AS sales + FROM products p LEFT JOIN sales s USING (product_id) + GROUP BY product_id, p.name, p.price; + product_id | name | sales +------------+------+------- +(0 rows) + +-- fail +SELECT product_id, p.name, (sum(s.units) * p.price) AS sales + FROM products p LEFT JOIN sales s USING (product_id) + GROUP BY product_id; +ERROR: column "p.name" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT product_id, p.name, (sum(s.units) * p.price) AS sales + ^ +ALTER TABLE products ADD PRIMARY KEY (product_id); +-- OK now +SELECT product_id, p.name, (sum(s.units) * p.price) AS sales + FROM products p LEFT JOIN sales s USING (product_id) + GROUP BY product_id; + product_id | name | sales +------------+------+------- +(0 rows) + +-- Drupal example, http://drupal.org/node/555530 +CREATE TEMP TABLE node ( + nid SERIAL, + vid integer NOT NULL default '0', + type varchar(32) NOT NULL default '', + title varchar(128) NOT NULL default '', + uid integer NOT NULL default '0', + status integer NOT NULL default '1', + created integer NOT NULL default '0', + -- snip + PRIMARY KEY (nid, vid) +); +CREATE TEMP TABLE users ( + uid integer NOT NULL default '0', + name varchar(60) NOT NULL default '', + pass varchar(32) NOT NULL default '', + -- snip + PRIMARY KEY (uid), + UNIQUE (name) +); +-- OK +SELECT u.uid, u.name FROM node n +INNER JOIN users u ON u.uid = n.uid +WHERE n.type = 'blog' AND n.status = 1 +GROUP BY u.uid, u.name; + uid | name +-----+------ +(0 rows) + +-- OK +SELECT u.uid, u.name FROM node n +INNER JOIN users u ON u.uid = n.uid +WHERE n.type = 'blog' AND n.status = 1 +GROUP BY u.uid; + uid | name +-----+------ +(0 rows) + +-- Check views and dependencies +-- fail +CREATE TEMP VIEW fdv1 AS +SELECT id, keywords, title, body, created +FROM articles +GROUP BY body; +ERROR: column "articles.id" must appear in the GROUP BY clause or be used in an aggregate function +LINE 2: SELECT id, keywords, title, body, created + ^ +-- OK +CREATE TEMP VIEW fdv1 AS +SELECT id, keywords, title, body, created +FROM articles +GROUP BY id; +-- fail +ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; +ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it +DETAIL: view fdv1 depends on constraint articles_pkey on table articles +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP VIEW fdv1; +-- multiple dependencies +CREATE TEMP VIEW fdv2 AS +SELECT a.id, a.keywords, a.title, aic.category_id, aic.changed +FROM articles AS a JOIN articles_in_category AS aic ON a.id = aic.article_id +WHERE aic.category_id in (14,62,70,53,138) +GROUP BY a.id, aic.category_id, aic.article_id; +ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail +ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it +DETAIL: view fdv2 depends on constraint articles_pkey on table articles +HINT: Use DROP ... CASCADE to drop the dependent objects too. +ALTER TABLE articles_in_category DROP CONSTRAINT articles_in_category_pkey RESTRICT; --fail +ERROR: cannot drop constraint articles_in_category_pkey on table articles_in_category because other objects depend on it +DETAIL: view fdv2 depends on constraint articles_in_category_pkey on table articles_in_category +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
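Editor's note: the queries above demonstrate functional-dependency detection: once a table's primary key appears in GROUP BY, its other columns may be selected without being grouped or aggregated, and a view built on such a query becomes dependent on the primary-key constraint. A short sketch with invented tables (authors, books), not part of the regression output:

-- sketch only
CREATE TEMP TABLE authors (author_id int PRIMARY KEY, name text, bio text);
CREATE TEMP TABLE books (book_id int PRIMARY KEY, author_id int);
SELECT a.author_id, a.name, a.bio, count(b.book_id) AS n_books
FROM authors a LEFT JOIN books b USING (author_id)
GROUP BY a.author_id;   -- name and bio are functionally dependent on the key
-- without the PRIMARY KEY on author_id the same query would be rejected
DROP TABLE books, authors;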
+DROP VIEW fdv2; +-- nested queries +CREATE TEMP VIEW fdv3 AS +SELECT id, keywords, title, body, created +FROM articles +GROUP BY id +UNION +SELECT id, keywords, title, body, created +FROM articles +GROUP BY id; +ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail +ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it +DETAIL: view fdv3 depends on constraint articles_pkey on table articles +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP VIEW fdv3; +CREATE TEMP VIEW fdv4 AS +SELECT * FROM articles WHERE title IN (SELECT title FROM articles GROUP BY id); +ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; -- fail +ERROR: cannot drop constraint articles_pkey on table articles because other objects depend on it +DETAIL: view fdv4 depends on constraint articles_pkey on table articles +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP VIEW fdv4; +-- prepared query plans: this results in failure on reuse +PREPARE foo AS + SELECT id, keywords, title, body, created + FROM articles + GROUP BY id; +EXECUTE foo; + id | keywords | title | body | created +----+----------+-------+------+--------- +(0 rows) + +ALTER TABLE articles DROP CONSTRAINT articles_pkey RESTRICT; +EXECUTE foo; -- fail +ERROR: column "articles.keywords" must appear in the GROUP BY clause or be used in an aggregate function diff --git a/src/test/regress/expected/generated.out b/src/test/regress/expected/generated.out new file mode 100644 index 0000000..0f623f7 --- /dev/null +++ b/src/test/regress/expected/generated.out @@ -0,0 +1,1182 @@ +-- sanity check of system catalog +SELECT attrelid, attname, attgenerated FROM pg_attribute WHERE attgenerated NOT IN ('', 's'); + attrelid | attname | attgenerated +----------+---------+-------------- +(0 rows) + +CREATE TABLE gtest0 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (55) STORED); +CREATE TABLE gtest1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED); +SELECT table_name, column_name, column_default, is_nullable, is_generated, generation_expression FROM information_schema.columns WHERE table_name LIKE 'gtest_' ORDER BY 1, 2; + table_name | column_name | column_default | is_nullable | is_generated | generation_expression +------------+-------------+----------------+-------------+--------------+----------------------- + gtest0 | a | | NO | NEVER | + gtest0 | b | | YES | ALWAYS | 55 + gtest1 | a | | NO | NEVER | + gtest1 | b | | YES | ALWAYS | (a * 2) +(4 rows) + +SELECT table_name, column_name, dependent_column FROM information_schema.column_column_usage ORDER BY 1, 2, 3; + table_name | column_name | dependent_column +------------+-------------+------------------ + gtest1 | a | b +(1 row) + +\d gtest1 + Table "public.gtest1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | not null | + b | integer | | | generated always as (a * 2) stored +Indexes: + "gtest1_pkey" PRIMARY KEY, btree (a) + +-- duplicate generated +CREATE TABLE gtest_err_1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED GENERATED ALWAYS AS (a * 3) STORED); +ERROR: multiple generation clauses specified for column "b" of table "gtest_err_1" +LINE 1: ...ARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED GENERATED ... 
+ ^ +-- references to other generated columns, including self-references +CREATE TABLE gtest_err_2a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (b * 2) STORED); +ERROR: cannot use generated column "b" in column generation expression +LINE 1: ...2a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (b * 2) STO... + ^ +DETAIL: A generated column cannot reference another generated column. +CREATE TABLE gtest_err_2b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED, c int GENERATED ALWAYS AS (b * 3) STORED); +ERROR: cannot use generated column "b" in column generation expression +LINE 1: ...AYS AS (a * 2) STORED, c int GENERATED ALWAYS AS (b * 3) STO... + ^ +DETAIL: A generated column cannot reference another generated column. +-- a whole-row var is a self-reference on steroids, so disallow that too +CREATE TABLE gtest_err_2c (a int PRIMARY KEY, + b int GENERATED ALWAYS AS (num_nulls(gtest_err_2c)) STORED); +ERROR: cannot use whole-row variable in column generation expression +LINE 2: b int GENERATED ALWAYS AS (num_nulls(gtest_err_2c)) STOR... + ^ +DETAIL: This would cause the generated column to depend on its own value. +-- invalid reference +CREATE TABLE gtest_err_3 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (c * 2) STORED); +ERROR: column "c" does not exist +LINE 1: ..._3 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (c * 2) STO... + ^ +-- generation expression must be immutable +CREATE TABLE gtest_err_4 (a int PRIMARY KEY, b double precision GENERATED ALWAYS AS (random()) STORED); +ERROR: generation expression is not immutable +-- ... but be sure that the immutability test is accurate +CREATE TABLE gtest2 (a int, b text GENERATED ALWAYS AS (a || ' sec') STORED); +DROP TABLE gtest2; +-- cannot have default/identity and generated +CREATE TABLE gtest_err_5a (a int PRIMARY KEY, b int DEFAULT 5 GENERATED ALWAYS AS (a * 2) STORED); +ERROR: both default and generation expression specified for column "b" of table "gtest_err_5a" +LINE 1: ... gtest_err_5a (a int PRIMARY KEY, b int DEFAULT 5 GENERATED ... + ^ +CREATE TABLE gtest_err_5b (a int PRIMARY KEY, b int GENERATED ALWAYS AS identity GENERATED ALWAYS AS (a * 2) STORED); +ERROR: both identity and generation expression specified for column "b" of table "gtest_err_5b" +LINE 1: ...t PRIMARY KEY, b int GENERATED ALWAYS AS identity GENERATED ... + ^ +-- reference to system column not allowed in generated column +-- (except tableoid, which we test below) +CREATE TABLE gtest_err_6a (a int PRIMARY KEY, b bool GENERATED ALWAYS AS (xmin <> 37) STORED); +ERROR: cannot use system column "xmin" in column generation expression +LINE 1: ...a (a int PRIMARY KEY, b bool GENERATED ALWAYS AS (xmin <> 37... + ^ +-- various prohibited constructs +CREATE TABLE gtest_err_7a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (avg(a)) STORED); +ERROR: aggregate functions are not allowed in column generation expressions +LINE 1: ...7a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (avg(a)) ST... + ^ +CREATE TABLE gtest_err_7b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (row_number() OVER (ORDER BY a)) STORED); +ERROR: window functions are not allowed in column generation expressions +LINE 1: ...7b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (row_number... + ^ +CREATE TABLE gtest_err_7c (a int PRIMARY KEY, b int GENERATED ALWAYS AS ((SELECT a)) STORED); +ERROR: cannot use subquery in column generation expression +LINE 1: ...7c (a int PRIMARY KEY, b int GENERATED ALWAYS AS ((SELECT a)... 
+ ^ +CREATE TABLE gtest_err_7d (a int PRIMARY KEY, b int GENERATED ALWAYS AS (generate_series(1, a)) STORED); +ERROR: set-returning functions are not allowed in column generation expressions +LINE 1: ...7d (a int PRIMARY KEY, b int GENERATED ALWAYS AS (generate_s... + ^ +-- GENERATED BY DEFAULT not allowed +CREATE TABLE gtest_err_8 (a int PRIMARY KEY, b int GENERATED BY DEFAULT AS (a * 2) STORED); +ERROR: for a generated column, GENERATED ALWAYS must be specified +LINE 1: ...E gtest_err_8 (a int PRIMARY KEY, b int GENERATED BY DEFAULT... + ^ +INSERT INTO gtest1 VALUES (1); +INSERT INTO gtest1 VALUES (2, DEFAULT); -- ok +INSERT INTO gtest1 VALUES (3, 33); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1 VALUES (3, 33), (4, 44); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1 VALUES (3, DEFAULT), (4, 44); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1 VALUES (3, 33), (4, DEFAULT); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1 VALUES (3, DEFAULT), (4, DEFAULT); -- ok +SELECT * FROM gtest1 ORDER BY a; + a | b +---+--- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 +(4 rows) + +DELETE FROM gtest1 WHERE a >= 3; +UPDATE gtest1 SET b = DEFAULT WHERE a = 1; +UPDATE gtest1 SET b = 11 WHERE a = 1; -- error +ERROR: column "b" can only be updated to DEFAULT +DETAIL: Column "b" is a generated column. +SELECT * FROM gtest1 ORDER BY a; + a | b +---+--- + 1 | 2 + 2 | 4 +(2 rows) + +SELECT a, b, b * 2 AS b2 FROM gtest1 ORDER BY a; + a | b | b2 +---+---+---- + 1 | 2 | 4 + 2 | 4 | 8 +(2 rows) + +SELECT a, b FROM gtest1 WHERE b = 4 ORDER BY a; + a | b +---+--- + 2 | 4 +(1 row) + +-- test that overflow error happens on write +INSERT INTO gtest1 VALUES (2000000000); +ERROR: integer out of range +SELECT * FROM gtest1; + a | b +---+--- + 2 | 4 + 1 | 2 +(2 rows) + +DELETE FROM gtest1 WHERE a = 2000000000; +-- test with joins +CREATE TABLE gtestx (x int, y int); +INSERT INTO gtestx VALUES (11, 1), (22, 2), (33, 3); +SELECT * FROM gtestx, gtest1 WHERE gtestx.y = gtest1.a; + x | y | a | b +----+---+---+--- + 11 | 1 | 1 | 2 + 22 | 2 | 2 | 4 +(2 rows) + +DROP TABLE gtestx; +-- test UPDATE/DELETE quals +SELECT * FROM gtest1 ORDER BY a; + a | b +---+--- + 1 | 2 + 2 | 4 +(2 rows) + +UPDATE gtest1 SET a = 3 WHERE b = 4; +SELECT * FROM gtest1 ORDER BY a; + a | b +---+--- + 1 | 2 + 3 | 6 +(2 rows) + +DELETE FROM gtest1 WHERE b = 2; +SELECT * FROM gtest1 ORDER BY a; + a | b +---+--- + 3 | 6 +(1 row) + +-- test MERGE +CREATE TABLE gtestm ( + id int PRIMARY KEY, + f1 int, + f2 int, + f3 int GENERATED ALWAYS AS (f1 * 2) STORED, + f4 int GENERATED ALWAYS AS (f2 * 2) STORED +); +INSERT INTO gtestm VALUES (1, 5, 100); +MERGE INTO gtestm t USING (VALUES (1, 10), (2, 20)) v(id, f1) ON t.id = v.id + WHEN MATCHED THEN UPDATE SET f1 = v.f1 + WHEN NOT MATCHED THEN INSERT VALUES (v.id, v.f1, 200); +SELECT * FROM gtestm ORDER BY id; + id | f1 | f2 | f3 | f4 +----+----+-----+----+----- + 1 | 10 | 100 | 20 | 200 + 2 | 20 | 200 | 40 | 400 +(2 rows) + +DROP TABLE gtestm; +-- views +CREATE VIEW gtest1v AS SELECT * FROM gtest1; +SELECT * FROM gtest1v; + a | b +---+--- + 3 | 6 +(1 row) + +INSERT INTO gtest1v VALUES (4, 8); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. 
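Editor's note: the statements above establish the basic rules for stored generated columns: the value is computed from the generation expression on every INSERT and UPDATE, and the only value that may be supplied explicitly for such a column is DEFAULT. A compact sketch with an invented table name (rectangles), not part of the expected output:

-- sketch only
CREATE TABLE rectangles (w int, h int,
                         area int GENERATED ALWAYS AS (w * h) STORED);
INSERT INTO rectangles VALUES (2, 3);            -- area computed as 6
INSERT INTO rectangles VALUES (4, 5, DEFAULT);   -- explicit DEFAULT is allowed
INSERT INTO rectangles VALUES (4, 5, 20);        -- should fail: non-DEFAULT value
SELECT * FROM rectangles;
DROP TABLE rectangles;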
+INSERT INTO gtest1v VALUES (5, DEFAULT); -- ok +INSERT INTO gtest1v VALUES (6, 66), (7, 77); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1v VALUES (6, DEFAULT), (7, 77); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1v VALUES (6, 66), (7, DEFAULT); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1v VALUES (6, DEFAULT), (7, DEFAULT); -- ok +ALTER VIEW gtest1v ALTER COLUMN b SET DEFAULT 100; +INSERT INTO gtest1v VALUES (8, DEFAULT); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +INSERT INTO gtest1v VALUES (8, DEFAULT), (9, DEFAULT); -- error +ERROR: cannot insert a non-DEFAULT value into column "b" +DETAIL: Column "b" is a generated column. +SELECT * FROM gtest1v; + a | b +---+---- + 3 | 6 + 5 | 10 + 6 | 12 + 7 | 14 +(4 rows) + +DELETE FROM gtest1v WHERE a >= 5; +DROP VIEW gtest1v; +-- CTEs +WITH foo AS (SELECT * FROM gtest1) SELECT * FROM foo; + a | b +---+--- + 3 | 6 +(1 row) + +-- inheritance +CREATE TABLE gtest1_1 () INHERITS (gtest1); +SELECT * FROM gtest1_1; + a | b +---+--- +(0 rows) + +\d gtest1_1 + Table "public.gtest1_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | not null | + b | integer | | | generated always as (a * 2) stored +Inherits: gtest1 + +INSERT INTO gtest1_1 VALUES (4); +SELECT * FROM gtest1_1; + a | b +---+--- + 4 | 8 +(1 row) + +SELECT * FROM gtest1; + a | b +---+--- + 3 | 6 + 4 | 8 +(2 rows) + +-- can't have generated column that is a child of normal column +CREATE TABLE gtest_normal (a int, b int); +CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED) INHERITS (gtest_normal); -- error +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "b" with inherited definition +ERROR: child column "b" specifies generation expression +HINT: A child table column cannot be generated unless its parent column is. 
+CREATE TABLE gtest_normal_child (a int, b int GENERATED ALWAYS AS (a * 2) STORED); +ALTER TABLE gtest_normal_child INHERIT gtest_normal; -- error +ERROR: column "b" in child table must not be a generated column +DROP TABLE gtest_normal, gtest_normal_child; +-- test inheritance mismatches between parent and child +CREATE TABLE gtestx (x int, b int DEFAULT 10) INHERITS (gtest1); -- error +NOTICE: merging column "b" with inherited definition +ERROR: column "b" inherits from generated column but specifies default +CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1); -- error +NOTICE: merging column "b" with inherited definition +ERROR: column "b" inherits from generated column but specifies identity +CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1); -- ok, overrides parent +NOTICE: merging column "b" with inherited definition +\d+ gtestx + Table "public.gtestx" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+-------------------------------------+---------+--------------+------------- + a | integer | | not null | | plain | | + b | integer | | | generated always as (a * 22) stored | plain | | + x | integer | | | | plain | | +Inherits: gtest1 + +CREATE TABLE gtestxx_1 (a int NOT NULL, b int); +ALTER TABLE gtestxx_1 INHERIT gtest1; -- error +ERROR: column "b" in child table must be a generated column +CREATE TABLE gtestxx_3 (a int NOT NULL, b int GENERATED ALWAYS AS (a * 2) STORED); +ALTER TABLE gtestxx_3 INHERIT gtest1; -- ok +CREATE TABLE gtestxx_4 (b int GENERATED ALWAYS AS (a * 2) STORED, a int NOT NULL); +ALTER TABLE gtestxx_4 INHERIT gtest1; -- ok +-- test multiple inheritance mismatches +CREATE TABLE gtesty (x int, b int DEFAULT 55); +CREATE TABLE gtest1_y () INHERITS (gtest0, gtesty); -- error +NOTICE: merging multiple inherited definitions of column "b" +ERROR: inherited column "b" has a generation conflict +DROP TABLE gtesty; +CREATE TABLE gtesty (x int, b int); +CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error +NOTICE: merging multiple inherited definitions of column "b" +ERROR: inherited column "b" has a generation conflict +DROP TABLE gtesty; +CREATE TABLE gtesty (x int, b int GENERATED ALWAYS AS (x * 22) STORED); +CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error +NOTICE: merging multiple inherited definitions of column "b" +ERROR: column "b" inherits conflicting generation expressions +HINT: To resolve the conflict, specify a generation expression explicitly. +CREATE TABLE gtest1_y (b int GENERATED ALWAYS AS (x + 1) STORED) INHERITS (gtest1, gtesty); -- ok +NOTICE: merging multiple inherited definitions of column "b" +NOTICE: moving and merging column "b" with inherited definition +DETAIL: User-specified column moved to the position of the inherited column. 
+\d gtest1_y + Table "public.gtest1_y" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | not null | + b | integer | | | generated always as (x + 1) stored + x | integer | | | +Inherits: gtest1, + gtesty + +-- test correct handling of GENERATED column that's only in child +CREATE TABLE gtestp (f1 int); +CREATE TABLE gtestc (f2 int GENERATED ALWAYS AS (f1+1) STORED) INHERITS(gtestp); +INSERT INTO gtestc values(42); +TABLE gtestc; + f1 | f2 +----+---- + 42 | 43 +(1 row) + +UPDATE gtestp SET f1 = f1 * 10; +TABLE gtestc; + f1 | f2 +-----+----- + 420 | 421 +(1 row) + +DROP TABLE gtestp CASCADE; +NOTICE: drop cascades to table gtestc +-- test stored update +CREATE TABLE gtest3 (a int, b int GENERATED ALWAYS AS (a * 3) STORED); +INSERT INTO gtest3 (a) VALUES (1), (2), (3), (NULL); +SELECT * FROM gtest3 ORDER BY a; + a | b +---+--- + 1 | 3 + 2 | 6 + 3 | 9 + | +(4 rows) + +UPDATE gtest3 SET a = 22 WHERE a = 2; +SELECT * FROM gtest3 ORDER BY a; + a | b +----+---- + 1 | 3 + 3 | 9 + 22 | 66 + | +(4 rows) + +CREATE TABLE gtest3a (a text, b text GENERATED ALWAYS AS (a || '+' || a) STORED); +INSERT INTO gtest3a (a) VALUES ('a'), ('b'), ('c'), (NULL); +SELECT * FROM gtest3a ORDER BY a; + a | b +---+----- + a | a+a + b | b+b + c | c+c + | +(4 rows) + +UPDATE gtest3a SET a = 'bb' WHERE a = 'b'; +SELECT * FROM gtest3a ORDER BY a; + a | b +----+------- + a | a+a + bb | bb+bb + c | c+c + | +(4 rows) + +-- COPY +TRUNCATE gtest1; +INSERT INTO gtest1 (a) VALUES (1), (2); +COPY gtest1 TO stdout; +1 +2 +COPY gtest1 (a, b) TO stdout; +ERROR: column "b" is a generated column +DETAIL: Generated columns cannot be used in COPY. +COPY gtest1 FROM stdin; +COPY gtest1 (a, b) FROM stdin; +ERROR: column "b" is a generated column +DETAIL: Generated columns cannot be used in COPY. +SELECT * FROM gtest1 ORDER BY a; + a | b +---+--- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 +(4 rows) + +TRUNCATE gtest3; +INSERT INTO gtest3 (a) VALUES (1), (2); +COPY gtest3 TO stdout; +1 +2 +COPY gtest3 (a, b) TO stdout; +ERROR: column "b" is a generated column +DETAIL: Generated columns cannot be used in COPY. +COPY gtest3 FROM stdin; +COPY gtest3 (a, b) FROM stdin; +ERROR: column "b" is a generated column +DETAIL: Generated columns cannot be used in COPY. 
+SELECT * FROM gtest3 ORDER BY a; + a | b +---+---- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 +(4 rows) + +-- null values +CREATE TABLE gtest2 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (NULL) STORED); +INSERT INTO gtest2 VALUES (1); +SELECT * FROM gtest2; + a | b +---+--- + 1 | +(1 row) + +-- simple column reference for varlena types +CREATE TABLE gtest_varlena (a varchar, b varchar GENERATED ALWAYS AS (a) STORED); +INSERT INTO gtest_varlena (a) VALUES('01234567890123456789'); +INSERT INTO gtest_varlena (a) VALUES(NULL); +SELECT * FROM gtest_varlena ORDER BY a; + a | b +----------------------+---------------------- + 01234567890123456789 | 01234567890123456789 + | +(2 rows) + +DROP TABLE gtest_varlena; +-- composite types +CREATE TYPE double_int as (a int, b int); +CREATE TABLE gtest4 ( + a int, + b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) STORED +); +INSERT INTO gtest4 VALUES (1), (6); +SELECT * FROM gtest4; + a | b +---+--------- + 1 | (2,3) + 6 | (12,18) +(2 rows) + +DROP TABLE gtest4; +DROP TYPE double_int; +-- using tableoid is allowed +CREATE TABLE gtest_tableoid ( + a int PRIMARY KEY, + b bool GENERATED ALWAYS AS (tableoid = 'gtest_tableoid'::regclass) STORED +); +INSERT INTO gtest_tableoid VALUES (1), (2); +ALTER TABLE gtest_tableoid ADD COLUMN + c regclass GENERATED ALWAYS AS (tableoid) STORED; +SELECT * FROM gtest_tableoid; + a | b | c +---+---+---------------- + 1 | t | gtest_tableoid + 2 | t | gtest_tableoid +(2 rows) + +-- drop column behavior +CREATE TABLE gtest10 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2) STORED); +ALTER TABLE gtest10 DROP COLUMN b; -- fails +ERROR: cannot drop column b of table gtest10 because other objects depend on it +DETAIL: column c of table gtest10 depends on column b of table gtest10 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +ALTER TABLE gtest10 DROP COLUMN b CASCADE; -- drops c too +NOTICE: drop cascades to column c of table gtest10 +\d gtest10 + Table "public.gtest10" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | +Indexes: + "gtest10_pkey" PRIMARY KEY, btree (a) + +CREATE TABLE gtest10a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED); +ALTER TABLE gtest10a DROP COLUMN b; +INSERT INTO gtest10a (a) VALUES (1); +-- privileges +CREATE USER regress_user11; +CREATE TABLE gtest11s (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2) STORED); +INSERT INTO gtest11s VALUES (1, 10), (2, 20); +GRANT SELECT (a, c) ON gtest11s TO regress_user11; +CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL; +REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC; +CREATE TABLE gtest12s (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) STORED); +INSERT INTO gtest12s VALUES (1, 10), (2, 20); +GRANT SELECT (a, c) ON gtest12s TO regress_user11; +SET ROLE regress_user11; +SELECT a, b FROM gtest11s; -- not allowed +ERROR: permission denied for table gtest11s +SELECT a, c FROM gtest11s; -- allowed + a | c +---+---- + 1 | 20 + 2 | 40 +(2 rows) + +SELECT gf1(10); -- not allowed +ERROR: permission denied for function gf1 +SELECT a, c FROM gtest12s; -- allowed + a | c +---+---- + 1 | 30 + 2 | 60 +(2 rows) + +RESET ROLE; +DROP FUNCTION gf1(int); -- fail +ERROR: cannot drop function gf1(integer) because other objects depend on it +DETAIL: column c of table gtest12s depends on function gf1(integer) +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
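Editor's note: the gtest12s test above shows a generated column whose expression calls a user-defined function; this is permitted as long as the function is IMMUTABLE, and the column then depends on that function. Because the value is computed at write time and stored, reading the column does not re-run the function. A small sketch with invented names (double_it, t_gen), not part of the regression output:

-- sketch only
CREATE FUNCTION double_it(x int) RETURNS int
  IMMUTABLE LANGUAGE SQL AS $$ SELECT x * 2 $$;
CREATE TABLE t_gen (a int,
                    b int GENERATED ALWAYS AS (double_it(a)) STORED);
INSERT INTO t_gen (a) VALUES (21);
SELECT * FROM t_gen;               -- expected: a = 21, b = 42
-- DROP FUNCTION double_it(int) would now be rejected without CASCADE,
-- because column b depends on it
DROP TABLE t_gen;
DROP FUNCTION double_it(int);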
+DROP TABLE gtest11s, gtest12s; +DROP FUNCTION gf1(int); +DROP USER regress_user11; +-- check constraints +CREATE TABLE gtest20 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED CHECK (b < 50)); +INSERT INTO gtest20 (a) VALUES (10); -- ok +INSERT INTO gtest20 (a) VALUES (30); -- violates constraint +ERROR: new row for relation "gtest20" violates check constraint "gtest20_b_check" +DETAIL: Failing row contains (30, 60). +CREATE TABLE gtest20a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED); +INSERT INTO gtest20a (a) VALUES (10); +INSERT INTO gtest20a (a) VALUES (30); +ALTER TABLE gtest20a ADD CHECK (b < 50); -- fails on existing row +ERROR: check constraint "gtest20a_b_check" of relation "gtest20a" is violated by some row +CREATE TABLE gtest20b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED); +INSERT INTO gtest20b (a) VALUES (10); +INSERT INTO gtest20b (a) VALUES (30); +ALTER TABLE gtest20b ADD CONSTRAINT chk CHECK (b < 50) NOT VALID; +ALTER TABLE gtest20b VALIDATE CONSTRAINT chk; -- fails on existing row +ERROR: check constraint "chk" of relation "gtest20b" is violated by some row +-- not-null constraints +CREATE TABLE gtest21a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (nullif(a, 0)) STORED NOT NULL); +INSERT INTO gtest21a (a) VALUES (1); -- ok +INSERT INTO gtest21a (a) VALUES (0); -- violates constraint +ERROR: null value in column "b" of relation "gtest21a" violates not-null constraint +DETAIL: Failing row contains (0, null). +CREATE TABLE gtest21b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (nullif(a, 0)) STORED); +ALTER TABLE gtest21b ALTER COLUMN b SET NOT NULL; +INSERT INTO gtest21b (a) VALUES (1); -- ok +INSERT INTO gtest21b (a) VALUES (0); -- violates constraint +ERROR: null value in column "b" of relation "gtest21b" violates not-null constraint +DETAIL: Failing row contains (0, null). +ALTER TABLE gtest21b ALTER COLUMN b DROP NOT NULL; +INSERT INTO gtest21b (a) VALUES (0); -- ok now +-- index constraints +CREATE TABLE gtest22a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a / 2) STORED UNIQUE); +INSERT INTO gtest22a VALUES (2); +INSERT INTO gtest22a VALUES (3); +ERROR: duplicate key value violates unique constraint "gtest22a_b_key" +DETAIL: Key (b)=(1) already exists. +INSERT INTO gtest22a VALUES (4); +CREATE TABLE gtest22b (a int, b int GENERATED ALWAYS AS (a / 2) STORED, PRIMARY KEY (a, b)); +INSERT INTO gtest22b VALUES (2); +INSERT INTO gtest22b VALUES (2); +ERROR: duplicate key value violates unique constraint "gtest22b_pkey" +DETAIL: Key (a, b)=(2, 1) already exists. 
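-- Editor's sketch (illustrative only, not part of the regression expectations):
-- stored generated columns such as gtest22a.b are flagged with
-- attgenerated = 's' in pg_attribute, and their generation expression is kept
-- in pg_attrdef just like a column default. Assuming gtest22a still exists:
SELECT a.attname,
       pg_get_expr(d.adbin, d.adrelid) AS generation_expr
  FROM pg_attribute a
  JOIN pg_attrdef d ON d.adrelid = a.attrelid AND d.adnum = a.attnum
 WHERE a.attrelid = 'gtest22a'::regclass
   AND a.attgenerated = 's';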
+-- indexes +CREATE TABLE gtest22c (a int, b int GENERATED ALWAYS AS (a * 2) STORED); +CREATE INDEX gtest22c_b_idx ON gtest22c (b); +CREATE INDEX gtest22c_expr_idx ON gtest22c ((b * 3)); +CREATE INDEX gtest22c_pred_idx ON gtest22c (a) WHERE b > 0; +\d gtest22c + Table "public.gtest22c" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | | + b | integer | | | generated always as (a * 2) stored +Indexes: + "gtest22c_b_idx" btree (b) + "gtest22c_expr_idx" btree ((b * 3)) + "gtest22c_pred_idx" btree (a) WHERE b > 0 + +INSERT INTO gtest22c VALUES (1), (2), (3); +SET enable_seqscan TO off; +SET enable_bitmapscan TO off; +EXPLAIN (COSTS OFF) SELECT * FROM gtest22c WHERE b = 4; + QUERY PLAN +--------------------------------------------- + Index Scan using gtest22c_b_idx on gtest22c + Index Cond: (b = 4) +(2 rows) + +SELECT * FROM gtest22c WHERE b = 4; + a | b +---+--- + 2 | 4 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gtest22c WHERE b * 3 = 6; + QUERY PLAN +------------------------------------------------ + Index Scan using gtest22c_expr_idx on gtest22c + Index Cond: ((b * 3) = 6) +(2 rows) + +SELECT * FROM gtest22c WHERE b * 3 = 6; + a | b +---+--- + 1 | 2 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM gtest22c WHERE a = 1 AND b > 0; + QUERY PLAN +------------------------------------------------ + Index Scan using gtest22c_pred_idx on gtest22c + Index Cond: (a = 1) +(2 rows) + +SELECT * FROM gtest22c WHERE a = 1 AND b > 0; + a | b +---+--- + 1 | 2 +(1 row) + +RESET enable_seqscan; +RESET enable_bitmapscan; +-- foreign keys +CREATE TABLE gtest23a (x int PRIMARY KEY, y int); +INSERT INTO gtest23a VALUES (1, 11), (2, 22), (3, 33); +CREATE TABLE gtest23x (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED REFERENCES gtest23a (x) ON UPDATE CASCADE); -- error +ERROR: invalid ON UPDATE action for foreign key constraint containing generated column +CREATE TABLE gtest23x (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED REFERENCES gtest23a (x) ON DELETE SET NULL); -- error +ERROR: invalid ON DELETE action for foreign key constraint containing generated column +CREATE TABLE gtest23b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED REFERENCES gtest23a (x)); +\d gtest23b + Table "public.gtest23b" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | not null | + b | integer | | | generated always as (a * 2) stored +Indexes: + "gtest23b_pkey" PRIMARY KEY, btree (a) +Foreign-key constraints: + "gtest23b_b_fkey" FOREIGN KEY (b) REFERENCES gtest23a(x) + +INSERT INTO gtest23b VALUES (1); -- ok +INSERT INTO gtest23b VALUES (5); -- error +ERROR: insert or update on table "gtest23b" violates foreign key constraint "gtest23b_b_fkey" +DETAIL: Key (b)=(10) is not present in table "gtest23a". +DROP TABLE gtest23b; +DROP TABLE gtest23a; +CREATE TABLE gtest23p (x int, y int GENERATED ALWAYS AS (x * 2) STORED, PRIMARY KEY (y)); +INSERT INTO gtest23p VALUES (1), (2), (3); +CREATE TABLE gtest23q (a int PRIMARY KEY, b int REFERENCES gtest23p (y)); +INSERT INTO gtest23q VALUES (1, 2); -- ok +INSERT INTO gtest23q VALUES (2, 5); -- error +ERROR: insert or update on table "gtest23q" violates foreign key constraint "gtest23q_b_fkey" +DETAIL: Key (b)=(5) is not present in table "gtest23p". 
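-- Editor's sketch (illustrative only, not part of the regression expectations):
-- the foreign key created above, whose referenced column gtest23p.y is a
-- stored generated column, reads back as an ordinary constraint. Assuming
-- gtest23q has not yet been dropped:
SELECT conname,
       pg_get_constraintdef(oid) AS definition
  FROM pg_constraint
 WHERE conrelid = 'gtest23q'::regclass
   AND contype = 'f';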
+-- domains +CREATE DOMAIN gtestdomain1 AS int CHECK (VALUE < 10); +CREATE TABLE gtest24 (a int PRIMARY KEY, b gtestdomain1 GENERATED ALWAYS AS (a * 2) STORED); +INSERT INTO gtest24 (a) VALUES (4); -- ok +INSERT INTO gtest24 (a) VALUES (6); -- error +ERROR: value for domain gtestdomain1 violates check constraint "gtestdomain1_check" +-- typed tables (currently not supported) +CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint); +CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) STORED); +ERROR: generated columns are not supported on typed tables +DROP TYPE gtest_type CASCADE; +-- partitioning cases +CREATE TABLE gtest_parent (f1 date NOT NULL, f2 bigint, f3 bigint) PARTITION BY RANGE (f1); +CREATE TABLE gtest_child PARTITION OF gtest_parent ( + f3 WITH OPTIONS GENERATED ALWAYS AS (f2 * 2) STORED +) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error +ERROR: child column "f3" specifies generation expression +HINT: A child table column cannot be generated unless its parent column is. +CREATE TABLE gtest_child (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED); +ALTER TABLE gtest_parent ATTACH PARTITION gtest_child FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error +ERROR: column "f3" in child table must not be a generated column +DROP TABLE gtest_parent, gtest_child; +CREATE TABLE gtest_parent (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f1); +CREATE TABLE gtest_child PARTITION OF gtest_parent + FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- inherits gen expr +CREATE TABLE gtest_child2 PARTITION OF gtest_parent ( + f3 WITH OPTIONS GENERATED ALWAYS AS (f2 * 22) STORED -- overrides gen expr +) FOR VALUES FROM ('2016-08-01') TO ('2016-09-01'); +CREATE TABLE gtest_child3 PARTITION OF gtest_parent ( + f3 DEFAULT 42 -- error +) FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); +ERROR: column "f3" inherits from generated column but specifies default +CREATE TABLE gtest_child3 PARTITION OF gtest_parent ( + f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY -- error +) FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); +ERROR: identity columns are not supported on partitions +CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint); +ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); -- error +ERROR: column "f3" in child table must be a generated column +DROP TABLE gtest_child3; +CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint DEFAULT 42); +ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); -- error +ERROR: column "f3" in child table must be a generated column +DROP TABLE gtest_child3; +CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS IDENTITY); +ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); -- error +ERROR: column "f3" in child table must be a generated column +DROP TABLE gtest_child3; +CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 33) STORED); +ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); +\d gtest_child + Table "public.gtest_child" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------------- + f1 | date | | not null | + f2 | bigint | | | + f3 | bigint | | | generated 
always as (f2 * 2) stored +Partition of: gtest_parent FOR VALUES FROM ('07-01-2016') TO ('08-01-2016') + +\d gtest_child2 + Table "public.gtest_child2" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+-------------------------------------- + f1 | date | | not null | + f2 | bigint | | | + f3 | bigint | | | generated always as (f2 * 22) stored +Partition of: gtest_parent FOR VALUES FROM ('08-01-2016') TO ('09-01-2016') + +\d gtest_child3 + Table "public.gtest_child3" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+-------------------------------------- + f1 | date | | not null | + f2 | bigint | | | + f3 | bigint | | | generated always as (f2 * 33) stored +Partition of: gtest_parent FOR VALUES FROM ('09-01-2016') TO ('10-01-2016') + +INSERT INTO gtest_parent (f1, f2) VALUES ('2016-07-15', 1); +SELECT * FROM gtest_parent; + f1 | f2 | f3 +------------+----+---- + 07-15-2016 | 1 | 2 +(1 row) + +SELECT * FROM gtest_child; + f1 | f2 | f3 +------------+----+---- + 07-15-2016 | 1 | 2 +(1 row) + +UPDATE gtest_parent SET f1 = f1 + 60; +SELECT * FROM gtest_parent; + f1 | f2 | f3 +------------+----+---- + 09-13-2016 | 1 | 33 +(1 row) + +SELECT * FROM gtest_child3; + f1 | f2 | f3 +------------+----+---- + 09-13-2016 | 1 | 33 +(1 row) + +-- we leave these tables around for purposes of testing dump/reload/upgrade +-- generated columns in partition key (not allowed) +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3); +ERROR: cannot use generated column in partition key +LINE 1: ...ENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3); + ^ +DETAIL: Column "f3" is a generated column. +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3)); +ERROR: cannot use generated column in partition key +LINE 1: ...ED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3)); + ^ +DETAIL: Column "f3" is a generated column. +-- ALTER TABLE ... ADD COLUMN +CREATE TABLE gtest25 (a int PRIMARY KEY); +INSERT INTO gtest25 VALUES (3), (4); +ALTER TABLE gtest25 ADD COLUMN b int GENERATED ALWAYS AS (a * 3) STORED; +SELECT * FROM gtest25 ORDER BY a; + a | b +---+---- + 3 | 9 + 4 | 12 +(2 rows) + +ALTER TABLE gtest25 ADD COLUMN x int GENERATED ALWAYS AS (b * 4) STORED; -- error +ERROR: cannot use generated column "b" in column generation expression +DETAIL: A generated column cannot reference another generated column. 
+ALTER TABLE gtest25 ADD COLUMN x int GENERATED ALWAYS AS (z * 4) STORED; -- error +ERROR: column "z" does not exist +ALTER TABLE gtest25 ADD COLUMN c int DEFAULT 42, + ADD COLUMN x int GENERATED ALWAYS AS (c * 4) STORED; +ALTER TABLE gtest25 ADD COLUMN d int DEFAULT 101; +ALTER TABLE gtest25 ALTER COLUMN d SET DATA TYPE float8, + ADD COLUMN y float8 GENERATED ALWAYS AS (d * 4) STORED; +SELECT * FROM gtest25 ORDER BY a; + a | b | c | x | d | y +---+----+----+-----+-----+----- + 3 | 9 | 42 | 168 | 101 | 404 + 4 | 12 | 42 | 168 | 101 | 404 +(2 rows) + +\d gtest25 + Table "public.gtest25" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+------------------------------------------------------ + a | integer | | not null | + b | integer | | | generated always as (a * 3) stored + c | integer | | | 42 + x | integer | | | generated always as (c * 4) stored + d | double precision | | | 101 + y | double precision | | | generated always as (d * 4::double precision) stored +Indexes: + "gtest25_pkey" PRIMARY KEY, btree (a) + +-- ALTER TABLE ... ALTER COLUMN +CREATE TABLE gtest27 ( + a int, + b int, + x int GENERATED ALWAYS AS ((a + b) * 2) STORED +); +INSERT INTO gtest27 (a, b) VALUES (3, 7), (4, 11); +ALTER TABLE gtest27 ALTER COLUMN a TYPE text; -- error +ERROR: cannot alter type of a column used by a generated column +DETAIL: Column "a" is used by generated column "x". +ALTER TABLE gtest27 ALTER COLUMN x TYPE numeric; +\d gtest27 + Table "public.gtest27" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+-------------------------------------------- + a | integer | | | + b | integer | | | + x | numeric | | | generated always as (((a + b) * 2)) stored + +SELECT * FROM gtest27; + a | b | x +---+----+---- + 3 | 7 | 20 + 4 | 11 | 30 +(2 rows) + +ALTER TABLE gtest27 ALTER COLUMN x TYPE boolean USING x <> 0; -- error +ERROR: generation expression for column "x" cannot be cast automatically to type boolean +ALTER TABLE gtest27 ALTER COLUMN x DROP DEFAULT; -- error +ERROR: column "x" of relation "gtest27" is a generated column +HINT: Use ALTER TABLE ... ALTER COLUMN ... DROP EXPRESSION instead. +-- It's possible to alter the column types this way: +ALTER TABLE gtest27 + DROP COLUMN x, + ALTER COLUMN a TYPE bigint, + ALTER COLUMN b TYPE bigint, + ADD COLUMN x bigint GENERATED ALWAYS AS ((a + b) * 2) STORED; +\d gtest27 + Table "public.gtest27" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------------------ + a | bigint | | | + b | bigint | | | + x | bigint | | | generated always as ((a + b) * 2) stored + +-- Ideally you could just do this, but not today (and should x change type?): +ALTER TABLE gtest27 + ALTER COLUMN a TYPE float8, + ALTER COLUMN b TYPE float8; -- error +ERROR: cannot alter type of a column used by a generated column +DETAIL: Column "a" is used by generated column "x". +\d gtest27 + Table "public.gtest27" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+------------------------------------------ + a | bigint | | | + b | bigint | | | + x | bigint | | | generated always as ((a + b) * 2) stored + +SELECT * FROM gtest27; + a | b | x +---+----+---- + 3 | 7 | 20 + 4 | 11 | 30 +(2 rows) + +-- ALTER TABLE ... ALTER COLUMN ... 
DROP EXPRESSION +CREATE TABLE gtest29 ( + a int, + b int GENERATED ALWAYS AS (a * 2) STORED +); +INSERT INTO gtest29 (a) VALUES (3), (4); +ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION; -- error +ERROR: column "a" of relation "gtest29" is not a stored generated column +ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice +NOTICE: column "a" of relation "gtest29" is not a stored generated column, skipping +ALTER TABLE gtest29 ALTER COLUMN b DROP EXPRESSION; +INSERT INTO gtest29 (a) VALUES (5); +INSERT INTO gtest29 (a, b) VALUES (6, 66); +SELECT * FROM gtest29; + a | b +---+---- + 3 | 6 + 4 | 8 + 5 | + 6 | 66 +(4 rows) + +\d gtest29 + Table "public.gtest29" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + +-- check that dependencies between columns have also been removed +ALTER TABLE gtest29 DROP COLUMN a; -- should not drop b +\d gtest29 + Table "public.gtest29" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | integer | | | + +-- with inheritance +CREATE TABLE gtest30 ( + a int, + b int GENERATED ALWAYS AS (a * 2) STORED +); +CREATE TABLE gtest30_1 () INHERITS (gtest30); +ALTER TABLE gtest30 ALTER COLUMN b DROP EXPRESSION; +\d gtest30 + Table "public.gtest30" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Number of child tables: 1 (Use \d+ to list them.) + +\d gtest30_1 + Table "public.gtest30_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Inherits: gtest30 + +DROP TABLE gtest30 CASCADE; +NOTICE: drop cascades to table gtest30_1 +CREATE TABLE gtest30 ( + a int, + b int GENERATED ALWAYS AS (a * 2) STORED +); +CREATE TABLE gtest30_1 () INHERITS (gtest30); +ALTER TABLE ONLY gtest30 ALTER COLUMN b DROP EXPRESSION; -- error +ERROR: ALTER TABLE / DROP EXPRESSION must be applied to child tables too +\d gtest30 + Table "public.gtest30" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | | + b | integer | | | generated always as (a * 2) stored +Number of child tables: 1 (Use \d+ to list them.) 
+ +\d gtest30_1 + Table "public.gtest30_1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + a | integer | | | + b | integer | | | generated always as (a * 2) stored +Inherits: gtest30 + +ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error +ERROR: cannot drop generation expression from inherited column +-- triggers +CREATE TABLE gtest26 ( + a int PRIMARY KEY, + b int GENERATED ALWAYS AS (a * 2) STORED +); +CREATE FUNCTION gtest_trigger_func() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + IF tg_op IN ('DELETE', 'UPDATE') THEN + RAISE INFO '%: %: old = %', TG_NAME, TG_WHEN, OLD; + END IF; + IF tg_op IN ('INSERT', 'UPDATE') THEN + RAISE INFO '%: %: new = %', TG_NAME, TG_WHEN, NEW; + END IF; + IF tg_op = 'DELETE' THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; +END +$$; +CREATE TRIGGER gtest1 BEFORE DELETE OR UPDATE ON gtest26 + FOR EACH ROW + WHEN (OLD.b < 0) -- ok + EXECUTE PROCEDURE gtest_trigger_func(); +CREATE TRIGGER gtest2a BEFORE INSERT OR UPDATE ON gtest26 + FOR EACH ROW + WHEN (NEW.b < 0) -- error + EXECUTE PROCEDURE gtest_trigger_func(); +ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns +LINE 3: WHEN (NEW.b < 0) -- error + ^ +DETAIL: Column "b" is a generated column. +CREATE TRIGGER gtest2b BEFORE INSERT OR UPDATE ON gtest26 + FOR EACH ROW + WHEN (NEW.* IS NOT NULL) -- error + EXECUTE PROCEDURE gtest_trigger_func(); +ERROR: BEFORE trigger's WHEN condition cannot reference NEW generated columns +LINE 3: WHEN (NEW.* IS NOT NULL) -- error + ^ +DETAIL: A whole-row reference is used and the table contains generated columns. +CREATE TRIGGER gtest2 BEFORE INSERT ON gtest26 + FOR EACH ROW + WHEN (NEW.a < 0) + EXECUTE PROCEDURE gtest_trigger_func(); +CREATE TRIGGER gtest3 AFTER DELETE OR UPDATE ON gtest26 + FOR EACH ROW + WHEN (OLD.b < 0) -- ok + EXECUTE PROCEDURE gtest_trigger_func(); +CREATE TRIGGER gtest4 AFTER INSERT OR UPDATE ON gtest26 + FOR EACH ROW + WHEN (NEW.b < 0) -- ok + EXECUTE PROCEDURE gtest_trigger_func(); +INSERT INTO gtest26 (a) VALUES (-2), (0), (3); +INFO: gtest2: BEFORE: new = (-2,) +INFO: gtest4: AFTER: new = (-2,-4) +SELECT * FROM gtest26 ORDER BY a; + a | b +----+---- + -2 | -4 + 0 | 0 + 3 | 6 +(3 rows) + +UPDATE gtest26 SET a = a * -2; +INFO: gtest1: BEFORE: old = (-2,-4) +INFO: gtest1: BEFORE: new = (4,) +INFO: gtest3: AFTER: old = (-2,-4) +INFO: gtest3: AFTER: new = (4,8) +INFO: gtest4: AFTER: old = (3,6) +INFO: gtest4: AFTER: new = (-6,-12) +SELECT * FROM gtest26 ORDER BY a; + a | b +----+----- + -6 | -12 + 0 | 0 + 4 | 8 +(3 rows) + +DELETE FROM gtest26 WHERE a = -6; +INFO: gtest1: BEFORE: old = (-6,-12) +INFO: gtest3: AFTER: old = (-6,-12) +SELECT * FROM gtest26 ORDER BY a; + a | b +---+--- + 0 | 0 + 4 | 8 +(2 rows) + +DROP TRIGGER gtest1 ON gtest26; +DROP TRIGGER gtest2 ON gtest26; +DROP TRIGGER gtest3 ON gtest26; +-- Check that an UPDATE of "a" fires the trigger for UPDATE OF b, per +-- SQL standard. 
+CREATE FUNCTION gtest_trigger_func3() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + RAISE NOTICE 'OK'; + RETURN NEW; +END +$$; +CREATE TRIGGER gtest11 BEFORE UPDATE OF b ON gtest26 + FOR EACH ROW + EXECUTE PROCEDURE gtest_trigger_func3(); +UPDATE gtest26 SET a = 1 WHERE a = 0; +NOTICE: OK +DROP TRIGGER gtest11 ON gtest26; +TRUNCATE gtest26; +-- check that modifications of stored generated columns in triggers do +-- not get propagated +CREATE FUNCTION gtest_trigger_func4() RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + NEW.a = 10; + NEW.b = 300; + RETURN NEW; +END; +$$; +CREATE TRIGGER gtest12_01 BEFORE UPDATE ON gtest26 + FOR EACH ROW + EXECUTE PROCEDURE gtest_trigger_func(); +CREATE TRIGGER gtest12_02 BEFORE UPDATE ON gtest26 + FOR EACH ROW + EXECUTE PROCEDURE gtest_trigger_func4(); +CREATE TRIGGER gtest12_03 BEFORE UPDATE ON gtest26 + FOR EACH ROW + EXECUTE PROCEDURE gtest_trigger_func(); +INSERT INTO gtest26 (a) VALUES (1); +UPDATE gtest26 SET a = 11 WHERE a = 1; +INFO: gtest12_01: BEFORE: old = (1,2) +INFO: gtest12_01: BEFORE: new = (11,) +INFO: gtest12_03: BEFORE: old = (1,2) +INFO: gtest12_03: BEFORE: new = (10,) +SELECT * FROM gtest26 ORDER BY a; + a | b +----+---- + 10 | 20 +(1 row) + +-- LIKE INCLUDING GENERATED and dropped column handling +CREATE TABLE gtest28a ( + a int, + b int, + c int, + x int GENERATED ALWAYS AS (b * 2) STORED +); +ALTER TABLE gtest28a DROP COLUMN a; +CREATE TABLE gtest28b (LIKE gtest28a INCLUDING GENERATED); +\d gtest28* + Table "public.gtest28a" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + b | integer | | | + c | integer | | | + x | integer | | | generated always as (b * 2) stored + + Table "public.gtest28b" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+------------------------------------ + b | integer | | | + c | integer | | | + x | integer | | | generated always as (b * 2) stored + diff --git a/src/test/regress/expected/geometry.out b/src/test/regress/expected/geometry.out new file mode 100644 index 0000000..8be694f --- /dev/null +++ b/src/test/regress/expected/geometry.out @@ -0,0 +1,5322 @@ +-- +-- GEOMETRY +-- +-- Back off displayed precision a little bit to reduce platform-to-platform +-- variation in results. 
+SET extra_float_digits TO -3; +-- +-- Points +-- +SELECT center(f1) AS center + FROM BOX_TBL; + center +--------- + (1,1) + (2,2) + (-5,-4) + (2.5,3) + (3,3) +(5 rows) + +SELECT (@@ f1) AS center + FROM BOX_TBL; + center +--------- + (1,1) + (2,2) + (-5,-4) + (2.5,3) + (3,3) +(5 rows) + +SELECT point(f1) AS center + FROM CIRCLE_TBL; + center +----------- + (5,1) + (1,2) + (1,3) + (1,2) + (100,200) + (100,1) + (3,5) + (3,5) +(8 rows) + +SELECT (@@ f1) AS center + FROM CIRCLE_TBL; + center +----------- + (5,1) + (1,2) + (1,3) + (1,2) + (100,200) + (100,1) + (3,5) + (3,5) +(8 rows) + +SELECT (@@ f1) AS center + FROM POLYGON_TBL + WHERE (# f1) > 2; + center +------------------------------- + (1.33333333333,1.33333333333) + (2.33333333333,1.33333333333) + (4,5) + (4,5) + (4,3) +(5 rows) + +-- "is horizontal" function +SELECT p1.f1 + FROM POINT_TBL p1 + WHERE ishorizontal(p1.f1, point '(0,0)'); + f1 +------------------ + (0,0) + (-10,0) + (1e-300,-1e-300) +(3 rows) + +-- "is horizontal" operator +SELECT p1.f1 + FROM POINT_TBL p1 + WHERE p1.f1 ?- point '(0,0)'; + f1 +------------------ + (0,0) + (-10,0) + (1e-300,-1e-300) +(3 rows) + +-- "is vertical" function +SELECT p1.f1 + FROM POINT_TBL p1 + WHERE isvertical(p1.f1, point '(5.1,34.5)'); + f1 +------------ + (5.1,34.5) +(1 row) + +-- "is vertical" operator +SELECT p1.f1 + FROM POINT_TBL p1 + WHERE p1.f1 ?| point '(5.1,34.5)'; + f1 +------------ + (5.1,34.5) +(1 row) + +-- Slope +SELECT p1.f1, p2.f1, slope(p1.f1, p2.f1) FROM POINT_TBL p1, POINT_TBL p2; + f1 | f1 | slope +-------------------+-------------------+---------------- + (0,0) | (0,0) | Infinity + (0,0) | (-10,0) | 0 + (0,0) | (-3,4) | -1.33333333333 + (0,0) | (5.1,34.5) | 6.76470588235 + (0,0) | (-5,-12) | 2.4 + (0,0) | (1e-300,-1e-300) | Infinity + (0,0) | (1e+300,Infinity) | Infinity + (0,0) | (Infinity,1e+300) | 0 + (0,0) | (NaN,NaN) | NaN + (0,0) | (10,10) | 1 + (-10,0) | (0,0) | 0 + (-10,0) | (-10,0) | Infinity + (-10,0) | (-3,4) | 0.571428571429 + (-10,0) | (5.1,34.5) | 2.28476821192 + (-10,0) | (-5,-12) | -2.4 + (-10,0) | (1e-300,-1e-300) | 0 + (-10,0) | (1e+300,Infinity) | Infinity + (-10,0) | (Infinity,1e+300) | 0 + (-10,0) | (NaN,NaN) | NaN + (-10,0) | (10,10) | 0.5 + (-3,4) | (0,0) | -1.33333333333 + (-3,4) | (-10,0) | 0.571428571429 + (-3,4) | (-3,4) | Infinity + (-3,4) | (5.1,34.5) | 3.76543209877 + (-3,4) | (-5,-12) | 8 + (-3,4) | (1e-300,-1e-300) | -1.33333333333 + (-3,4) | (1e+300,Infinity) | Infinity + (-3,4) | (Infinity,1e+300) | 0 + (-3,4) | (NaN,NaN) | NaN + (-3,4) | (10,10) | 0.461538461538 + (5.1,34.5) | (0,0) | 6.76470588235 + (5.1,34.5) | (-10,0) | 2.28476821192 + (5.1,34.5) | (-3,4) | 3.76543209877 + (5.1,34.5) | (5.1,34.5) | Infinity + (5.1,34.5) | (-5,-12) | 4.60396039604 + (5.1,34.5) | (1e-300,-1e-300) | 6.76470588235 + (5.1,34.5) | (1e+300,Infinity) | Infinity + (5.1,34.5) | (Infinity,1e+300) | 0 + (5.1,34.5) | (NaN,NaN) | NaN + (5.1,34.5) | (10,10) | -5 + (-5,-12) | (0,0) | 2.4 + (-5,-12) | (-10,0) | -2.4 + (-5,-12) | (-3,4) | 8 + (-5,-12) | (5.1,34.5) | 4.60396039604 + (-5,-12) | (-5,-12) | Infinity + (-5,-12) | (1e-300,-1e-300) | 2.4 + (-5,-12) | (1e+300,Infinity) | Infinity + (-5,-12) | (Infinity,1e+300) | 0 + (-5,-12) | (NaN,NaN) | NaN + (-5,-12) | (10,10) | 1.46666666667 + (1e-300,-1e-300) | (0,0) | Infinity + (1e-300,-1e-300) | (-10,0) | 0 + (1e-300,-1e-300) | (-3,4) | -1.33333333333 + (1e-300,-1e-300) | (5.1,34.5) | 6.76470588235 + (1e-300,-1e-300) | (-5,-12) | 2.4 + (1e-300,-1e-300) | (1e-300,-1e-300) | Infinity + (1e-300,-1e-300) | 
(1e+300,Infinity) | Infinity + (1e-300,-1e-300) | (Infinity,1e+300) | 0 + (1e-300,-1e-300) | (NaN,NaN) | NaN + (1e-300,-1e-300) | (10,10) | 1 + (1e+300,Infinity) | (0,0) | Infinity + (1e+300,Infinity) | (-10,0) | Infinity + (1e+300,Infinity) | (-3,4) | Infinity + (1e+300,Infinity) | (5.1,34.5) | Infinity + (1e+300,Infinity) | (-5,-12) | Infinity + (1e+300,Infinity) | (1e-300,-1e-300) | Infinity + (1e+300,Infinity) | (1e+300,Infinity) | Infinity + (1e+300,Infinity) | (Infinity,1e+300) | NaN + (1e+300,Infinity) | (NaN,NaN) | NaN + (1e+300,Infinity) | (10,10) | Infinity + (Infinity,1e+300) | (0,0) | 0 + (Infinity,1e+300) | (-10,0) | 0 + (Infinity,1e+300) | (-3,4) | 0 + (Infinity,1e+300) | (5.1,34.5) | 0 + (Infinity,1e+300) | (-5,-12) | 0 + (Infinity,1e+300) | (1e-300,-1e-300) | 0 + (Infinity,1e+300) | (1e+300,Infinity) | NaN + (Infinity,1e+300) | (Infinity,1e+300) | Infinity + (Infinity,1e+300) | (NaN,NaN) | NaN + (Infinity,1e+300) | (10,10) | 0 + (NaN,NaN) | (0,0) | NaN + (NaN,NaN) | (-10,0) | NaN + (NaN,NaN) | (-3,4) | NaN + (NaN,NaN) | (5.1,34.5) | NaN + (NaN,NaN) | (-5,-12) | NaN + (NaN,NaN) | (1e-300,-1e-300) | NaN + (NaN,NaN) | (1e+300,Infinity) | NaN + (NaN,NaN) | (Infinity,1e+300) | NaN + (NaN,NaN) | (NaN,NaN) | NaN + (NaN,NaN) | (10,10) | NaN + (10,10) | (0,0) | 1 + (10,10) | (-10,0) | 0.5 + (10,10) | (-3,4) | 0.461538461538 + (10,10) | (5.1,34.5) | -5 + (10,10) | (-5,-12) | 1.46666666667 + (10,10) | (1e-300,-1e-300) | 1 + (10,10) | (1e+300,Infinity) | Infinity + (10,10) | (Infinity,1e+300) | 0 + (10,10) | (NaN,NaN) | NaN + (10,10) | (10,10) | Infinity +(100 rows) + +-- Add point +SELECT p1.f1, p2.f1, p1.f1 + p2.f1 FROM POINT_TBL p1, POINT_TBL p2; + f1 | f1 | ?column? +-------------------+-------------------+--------------------- + (0,0) | (0,0) | (0,0) + (0,0) | (-10,0) | (-10,0) + (0,0) | (-3,4) | (-3,4) + (0,0) | (5.1,34.5) | (5.1,34.5) + (0,0) | (-5,-12) | (-5,-12) + (0,0) | (1e-300,-1e-300) | (1e-300,-1e-300) + (0,0) | (1e+300,Infinity) | (1e+300,Infinity) + (0,0) | (Infinity,1e+300) | (Infinity,1e+300) + (0,0) | (NaN,NaN) | (NaN,NaN) + (0,0) | (10,10) | (10,10) + (-10,0) | (0,0) | (-10,0) + (-10,0) | (-10,0) | (-20,0) + (-10,0) | (-3,4) | (-13,4) + (-10,0) | (5.1,34.5) | (-4.9,34.5) + (-10,0) | (-5,-12) | (-15,-12) + (-10,0) | (1e-300,-1e-300) | (-10,-1e-300) + (-10,0) | (1e+300,Infinity) | (1e+300,Infinity) + (-10,0) | (Infinity,1e+300) | (Infinity,1e+300) + (-10,0) | (NaN,NaN) | (NaN,NaN) + (-10,0) | (10,10) | (0,10) + (-3,4) | (0,0) | (-3,4) + (-3,4) | (-10,0) | (-13,4) + (-3,4) | (-3,4) | (-6,8) + (-3,4) | (5.1,34.5) | (2.1,38.5) + (-3,4) | (-5,-12) | (-8,-8) + (-3,4) | (1e-300,-1e-300) | (-3,4) + (-3,4) | (1e+300,Infinity) | (1e+300,Infinity) + (-3,4) | (Infinity,1e+300) | (Infinity,1e+300) + (-3,4) | (NaN,NaN) | (NaN,NaN) + (-3,4) | (10,10) | (7,14) + (5.1,34.5) | (0,0) | (5.1,34.5) + (5.1,34.5) | (-10,0) | (-4.9,34.5) + (5.1,34.5) | (-3,4) | (2.1,38.5) + (5.1,34.5) | (5.1,34.5) | (10.2,69) + (5.1,34.5) | (-5,-12) | (0.1,22.5) + (5.1,34.5) | (1e-300,-1e-300) | (5.1,34.5) + (5.1,34.5) | (1e+300,Infinity) | (1e+300,Infinity) + (5.1,34.5) | (Infinity,1e+300) | (Infinity,1e+300) + (5.1,34.5) | (NaN,NaN) | (NaN,NaN) + (5.1,34.5) | (10,10) | (15.1,44.5) + (-5,-12) | (0,0) | (-5,-12) + (-5,-12) | (-10,0) | (-15,-12) + (-5,-12) | (-3,4) | (-8,-8) + (-5,-12) | (5.1,34.5) | (0.1,22.5) + (-5,-12) | (-5,-12) | (-10,-24) + (-5,-12) | (1e-300,-1e-300) | (-5,-12) + (-5,-12) | (1e+300,Infinity) | (1e+300,Infinity) + (-5,-12) | (Infinity,1e+300) | (Infinity,1e+300) + (-5,-12) | (NaN,NaN) 
| (NaN,NaN) + (-5,-12) | (10,10) | (5,-2) + (1e-300,-1e-300) | (0,0) | (1e-300,-1e-300) + (1e-300,-1e-300) | (-10,0) | (-10,-1e-300) + (1e-300,-1e-300) | (-3,4) | (-3,4) + (1e-300,-1e-300) | (5.1,34.5) | (5.1,34.5) + (1e-300,-1e-300) | (-5,-12) | (-5,-12) + (1e-300,-1e-300) | (1e-300,-1e-300) | (2e-300,-2e-300) + (1e-300,-1e-300) | (1e+300,Infinity) | (1e+300,Infinity) + (1e-300,-1e-300) | (Infinity,1e+300) | (Infinity,1e+300) + (1e-300,-1e-300) | (NaN,NaN) | (NaN,NaN) + (1e-300,-1e-300) | (10,10) | (10,10) + (1e+300,Infinity) | (0,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-10,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-3,4) | (1e+300,Infinity) + (1e+300,Infinity) | (5.1,34.5) | (1e+300,Infinity) + (1e+300,Infinity) | (-5,-12) | (1e+300,Infinity) + (1e+300,Infinity) | (1e-300,-1e-300) | (1e+300,Infinity) + (1e+300,Infinity) | (1e+300,Infinity) | (2e+300,Infinity) + (1e+300,Infinity) | (Infinity,1e+300) | (Infinity,Infinity) + (1e+300,Infinity) | (NaN,NaN) | (NaN,NaN) + (1e+300,Infinity) | (10,10) | (1e+300,Infinity) + (Infinity,1e+300) | (0,0) | (Infinity,1e+300) + (Infinity,1e+300) | (-10,0) | (Infinity,1e+300) + (Infinity,1e+300) | (-3,4) | (Infinity,1e+300) + (Infinity,1e+300) | (5.1,34.5) | (Infinity,1e+300) + (Infinity,1e+300) | (-5,-12) | (Infinity,1e+300) + (Infinity,1e+300) | (1e-300,-1e-300) | (Infinity,1e+300) + (Infinity,1e+300) | (1e+300,Infinity) | (Infinity,Infinity) + (Infinity,1e+300) | (Infinity,1e+300) | (Infinity,2e+300) + (Infinity,1e+300) | (NaN,NaN) | (NaN,NaN) + (Infinity,1e+300) | (10,10) | (Infinity,1e+300) + (NaN,NaN) | (0,0) | (NaN,NaN) + (NaN,NaN) | (-10,0) | (NaN,NaN) + (NaN,NaN) | (-3,4) | (NaN,NaN) + (NaN,NaN) | (5.1,34.5) | (NaN,NaN) + (NaN,NaN) | (-5,-12) | (NaN,NaN) + (NaN,NaN) | (1e-300,-1e-300) | (NaN,NaN) + (NaN,NaN) | (1e+300,Infinity) | (NaN,NaN) + (NaN,NaN) | (Infinity,1e+300) | (NaN,NaN) + (NaN,NaN) | (NaN,NaN) | (NaN,NaN) + (NaN,NaN) | (10,10) | (NaN,NaN) + (10,10) | (0,0) | (10,10) + (10,10) | (-10,0) | (0,10) + (10,10) | (-3,4) | (7,14) + (10,10) | (5.1,34.5) | (15.1,44.5) + (10,10) | (-5,-12) | (5,-2) + (10,10) | (1e-300,-1e-300) | (10,10) + (10,10) | (1e+300,Infinity) | (1e+300,Infinity) + (10,10) | (Infinity,1e+300) | (Infinity,1e+300) + (10,10) | (NaN,NaN) | (NaN,NaN) + (10,10) | (10,10) | (20,20) +(100 rows) + +-- Subtract point +SELECT p1.f1, p2.f1, p1.f1 - p2.f1 FROM POINT_TBL p1, POINT_TBL p2; + f1 | f1 | ?column? 
+-------------------+-------------------+---------------------- + (0,0) | (0,0) | (0,0) + (0,0) | (-10,0) | (10,0) + (0,0) | (-3,4) | (3,-4) + (0,0) | (5.1,34.5) | (-5.1,-34.5) + (0,0) | (-5,-12) | (5,12) + (0,0) | (1e-300,-1e-300) | (-1e-300,1e-300) + (0,0) | (1e+300,Infinity) | (-1e+300,-Infinity) + (0,0) | (Infinity,1e+300) | (-Infinity,-1e+300) + (0,0) | (NaN,NaN) | (NaN,NaN) + (0,0) | (10,10) | (-10,-10) + (-10,0) | (0,0) | (-10,0) + (-10,0) | (-10,0) | (0,0) + (-10,0) | (-3,4) | (-7,-4) + (-10,0) | (5.1,34.5) | (-15.1,-34.5) + (-10,0) | (-5,-12) | (-5,12) + (-10,0) | (1e-300,-1e-300) | (-10,1e-300) + (-10,0) | (1e+300,Infinity) | (-1e+300,-Infinity) + (-10,0) | (Infinity,1e+300) | (-Infinity,-1e+300) + (-10,0) | (NaN,NaN) | (NaN,NaN) + (-10,0) | (10,10) | (-20,-10) + (-3,4) | (0,0) | (-3,4) + (-3,4) | (-10,0) | (7,4) + (-3,4) | (-3,4) | (0,0) + (-3,4) | (5.1,34.5) | (-8.1,-30.5) + (-3,4) | (-5,-12) | (2,16) + (-3,4) | (1e-300,-1e-300) | (-3,4) + (-3,4) | (1e+300,Infinity) | (-1e+300,-Infinity) + (-3,4) | (Infinity,1e+300) | (-Infinity,-1e+300) + (-3,4) | (NaN,NaN) | (NaN,NaN) + (-3,4) | (10,10) | (-13,-6) + (5.1,34.5) | (0,0) | (5.1,34.5) + (5.1,34.5) | (-10,0) | (15.1,34.5) + (5.1,34.5) | (-3,4) | (8.1,30.5) + (5.1,34.5) | (5.1,34.5) | (0,0) + (5.1,34.5) | (-5,-12) | (10.1,46.5) + (5.1,34.5) | (1e-300,-1e-300) | (5.1,34.5) + (5.1,34.5) | (1e+300,Infinity) | (-1e+300,-Infinity) + (5.1,34.5) | (Infinity,1e+300) | (-Infinity,-1e+300) + (5.1,34.5) | (NaN,NaN) | (NaN,NaN) + (5.1,34.5) | (10,10) | (-4.9,24.5) + (-5,-12) | (0,0) | (-5,-12) + (-5,-12) | (-10,0) | (5,-12) + (-5,-12) | (-3,4) | (-2,-16) + (-5,-12) | (5.1,34.5) | (-10.1,-46.5) + (-5,-12) | (-5,-12) | (0,0) + (-5,-12) | (1e-300,-1e-300) | (-5,-12) + (-5,-12) | (1e+300,Infinity) | (-1e+300,-Infinity) + (-5,-12) | (Infinity,1e+300) | (-Infinity,-1e+300) + (-5,-12) | (NaN,NaN) | (NaN,NaN) + (-5,-12) | (10,10) | (-15,-22) + (1e-300,-1e-300) | (0,0) | (1e-300,-1e-300) + (1e-300,-1e-300) | (-10,0) | (10,-1e-300) + (1e-300,-1e-300) | (-3,4) | (3,-4) + (1e-300,-1e-300) | (5.1,34.5) | (-5.1,-34.5) + (1e-300,-1e-300) | (-5,-12) | (5,12) + (1e-300,-1e-300) | (1e-300,-1e-300) | (0,0) + (1e-300,-1e-300) | (1e+300,Infinity) | (-1e+300,-Infinity) + (1e-300,-1e-300) | (Infinity,1e+300) | (-Infinity,-1e+300) + (1e-300,-1e-300) | (NaN,NaN) | (NaN,NaN) + (1e-300,-1e-300) | (10,10) | (-10,-10) + (1e+300,Infinity) | (0,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-10,0) | (1e+300,Infinity) + (1e+300,Infinity) | (-3,4) | (1e+300,Infinity) + (1e+300,Infinity) | (5.1,34.5) | (1e+300,Infinity) + (1e+300,Infinity) | (-5,-12) | (1e+300,Infinity) + (1e+300,Infinity) | (1e-300,-1e-300) | (1e+300,Infinity) + (1e+300,Infinity) | (1e+300,Infinity) | (0,NaN) + (1e+300,Infinity) | (Infinity,1e+300) | (-Infinity,Infinity) + (1e+300,Infinity) | (NaN,NaN) | (NaN,NaN) + (1e+300,Infinity) | (10,10) | (1e+300,Infinity) + (Infinity,1e+300) | (0,0) | (Infinity,1e+300) + (Infinity,1e+300) | (-10,0) | (Infinity,1e+300) + (Infinity,1e+300) | (-3,4) | (Infinity,1e+300) + (Infinity,1e+300) | (5.1,34.5) | (Infinity,1e+300) + (Infinity,1e+300) | (-5,-12) | (Infinity,1e+300) + (Infinity,1e+300) | (1e-300,-1e-300) | (Infinity,1e+300) + (Infinity,1e+300) | (1e+300,Infinity) | (Infinity,-Infinity) + (Infinity,1e+300) | (Infinity,1e+300) | (NaN,0) + (Infinity,1e+300) | (NaN,NaN) | (NaN,NaN) + (Infinity,1e+300) | (10,10) | (Infinity,1e+300) + (NaN,NaN) | (0,0) | (NaN,NaN) + (NaN,NaN) | (-10,0) | (NaN,NaN) + (NaN,NaN) | (-3,4) | (NaN,NaN) + (NaN,NaN) | (5.1,34.5) | (NaN,NaN) + 
(NaN,NaN) | (-5,-12) | (NaN,NaN) + (NaN,NaN) | (1e-300,-1e-300) | (NaN,NaN) + (NaN,NaN) | (1e+300,Infinity) | (NaN,NaN) + (NaN,NaN) | (Infinity,1e+300) | (NaN,NaN) + (NaN,NaN) | (NaN,NaN) | (NaN,NaN) + (NaN,NaN) | (10,10) | (NaN,NaN) + (10,10) | (0,0) | (10,10) + (10,10) | (-10,0) | (20,10) + (10,10) | (-3,4) | (13,6) + (10,10) | (5.1,34.5) | (4.9,-24.5) + (10,10) | (-5,-12) | (15,22) + (10,10) | (1e-300,-1e-300) | (10,10) + (10,10) | (1e+300,Infinity) | (-1e+300,-Infinity) + (10,10) | (Infinity,1e+300) | (-Infinity,-1e+300) + (10,10) | (NaN,NaN) | (NaN,NaN) + (10,10) | (10,10) | (0,0) +(100 rows) + +-- Multiply with point +SELECT p1.f1, p2.f1, p1.f1 * p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +------------+-------------------+----------------------- + (5.1,34.5) | (0,0) | (0,0) + (10,10) | (0,0) | (0,0) + (5.1,34.5) | (-10,0) | (-51,-345) + (10,10) | (-10,0) | (-100,-100) + (5.1,34.5) | (-3,4) | (-153.3,-83.1) + (10,10) | (-3,4) | (-70,10) + (5.1,34.5) | (5.1,34.5) | (-1164.24,351.9) + (10,10) | (5.1,34.5) | (-294,396) + (5.1,34.5) | (-5,-12) | (388.5,-233.7) + (10,10) | (-5,-12) | (70,-170) + (5.1,34.5) | (1e-300,-1e-300) | (3.96e-299,2.94e-299) + (10,10) | (1e-300,-1e-300) | (2e-299,0) + (5.1,34.5) | (1e+300,Infinity) | (-Infinity,Infinity) + (10,10) | (1e+300,Infinity) | (-Infinity,Infinity) + (5.1,34.5) | (Infinity,1e+300) | (Infinity,Infinity) + (10,10) | (Infinity,1e+300) | (Infinity,Infinity) + (5.1,34.5) | (NaN,NaN) | (NaN,NaN) + (10,10) | (NaN,NaN) | (NaN,NaN) + (5.1,34.5) | (10,10) | (-294,396) + (10,10) | (10,10) | (0,200) +(20 rows) + +-- Underflow error +SELECT p1.f1, p2.f1, p1.f1 * p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1[0] < 1; +ERROR: value out of range: underflow +-- Divide by point +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? 
+-------------------+------------+------------------------------------------- + (0,0) | (5.1,34.5) | (0,0) + (0,0) | (10,10) | (0,0) + (-10,0) | (5.1,34.5) | (-0.0419318237877,0.283656455034) + (-10,0) | (10,10) | (-0.5,0.5) + (-3,4) | (5.1,34.5) | (0.100883034877,0.101869666025) + (-3,4) | (10,10) | (0.05,0.35) + (5.1,34.5) | (5.1,34.5) | (1,0) + (5.1,34.5) | (10,10) | (1.98,1.47) + (-5,-12) | (5.1,34.5) | (-0.361353657935,0.0915100389719) + (-5,-12) | (10,10) | (-0.85,-0.35) + (1e-300,-1e-300) | (5.1,34.5) | (-2.41724631247e-302,-3.25588278822e-302) + (1e-300,-1e-300) | (10,10) | (0,-1e-301) + (1e+300,Infinity) | (5.1,34.5) | (Infinity,Infinity) + (1e+300,Infinity) | (10,10) | (Infinity,Infinity) + (Infinity,1e+300) | (5.1,34.5) | (Infinity,-Infinity) + (Infinity,1e+300) | (10,10) | (Infinity,-Infinity) + (NaN,NaN) | (5.1,34.5) | (NaN,NaN) + (NaN,NaN) | (10,10) | (NaN,NaN) + (10,10) | (5.1,34.5) | (0.325588278822,-0.241724631247) + (10,10) | (10,10) | (1,0) +(20 rows) + +-- Overflow error +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1[0] > 1000; +ERROR: value out of range: overflow +-- Division by 0 error +SELECT p1.f1, p2.f1, p1.f1 / p2.f1 FROM POINT_TBL p1, POINT_TBL p2 WHERE p2.f1 ~= '(0,0)'::point; +ERROR: division by zero +-- Distance to line +SELECT p.f1, l.s, p.f1 <-> l.s AS dist_pl, l.s <-> p.f1 AS dist_lp FROM POINT_TBL p, LINE_TBL l; + f1 | s | dist_pl | dist_lp +-------------------+---------------------------------------+--------------------+-------------------- + (0,0) | {0,-1,5} | 5 | 5 + (0,0) | {1,0,5} | 5 | 5 + (0,0) | {0,3,0} | 0 | 0 + (0,0) | {1,-1,0} | 0 | 0 + (0,0) | {-0.4,-1,-6} | 5.57086014531 | 5.57086014531 + (0,0) | {-0.000184615384615,-1,15.3846153846} | 15.3846151224 | 15.3846151224 + (0,0) | {3,NaN,5} | NaN | NaN + (0,0) | {NaN,NaN,NaN} | NaN | NaN + (0,0) | {0,-1,3} | 3 | 3 + (0,0) | {-1,0,3} | 3 | 3 + (-10,0) | {0,-1,5} | 5 | 5 + (-10,0) | {1,0,5} | 5 | 5 + (-10,0) | {0,3,0} | 0 | 0 + (-10,0) | {1,-1,0} | 7.07106781187 | 7.07106781187 + (-10,0) | {-0.4,-1,-6} | 1.85695338177 | 1.85695338177 + (-10,0) | {-0.000184615384615,-1,15.3846153846} | 15.3864612763 | 15.3864612763 + (-10,0) | {3,NaN,5} | NaN | NaN + (-10,0) | {NaN,NaN,NaN} | NaN | NaN + (-10,0) | {0,-1,3} | 3 | 3 + (-10,0) | {-1,0,3} | 13 | 13 + (-3,4) | {0,-1,5} | 1 | 1 + (-3,4) | {1,0,5} | 2 | 2 + (-3,4) | {0,3,0} | 4 | 4 + (-3,4) | {1,-1,0} | 4.94974746831 | 4.94974746831 + (-3,4) | {-0.4,-1,-6} | 8.17059487979 | 8.17059487979 + (-3,4) | {-0.000184615384615,-1,15.3846153846} | 11.3851690368 | 11.3851690368 + (-3,4) | {3,NaN,5} | NaN | NaN + (-3,4) | {NaN,NaN,NaN} | NaN | NaN + (-3,4) | {0,-1,3} | 1 | 1 + (-3,4) | {-1,0,3} | 6 | 6 + (5.1,34.5) | {0,-1,5} | 29.5 | 29.5 + (5.1,34.5) | {1,0,5} | 10.1 | 10.1 + (5.1,34.5) | {0,3,0} | 34.5 | 34.5 + (5.1,34.5) | {1,-1,0} | 20.7889393669 | 20.7889393669 + (5.1,34.5) | {-0.4,-1,-6} | 39.4973984303 | 39.4973984303 + (5.1,34.5) | {-0.000184615384615,-1,15.3846153846} | 19.1163258281 | 19.1163258281 + (5.1,34.5) | {3,NaN,5} | NaN | NaN + (5.1,34.5) | {NaN,NaN,NaN} | NaN | NaN + (5.1,34.5) | {0,-1,3} | 31.5 | 31.5 + (5.1,34.5) | {-1,0,3} | 2.1 | 2.1 + (-5,-12) | {0,-1,5} | 17 | 17 + (-5,-12) | {1,0,5} | 0 | 0 + (-5,-12) | {0,3,0} | 12 | 12 + (-5,-12) | {1,-1,0} | 4.94974746831 | 4.94974746831 + (-5,-12) | {-0.4,-1,-6} | 7.42781352708 | 7.42781352708 + (-5,-12) | {-0.000184615384615,-1,15.3846153846} | 27.3855379948 | 27.3855379948 + (-5,-12) | {3,NaN,5} | NaN | NaN + (-5,-12) | {NaN,NaN,NaN} | NaN | NaN + (-5,-12) | {0,-1,3} | 15 
| 15 + (-5,-12) | {-1,0,3} | 8 | 8 + (1e-300,-1e-300) | {0,-1,5} | 5 | 5 + (1e-300,-1e-300) | {1,0,5} | 5 | 5 + (1e-300,-1e-300) | {0,3,0} | 1e-300 | 1e-300 + (1e-300,-1e-300) | {1,-1,0} | 1.41421356237e-300 | 1.41421356237e-300 + (1e-300,-1e-300) | {-0.4,-1,-6} | 5.57086014531 | 5.57086014531 + (1e-300,-1e-300) | {-0.000184615384615,-1,15.3846153846} | 15.3846151224 | 15.3846151224 + (1e-300,-1e-300) | {3,NaN,5} | NaN | NaN + (1e-300,-1e-300) | {NaN,NaN,NaN} | NaN | NaN + (1e-300,-1e-300) | {0,-1,3} | 3 | 3 + (1e-300,-1e-300) | {-1,0,3} | 3 | 3 + (1e+300,Infinity) | {0,-1,5} | Infinity | Infinity + (1e+300,Infinity) | {1,0,5} | NaN | NaN + (1e+300,Infinity) | {0,3,0} | Infinity | Infinity + (1e+300,Infinity) | {1,-1,0} | Infinity | Infinity + (1e+300,Infinity) | {-0.4,-1,-6} | Infinity | Infinity + (1e+300,Infinity) | {-0.000184615384615,-1,15.3846153846} | Infinity | Infinity + (1e+300,Infinity) | {3,NaN,5} | NaN | NaN + (1e+300,Infinity) | {NaN,NaN,NaN} | NaN | NaN + (1e+300,Infinity) | {0,-1,3} | Infinity | Infinity + (1e+300,Infinity) | {-1,0,3} | NaN | NaN + (Infinity,1e+300) | {0,-1,5} | NaN | NaN + (Infinity,1e+300) | {1,0,5} | Infinity | Infinity + (Infinity,1e+300) | {0,3,0} | NaN | NaN + (Infinity,1e+300) | {1,-1,0} | NaN | NaN + (Infinity,1e+300) | {-0.4,-1,-6} | NaN | NaN + (Infinity,1e+300) | {-0.000184615384615,-1,15.3846153846} | NaN | NaN + (Infinity,1e+300) | {3,NaN,5} | NaN | NaN + (Infinity,1e+300) | {NaN,NaN,NaN} | NaN | NaN + (Infinity,1e+300) | {0,-1,3} | NaN | NaN + (Infinity,1e+300) | {-1,0,3} | Infinity | Infinity + (NaN,NaN) | {0,-1,5} | NaN | NaN + (NaN,NaN) | {1,0,5} | NaN | NaN + (NaN,NaN) | {0,3,0} | NaN | NaN + (NaN,NaN) | {1,-1,0} | NaN | NaN + (NaN,NaN) | {-0.4,-1,-6} | NaN | NaN + (NaN,NaN) | {-0.000184615384615,-1,15.3846153846} | NaN | NaN + (NaN,NaN) | {3,NaN,5} | NaN | NaN + (NaN,NaN) | {NaN,NaN,NaN} | NaN | NaN + (NaN,NaN) | {0,-1,3} | NaN | NaN + (NaN,NaN) | {-1,0,3} | NaN | NaN + (10,10) | {0,-1,5} | 5 | 5 + (10,10) | {1,0,5} | 15 | 15 + (10,10) | {0,3,0} | 10 | 10 + (10,10) | {1,-1,0} | 0 | 0 + (10,10) | {-0.4,-1,-6} | 18.5695338177 | 18.5695338177 + (10,10) | {-0.000184615384615,-1,15.3846153846} | 5.38276913903 | 5.38276913903 + (10,10) | {3,NaN,5} | NaN | NaN + (10,10) | {NaN,NaN,NaN} | NaN | NaN + (10,10) | {0,-1,3} | 7 | 7 + (10,10) | {-1,0,3} | 7 | 7 +(100 rows) + +-- Distance to line segment +SELECT p.f1, l.s, p.f1 <-> l.s AS dist_ps, l.s <-> p.f1 AS dist_sp FROM POINT_TBL p, LSEG_TBL l; + f1 | s | dist_ps | dist_sp +-------------------+-------------------------------+--------------------+-------------------- + (0,0) | [(1,2),(3,4)] | 2.2360679775 | 2.2360679775 + (0,0) | [(0,0),(6,6)] | 0 | 0 + (0,0) | [(10,-10),(-3,-4)] | 4.88901207039 | 4.88901207039 + (0,0) | [(-1000000,200),(300000,-40)] | 15.3846151224 | 15.3846151224 + (0,0) | [(11,22),(33,44)] | 24.5967477525 | 24.5967477525 + (0,0) | [(-10,2),(-10,3)] | 10.1980390272 | 10.1980390272 + (0,0) | [(0,-20),(30,-20)] | 20 | 20 + (0,0) | [(NaN,1),(NaN,90)] | NaN | NaN + (-10,0) | [(1,2),(3,4)] | 11.1803398875 | 11.1803398875 + (-10,0) | [(0,0),(6,6)] | 10 | 10 + (-10,0) | [(10,-10),(-3,-4)] | 8.0622577483 | 8.0622577483 + (-10,0) | [(-1000000,200),(300000,-40)] | 15.3864612763 | 15.3864612763 + (-10,0) | [(11,22),(33,44)] | 30.4138126515 | 30.4138126515 + (-10,0) | [(-10,2),(-10,3)] | 2 | 2 + (-10,0) | [(0,-20),(30,-20)] | 22.360679775 | 22.360679775 + (-10,0) | [(NaN,1),(NaN,90)] | NaN | NaN + (-3,4) | [(1,2),(3,4)] | 4.472135955 | 4.472135955 + (-3,4) | [(0,0),(6,6)] | 4.94974746831 | 
4.94974746831 + (-3,4) | [(10,-10),(-3,-4)] | 8 | 8 + (-3,4) | [(-1000000,200),(300000,-40)] | 11.3851690367 | 11.3851690367 + (-3,4) | [(11,22),(33,44)] | 22.803508502 | 22.803508502 + (-3,4) | [(-10,2),(-10,3)] | 7.07106781187 | 7.07106781187 + (-3,4) | [(0,-20),(30,-20)] | 24.1867732449 | 24.1867732449 + (-3,4) | [(NaN,1),(NaN,90)] | NaN | NaN + (5.1,34.5) | [(1,2),(3,4)] | 30.5722096028 | 30.5722096028 + (5.1,34.5) | [(0,0),(6,6)] | 28.5142069853 | 28.5142069853 + (5.1,34.5) | [(10,-10),(-3,-4)] | 39.3428519556 | 39.3428519556 + (5.1,34.5) | [(-1000000,200),(300000,-40)] | 19.1163258281 | 19.1163258281 + (5.1,34.5) | [(11,22),(33,44)] | 13.0107647738 | 13.0107647738 + (5.1,34.5) | [(-10,2),(-10,3)] | 34.932220084 | 34.932220084 + (5.1,34.5) | [(0,-20),(30,-20)] | 54.5 | 54.5 + (5.1,34.5) | [(NaN,1),(NaN,90)] | NaN | NaN + (-5,-12) | [(1,2),(3,4)] | 15.2315462117 | 15.2315462117 + (-5,-12) | [(0,0),(6,6)] | 13 | 13 + (-5,-12) | [(10,-10),(-3,-4)] | 8.10179143093 | 8.10179143093 + (-5,-12) | [(-1000000,200),(300000,-40)] | 27.3855379949 | 27.3855379949 + (-5,-12) | [(11,22),(33,44)] | 37.5765884561 | 37.5765884561 + (-5,-12) | [(-10,2),(-10,3)] | 14.8660687473 | 14.8660687473 + (-5,-12) | [(0,-20),(30,-20)] | 9.43398113206 | 9.43398113206 + (-5,-12) | [(NaN,1),(NaN,90)] | NaN | NaN + (1e-300,-1e-300) | [(1,2),(3,4)] | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | [(0,0),(6,6)] | 1.41421356237e-300 | 1.41421356237e-300 + (1e-300,-1e-300) | [(10,-10),(-3,-4)] | 4.88901207039 | 4.88901207039 + (1e-300,-1e-300) | [(-1000000,200),(300000,-40)] | 15.3846151224 | 15.3846151224 + (1e-300,-1e-300) | [(11,22),(33,44)] | 24.5967477525 | 24.5967477525 + (1e-300,-1e-300) | [(-10,2),(-10,3)] | 10.1980390272 | 10.1980390272 + (1e-300,-1e-300) | [(0,-20),(30,-20)] | 20 | 20 + (1e-300,-1e-300) | [(NaN,1),(NaN,90)] | NaN | NaN + (1e+300,Infinity) | [(1,2),(3,4)] | Infinity | Infinity + (1e+300,Infinity) | [(0,0),(6,6)] | Infinity | Infinity + (1e+300,Infinity) | [(10,-10),(-3,-4)] | Infinity | Infinity + (1e+300,Infinity) | [(-1000000,200),(300000,-40)] | Infinity | Infinity + (1e+300,Infinity) | [(11,22),(33,44)] | Infinity | Infinity + (1e+300,Infinity) | [(-10,2),(-10,3)] | Infinity | Infinity + (1e+300,Infinity) | [(0,-20),(30,-20)] | Infinity | Infinity + (1e+300,Infinity) | [(NaN,1),(NaN,90)] | Infinity | Infinity + (Infinity,1e+300) | [(1,2),(3,4)] | Infinity | Infinity + (Infinity,1e+300) | [(0,0),(6,6)] | Infinity | Infinity + (Infinity,1e+300) | [(10,-10),(-3,-4)] | Infinity | Infinity + (Infinity,1e+300) | [(-1000000,200),(300000,-40)] | Infinity | Infinity + (Infinity,1e+300) | [(11,22),(33,44)] | Infinity | Infinity + (Infinity,1e+300) | [(-10,2),(-10,3)] | Infinity | Infinity + (Infinity,1e+300) | [(0,-20),(30,-20)] | Infinity | Infinity + (Infinity,1e+300) | [(NaN,1),(NaN,90)] | NaN | NaN + (NaN,NaN) | [(1,2),(3,4)] | NaN | NaN + (NaN,NaN) | [(0,0),(6,6)] | NaN | NaN + (NaN,NaN) | [(10,-10),(-3,-4)] | NaN | NaN + (NaN,NaN) | [(-1000000,200),(300000,-40)] | NaN | NaN + (NaN,NaN) | [(11,22),(33,44)] | NaN | NaN + (NaN,NaN) | [(-10,2),(-10,3)] | NaN | NaN + (NaN,NaN) | [(0,-20),(30,-20)] | NaN | NaN + (NaN,NaN) | [(NaN,1),(NaN,90)] | NaN | NaN + (10,10) | [(1,2),(3,4)] | 9.21954445729 | 9.21954445729 + (10,10) | [(0,0),(6,6)] | 5.65685424949 | 5.65685424949 + (10,10) | [(10,-10),(-3,-4)] | 18.15918769 | 18.15918769 + (10,10) | [(-1000000,200),(300000,-40)] | 5.38276913904 | 5.38276913904 + (10,10) | [(11,22),(33,44)] | 12.0415945788 | 12.0415945788 + (10,10) | [(-10,2),(-10,3)] | 
21.1896201004 | 21.1896201004 + (10,10) | [(0,-20),(30,-20)] | 30 | 30 + (10,10) | [(NaN,1),(NaN,90)] | NaN | NaN +(80 rows) + +-- Distance to box +SELECT p.f1, b.f1, p.f1 <-> b.f1 AS dist_pb, b.f1 <-> p.f1 AS dist_bp FROM POINT_TBL p, BOX_TBL b; + f1 | f1 | dist_pb | dist_bp +-------------------+---------------------+--------------------+-------------------- + (0,0) | (2,2),(0,0) | 0 | 0 + (0,0) | (3,3),(1,1) | 1.41421356237 | 1.41421356237 + (0,0) | (-2,2),(-8,-10) | 2 | 2 + (0,0) | (2.5,3.5),(2.5,2.5) | 3.53553390593 | 3.53553390593 + (0,0) | (3,3),(3,3) | 4.24264068712 | 4.24264068712 + (-10,0) | (2,2),(0,0) | 10 | 10 + (-10,0) | (3,3),(1,1) | 11.0453610172 | 11.0453610172 + (-10,0) | (-2,2),(-8,-10) | 2 | 2 + (-10,0) | (2.5,3.5),(2.5,2.5) | 12.747548784 | 12.747548784 + (-10,0) | (3,3),(3,3) | 13.3416640641 | 13.3416640641 + (-3,4) | (2,2),(0,0) | 3.60555127546 | 3.60555127546 + (-3,4) | (3,3),(1,1) | 4.12310562562 | 4.12310562562 + (-3,4) | (-2,2),(-8,-10) | 2 | 2 + (-3,4) | (2.5,3.5),(2.5,2.5) | 5.52268050859 | 5.52268050859 + (-3,4) | (3,3),(3,3) | 6.0827625303 | 6.0827625303 + (5.1,34.5) | (2,2),(0,0) | 32.6475113906 | 32.6475113906 + (5.1,34.5) | (3,3),(1,1) | 31.5699223946 | 31.5699223946 + (5.1,34.5) | (-2,2),(-8,-10) | 33.2664996656 | 33.2664996656 + (5.1,34.5) | (2.5,3.5),(2.5,2.5) | 31.108841187 | 31.108841187 + (5.1,34.5) | (3,3),(3,3) | 31.5699223946 | 31.5699223946 + (-5,-12) | (2,2),(0,0) | 13 | 13 + (-5,-12) | (3,3),(1,1) | 14.3178210633 | 14.3178210633 + (-5,-12) | (-2,2),(-8,-10) | 2 | 2 + (-5,-12) | (2.5,3.5),(2.5,2.5) | 16.3248277173 | 16.3248277173 + (-5,-12) | (3,3),(3,3) | 17 | 17 + (1e-300,-1e-300) | (2,2),(0,0) | 1.41421356237e-300 | 1.41421356237e-300 + (1e-300,-1e-300) | (3,3),(1,1) | 1.41421356237 | 1.41421356237 + (1e-300,-1e-300) | (-2,2),(-8,-10) | 2 | 2 + (1e-300,-1e-300) | (2.5,3.5),(2.5,2.5) | 3.53553390593 | 3.53553390593 + (1e-300,-1e-300) | (3,3),(3,3) | 4.24264068712 | 4.24264068712 + (1e+300,Infinity) | (2,2),(0,0) | Infinity | Infinity + (1e+300,Infinity) | (3,3),(1,1) | Infinity | Infinity + (1e+300,Infinity) | (-2,2),(-8,-10) | Infinity | Infinity + (1e+300,Infinity) | (2.5,3.5),(2.5,2.5) | Infinity | Infinity + (1e+300,Infinity) | (3,3),(3,3) | Infinity | Infinity + (Infinity,1e+300) | (2,2),(0,0) | Infinity | Infinity + (Infinity,1e+300) | (3,3),(1,1) | Infinity | Infinity + (Infinity,1e+300) | (-2,2),(-8,-10) | Infinity | Infinity + (Infinity,1e+300) | (2.5,3.5),(2.5,2.5) | Infinity | Infinity + (Infinity,1e+300) | (3,3),(3,3) | Infinity | Infinity + (NaN,NaN) | (2,2),(0,0) | NaN | NaN + (NaN,NaN) | (3,3),(1,1) | NaN | NaN + (NaN,NaN) | (-2,2),(-8,-10) | NaN | NaN + (NaN,NaN) | (2.5,3.5),(2.5,2.5) | NaN | NaN + (NaN,NaN) | (3,3),(3,3) | NaN | NaN + (10,10) | (2,2),(0,0) | 11.313708499 | 11.313708499 + (10,10) | (3,3),(1,1) | 9.89949493661 | 9.89949493661 + (10,10) | (-2,2),(-8,-10) | 14.4222051019 | 14.4222051019 + (10,10) | (2.5,3.5),(2.5,2.5) | 9.92471662064 | 9.92471662064 + (10,10) | (3,3),(3,3) | 9.89949493661 | 9.89949493661 +(50 rows) + +-- Distance to path +SELECT p.f1, p1.f1, p.f1 <-> p1.f1 AS dist_ppath, p1.f1 <-> p.f1 AS dist_pathp FROM POINT_TBL p, PATH_TBL p1; + f1 | f1 | dist_ppath | dist_pathp +-------------------+---------------------------+--------------------+-------------------- + (0,0) | [(1,2),(3,4)] | 2.2360679775 | 2.2360679775 + (0,0) | ((1,2),(3,4)) | 2.2360679775 | 2.2360679775 + (0,0) | [(0,0),(3,0),(4,5),(1,6)] | 0 | 0 + (0,0) | ((1,2),(3,4)) | 2.2360679775 | 2.2360679775 + (0,0) | ((1,2),(3,4)) | 2.2360679775 | 
2.2360679775 + (0,0) | [(1,2),(3,4)] | 2.2360679775 | 2.2360679775 + (0,0) | ((10,20)) | 22.360679775 | 22.360679775 + (0,0) | [(11,12),(13,14)] | 16.2788205961 | 16.2788205961 + (0,0) | ((11,12),(13,14)) | 16.2788205961 | 16.2788205961 + (-10,0) | [(1,2),(3,4)] | 11.1803398875 | 11.1803398875 + (-10,0) | ((1,2),(3,4)) | 11.1803398875 | 11.1803398875 + (-10,0) | [(0,0),(3,0),(4,5),(1,6)] | 10 | 10 + (-10,0) | ((1,2),(3,4)) | 11.1803398875 | 11.1803398875 + (-10,0) | ((1,2),(3,4)) | 11.1803398875 | 11.1803398875 + (-10,0) | [(1,2),(3,4)] | 11.1803398875 | 11.1803398875 + (-10,0) | ((10,20)) | 28.2842712475 | 28.2842712475 + (-10,0) | [(11,12),(13,14)] | 24.1867732449 | 24.1867732449 + (-10,0) | ((11,12),(13,14)) | 24.1867732449 | 24.1867732449 + (-3,4) | [(1,2),(3,4)] | 4.472135955 | 4.472135955 + (-3,4) | ((1,2),(3,4)) | 4.472135955 | 4.472135955 + (-3,4) | [(0,0),(3,0),(4,5),(1,6)] | 4.472135955 | 4.472135955 + (-3,4) | ((1,2),(3,4)) | 4.472135955 | 4.472135955 + (-3,4) | ((1,2),(3,4)) | 4.472135955 | 4.472135955 + (-3,4) | [(1,2),(3,4)] | 4.472135955 | 4.472135955 + (-3,4) | ((10,20)) | 20.6155281281 | 20.6155281281 + (-3,4) | [(11,12),(13,14)] | 16.1245154966 | 16.1245154966 + (-3,4) | ((11,12),(13,14)) | 16.1245154966 | 16.1245154966 + (5.1,34.5) | [(1,2),(3,4)] | 30.5722096028 | 30.5722096028 + (5.1,34.5) | ((1,2),(3,4)) | 30.5722096028 | 30.5722096028 + (5.1,34.5) | [(0,0),(3,0),(4,5),(1,6)] | 28.793402022 | 28.793402022 + (5.1,34.5) | ((1,2),(3,4)) | 30.5722096028 | 30.5722096028 + (5.1,34.5) | ((1,2),(3,4)) | 30.5722096028 | 30.5722096028 + (5.1,34.5) | [(1,2),(3,4)] | 30.5722096028 | 30.5722096028 + (5.1,34.5) | ((10,20)) | 15.3055545473 | 15.3055545473 + (5.1,34.5) | [(11,12),(13,14)] | 21.9695243462 | 21.9695243462 + (5.1,34.5) | ((11,12),(13,14)) | 21.9695243462 | 21.9695243462 + (-5,-12) | [(1,2),(3,4)] | 15.2315462117 | 15.2315462117 + (-5,-12) | ((1,2),(3,4)) | 15.2315462117 | 15.2315462117 + (-5,-12) | [(0,0),(3,0),(4,5),(1,6)] | 13 | 13 + (-5,-12) | ((1,2),(3,4)) | 15.2315462117 | 15.2315462117 + (-5,-12) | ((1,2),(3,4)) | 15.2315462117 | 15.2315462117 + (-5,-12) | [(1,2),(3,4)] | 15.2315462117 | 15.2315462117 + (-5,-12) | ((10,20)) | 35.3411940941 | 35.3411940941 + (-5,-12) | [(11,12),(13,14)] | 28.8444102037 | 28.8444102037 + (-5,-12) | ((11,12),(13,14)) | 28.8444102037 | 28.8444102037 + (1e-300,-1e-300) | [(1,2),(3,4)] | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | ((1,2),(3,4)) | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | [(0,0),(3,0),(4,5),(1,6)] | 1.41421356237e-300 | 1.41421356237e-300 + (1e-300,-1e-300) | ((1,2),(3,4)) | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | ((1,2),(3,4)) | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | [(1,2),(3,4)] | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | ((10,20)) | 22.360679775 | 22.360679775 + (1e-300,-1e-300) | [(11,12),(13,14)] | 16.2788205961 | 16.2788205961 + (1e-300,-1e-300) | ((11,12),(13,14)) | 16.2788205961 | 16.2788205961 + (1e+300,Infinity) | [(1,2),(3,4)] | Infinity | Infinity + (1e+300,Infinity) | ((1,2),(3,4)) | Infinity | Infinity + (1e+300,Infinity) | [(0,0),(3,0),(4,5),(1,6)] | Infinity | Infinity + (1e+300,Infinity) | ((1,2),(3,4)) | Infinity | Infinity + (1e+300,Infinity) | ((1,2),(3,4)) | Infinity | Infinity + (1e+300,Infinity) | [(1,2),(3,4)] | Infinity | Infinity + (1e+300,Infinity) | ((10,20)) | Infinity | Infinity + (1e+300,Infinity) | [(11,12),(13,14)] | Infinity | Infinity + (1e+300,Infinity) | ((11,12),(13,14)) | Infinity | Infinity + (Infinity,1e+300) | [(1,2),(3,4)] | Infinity | 
Infinity + (Infinity,1e+300) | ((1,2),(3,4)) | Infinity | Infinity + (Infinity,1e+300) | [(0,0),(3,0),(4,5),(1,6)] | Infinity | Infinity + (Infinity,1e+300) | ((1,2),(3,4)) | Infinity | Infinity + (Infinity,1e+300) | ((1,2),(3,4)) | Infinity | Infinity + (Infinity,1e+300) | [(1,2),(3,4)] | Infinity | Infinity + (Infinity,1e+300) | ((10,20)) | Infinity | Infinity + (Infinity,1e+300) | [(11,12),(13,14)] | Infinity | Infinity + (Infinity,1e+300) | ((11,12),(13,14)) | Infinity | Infinity + (NaN,NaN) | [(1,2),(3,4)] | NaN | NaN + (NaN,NaN) | ((1,2),(3,4)) | NaN | NaN + (NaN,NaN) | [(0,0),(3,0),(4,5),(1,6)] | NaN | NaN + (NaN,NaN) | ((1,2),(3,4)) | NaN | NaN + (NaN,NaN) | ((1,2),(3,4)) | NaN | NaN + (NaN,NaN) | [(1,2),(3,4)] | NaN | NaN + (NaN,NaN) | ((10,20)) | NaN | NaN + (NaN,NaN) | [(11,12),(13,14)] | NaN | NaN + (NaN,NaN) | ((11,12),(13,14)) | NaN | NaN + (10,10) | [(1,2),(3,4)] | 9.21954445729 | 9.21954445729 + (10,10) | ((1,2),(3,4)) | 9.21954445729 | 9.21954445729 + (10,10) | [(0,0),(3,0),(4,5),(1,6)] | 7.81024967591 | 7.81024967591 + (10,10) | ((1,2),(3,4)) | 9.21954445729 | 9.21954445729 + (10,10) | ((1,2),(3,4)) | 9.21954445729 | 9.21954445729 + (10,10) | [(1,2),(3,4)] | 9.21954445729 | 9.21954445729 + (10,10) | ((10,20)) | 10 | 10 + (10,10) | [(11,12),(13,14)] | 2.2360679775 | 2.2360679775 + (10,10) | ((11,12),(13,14)) | 2.2360679775 | 2.2360679775 +(90 rows) + +-- Distance to polygon +SELECT p.f1, p1.f1, p.f1 <-> p1.f1 AS dist_ppoly, p1.f1 <-> p.f1 AS dist_polyp FROM POINT_TBL p, POLYGON_TBL p1; + f1 | f1 | dist_ppoly | dist_polyp +-------------------+----------------------------+---------------+--------------- + (0,0) | ((2,0),(2,4),(0,0)) | 0 | 0 + (0,0) | ((3,1),(3,3),(1,0)) | 1 | 1 + (0,0) | ((1,2),(3,4),(5,6),(7,8)) | 2.2360679775 | 2.2360679775 + (0,0) | ((7,8),(5,6),(3,4),(1,2)) | 2.2360679775 | 2.2360679775 + (0,0) | ((1,2),(7,8),(5,6),(3,-4)) | 1.58113883008 | 1.58113883008 + (0,0) | ((0,0)) | 0 | 0 + (0,0) | ((0,1),(0,1)) | 1 | 1 + (-10,0) | ((2,0),(2,4),(0,0)) | 10 | 10 + (-10,0) | ((3,1),(3,3),(1,0)) | 11 | 11 + (-10,0) | ((1,2),(3,4),(5,6),(7,8)) | 11.1803398875 | 11.1803398875 + (-10,0) | ((7,8),(5,6),(3,4),(1,2)) | 11.1803398875 | 11.1803398875 + (-10,0) | ((1,2),(7,8),(5,6),(3,-4)) | 11.1803398875 | 11.1803398875 + (-10,0) | ((0,0)) | 10 | 10 + (-10,0) | ((0,1),(0,1)) | 10.0498756211 | 10.0498756211 + (-3,4) | ((2,0),(2,4),(0,0)) | 4.472135955 | 4.472135955 + (-3,4) | ((3,1),(3,3),(1,0)) | 5.54700196225 | 5.54700196225 + (-3,4) | ((1,2),(3,4),(5,6),(7,8)) | 4.472135955 | 4.472135955 + (-3,4) | ((7,8),(5,6),(3,4),(1,2)) | 4.472135955 | 4.472135955 + (-3,4) | ((1,2),(7,8),(5,6),(3,-4)) | 4.472135955 | 4.472135955 + (-3,4) | ((0,0)) | 5 | 5 + (-3,4) | ((0,1),(0,1)) | 4.24264068712 | 4.24264068712 + (5.1,34.5) | ((2,0),(2,4),(0,0)) | 30.6571362002 | 30.6571362002 + (5.1,34.5) | ((3,1),(3,3),(1,0)) | 31.5699223946 | 31.5699223946 + (5.1,34.5) | ((1,2),(3,4),(5,6),(7,8)) | 26.5680258958 | 26.5680258958 + (5.1,34.5) | ((7,8),(5,6),(3,4),(1,2)) | 26.5680258958 | 26.5680258958 + (5.1,34.5) | ((1,2),(7,8),(5,6),(3,-4)) | 26.5680258958 | 26.5680258958 + (5.1,34.5) | ((0,0)) | 34.8749193547 | 34.8749193547 + (5.1,34.5) | ((0,1),(0,1)) | 33.8859853037 | 33.8859853037 + (-5,-12) | ((2,0),(2,4),(0,0)) | 13 | 13 + (-5,-12) | ((3,1),(3,3),(1,0)) | 13.416407865 | 13.416407865 + (-5,-12) | ((1,2),(3,4),(5,6),(7,8)) | 15.2315462117 | 15.2315462117 + (-5,-12) | ((7,8),(5,6),(3,4),(1,2)) | 15.2315462117 | 15.2315462117 + (-5,-12) | ((1,2),(7,8),(5,6),(3,-4)) | 11.313708499 | 11.313708499 
+ (-5,-12) | ((0,0)) | 13 | 13 + (-5,-12) | ((0,1),(0,1)) | 13.9283882772 | 13.9283882772 + (1e-300,-1e-300) | ((2,0),(2,4),(0,0)) | 0 | 0 + (1e-300,-1e-300) | ((3,1),(3,3),(1,0)) | 1 | 1 + (1e-300,-1e-300) | ((1,2),(3,4),(5,6),(7,8)) | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | ((7,8),(5,6),(3,4),(1,2)) | 2.2360679775 | 2.2360679775 + (1e-300,-1e-300) | ((1,2),(7,8),(5,6),(3,-4)) | 1.58113883008 | 1.58113883008 + (1e-300,-1e-300) | ((0,0)) | 0 | 0 + (1e-300,-1e-300) | ((0,1),(0,1)) | 1 | 1 + (1e+300,Infinity) | ((2,0),(2,4),(0,0)) | Infinity | Infinity + (1e+300,Infinity) | ((3,1),(3,3),(1,0)) | Infinity | Infinity + (1e+300,Infinity) | ((1,2),(3,4),(5,6),(7,8)) | Infinity | Infinity + (1e+300,Infinity) | ((7,8),(5,6),(3,4),(1,2)) | Infinity | Infinity + (1e+300,Infinity) | ((1,2),(7,8),(5,6),(3,-4)) | Infinity | Infinity + (1e+300,Infinity) | ((0,0)) | Infinity | Infinity + (1e+300,Infinity) | ((0,1),(0,1)) | Infinity | Infinity + (Infinity,1e+300) | ((2,0),(2,4),(0,0)) | Infinity | Infinity + (Infinity,1e+300) | ((3,1),(3,3),(1,0)) | Infinity | Infinity + (Infinity,1e+300) | ((1,2),(3,4),(5,6),(7,8)) | Infinity | Infinity + (Infinity,1e+300) | ((7,8),(5,6),(3,4),(1,2)) | Infinity | Infinity + (Infinity,1e+300) | ((1,2),(7,8),(5,6),(3,-4)) | Infinity | Infinity + (Infinity,1e+300) | ((0,0)) | Infinity | Infinity + (Infinity,1e+300) | ((0,1),(0,1)) | Infinity | Infinity + (NaN,NaN) | ((2,0),(2,4),(0,0)) | 0 | 0 + (NaN,NaN) | ((3,1),(3,3),(1,0)) | 0 | 0 + (NaN,NaN) | ((1,2),(3,4),(5,6),(7,8)) | 0 | 0 + (NaN,NaN) | ((7,8),(5,6),(3,4),(1,2)) | 0 | 0 + (NaN,NaN) | ((1,2),(7,8),(5,6),(3,-4)) | 0 | 0 + (NaN,NaN) | ((0,0)) | 0 | 0 + (NaN,NaN) | ((0,1),(0,1)) | 0 | 0 + (10,10) | ((2,0),(2,4),(0,0)) | 10 | 10 + (10,10) | ((3,1),(3,3),(1,0)) | 9.89949493661 | 9.89949493661 + (10,10) | ((1,2),(3,4),(5,6),(7,8)) | 3.60555127546 | 3.60555127546 + (10,10) | ((7,8),(5,6),(3,4),(1,2)) | 3.60555127546 | 3.60555127546 + (10,10) | ((1,2),(7,8),(5,6),(3,-4)) | 3.60555127546 | 3.60555127546 + (10,10) | ((0,0)) | 14.1421356237 | 14.1421356237 + (10,10) | ((0,1),(0,1)) | 13.4536240471 | 13.4536240471 +(70 rows) + +-- Construct line through two points +SELECT p1.f1, p2.f1, line(p1.f1, p2.f1) + FROM POINT_TBL p1, POINT_TBL p2 WHERE p1.f1 <> p2.f1; + f1 | f1 | line +-------------------+-------------------+---------------------------------------- + (0,0) | (-10,0) | {0,-1,0} + (0,0) | (-3,4) | {-1.33333333333,-1,0} + (0,0) | (5.1,34.5) | {6.76470588235,-1,0} + (0,0) | (-5,-12) | {2.4,-1,0} + (0,0) | (1e+300,Infinity) | {-1,0,0} + (0,0) | (Infinity,1e+300) | {0,-1,0} + (0,0) | (NaN,NaN) | {NaN,-1,NaN} + (0,0) | (10,10) | {1,-1,0} + (-10,0) | (0,0) | {0,-1,0} + (-10,0) | (-3,4) | {0.571428571429,-1,5.71428571429} + (-10,0) | (5.1,34.5) | {2.28476821192,-1,22.8476821192} + (-10,0) | (-5,-12) | {-2.4,-1,-24} + (-10,0) | (1e-300,-1e-300) | {0,-1,0} + (-10,0) | (1e+300,Infinity) | {-1,0,-10} + (-10,0) | (Infinity,1e+300) | {0,-1,0} + (-10,0) | (NaN,NaN) | {NaN,-1,NaN} + (-10,0) | (10,10) | {0.5,-1,5} + (-3,4) | (0,0) | {-1.33333333333,-1,0} + (-3,4) | (-10,0) | {0.571428571429,-1,5.71428571429} + (-3,4) | (5.1,34.5) | {3.76543209877,-1,15.2962962963} + (-3,4) | (-5,-12) | {8,-1,28} + (-3,4) | (1e-300,-1e-300) | {-1.33333333333,-1,0} + (-3,4) | (1e+300,Infinity) | {-1,0,-3} + (-3,4) | (Infinity,1e+300) | {0,-1,4} + (-3,4) | (NaN,NaN) | {NaN,-1,NaN} + (-3,4) | (10,10) | {0.461538461538,-1,5.38461538462} + (5.1,34.5) | (0,0) | {6.76470588235,-1,0} + (5.1,34.5) | (-10,0) | {2.28476821192,-1,22.8476821192} + (5.1,34.5) 
| (-3,4) | {3.76543209877,-1,15.2962962963} + (5.1,34.5) | (-5,-12) | {4.60396039604,-1,11.0198019802} + (5.1,34.5) | (1e-300,-1e-300) | {6.76470588235,-1,0} + (5.1,34.5) | (1e+300,Infinity) | {-1,0,5.1} + (5.1,34.5) | (Infinity,1e+300) | {0,-1,34.5} + (5.1,34.5) | (NaN,NaN) | {NaN,-1,NaN} + (5.1,34.5) | (10,10) | {-5,-1,60} + (-5,-12) | (0,0) | {2.4,-1,0} + (-5,-12) | (-10,0) | {-2.4,-1,-24} + (-5,-12) | (-3,4) | {8,-1,28} + (-5,-12) | (5.1,34.5) | {4.60396039604,-1,11.0198019802} + (-5,-12) | (1e-300,-1e-300) | {2.4,-1,0} + (-5,-12) | (1e+300,Infinity) | {-1,0,-5} + (-5,-12) | (Infinity,1e+300) | {0,-1,-12} + (-5,-12) | (NaN,NaN) | {NaN,-1,NaN} + (-5,-12) | (10,10) | {1.46666666667,-1,-4.66666666667} + (1e-300,-1e-300) | (-10,0) | {0,-1,-1e-300} + (1e-300,-1e-300) | (-3,4) | {-1.33333333333,-1,3.33333333333e-301} + (1e-300,-1e-300) | (5.1,34.5) | {6.76470588235,-1,-7.76470588235e-300} + (1e-300,-1e-300) | (-5,-12) | {2.4,-1,-3.4e-300} + (1e-300,-1e-300) | (1e+300,Infinity) | {-1,0,1e-300} + (1e-300,-1e-300) | (Infinity,1e+300) | {0,-1,-1e-300} + (1e-300,-1e-300) | (NaN,NaN) | {NaN,-1,NaN} + (1e-300,-1e-300) | (10,10) | {1,-1,-2e-300} + (1e+300,Infinity) | (0,0) | {-1,0,1e+300} + (1e+300,Infinity) | (-10,0) | {-1,0,1e+300} + (1e+300,Infinity) | (-3,4) | {-1,0,1e+300} + (1e+300,Infinity) | (5.1,34.5) | {-1,0,1e+300} + (1e+300,Infinity) | (-5,-12) | {-1,0,1e+300} + (1e+300,Infinity) | (1e-300,-1e-300) | {-1,0,1e+300} + (1e+300,Infinity) | (Infinity,1e+300) | {NaN,-1,NaN} + (1e+300,Infinity) | (NaN,NaN) | {NaN,-1,NaN} + (1e+300,Infinity) | (10,10) | {-1,0,1e+300} + (Infinity,1e+300) | (0,0) | {0,-1,1e+300} + (Infinity,1e+300) | (-10,0) | {0,-1,1e+300} + (Infinity,1e+300) | (-3,4) | {0,-1,1e+300} + (Infinity,1e+300) | (5.1,34.5) | {0,-1,1e+300} + (Infinity,1e+300) | (-5,-12) | {0,-1,1e+300} + (Infinity,1e+300) | (1e-300,-1e-300) | {0,-1,1e+300} + (Infinity,1e+300) | (1e+300,Infinity) | {NaN,-1,NaN} + (Infinity,1e+300) | (NaN,NaN) | {NaN,-1,NaN} + (Infinity,1e+300) | (10,10) | {0,-1,1e+300} + (NaN,NaN) | (0,0) | {NaN,-1,NaN} + (NaN,NaN) | (-10,0) | {NaN,-1,NaN} + (NaN,NaN) | (-3,4) | {NaN,-1,NaN} + (NaN,NaN) | (5.1,34.5) | {NaN,-1,NaN} + (NaN,NaN) | (-5,-12) | {NaN,-1,NaN} + (NaN,NaN) | (1e-300,-1e-300) | {NaN,-1,NaN} + (NaN,NaN) | (1e+300,Infinity) | {NaN,-1,NaN} + (NaN,NaN) | (Infinity,1e+300) | {NaN,-1,NaN} + (NaN,NaN) | (10,10) | {NaN,-1,NaN} + (10,10) | (0,0) | {1,-1,0} + (10,10) | (-10,0) | {0.5,-1,5} + (10,10) | (-3,4) | {0.461538461538,-1,5.38461538462} + (10,10) | (5.1,34.5) | {-5,-1,60} + (10,10) | (-5,-12) | {1.46666666667,-1,-4.66666666667} + (10,10) | (1e-300,-1e-300) | {1,-1,0} + (10,10) | (1e+300,Infinity) | {-1,0,10} + (10,10) | (Infinity,1e+300) | {0,-1,10} + (10,10) | (NaN,NaN) | {NaN,-1,NaN} +(88 rows) + +-- Closest point to line +SELECT p.f1, l.s, p.f1 ## l.s FROM POINT_TBL p, LINE_TBL l; + f1 | s | ?column? 
+-------------------+---------------------------------------+---------------------------------- + (0,0) | {0,-1,5} | (0,5) + (0,0) | {1,0,5} | (-5,0) + (0,0) | {0,3,0} | (0,0) + (0,0) | {1,-1,0} | (0,0) + (0,0) | {-0.4,-1,-6} | (-2.06896551724,-5.1724137931) + (0,0) | {-0.000184615384615,-1,15.3846153846} | (0.00284023658959,15.3846148603) + (0,0) | {3,NaN,5} | + (0,0) | {NaN,NaN,NaN} | + (0,0) | {0,-1,3} | (0,3) + (0,0) | {-1,0,3} | (3,0) + (-10,0) | {0,-1,5} | (-10,5) + (-10,0) | {1,0,5} | (-5,0) + (-10,0) | {0,3,0} | (-10,0) + (-10,0) | {1,-1,0} | (-5,-5) + (-10,0) | {-0.4,-1,-6} | (-10.6896551724,-1.72413793103) + (-10,0) | {-0.000184615384615,-1,15.3846153846} | (-9.99715942258,15.386461014) + (-10,0) | {3,NaN,5} | + (-10,0) | {NaN,NaN,NaN} | + (-10,0) | {0,-1,3} | (-10,3) + (-10,0) | {-1,0,3} | (3,0) + (-3,4) | {0,-1,5} | (-3,5) + (-3,4) | {1,0,5} | (-5,4) + (-3,4) | {0,3,0} | (-3,0) + (-3,4) | {1,-1,0} | (0.5,0.5) + (-3,4) | {-0.4,-1,-6} | (-6.03448275862,-3.58620689655) + (-3,4) | {-0.000184615384615,-1,15.3846153846} | (-2.99789812268,15.3851688427) + (-3,4) | {3,NaN,5} | + (-3,4) | {NaN,NaN,NaN} | + (-3,4) | {0,-1,3} | (-3,3) + (-3,4) | {-1,0,3} | (3,4) + (5.1,34.5) | {0,-1,5} | (5.1,5) + (5.1,34.5) | {1,0,5} | (-5,34.5) + (5.1,34.5) | {0,3,0} | (5.1,0) + (5.1,34.5) | {1,-1,0} | (19.8,19.8) + (5.1,34.5) | {-0.4,-1,-6} | (-9.56896551724,-2.1724137931) + (5.1,34.5) | {-0.000184615384615,-1,15.3846153846} | (5.09647083221,15.3836744977) + (5.1,34.5) | {3,NaN,5} | + (5.1,34.5) | {NaN,NaN,NaN} | + (5.1,34.5) | {0,-1,3} | (5.1,3) + (5.1,34.5) | {-1,0,3} | (3,34.5) + (-5,-12) | {0,-1,5} | (-5,5) + (-5,-12) | {1,0,5} | (-5,-12) + (-5,-12) | {0,3,0} | (-5,0) + (-5,-12) | {1,-1,0} | (-8.5,-8.5) + (-5,-12) | {-0.4,-1,-6} | (-2.24137931034,-5.10344827586) + (-5,-12) | {-0.000184615384615,-1,15.3846153846} | (-4.99494420846,15.3855375282) + (-5,-12) | {3,NaN,5} | + (-5,-12) | {NaN,NaN,NaN} | + (-5,-12) | {0,-1,3} | (-5,3) + (-5,-12) | {-1,0,3} | (3,-12) + (1e-300,-1e-300) | {0,-1,5} | (1e-300,5) + (1e-300,-1e-300) | {1,0,5} | (-5,-1e-300) + (1e-300,-1e-300) | {0,3,0} | (1e-300,0) + (1e-300,-1e-300) | {1,-1,0} | (0,0) + (1e-300,-1e-300) | {-0.4,-1,-6} | (-2.06896551724,-5.1724137931) + (1e-300,-1e-300) | {-0.000184615384615,-1,15.3846153846} | (0.00284023658959,15.3846148603) + (1e-300,-1e-300) | {3,NaN,5} | + (1e-300,-1e-300) | {NaN,NaN,NaN} | + (1e-300,-1e-300) | {0,-1,3} | (1e-300,3) + (1e-300,-1e-300) | {-1,0,3} | (3,-1e-300) + (1e+300,Infinity) | {0,-1,5} | (1e+300,5) + (1e+300,Infinity) | {1,0,5} | + (1e+300,Infinity) | {0,3,0} | (1e+300,0) + (1e+300,Infinity) | {1,-1,0} | (Infinity,NaN) + (1e+300,Infinity) | {-0.4,-1,-6} | (-Infinity,NaN) + (1e+300,Infinity) | {-0.000184615384615,-1,15.3846153846} | (-Infinity,NaN) + (1e+300,Infinity) | {3,NaN,5} | + (1e+300,Infinity) | {NaN,NaN,NaN} | + (1e+300,Infinity) | {0,-1,3} | (1e+300,3) + (1e+300,Infinity) | {-1,0,3} | + (Infinity,1e+300) | {0,-1,5} | + (Infinity,1e+300) | {1,0,5} | (-5,1e+300) + (Infinity,1e+300) | {0,3,0} | + (Infinity,1e+300) | {1,-1,0} | + (Infinity,1e+300) | {-0.4,-1,-6} | + (Infinity,1e+300) | {-0.000184615384615,-1,15.3846153846} | + (Infinity,1e+300) | {3,NaN,5} | + (Infinity,1e+300) | {NaN,NaN,NaN} | + (Infinity,1e+300) | {0,-1,3} | + (Infinity,1e+300) | {-1,0,3} | (3,1e+300) + (NaN,NaN) | {0,-1,5} | + (NaN,NaN) | {1,0,5} | + (NaN,NaN) | {0,3,0} | + (NaN,NaN) | {1,-1,0} | + (NaN,NaN) | {-0.4,-1,-6} | + (NaN,NaN) | {-0.000184615384615,-1,15.3846153846} | + (NaN,NaN) | {3,NaN,5} | + (NaN,NaN) | {NaN,NaN,NaN} | + 
(NaN,NaN) | {0,-1,3} | + (NaN,NaN) | {-1,0,3} | + (10,10) | {0,-1,5} | (10,5) + (10,10) | {1,0,5} | (-5,10) + (10,10) | {0,3,0} | (10,0) + (10,10) | {1,-1,0} | (10,10) + (10,10) | {-0.4,-1,-6} | (3.10344827586,-7.24137931034) + (10,10) | {-0.000184615384615,-1,15.3846153846} | (10.000993742,15.3827690473) + (10,10) | {3,NaN,5} | + (10,10) | {NaN,NaN,NaN} | + (10,10) | {0,-1,3} | (10,3) + (10,10) | {-1,0,3} | (3,10) +(100 rows) + +-- Closest point to line segment +SELECT p.f1, l.s, p.f1 ## l.s FROM POINT_TBL p, LSEG_TBL l; + f1 | s | ?column? +-------------------+-------------------------------+---------------------------------- + (0,0) | [(1,2),(3,4)] | (1,2) + (0,0) | [(0,0),(6,6)] | (0,0) + (0,0) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) + (0,0) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) + (0,0) | [(11,22),(33,44)] | (11,22) + (0,0) | [(-10,2),(-10,3)] | (-10,2) + (0,0) | [(0,-20),(30,-20)] | (0,-20) + (0,0) | [(NaN,1),(NaN,90)] | + (-10,0) | [(1,2),(3,4)] | (1,2) + (-10,0) | [(0,0),(6,6)] | (0,0) + (-10,0) | [(10,-10),(-3,-4)] | (-3,-4) + (-10,0) | [(-1000000,200),(300000,-40)] | (-9.99715942258,15.386461014) + (-10,0) | [(11,22),(33,44)] | (11,22) + (-10,0) | [(-10,2),(-10,3)] | (-10,2) + (-10,0) | [(0,-20),(30,-20)] | (0,-20) + (-10,0) | [(NaN,1),(NaN,90)] | + (-3,4) | [(1,2),(3,4)] | (1,2) + (-3,4) | [(0,0),(6,6)] | (0.5,0.5) + (-3,4) | [(10,-10),(-3,-4)] | (-3,-4) + (-3,4) | [(-1000000,200),(300000,-40)] | (-2.99789812268,15.3851688427) + (-3,4) | [(11,22),(33,44)] | (11,22) + (-3,4) | [(-10,2),(-10,3)] | (-10,3) + (-3,4) | [(0,-20),(30,-20)] | (0,-20) + (-3,4) | [(NaN,1),(NaN,90)] | + (5.1,34.5) | [(1,2),(3,4)] | (3,4) + (5.1,34.5) | [(0,0),(6,6)] | (6,6) + (5.1,34.5) | [(10,-10),(-3,-4)] | (-3,-4) + (5.1,34.5) | [(-1000000,200),(300000,-40)] | (5.09647083221,15.3836744977) + (5.1,34.5) | [(11,22),(33,44)] | (14.3,25.3) + (5.1,34.5) | [(-10,2),(-10,3)] | (-10,3) + (5.1,34.5) | [(0,-20),(30,-20)] | (5.1,-20) + (5.1,34.5) | [(NaN,1),(NaN,90)] | + (-5,-12) | [(1,2),(3,4)] | (1,2) + (-5,-12) | [(0,0),(6,6)] | (0,0) + (-5,-12) | [(10,-10),(-3,-4)] | (-1.60487804878,-4.64390243902) + (-5,-12) | [(-1000000,200),(300000,-40)] | (-4.99494420846,15.3855375282) + (-5,-12) | [(11,22),(33,44)] | (11,22) + (-5,-12) | [(-10,2),(-10,3)] | (-10,2) + (-5,-12) | [(0,-20),(30,-20)] | (0,-20) + (-5,-12) | [(NaN,1),(NaN,90)] | + (1e-300,-1e-300) | [(1,2),(3,4)] | (1,2) + (1e-300,-1e-300) | [(0,0),(6,6)] | (0,0) + (1e-300,-1e-300) | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) + (1e-300,-1e-300) | [(-1000000,200),(300000,-40)] | (0.00284023658959,15.3846148603) + (1e-300,-1e-300) | [(11,22),(33,44)] | (11,22) + (1e-300,-1e-300) | [(-10,2),(-10,3)] | (-10,2) + (1e-300,-1e-300) | [(0,-20),(30,-20)] | (0,-20) + (1e-300,-1e-300) | [(NaN,1),(NaN,90)] | + (1e+300,Infinity) | [(1,2),(3,4)] | (3,4) + (1e+300,Infinity) | [(0,0),(6,6)] | (6,6) + (1e+300,Infinity) | [(10,-10),(-3,-4)] | (-3,-4) + (1e+300,Infinity) | [(-1000000,200),(300000,-40)] | (300000,-40) + (1e+300,Infinity) | [(11,22),(33,44)] | (33,44) + (1e+300,Infinity) | [(-10,2),(-10,3)] | (-10,3) + (1e+300,Infinity) | [(0,-20),(30,-20)] | (30,-20) + (1e+300,Infinity) | [(NaN,1),(NaN,90)] | (NaN,90) + (Infinity,1e+300) | [(1,2),(3,4)] | (3,4) + (Infinity,1e+300) | [(0,0),(6,6)] | (6,6) + (Infinity,1e+300) | [(10,-10),(-3,-4)] | (-3,-4) + (Infinity,1e+300) | [(-1000000,200),(300000,-40)] | (300000,-40) + (Infinity,1e+300) | [(11,22),(33,44)] | (33,44) + (Infinity,1e+300) | [(-10,2),(-10,3)] | (-10,3) + 
(Infinity,1e+300) | [(0,-20),(30,-20)] | (30,-20) + (Infinity,1e+300) | [(NaN,1),(NaN,90)] | + (NaN,NaN) | [(1,2),(3,4)] | + (NaN,NaN) | [(0,0),(6,6)] | + (NaN,NaN) | [(10,-10),(-3,-4)] | + (NaN,NaN) | [(-1000000,200),(300000,-40)] | + (NaN,NaN) | [(11,22),(33,44)] | + (NaN,NaN) | [(-10,2),(-10,3)] | + (NaN,NaN) | [(0,-20),(30,-20)] | + (NaN,NaN) | [(NaN,1),(NaN,90)] | + (10,10) | [(1,2),(3,4)] | (3,4) + (10,10) | [(0,0),(6,6)] | (6,6) + (10,10) | [(10,-10),(-3,-4)] | (2.39024390244,-6.48780487805) + (10,10) | [(-1000000,200),(300000,-40)] | (10.000993742,15.3827690473) + (10,10) | [(11,22),(33,44)] | (11,22) + (10,10) | [(-10,2),(-10,3)] | (-10,3) + (10,10) | [(0,-20),(30,-20)] | (10,-20) + (10,10) | [(NaN,1),(NaN,90)] | +(80 rows) + +-- Closest point to box +SELECT p.f1, b.f1, p.f1 ## b.f1 FROM POINT_TBL p, BOX_TBL b; + f1 | f1 | ?column? +-------------------+---------------------+-------------- + (0,0) | (2,2),(0,0) | (0,0) + (0,0) | (3,3),(1,1) | (1,1) + (0,0) | (-2,2),(-8,-10) | (-2,0) + (0,0) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (0,0) | (3,3),(3,3) | (3,3) + (-10,0) | (2,2),(0,0) | (0,0) + (-10,0) | (3,3),(1,1) | (1,1) + (-10,0) | (-2,2),(-8,-10) | (-8,0) + (-10,0) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (-10,0) | (3,3),(3,3) | (3,3) + (-3,4) | (2,2),(0,0) | (0,2) + (-3,4) | (3,3),(1,1) | (1,3) + (-3,4) | (-2,2),(-8,-10) | (-3,2) + (-3,4) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (-3,4) | (3,3),(3,3) | (3,3) + (5.1,34.5) | (2,2),(0,0) | (2,2) + (5.1,34.5) | (3,3),(1,1) | (3,3) + (5.1,34.5) | (-2,2),(-8,-10) | (-2,2) + (5.1,34.5) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (5.1,34.5) | (3,3),(3,3) | (3,3) + (-5,-12) | (2,2),(0,0) | (0,0) + (-5,-12) | (3,3),(1,1) | (1,1) + (-5,-12) | (-2,2),(-8,-10) | (-5,-10) + (-5,-12) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (-5,-12) | (3,3),(3,3) | (3,3) + (1e-300,-1e-300) | (2,2),(0,0) | (0,0) + (1e-300,-1e-300) | (3,3),(1,1) | (1,1) + (1e-300,-1e-300) | (-2,2),(-8,-10) | (-2,-1e-300) + (1e-300,-1e-300) | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + (1e-300,-1e-300) | (3,3),(3,3) | (3,3) + (1e+300,Infinity) | (2,2),(0,0) | (0,2) + (1e+300,Infinity) | (3,3),(1,1) | (1,3) + (1e+300,Infinity) | (-2,2),(-8,-10) | (-8,2) + (1e+300,Infinity) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (1e+300,Infinity) | (3,3),(3,3) | (3,3) + (Infinity,1e+300) | (2,2),(0,0) | (0,2) + (Infinity,1e+300) | (3,3),(1,1) | (1,3) + (Infinity,1e+300) | (-2,2),(-8,-10) | (-8,2) + (Infinity,1e+300) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (Infinity,1e+300) | (3,3),(3,3) | (3,3) + (NaN,NaN) | (2,2),(0,0) | + (NaN,NaN) | (3,3),(1,1) | + (NaN,NaN) | (-2,2),(-8,-10) | + (NaN,NaN) | (2.5,3.5),(2.5,2.5) | + (NaN,NaN) | (3,3),(3,3) | + (10,10) | (2,2),(0,0) | (2,2) + (10,10) | (3,3),(1,1) | (3,3) + (10,10) | (-2,2),(-8,-10) | (-2,2) + (10,10) | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + (10,10) | (3,3),(3,3) | (3,3) +(50 rows) + +-- On line +SELECT p.f1, l.s FROM POINT_TBL p, LINE_TBL l WHERE p.f1 <@ l.s; + f1 | s +------------------+---------- + (0,0) | {0,3,0} + (0,0) | {1,-1,0} + (-10,0) | {0,3,0} + (-5,-12) | {1,0,5} + (1e-300,-1e-300) | {0,3,0} + (1e-300,-1e-300) | {1,-1,0} + (10,10) | {1,-1,0} +(7 rows) + +-- On line segment +SELECT p.f1, l.s FROM POINT_TBL p, LSEG_TBL l WHERE p.f1 <@ l.s; + f1 | s +------------------+--------------- + (0,0) | [(0,0),(6,6)] + (1e-300,-1e-300) | [(0,0),(6,6)] +(2 rows) + +-- On path +SELECT p.f1, p1.f1 FROM POINT_TBL p, PATH_TBL p1 WHERE p.f1 <@ p1.f1; + f1 | f1 +------------------+--------------------------- + (0,0) | [(0,0),(3,0),(4,5),(1,6)] + (1e-300,-1e-300) | [(0,0),(3,0),(4,5),(1,6)] + (NaN,NaN) | 
((1,2),(3,4)) + (NaN,NaN) | ((1,2),(3,4)) + (NaN,NaN) | ((1,2),(3,4)) + (NaN,NaN) | ((10,20)) + (NaN,NaN) | ((11,12),(13,14)) +(7 rows) + +-- +-- Lines +-- +-- Vertical +SELECT s FROM LINE_TBL WHERE ?| s; + s +---------- + {1,0,5} + {-1,0,3} +(2 rows) + +-- Horizontal +SELECT s FROM LINE_TBL WHERE ?- s; + s +---------- + {0,-1,5} + {0,3,0} + {0,-1,3} +(3 rows) + +-- Same as line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s = l2.s; + s | s +---------------------------------------+--------------------------------------- + {0,-1,5} | {0,-1,5} + {1,0,5} | {1,0,5} + {0,3,0} | {0,3,0} + {1,-1,0} | {1,-1,0} + {-0.4,-1,-6} | {-0.4,-1,-6} + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} + {3,NaN,5} | {3,NaN,5} + {NaN,NaN,NaN} | {NaN,NaN,NaN} + {0,-1,3} | {0,-1,3} + {-1,0,3} | {-1,0,3} +(10 rows) + +-- Parallel to line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?|| l2.s; + s | s +---------------------------------------+--------------------------------------- + {0,-1,5} | {0,-1,5} + {0,-1,5} | {0,3,0} + {0,-1,5} | {0,-1,3} + {1,0,5} | {1,0,5} + {1,0,5} | {-1,0,3} + {0,3,0} | {0,-1,5} + {0,3,0} | {0,3,0} + {0,3,0} | {0,-1,3} + {1,-1,0} | {1,-1,0} + {-0.4,-1,-6} | {-0.4,-1,-6} + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} + {0,-1,3} | {0,-1,5} + {0,-1,3} | {0,3,0} + {0,-1,3} | {0,-1,3} + {-1,0,3} | {1,0,5} + {-1,0,3} | {-1,0,3} +(16 rows) + +-- Perpendicular to line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?-| l2.s; + s | s +----------+---------- + {0,-1,5} | {1,0,5} + {0,-1,5} | {-1,0,3} + {1,0,5} | {0,-1,5} + {1,0,5} | {0,3,0} + {1,0,5} | {0,-1,3} + {0,3,0} | {1,0,5} + {0,3,0} | {-1,0,3} + {0,-1,3} | {1,0,5} + {0,-1,3} | {-1,0,3} + {-1,0,3} | {0,-1,5} + {-1,0,3} | {0,3,0} + {-1,0,3} | {0,-1,3} +(12 rows) + +-- Distance to line +SELECT l1.s, l2.s, l1.s <-> l2.s FROM LINE_TBL l1, LINE_TBL l2; + s | s | ?column? 
+---------------------------------------+---------------------------------------+---------- + {0,-1,5} | {0,-1,5} | 0 + {0,-1,5} | {1,0,5} | 0 + {0,-1,5} | {0,3,0} | 5 + {0,-1,5} | {1,-1,0} | 0 + {0,-1,5} | {-0.4,-1,-6} | 0 + {0,-1,5} | {-0.000184615384615,-1,15.3846153846} | 0 + {0,-1,5} | {3,NaN,5} | 0 + {0,-1,5} | {NaN,NaN,NaN} | 0 + {0,-1,5} | {0,-1,3} | 2 + {0,-1,5} | {-1,0,3} | 0 + {1,0,5} | {0,-1,5} | 0 + {1,0,5} | {1,0,5} | 0 + {1,0,5} | {0,3,0} | 0 + {1,0,5} | {1,-1,0} | 0 + {1,0,5} | {-0.4,-1,-6} | 0 + {1,0,5} | {-0.000184615384615,-1,15.3846153846} | 0 + {1,0,5} | {3,NaN,5} | 0 + {1,0,5} | {NaN,NaN,NaN} | 0 + {1,0,5} | {0,-1,3} | 0 + {1,0,5} | {-1,0,3} | 8 + {0,3,0} | {0,-1,5} | 5 + {0,3,0} | {1,0,5} | 0 + {0,3,0} | {0,3,0} | 0 + {0,3,0} | {1,-1,0} | 0 + {0,3,0} | {-0.4,-1,-6} | 0 + {0,3,0} | {-0.000184615384615,-1,15.3846153846} | 0 + {0,3,0} | {3,NaN,5} | 0 + {0,3,0} | {NaN,NaN,NaN} | 0 + {0,3,0} | {0,-1,3} | 3 + {0,3,0} | {-1,0,3} | 0 + {1,-1,0} | {0,-1,5} | 0 + {1,-1,0} | {1,0,5} | 0 + {1,-1,0} | {0,3,0} | 0 + {1,-1,0} | {1,-1,0} | 0 + {1,-1,0} | {-0.4,-1,-6} | 0 + {1,-1,0} | {-0.000184615384615,-1,15.3846153846} | 0 + {1,-1,0} | {3,NaN,5} | 0 + {1,-1,0} | {NaN,NaN,NaN} | 0 + {1,-1,0} | {0,-1,3} | 0 + {1,-1,0} | {-1,0,3} | 0 + {-0.4,-1,-6} | {0,-1,5} | 0 + {-0.4,-1,-6} | {1,0,5} | 0 + {-0.4,-1,-6} | {0,3,0} | 0 + {-0.4,-1,-6} | {1,-1,0} | 0 + {-0.4,-1,-6} | {-0.4,-1,-6} | 0 + {-0.4,-1,-6} | {-0.000184615384615,-1,15.3846153846} | 0 + {-0.4,-1,-6} | {3,NaN,5} | 0 + {-0.4,-1,-6} | {NaN,NaN,NaN} | 0 + {-0.4,-1,-6} | {0,-1,3} | 0 + {-0.4,-1,-6} | {-1,0,3} | 0 + {-0.000184615384615,-1,15.3846153846} | {0,-1,5} | 0 + {-0.000184615384615,-1,15.3846153846} | {1,0,5} | 0 + {-0.000184615384615,-1,15.3846153846} | {0,3,0} | 0 + {-0.000184615384615,-1,15.3846153846} | {1,-1,0} | 0 + {-0.000184615384615,-1,15.3846153846} | {-0.4,-1,-6} | 0 + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} | 0 + {-0.000184615384615,-1,15.3846153846} | {3,NaN,5} | 0 + {-0.000184615384615,-1,15.3846153846} | {NaN,NaN,NaN} | 0 + {-0.000184615384615,-1,15.3846153846} | {0,-1,3} | 0 + {-0.000184615384615,-1,15.3846153846} | {-1,0,3} | 0 + {3,NaN,5} | {0,-1,5} | 0 + {3,NaN,5} | {1,0,5} | 0 + {3,NaN,5} | {0,3,0} | 0 + {3,NaN,5} | {1,-1,0} | 0 + {3,NaN,5} | {-0.4,-1,-6} | 0 + {3,NaN,5} | {-0.000184615384615,-1,15.3846153846} | 0 + {3,NaN,5} | {3,NaN,5} | 0 + {3,NaN,5} | {NaN,NaN,NaN} | 0 + {3,NaN,5} | {0,-1,3} | 0 + {3,NaN,5} | {-1,0,3} | 0 + {NaN,NaN,NaN} | {0,-1,5} | 0 + {NaN,NaN,NaN} | {1,0,5} | 0 + {NaN,NaN,NaN} | {0,3,0} | 0 + {NaN,NaN,NaN} | {1,-1,0} | 0 + {NaN,NaN,NaN} | {-0.4,-1,-6} | 0 + {NaN,NaN,NaN} | {-0.000184615384615,-1,15.3846153846} | 0 + {NaN,NaN,NaN} | {3,NaN,5} | 0 + {NaN,NaN,NaN} | {NaN,NaN,NaN} | 0 + {NaN,NaN,NaN} | {0,-1,3} | 0 + {NaN,NaN,NaN} | {-1,0,3} | 0 + {0,-1,3} | {0,-1,5} | 2 + {0,-1,3} | {1,0,5} | 0 + {0,-1,3} | {0,3,0} | 3 + {0,-1,3} | {1,-1,0} | 0 + {0,-1,3} | {-0.4,-1,-6} | 0 + {0,-1,3} | {-0.000184615384615,-1,15.3846153846} | 0 + {0,-1,3} | {3,NaN,5} | 0 + {0,-1,3} | {NaN,NaN,NaN} | 0 + {0,-1,3} | {0,-1,3} | 0 + {0,-1,3} | {-1,0,3} | 0 + {-1,0,3} | {0,-1,5} | 0 + {-1,0,3} | {1,0,5} | 8 + {-1,0,3} | {0,3,0} | 0 + {-1,0,3} | {1,-1,0} | 0 + {-1,0,3} | {-0.4,-1,-6} | 0 + {-1,0,3} | {-0.000184615384615,-1,15.3846153846} | 0 + {-1,0,3} | {3,NaN,5} | 0 + {-1,0,3} | {NaN,NaN,NaN} | 0 + {-1,0,3} | {0,-1,3} | 0 + {-1,0,3} | {-1,0,3} | 0 +(100 rows) + +-- Intersect with line +SELECT l1.s, l2.s FROM LINE_TBL l1, LINE_TBL l2 WHERE l1.s ?# l2.s; + s | s 
+---------------------------------------+--------------------------------------- + {0,-1,5} | {1,0,5} + {0,-1,5} | {1,-1,0} + {0,-1,5} | {-0.4,-1,-6} + {0,-1,5} | {-0.000184615384615,-1,15.3846153846} + {0,-1,5} | {3,NaN,5} + {0,-1,5} | {NaN,NaN,NaN} + {0,-1,5} | {-1,0,3} + {1,0,5} | {0,-1,5} + {1,0,5} | {0,3,0} + {1,0,5} | {1,-1,0} + {1,0,5} | {-0.4,-1,-6} + {1,0,5} | {-0.000184615384615,-1,15.3846153846} + {1,0,5} | {3,NaN,5} + {1,0,5} | {NaN,NaN,NaN} + {1,0,5} | {0,-1,3} + {0,3,0} | {1,0,5} + {0,3,0} | {1,-1,0} + {0,3,0} | {-0.4,-1,-6} + {0,3,0} | {-0.000184615384615,-1,15.3846153846} + {0,3,0} | {3,NaN,5} + {0,3,0} | {NaN,NaN,NaN} + {0,3,0} | {-1,0,3} + {1,-1,0} | {0,-1,5} + {1,-1,0} | {1,0,5} + {1,-1,0} | {0,3,0} + {1,-1,0} | {-0.4,-1,-6} + {1,-1,0} | {-0.000184615384615,-1,15.3846153846} + {1,-1,0} | {3,NaN,5} + {1,-1,0} | {NaN,NaN,NaN} + {1,-1,0} | {0,-1,3} + {1,-1,0} | {-1,0,3} + {-0.4,-1,-6} | {0,-1,5} + {-0.4,-1,-6} | {1,0,5} + {-0.4,-1,-6} | {0,3,0} + {-0.4,-1,-6} | {1,-1,0} + {-0.4,-1,-6} | {-0.000184615384615,-1,15.3846153846} + {-0.4,-1,-6} | {3,NaN,5} + {-0.4,-1,-6} | {NaN,NaN,NaN} + {-0.4,-1,-6} | {0,-1,3} + {-0.4,-1,-6} | {-1,0,3} + {-0.000184615384615,-1,15.3846153846} | {0,-1,5} + {-0.000184615384615,-1,15.3846153846} | {1,0,5} + {-0.000184615384615,-1,15.3846153846} | {0,3,0} + {-0.000184615384615,-1,15.3846153846} | {1,-1,0} + {-0.000184615384615,-1,15.3846153846} | {-0.4,-1,-6} + {-0.000184615384615,-1,15.3846153846} | {3,NaN,5} + {-0.000184615384615,-1,15.3846153846} | {NaN,NaN,NaN} + {-0.000184615384615,-1,15.3846153846} | {0,-1,3} + {-0.000184615384615,-1,15.3846153846} | {-1,0,3} + {3,NaN,5} | {0,-1,5} + {3,NaN,5} | {1,0,5} + {3,NaN,5} | {0,3,0} + {3,NaN,5} | {1,-1,0} + {3,NaN,5} | {-0.4,-1,-6} + {3,NaN,5} | {-0.000184615384615,-1,15.3846153846} + {3,NaN,5} | {3,NaN,5} + {3,NaN,5} | {NaN,NaN,NaN} + {3,NaN,5} | {0,-1,3} + {3,NaN,5} | {-1,0,3} + {NaN,NaN,NaN} | {0,-1,5} + {NaN,NaN,NaN} | {1,0,5} + {NaN,NaN,NaN} | {0,3,0} + {NaN,NaN,NaN} | {1,-1,0} + {NaN,NaN,NaN} | {-0.4,-1,-6} + {NaN,NaN,NaN} | {-0.000184615384615,-1,15.3846153846} + {NaN,NaN,NaN} | {3,NaN,5} + {NaN,NaN,NaN} | {NaN,NaN,NaN} + {NaN,NaN,NaN} | {0,-1,3} + {NaN,NaN,NaN} | {-1,0,3} + {0,-1,3} | {1,0,5} + {0,-1,3} | {1,-1,0} + {0,-1,3} | {-0.4,-1,-6} + {0,-1,3} | {-0.000184615384615,-1,15.3846153846} + {0,-1,3} | {3,NaN,5} + {0,-1,3} | {NaN,NaN,NaN} + {0,-1,3} | {-1,0,3} + {-1,0,3} | {0,-1,5} + {-1,0,3} | {0,3,0} + {-1,0,3} | {1,-1,0} + {-1,0,3} | {-0.4,-1,-6} + {-1,0,3} | {-0.000184615384615,-1,15.3846153846} + {-1,0,3} | {3,NaN,5} + {-1,0,3} | {NaN,NaN,NaN} + {-1,0,3} | {0,-1,3} +(84 rows) + +-- Intersect with box +SELECT l.s, b.f1 FROM LINE_TBL l, BOX_TBL b WHERE l.s ?# b.f1; + s | f1 +--------------+--------------------- + {1,0,5} | (-2,2),(-8,-10) + {0,3,0} | (2,2),(0,0) + {0,3,0} | (-2,2),(-8,-10) + {1,-1,0} | (2,2),(0,0) + {1,-1,0} | (3,3),(1,1) + {1,-1,0} | (-2,2),(-8,-10) + {1,-1,0} | (2.5,3.5),(2.5,2.5) + {1,-1,0} | (3,3),(3,3) + {-0.4,-1,-6} | (-2,2),(-8,-10) + {0,-1,3} | (3,3),(1,1) + {0,-1,3} | (2.5,3.5),(2.5,2.5) + {0,-1,3} | (3,3),(3,3) + {-1,0,3} | (3,3),(1,1) +(13 rows) + +-- Intersection point with line +SELECT l1.s, l2.s, l1.s # l2.s FROM LINE_TBL l1, LINE_TBL l2; + s | s | ?column? 
+---------------------------------------+---------------------------------------+----------------------------------- + {0,-1,5} | {0,-1,5} | + {0,-1,5} | {1,0,5} | (-5,5) + {0,-1,5} | {0,3,0} | + {0,-1,5} | {1,-1,0} | (5,5) + {0,-1,5} | {-0.4,-1,-6} | (-27.5,5) + {0,-1,5} | {-0.000184615384615,-1,15.3846153846} | (56250,5) + {0,-1,5} | {3,NaN,5} | (NaN,NaN) + {0,-1,5} | {NaN,NaN,NaN} | (NaN,NaN) + {0,-1,5} | {0,-1,3} | + {0,-1,5} | {-1,0,3} | (3,5) + {1,0,5} | {0,-1,5} | (-5,5) + {1,0,5} | {1,0,5} | + {1,0,5} | {0,3,0} | (-5,0) + {1,0,5} | {1,-1,0} | (-5,-5) + {1,0,5} | {-0.4,-1,-6} | (-5,-4) + {1,0,5} | {-0.000184615384615,-1,15.3846153846} | (-5,15.3855384615) + {1,0,5} | {3,NaN,5} | (NaN,NaN) + {1,0,5} | {NaN,NaN,NaN} | (NaN,NaN) + {1,0,5} | {0,-1,3} | (-5,3) + {1,0,5} | {-1,0,3} | + {0,3,0} | {0,-1,5} | + {0,3,0} | {1,0,5} | (-5,0) + {0,3,0} | {0,3,0} | + {0,3,0} | {1,-1,0} | (0,0) + {0,3,0} | {-0.4,-1,-6} | (-15,0) + {0,3,0} | {-0.000184615384615,-1,15.3846153846} | (83333.3333333,0) + {0,3,0} | {3,NaN,5} | (NaN,NaN) + {0,3,0} | {NaN,NaN,NaN} | (NaN,NaN) + {0,3,0} | {0,-1,3} | + {0,3,0} | {-1,0,3} | (3,0) + {1,-1,0} | {0,-1,5} | (5,5) + {1,-1,0} | {1,0,5} | (-5,-5) + {1,-1,0} | {0,3,0} | (0,0) + {1,-1,0} | {1,-1,0} | + {1,-1,0} | {-0.4,-1,-6} | (-4.28571428571,-4.28571428571) + {1,-1,0} | {-0.000184615384615,-1,15.3846153846} | (15.3817756722,15.3817756722) + {1,-1,0} | {3,NaN,5} | (NaN,NaN) + {1,-1,0} | {NaN,NaN,NaN} | (NaN,NaN) + {1,-1,0} | {0,-1,3} | (3,3) + {1,-1,0} | {-1,0,3} | (3,3) + {-0.4,-1,-6} | {0,-1,5} | (-27.5,5) + {-0.4,-1,-6} | {1,0,5} | (-5,-4) + {-0.4,-1,-6} | {0,3,0} | (-15,0) + {-0.4,-1,-6} | {1,-1,0} | (-4.28571428571,-4.28571428571) + {-0.4,-1,-6} | {-0.4,-1,-6} | + {-0.4,-1,-6} | {-0.000184615384615,-1,15.3846153846} | (-53.4862244113,15.3944897645) + {-0.4,-1,-6} | {3,NaN,5} | (NaN,NaN) + {-0.4,-1,-6} | {NaN,NaN,NaN} | (NaN,NaN) + {-0.4,-1,-6} | {0,-1,3} | (-22.5,3) + {-0.4,-1,-6} | {-1,0,3} | (3,-7.2) + {-0.000184615384615,-1,15.3846153846} | {0,-1,5} | (56250,5) + {-0.000184615384615,-1,15.3846153846} | {1,0,5} | (-5,15.3855384615) + {-0.000184615384615,-1,15.3846153846} | {0,3,0} | (83333.3333333,-1.7763568394e-15) + {-0.000184615384615,-1,15.3846153846} | {1,-1,0} | (15.3817756722,15.3817756722) + {-0.000184615384615,-1,15.3846153846} | {-0.4,-1,-6} | (-53.4862244113,15.3944897645) + {-0.000184615384615,-1,15.3846153846} | {-0.000184615384615,-1,15.3846153846} | + {-0.000184615384615,-1,15.3846153846} | {3,NaN,5} | (NaN,NaN) + {-0.000184615384615,-1,15.3846153846} | {NaN,NaN,NaN} | (NaN,NaN) + {-0.000184615384615,-1,15.3846153846} | {0,-1,3} | (67083.3333333,3) + {-0.000184615384615,-1,15.3846153846} | {-1,0,3} | (3,15.3840615385) + {3,NaN,5} | {0,-1,5} | (NaN,NaN) + {3,NaN,5} | {1,0,5} | (NaN,NaN) + {3,NaN,5} | {0,3,0} | (NaN,NaN) + {3,NaN,5} | {1,-1,0} | (NaN,NaN) + {3,NaN,5} | {-0.4,-1,-6} | (NaN,NaN) + {3,NaN,5} | {-0.000184615384615,-1,15.3846153846} | (NaN,NaN) + {3,NaN,5} | {3,NaN,5} | (NaN,NaN) + {3,NaN,5} | {NaN,NaN,NaN} | (NaN,NaN) + {3,NaN,5} | {0,-1,3} | (NaN,NaN) + {3,NaN,5} | {-1,0,3} | (NaN,NaN) + {NaN,NaN,NaN} | {0,-1,5} | (NaN,NaN) + {NaN,NaN,NaN} | {1,0,5} | (NaN,NaN) + {NaN,NaN,NaN} | {0,3,0} | (NaN,NaN) + {NaN,NaN,NaN} | {1,-1,0} | (NaN,NaN) + {NaN,NaN,NaN} | {-0.4,-1,-6} | (NaN,NaN) + {NaN,NaN,NaN} | {-0.000184615384615,-1,15.3846153846} | (NaN,NaN) + {NaN,NaN,NaN} | {3,NaN,5} | (NaN,NaN) + {NaN,NaN,NaN} | {NaN,NaN,NaN} | (NaN,NaN) + {NaN,NaN,NaN} | {0,-1,3} | (NaN,NaN) + {NaN,NaN,NaN} | {-1,0,3} | (NaN,NaN) + {0,-1,3} | {0,-1,5} | + 
{0,-1,3} | {1,0,5} | (-5,3) + {0,-1,3} | {0,3,0} | + {0,-1,3} | {1,-1,0} | (3,3) + {0,-1,3} | {-0.4,-1,-6} | (-22.5,3) + {0,-1,3} | {-0.000184615384615,-1,15.3846153846} | (67083.3333333,3) + {0,-1,3} | {3,NaN,5} | (NaN,NaN) + {0,-1,3} | {NaN,NaN,NaN} | (NaN,NaN) + {0,-1,3} | {0,-1,3} | + {0,-1,3} | {-1,0,3} | (3,3) + {-1,0,3} | {0,-1,5} | (3,5) + {-1,0,3} | {1,0,5} | + {-1,0,3} | {0,3,0} | (3,0) + {-1,0,3} | {1,-1,0} | (3,3) + {-1,0,3} | {-0.4,-1,-6} | (3,-7.2) + {-1,0,3} | {-0.000184615384615,-1,15.3846153846} | (3,15.3840615385) + {-1,0,3} | {3,NaN,5} | (NaN,NaN) + {-1,0,3} | {NaN,NaN,NaN} | (NaN,NaN) + {-1,0,3} | {0,-1,3} | (3,3) + {-1,0,3} | {-1,0,3} | +(100 rows) + +-- Closest point to line segment +SELECT l.s, l1.s, l.s ## l1.s FROM LINE_TBL l, LSEG_TBL l1; + s | s | ?column? +---------------------------------------+-------------------------------+----------------------------------- + {0,-1,5} | [(1,2),(3,4)] | (3,4) + {0,-1,5} | [(0,0),(6,6)] | (5,5) + {0,-1,5} | [(10,-10),(-3,-4)] | (-3,-4) + {0,-1,5} | [(-1000000,200),(300000,-40)] | (56250,5) + {0,-1,5} | [(11,22),(33,44)] | (11,22) + {0,-1,5} | [(-10,2),(-10,3)] | (-10,3) + {0,-1,5} | [(0,-20),(30,-20)] | + {0,-1,5} | [(NaN,1),(NaN,90)] | + {1,0,5} | [(1,2),(3,4)] | (1,2) + {1,0,5} | [(0,0),(6,6)] | (0,0) + {1,0,5} | [(10,-10),(-3,-4)] | (-3,-4) + {1,0,5} | [(-1000000,200),(300000,-40)] | (-5,15.3855384615) + {1,0,5} | [(11,22),(33,44)] | (11,22) + {1,0,5} | [(-10,2),(-10,3)] | + {1,0,5} | [(0,-20),(30,-20)] | (0,-20) + {1,0,5} | [(NaN,1),(NaN,90)] | + {0,3,0} | [(1,2),(3,4)] | (1,2) + {0,3,0} | [(0,0),(6,6)] | (0,0) + {0,3,0} | [(10,-10),(-3,-4)] | (-3,-4) + {0,3,0} | [(-1000000,200),(300000,-40)] | (83333.3333333,-1.7763568394e-15) + {0,3,0} | [(11,22),(33,44)] | (11,22) + {0,3,0} | [(-10,2),(-10,3)] | (-10,2) + {0,3,0} | [(0,-20),(30,-20)] | + {0,3,0} | [(NaN,1),(NaN,90)] | + {1,-1,0} | [(1,2),(3,4)] | + {1,-1,0} | [(0,0),(6,6)] | + {1,-1,0} | [(10,-10),(-3,-4)] | (-3,-4) + {1,-1,0} | [(-1000000,200),(300000,-40)] | (15.3817756722,15.3817756722) + {1,-1,0} | [(11,22),(33,44)] | + {1,-1,0} | [(-10,2),(-10,3)] | (-10,2) + {1,-1,0} | [(0,-20),(30,-20)] | (0,-20) + {1,-1,0} | [(NaN,1),(NaN,90)] | + {-0.4,-1,-6} | [(1,2),(3,4)] | (1,2) + {-0.4,-1,-6} | [(0,0),(6,6)] | (0,0) + {-0.4,-1,-6} | [(10,-10),(-3,-4)] | (10,-10) + {-0.4,-1,-6} | [(-1000000,200),(300000,-40)] | (-53.4862244113,15.3944897645) + {-0.4,-1,-6} | [(11,22),(33,44)] | (11,22) + {-0.4,-1,-6} | [(-10,2),(-10,3)] | (-10,2) + {-0.4,-1,-6} | [(0,-20),(30,-20)] | (30,-20) + {-0.4,-1,-6} | [(NaN,1),(NaN,90)] | + {-0.000184615384615,-1,15.3846153846} | [(1,2),(3,4)] | (3,4) + {-0.000184615384615,-1,15.3846153846} | [(0,0),(6,6)] | (6,6) + {-0.000184615384615,-1,15.3846153846} | [(10,-10),(-3,-4)] | (-3,-4) + {-0.000184615384615,-1,15.3846153846} | [(-1000000,200),(300000,-40)] | + {-0.000184615384615,-1,15.3846153846} | [(11,22),(33,44)] | (11,22) + {-0.000184615384615,-1,15.3846153846} | [(-10,2),(-10,3)] | (-10,3) + {-0.000184615384615,-1,15.3846153846} | [(0,-20),(30,-20)] | (30,-20) + {-0.000184615384615,-1,15.3846153846} | [(NaN,1),(NaN,90)] | + {3,NaN,5} | [(1,2),(3,4)] | + {3,NaN,5} | [(0,0),(6,6)] | + {3,NaN,5} | [(10,-10),(-3,-4)] | + {3,NaN,5} | [(-1000000,200),(300000,-40)] | + {3,NaN,5} | [(11,22),(33,44)] | + {3,NaN,5} | [(-10,2),(-10,3)] | + {3,NaN,5} | [(0,-20),(30,-20)] | + {3,NaN,5} | [(NaN,1),(NaN,90)] | + {NaN,NaN,NaN} | [(1,2),(3,4)] | + {NaN,NaN,NaN} | [(0,0),(6,6)] | + {NaN,NaN,NaN} | [(10,-10),(-3,-4)] | + {NaN,NaN,NaN} | 
[(-1000000,200),(300000,-40)] | + {NaN,NaN,NaN} | [(11,22),(33,44)] | + {NaN,NaN,NaN} | [(-10,2),(-10,3)] | + {NaN,NaN,NaN} | [(0,-20),(30,-20)] | + {NaN,NaN,NaN} | [(NaN,1),(NaN,90)] | + {0,-1,3} | [(1,2),(3,4)] | (2,3) + {0,-1,3} | [(0,0),(6,6)] | (3,3) + {0,-1,3} | [(10,-10),(-3,-4)] | (-3,-4) + {0,-1,3} | [(-1000000,200),(300000,-40)] | (67083.3333333,3) + {0,-1,3} | [(11,22),(33,44)] | (11,22) + {0,-1,3} | [(-10,2),(-10,3)] | (-10,3) + {0,-1,3} | [(0,-20),(30,-20)] | + {0,-1,3} | [(NaN,1),(NaN,90)] | + {-1,0,3} | [(1,2),(3,4)] | (3,4) + {-1,0,3} | [(0,0),(6,6)] | (3,3) + {-1,0,3} | [(10,-10),(-3,-4)] | (3,-6.76923076923) + {-1,0,3} | [(-1000000,200),(300000,-40)] | (3,15.3840615385) + {-1,0,3} | [(11,22),(33,44)] | (11,22) + {-1,0,3} | [(-10,2),(-10,3)] | + {-1,0,3} | [(0,-20),(30,-20)] | (3,-20) + {-1,0,3} | [(NaN,1),(NaN,90)] | +(80 rows) + +-- +-- Line segments +-- +-- intersection +SELECT p.f1, l.s, l.s # p.f1 AS intersection + FROM LSEG_TBL l, POINT_TBL p; +ERROR: operator does not exist: lseg # point +LINE 1: SELECT p.f1, l.s, l.s # p.f1 AS intersection + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +-- Length +SELECT s, @-@ s FROM LSEG_TBL; + s | ?column? +-------------------------------+--------------- + [(1,2),(3,4)] | 2.82842712475 + [(0,0),(6,6)] | 8.48528137424 + [(10,-10),(-3,-4)] | 14.3178210633 + [(-1000000,200),(300000,-40)] | 1300000.02215 + [(11,22),(33,44)] | 31.1126983722 + [(-10,2),(-10,3)] | 1 + [(0,-20),(30,-20)] | 30 + [(NaN,1),(NaN,90)] | NaN +(8 rows) + +-- Vertical +SELECT s FROM LSEG_TBL WHERE ?| s; + s +------------------- + [(-10,2),(-10,3)] +(1 row) + +-- Horizontal +SELECT s FROM LSEG_TBL WHERE ?- s; + s +-------------------- + [(0,-20),(30,-20)] +(1 row) + +-- Center +SELECT s, @@ s FROM LSEG_TBL; + s | ?column? 
+-------------------------------+-------------- + [(1,2),(3,4)] | (2,3) + [(0,0),(6,6)] | (3,3) + [(10,-10),(-3,-4)] | (3.5,-7) + [(-1000000,200),(300000,-40)] | (-350000,80) + [(11,22),(33,44)] | (22,33) + [(-10,2),(-10,3)] | (-10,2.5) + [(0,-20),(30,-20)] | (15,-20) + [(NaN,1),(NaN,90)] | (NaN,45.5) +(8 rows) + +-- To point +SELECT s, s::point FROM LSEG_TBL; + s | s +-------------------------------+-------------- + [(1,2),(3,4)] | (2,3) + [(0,0),(6,6)] | (3,3) + [(10,-10),(-3,-4)] | (3.5,-7) + [(-1000000,200),(300000,-40)] | (-350000,80) + [(11,22),(33,44)] | (22,33) + [(-10,2),(-10,3)] | (-10,2.5) + [(0,-20),(30,-20)] | (15,-20) + [(NaN,1),(NaN,90)] | (NaN,45.5) +(8 rows) + +-- Has points less than line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s < l2.s; + s | s +--------------------+------------------------------- + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(10,-10),(-3,-4)] + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(1,2),(3,4)] | [(0,-20),(30,-20)] + [(0,0),(6,6)] | [(10,-10),(-3,-4)] + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(0,-20),(30,-20)] + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] + [(10,-10),(-3,-4)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(1,2),(3,4)] + [(-10,2),(-10,3)] | [(0,0),(6,6)] + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] + [(0,-20),(30,-20)] | [(11,22),(33,44)] +(21 rows) + +-- Has points less than or equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s <= l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(10,-10),(-3,-4)] + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(1,2),(3,4)] | [(0,-20),(30,-20)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(0,0),(6,6)] | [(10,-10),(-3,-4)] + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(0,-20),(30,-20)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] + [(10,-10),(-3,-4)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(1,2),(3,4)] + [(-10,2),(-10,3)] | [(0,0),(6,6)] + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] + [(0,-20),(30,-20)] | [(11,22),(33,44)] + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] +(28 rows) + +-- Has points equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s = l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + 
[(0,-20),(30,-20)] | [(0,-20),(30,-20)] + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] +(8 rows) + +-- Has points greater than or equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s >= l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(-10,2),(-10,3)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(0,0),(6,6)] | [(-10,2),(-10,3)] + [(10,-10),(-3,-4)] | [(1,2),(3,4)] + [(10,-10),(-3,-4)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(10,-10),(-3,-4)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(11,22),(33,44)] | [(-10,2),(-10,3)] + [(11,22),(33,44)] | [(0,-20),(30,-20)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(1,2),(3,4)] + [(0,-20),(30,-20)] | [(0,0),(6,6)] + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] +(28 rows) + +-- Has points greater than line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s > l2.s; + s | s +-------------------------------+-------------------- + [(1,2),(3,4)] | [(-10,2),(-10,3)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(-10,2),(-10,3)] + [(10,-10),(-3,-4)] | [(1,2),(3,4)] + [(10,-10),(-3,-4)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(10,-10),(-3,-4)] + [(11,22),(33,44)] | [(-10,2),(-10,3)] + [(11,22),(33,44)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(1,2),(3,4)] + [(0,-20),(30,-20)] | [(0,0),(6,6)] + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] +(21 rows) + +-- Has points not equal to line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s != l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(10,-10),(-3,-4)] + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(1,2),(3,4)] | [(-10,2),(-10,3)] + [(1,2),(3,4)] | [(0,-20),(30,-20)] + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(10,-10),(-3,-4)] + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(-10,2),(-10,3)] + [(0,0),(6,6)] | [(0,-20),(30,-20)] + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] + [(10,-10),(-3,-4)] | [(1,2),(3,4)] + [(10,-10),(-3,-4)] | [(0,0),(6,6)] + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] + [(10,-10),(-3,-4)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] + 
[(-1000000,200),(300000,-40)] | [(1,2),(3,4)] + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(10,-10),(-3,-4)] + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(-10,2),(-10,3)] + [(11,22),(33,44)] | [(0,-20),(30,-20)] + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] + [(-10,2),(-10,3)] | [(1,2),(3,4)] + [(-10,2),(-10,3)] | [(0,0),(6,6)] + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] + [(-10,2),(-10,3)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] + [(0,-20),(30,-20)] | [(1,2),(3,4)] + [(0,-20),(30,-20)] | [(0,0),(6,6)] + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] + [(0,-20),(30,-20)] | [(11,22),(33,44)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] +(56 rows) + +-- Parallel with line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s ?|| l2.s; + s | s +-------------------------------+------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(0,0),(6,6)] + [(1,2),(3,4)] | [(11,22),(33,44)] + [(0,0),(6,6)] | [(1,2),(3,4)] + [(0,0),(6,6)] | [(0,0),(6,6)] + [(0,0),(6,6)] | [(11,22),(33,44)] + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] | [(1,2),(3,4)] + [(11,22),(33,44)] | [(0,0),(6,6)] + [(11,22),(33,44)] | [(11,22),(33,44)] + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] +(13 rows) + +-- Perpendicular with line segment +SELECT l1.s, l2.s FROM LSEG_TBL l1, LSEG_TBL l2 WHERE l1.s ?-| l2.s; + s | s +--------------------+-------------------- + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] +(2 rows) + +-- Distance to line +SELECT l.s, l1.s, l.s <-> l1.s AS dist_sl, l1.s <-> l.s AS dist_ls FROM LSEG_TBL l, LINE_TBL l1; + s | s | dist_sl | dist_ls +-------------------------------+---------------------------------------+----------------+---------------- + [(1,2),(3,4)] | {0,-1,5} | 1 | 1 + [(0,0),(6,6)] | {0,-1,5} | 0 | 0 + [(10,-10),(-3,-4)] | {0,-1,5} | 9 | 9 + [(-1000000,200),(300000,-40)] | {0,-1,5} | 0 | 0 + [(11,22),(33,44)] | {0,-1,5} | 17 | 17 + [(-10,2),(-10,3)] | {0,-1,5} | 2 | 2 + [(0,-20),(30,-20)] | {0,-1,5} | 25 | 25 + [(NaN,1),(NaN,90)] | {0,-1,5} | NaN | NaN + [(1,2),(3,4)] | {1,0,5} | 6 | 6 + [(0,0),(6,6)] | {1,0,5} | 5 | 5 + [(10,-10),(-3,-4)] | {1,0,5} | 2 | 2 + [(-1000000,200),(300000,-40)] | {1,0,5} | 0 | 0 + [(11,22),(33,44)] | {1,0,5} | 16 | 16 + [(-10,2),(-10,3)] | {1,0,5} | 5 | 5 + [(0,-20),(30,-20)] | {1,0,5} | 5 | 5 + [(NaN,1),(NaN,90)] | {1,0,5} | NaN | NaN + [(1,2),(3,4)] | {0,3,0} | 2 | 2 + [(0,0),(6,6)] | {0,3,0} | 0 | 0 + [(10,-10),(-3,-4)] | {0,3,0} | 4 | 4 + [(-1000000,200),(300000,-40)] | {0,3,0} | 0 | 0 + [(11,22),(33,44)] | {0,3,0} | 
22 | 22 + [(-10,2),(-10,3)] | {0,3,0} | 2 | 2 + [(0,-20),(30,-20)] | {0,3,0} | 20 | 20 + [(NaN,1),(NaN,90)] | {0,3,0} | NaN | NaN + [(1,2),(3,4)] | {1,-1,0} | 0.707106781187 | 0.707106781187 + [(0,0),(6,6)] | {1,-1,0} | 0 | 0 + [(10,-10),(-3,-4)] | {1,-1,0} | 0.707106781187 | 0.707106781187 + [(-1000000,200),(300000,-40)] | {1,-1,0} | 0 | 0 + [(11,22),(33,44)] | {1,-1,0} | 7.77817459305 | 7.77817459305 + [(-10,2),(-10,3)] | {1,-1,0} | 8.48528137424 | 8.48528137424 + [(0,-20),(30,-20)] | {1,-1,0} | 14.1421356237 | 14.1421356237 + [(NaN,1),(NaN,90)] | {1,-1,0} | NaN | NaN + [(1,2),(3,4)] | {-0.4,-1,-6} | 7.79920420344 | 7.79920420344 + [(0,0),(6,6)] | {-0.4,-1,-6} | 5.57086014531 | 5.57086014531 + [(10,-10),(-3,-4)] | {-0.4,-1,-6} | 0 | 0 + [(-1000000,200),(300000,-40)] | {-0.4,-1,-6} | 0 | 0 + [(11,22),(33,44)] | {-0.4,-1,-6} | 30.0826447847 | 30.0826447847 + [(-10,2),(-10,3)] | {-0.4,-1,-6} | 3.71390676354 | 3.71390676354 + [(0,-20),(30,-20)] | {-0.4,-1,-6} | 1.85695338177 | 1.85695338177 + [(NaN,1),(NaN,90)] | {-0.4,-1,-6} | NaN | NaN + [(1,2),(3,4)] | {-0.000184615384615,-1,15.3846153846} | 11.3840613445 | 11.3840613445 + [(0,0),(6,6)] | {-0.000184615384615,-1,15.3846153846} | 9.3835075324 | 9.3835075324 + [(10,-10),(-3,-4)] | {-0.000184615384615,-1,15.3846153846} | 19.3851689004 | 19.3851689004 + [(-1000000,200),(300000,-40)] | {-0.000184615384615,-1,15.3846153846} | 0 | 0 + [(11,22),(33,44)] | {-0.000184615384615,-1,15.3846153846} | 6.61741527185 | 6.61741527185 + [(-10,2),(-10,3)] | {-0.000184615384615,-1,15.3846153846} | 12.3864613274 | 12.3864613274 + [(0,-20),(30,-20)] | {-0.000184615384615,-1,15.3846153846} | 35.3790763202 | 35.3790763202 + [(NaN,1),(NaN,90)] | {-0.000184615384615,-1,15.3846153846} | NaN | NaN + [(1,2),(3,4)] | {3,NaN,5} | NaN | NaN + [(0,0),(6,6)] | {3,NaN,5} | NaN | NaN + [(10,-10),(-3,-4)] | {3,NaN,5} | NaN | NaN + [(-1000000,200),(300000,-40)] | {3,NaN,5} | NaN | NaN + [(11,22),(33,44)] | {3,NaN,5} | NaN | NaN + [(-10,2),(-10,3)] | {3,NaN,5} | NaN | NaN + [(0,-20),(30,-20)] | {3,NaN,5} | NaN | NaN + [(NaN,1),(NaN,90)] | {3,NaN,5} | NaN | NaN + [(1,2),(3,4)] | {NaN,NaN,NaN} | NaN | NaN + [(0,0),(6,6)] | {NaN,NaN,NaN} | NaN | NaN + [(10,-10),(-3,-4)] | {NaN,NaN,NaN} | NaN | NaN + [(-1000000,200),(300000,-40)] | {NaN,NaN,NaN} | NaN | NaN + [(11,22),(33,44)] | {NaN,NaN,NaN} | NaN | NaN + [(-10,2),(-10,3)] | {NaN,NaN,NaN} | NaN | NaN + [(0,-20),(30,-20)] | {NaN,NaN,NaN} | NaN | NaN + [(NaN,1),(NaN,90)] | {NaN,NaN,NaN} | NaN | NaN + [(1,2),(3,4)] | {0,-1,3} | 0 | 0 + [(0,0),(6,6)] | {0,-1,3} | 0 | 0 + [(10,-10),(-3,-4)] | {0,-1,3} | 7 | 7 + [(-1000000,200),(300000,-40)] | {0,-1,3} | 0 | 0 + [(11,22),(33,44)] | {0,-1,3} | 19 | 19 + [(-10,2),(-10,3)] | {0,-1,3} | 0 | 0 + [(0,-20),(30,-20)] | {0,-1,3} | 23 | 23 + [(NaN,1),(NaN,90)] | {0,-1,3} | NaN | NaN + [(1,2),(3,4)] | {-1,0,3} | 0 | 0 + [(0,0),(6,6)] | {-1,0,3} | 0 | 0 + [(10,-10),(-3,-4)] | {-1,0,3} | 0 | 0 + [(-1000000,200),(300000,-40)] | {-1,0,3} | 0 | 0 + [(11,22),(33,44)] | {-1,0,3} | 8 | 8 + [(-10,2),(-10,3)] | {-1,0,3} | 13 | 13 + [(0,-20),(30,-20)] | {-1,0,3} | 0 | 0 + [(NaN,1),(NaN,90)] | {-1,0,3} | NaN | NaN +(80 rows) + +-- Distance to line segment +SELECT l1.s, l2.s, l1.s <-> l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + s | s | ?column? 
+-------------------------------+-------------------------------+---------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | [(0,0),(6,6)] | 0.707106781187 + [(1,2),(3,4)] | [(10,-10),(-3,-4)] | 7.12398901685 + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] | 11.3840613445 + [(1,2),(3,4)] | [(11,22),(33,44)] | 19.6977156036 + [(1,2),(3,4)] | [(-10,2),(-10,3)] | 11 + [(1,2),(3,4)] | [(0,-20),(30,-20)] | 22 + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] | NaN + [(0,0),(6,6)] | [(1,2),(3,4)] | 0.707106781187 + [(0,0),(6,6)] | [(0,0),(6,6)] | 0 + [(0,0),(6,6)] | [(10,-10),(-3,-4)] | 4.88901207039 + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] | 9.3835075324 + [(0,0),(6,6)] | [(11,22),(33,44)] | 16.7630546142 + [(0,0),(6,6)] | [(-10,2),(-10,3)] | 10.1980390272 + [(0,0),(6,6)] | [(0,-20),(30,-20)] | 20 + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] | NaN + [(10,-10),(-3,-4)] | [(1,2),(3,4)] | 7.12398901685 + [(10,-10),(-3,-4)] | [(0,0),(6,6)] | 4.88901207039 + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] | 0 + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] | 19.3851689004 + [(10,-10),(-3,-4)] | [(11,22),(33,44)] | 29.4737584815 + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] | 9.21954445729 + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] | 10 + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] | NaN + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] | 11.3840613445 + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] | 9.3835075324 + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] | 19.3851689004 + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] | 0 + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] | 6.61741527185 + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] | 12.3864613274 + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] | 35.3790763202 + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] | NaN + [(11,22),(33,44)] | [(1,2),(3,4)] | 19.6977156036 + [(11,22),(33,44)] | [(0,0),(6,6)] | 16.7630546142 + [(11,22),(33,44)] | [(10,-10),(-3,-4)] | 29.4737584815 + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] | 6.61741527185 + [(11,22),(33,44)] | [(11,22),(33,44)] | 0 + [(11,22),(33,44)] | [(-10,2),(-10,3)] | 28.319604517 + [(11,22),(33,44)] | [(0,-20),(30,-20)] | 42 + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] | NaN + [(-10,2),(-10,3)] | [(1,2),(3,4)] | 11 + [(-10,2),(-10,3)] | [(0,0),(6,6)] | 10.1980390272 + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] | 9.21954445729 + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] | 12.3864613274 + [(-10,2),(-10,3)] | [(11,22),(33,44)] | 28.319604517 + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] | 0 + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] | 24.1660919472 + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] | NaN + [(0,-20),(30,-20)] | [(1,2),(3,4)] | 22 + [(0,-20),(30,-20)] | [(0,0),(6,6)] | 20 + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] | 10 + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] | 35.3790763202 + [(0,-20),(30,-20)] | [(11,22),(33,44)] | 42 + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] | 24.1660919472 + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] | 0 + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] | NaN + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] | NaN + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] | NaN + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] | NaN + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] | NaN + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] | NaN + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] | NaN + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] | NaN + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | NaN +(64 rows) + +-- Distance to box +SELECT l.s, b.f1, l.s <-> b.f1 AS dist_sb, b.f1 <-> l.s AS dist_bs FROM LSEG_TBL l, 
BOX_TBL b; + s | f1 | dist_sb | dist_bs +-------------------------------+---------------------+----------------+---------------- + [(1,2),(3,4)] | (2,2),(0,0) | 0 | 0 + [(1,2),(3,4)] | (3,3),(1,1) | 0 | 0 + [(1,2),(3,4)] | (-2,2),(-8,-10) | 3 | 3 + [(1,2),(3,4)] | (2.5,3.5),(2.5,2.5) | 0 | 0 + [(1,2),(3,4)] | (3,3),(3,3) | 0.707106781187 | 0.707106781187 + [(0,0),(6,6)] | (2,2),(0,0) | 0 | 0 + [(0,0),(6,6)] | (3,3),(1,1) | 0 | 0 + [(0,0),(6,6)] | (-2,2),(-8,-10) | 2 | 2 + [(0,0),(6,6)] | (2.5,3.5),(2.5,2.5) | 0 | 0 + [(0,0),(6,6)] | (3,3),(3,3) | 0 | 0 + [(10,-10),(-3,-4)] | (2,2),(0,0) | 4.88901207039 | 4.88901207039 + [(10,-10),(-3,-4)] | (3,3),(1,1) | 6.21602963235 | 6.21602963235 + [(10,-10),(-3,-4)] | (-2,2),(-8,-10) | 0 | 0 + [(10,-10),(-3,-4)] | (2.5,3.5),(2.5,2.5) | 8.20655597529 | 8.20655597529 + [(10,-10),(-3,-4)] | (3,3),(3,3) | 8.87006475627 | 8.87006475627 + [(-1000000,200),(300000,-40)] | (2,2),(0,0) | 13.3842459258 | 13.3842459258 + [(-1000000,200),(300000,-40)] | (3,3),(1,1) | 12.3840613274 | 12.3840613274 + [(-1000000,200),(300000,-40)] | (-2,2),(-8,-10) | 13.3849843873 | 13.3849843873 + [(-1000000,200),(300000,-40)] | (2.5,3.5),(2.5,2.5) | 11.8841536436 | 11.8841536436 + [(-1000000,200),(300000,-40)] | (3,3),(3,3) | 12.3840613274 | 12.3840613274 + [(11,22),(33,44)] | (2,2),(0,0) | 21.9317121995 | 21.9317121995 + [(11,22),(33,44)] | (3,3),(1,1) | 20.6155281281 | 20.6155281281 + [(11,22),(33,44)] | (-2,2),(-8,-10) | 23.8537208838 | 23.8537208838 + [(11,22),(33,44)] | (2.5,3.5),(2.5,2.5) | 20.3592730715 | 20.3592730715 + [(11,22),(33,44)] | (3,3),(3,3) | 20.6155281281 | 20.6155281281 + [(-10,2),(-10,3)] | (2,2),(0,0) | 10 | 10 + [(-10,2),(-10,3)] | (3,3),(1,1) | 11 | 11 + [(-10,2),(-10,3)] | (-2,2),(-8,-10) | 2 | 2 + [(-10,2),(-10,3)] | (2.5,3.5),(2.5,2.5) | 12.5 | 12.5 + [(-10,2),(-10,3)] | (3,3),(3,3) | 13 | 13 + [(0,-20),(30,-20)] | (2,2),(0,0) | 20 | 20 + [(0,-20),(30,-20)] | (3,3),(1,1) | 21 | 21 + [(0,-20),(30,-20)] | (-2,2),(-8,-10) | 10.1980390272 | 10.1980390272 + [(0,-20),(30,-20)] | (2.5,3.5),(2.5,2.5) | 22.5 | 22.5 + [(0,-20),(30,-20)] | (3,3),(3,3) | 23 | 23 + [(NaN,1),(NaN,90)] | (2,2),(0,0) | NaN | NaN + [(NaN,1),(NaN,90)] | (3,3),(1,1) | NaN | NaN + [(NaN,1),(NaN,90)] | (-2,2),(-8,-10) | NaN | NaN + [(NaN,1),(NaN,90)] | (2.5,3.5),(2.5,2.5) | NaN | NaN + [(NaN,1),(NaN,90)] | (3,3),(3,3) | NaN | NaN +(40 rows) + +-- Intersect with line segment +SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s ?# l1.s; + s | s +-------------------------------+-------------- + [(0,0),(6,6)] | {0,-1,5} + [(-1000000,200),(300000,-40)] | {0,-1,5} + [(-1000000,200),(300000,-40)] | {1,0,5} + [(0,0),(6,6)] | {0,3,0} + [(-1000000,200),(300000,-40)] | {0,3,0} + [(-1000000,200),(300000,-40)] | {1,-1,0} + [(10,-10),(-3,-4)] | {-0.4,-1,-6} + [(-1000000,200),(300000,-40)] | {-0.4,-1,-6} + [(1,2),(3,4)] | {0,-1,3} + [(0,0),(6,6)] | {0,-1,3} + [(-1000000,200),(300000,-40)] | {0,-1,3} + [(-10,2),(-10,3)] | {0,-1,3} + [(1,2),(3,4)] | {-1,0,3} + [(0,0),(6,6)] | {-1,0,3} + [(10,-10),(-3,-4)] | {-1,0,3} + [(-1000000,200),(300000,-40)] | {-1,0,3} + [(0,-20),(30,-20)] | {-1,0,3} +(17 rows) + +-- Intersect with box +SELECT l.s, b.f1 FROM LSEG_TBL l, BOX_TBL b WHERE l.s ?# b.f1; + s | f1 +--------------------+--------------------- + [(1,2),(3,4)] | (2,2),(0,0) + [(1,2),(3,4)] | (3,3),(1,1) + [(1,2),(3,4)] | (2.5,3.5),(2.5,2.5) + [(0,0),(6,6)] | (2,2),(0,0) + [(0,0),(6,6)] | (3,3),(1,1) + [(0,0),(6,6)] | (2.5,3.5),(2.5,2.5) + [(0,0),(6,6)] | (3,3),(3,3) + [(10,-10),(-3,-4)] | (-2,2),(-8,-10) +(8 
rows) + +-- Intersection point with line segment +SELECT l1.s, l2.s, l1.s # l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + s | s | ?column? +-------------------------------+-------------------------------+---------- + [(1,2),(3,4)] | [(1,2),(3,4)] | + [(1,2),(3,4)] | [(0,0),(6,6)] | + [(1,2),(3,4)] | [(10,-10),(-3,-4)] | + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] | + [(1,2),(3,4)] | [(11,22),(33,44)] | + [(1,2),(3,4)] | [(-10,2),(-10,3)] | + [(1,2),(3,4)] | [(0,-20),(30,-20)] | + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] | + [(0,0),(6,6)] | [(1,2),(3,4)] | + [(0,0),(6,6)] | [(0,0),(6,6)] | + [(0,0),(6,6)] | [(10,-10),(-3,-4)] | + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] | + [(0,0),(6,6)] | [(11,22),(33,44)] | + [(0,0),(6,6)] | [(-10,2),(-10,3)] | + [(0,0),(6,6)] | [(0,-20),(30,-20)] | + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] | + [(10,-10),(-3,-4)] | [(1,2),(3,4)] | + [(10,-10),(-3,-4)] | [(0,0),(6,6)] | + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] | + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] | + [(10,-10),(-3,-4)] | [(11,22),(33,44)] | + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] | + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] | + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] | + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] | + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] | + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] | + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] | + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] | + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] | + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] | + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] | + [(11,22),(33,44)] | [(1,2),(3,4)] | + [(11,22),(33,44)] | [(0,0),(6,6)] | + [(11,22),(33,44)] | [(10,-10),(-3,-4)] | + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] | + [(11,22),(33,44)] | [(11,22),(33,44)] | + [(11,22),(33,44)] | [(-10,2),(-10,3)] | + [(11,22),(33,44)] | [(0,-20),(30,-20)] | + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] | + [(-10,2),(-10,3)] | [(1,2),(3,4)] | + [(-10,2),(-10,3)] | [(0,0),(6,6)] | + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] | + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] | + [(-10,2),(-10,3)] | [(11,22),(33,44)] | + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] | + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] | + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] | + [(0,-20),(30,-20)] | [(1,2),(3,4)] | + [(0,-20),(30,-20)] | [(0,0),(6,6)] | + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] | + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] | + [(0,-20),(30,-20)] | [(11,22),(33,44)] | + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] | + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] | + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] | + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] | + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] | + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] | + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] | + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] | + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] | + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] | + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | +(64 rows) + +-- Closest point to line segment +SELECT l1.s, l2.s, l1.s ## l2.s FROM LSEG_TBL l1, LSEG_TBL l2; + s | s | ?column? 
+-------------------------------+-------------------------------+--------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | + [(1,2),(3,4)] | [(0,0),(6,6)] | + [(1,2),(3,4)] | [(10,-10),(-3,-4)] | (-1.98536585366,-4.46829268293) + [(1,2),(3,4)] | [(-1000000,200),(300000,-40)] | (3.00210167283,15.3840611505) + [(1,2),(3,4)] | [(11,22),(33,44)] | + [(1,2),(3,4)] | [(-10,2),(-10,3)] | (-10,2) + [(1,2),(3,4)] | [(0,-20),(30,-20)] | (1,-20) + [(1,2),(3,4)] | [(NaN,1),(NaN,90)] | + [(0,0),(6,6)] | [(1,2),(3,4)] | + [(0,0),(6,6)] | [(0,0),(6,6)] | + [(0,0),(6,6)] | [(10,-10),(-3,-4)] | (-2.0487804878,-4.43902439024) + [(0,0),(6,6)] | [(-1000000,200),(300000,-40)] | (6.00173233982,15.3835073725) + [(0,0),(6,6)] | [(11,22),(33,44)] | + [(0,0),(6,6)] | [(-10,2),(-10,3)] | (-10,2) + [(0,0),(6,6)] | [(0,-20),(30,-20)] | (0,-20) + [(0,0),(6,6)] | [(NaN,1),(NaN,90)] | + [(10,-10),(-3,-4)] | [(1,2),(3,4)] | (1,2) + [(10,-10),(-3,-4)] | [(0,0),(6,6)] | (0,0) + [(10,-10),(-3,-4)] | [(10,-10),(-3,-4)] | + [(10,-10),(-3,-4)] | [(-1000000,200),(300000,-40)] | (-2.99642119965,15.3851685701) + [(10,-10),(-3,-4)] | [(11,22),(33,44)] | (11,22) + [(10,-10),(-3,-4)] | [(-10,2),(-10,3)] | (-10,2) + [(10,-10),(-3,-4)] | [(0,-20),(30,-20)] | (10,-20) + [(10,-10),(-3,-4)] | [(NaN,1),(NaN,90)] | + [(-1000000,200),(300000,-40)] | [(1,2),(3,4)] | (3,4) + [(-1000000,200),(300000,-40)] | [(0,0),(6,6)] | (6,6) + [(-1000000,200),(300000,-40)] | [(10,-10),(-3,-4)] | (-3,-4) + [(-1000000,200),(300000,-40)] | [(-1000000,200),(300000,-40)] | + [(-1000000,200),(300000,-40)] | [(11,22),(33,44)] | (11,22) + [(-1000000,200),(300000,-40)] | [(-10,2),(-10,3)] | (-10,3) + [(-1000000,200),(300000,-40)] | [(0,-20),(30,-20)] | (30,-20) + [(-1000000,200),(300000,-40)] | [(NaN,1),(NaN,90)] | + [(11,22),(33,44)] | [(1,2),(3,4)] | + [(11,22),(33,44)] | [(0,0),(6,6)] | + [(11,22),(33,44)] | [(10,-10),(-3,-4)] | (-1.3512195122,-4.76097560976) + [(11,22),(33,44)] | [(-1000000,200),(300000,-40)] | (10.9987783234,15.3825848409) + [(11,22),(33,44)] | [(11,22),(33,44)] | + [(11,22),(33,44)] | [(-10,2),(-10,3)] | (-10,3) + [(11,22),(33,44)] | [(0,-20),(30,-20)] | (11,-20) + [(11,22),(33,44)] | [(NaN,1),(NaN,90)] | + [(-10,2),(-10,3)] | [(1,2),(3,4)] | (1,2) + [(-10,2),(-10,3)] | [(0,0),(6,6)] | (0,0) + [(-10,2),(-10,3)] | [(10,-10),(-3,-4)] | (-3,-4) + [(-10,2),(-10,3)] | [(-1000000,200),(300000,-40)] | (-9.99771326872,15.3864611163) + [(-10,2),(-10,3)] | [(11,22),(33,44)] | (11,22) + [(-10,2),(-10,3)] | [(-10,2),(-10,3)] | + [(-10,2),(-10,3)] | [(0,-20),(30,-20)] | (0,-20) + [(-10,2),(-10,3)] | [(NaN,1),(NaN,90)] | + [(0,-20),(30,-20)] | [(1,2),(3,4)] | (1,2) + [(0,-20),(30,-20)] | [(0,0),(6,6)] | (0,0) + [(0,-20),(30,-20)] | [(10,-10),(-3,-4)] | (10,-10) + [(0,-20),(30,-20)] | [(-1000000,200),(300000,-40)] | (30.0065315217,15.3790757173) + [(0,-20),(30,-20)] | [(11,22),(33,44)] | (11,22) + [(0,-20),(30,-20)] | [(-10,2),(-10,3)] | (-10,2) + [(0,-20),(30,-20)] | [(0,-20),(30,-20)] | + [(0,-20),(30,-20)] | [(NaN,1),(NaN,90)] | + [(NaN,1),(NaN,90)] | [(1,2),(3,4)] | + [(NaN,1),(NaN,90)] | [(0,0),(6,6)] | + [(NaN,1),(NaN,90)] | [(10,-10),(-3,-4)] | + [(NaN,1),(NaN,90)] | [(-1000000,200),(300000,-40)] | + [(NaN,1),(NaN,90)] | [(11,22),(33,44)] | + [(NaN,1),(NaN,90)] | [(-10,2),(-10,3)] | + [(NaN,1),(NaN,90)] | [(0,-20),(30,-20)] | + [(NaN,1),(NaN,90)] | [(NaN,1),(NaN,90)] | +(64 rows) + +-- Closest point to box +SELECT l.s, b.f1, l.s ## b.f1 FROM LSEG_TBL l, BOX_TBL b; + s | f1 | ?column? 
+-------------------------------+---------------------+------------- + [(1,2),(3,4)] | (2,2),(0,0) | (1,2) + [(1,2),(3,4)] | (3,3),(1,1) | (1.5,2.5) + [(1,2),(3,4)] | (-2,2),(-8,-10) | (-2,2) + [(1,2),(3,4)] | (2.5,3.5),(2.5,2.5) | (2.25,3.25) + [(1,2),(3,4)] | (3,3),(3,3) | (3,3) + [(0,0),(6,6)] | (2,2),(0,0) | (1,1) + [(0,0),(6,6)] | (3,3),(1,1) | (2,2) + [(0,0),(6,6)] | (-2,2),(-8,-10) | (-2,0) + [(0,0),(6,6)] | (2.5,3.5),(2.5,2.5) | (2.75,2.75) + [(0,0),(6,6)] | (3,3),(3,3) | (3,3) + [(10,-10),(-3,-4)] | (2,2),(0,0) | (0,0) + [(10,-10),(-3,-4)] | (3,3),(1,1) | (1,1) + [(10,-10),(-3,-4)] | (-2,2),(-8,-10) | (-3,-4) + [(10,-10),(-3,-4)] | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + [(10,-10),(-3,-4)] | (3,3),(3,3) | (3,3) + [(-1000000,200),(300000,-40)] | (2,2),(0,0) | (2,2) + [(-1000000,200),(300000,-40)] | (3,3),(1,1) | (3,3) + [(-1000000,200),(300000,-40)] | (-2,2),(-8,-10) | (-2,2) + [(-1000000,200),(300000,-40)] | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + [(-1000000,200),(300000,-40)] | (3,3),(3,3) | (3,3) + [(11,22),(33,44)] | (2,2),(0,0) | (2,2) + [(11,22),(33,44)] | (3,3),(1,1) | (3,3) + [(11,22),(33,44)] | (-2,2),(-8,-10) | (-2,2) + [(11,22),(33,44)] | (2.5,3.5),(2.5,2.5) | (2.5,3.5) + [(11,22),(33,44)] | (3,3),(3,3) | (3,3) + [(-10,2),(-10,3)] | (2,2),(0,0) | (0,2) + [(-10,2),(-10,3)] | (3,3),(1,1) | (1,2) + [(-10,2),(-10,3)] | (-2,2),(-8,-10) | (-8,2) + [(-10,2),(-10,3)] | (2.5,3.5),(2.5,2.5) | (2.5,3) + [(-10,2),(-10,3)] | (3,3),(3,3) | (3,3) + [(0,-20),(30,-20)] | (2,2),(0,0) | (0,0) + [(0,-20),(30,-20)] | (3,3),(1,1) | (1,1) + [(0,-20),(30,-20)] | (-2,2),(-8,-10) | (-2,-10) + [(0,-20),(30,-20)] | (2.5,3.5),(2.5,2.5) | (2.5,2.5) + [(0,-20),(30,-20)] | (3,3),(3,3) | (3,3) + [(NaN,1),(NaN,90)] | (2,2),(0,0) | + [(NaN,1),(NaN,90)] | (3,3),(1,1) | + [(NaN,1),(NaN,90)] | (-2,2),(-8,-10) | + [(NaN,1),(NaN,90)] | (2.5,3.5),(2.5,2.5) | + [(NaN,1),(NaN,90)] | (3,3),(3,3) | +(40 rows) + +-- On line +SELECT l.s, l1.s FROM LSEG_TBL l, LINE_TBL l1 WHERE l.s <@ l1.s; + s | s +-------------------------------+--------------------------------------- + [(0,0),(6,6)] | {1,-1,0} + [(-1000000,200),(300000,-40)] | {-0.000184615384615,-1,15.3846153846} +(2 rows) + +-- On box +SELECT l.s, b.f1 FROM LSEG_TBL l, BOX_TBL b WHERE l.s <@ b.f1; + s | f1 +---+---- +(0 rows) + +-- +-- Boxes +-- +SELECT box(f1) AS box FROM CIRCLE_TBL; + box +---------------------------------------------------------------- + (7.12132034356,3.12132034356),(2.87867965644,-1.12132034356) + (71.7106781187,72.7106781187),(-69.7106781187,-68.7106781187) + (4.53553390593,6.53553390593),(-2.53553390593,-0.535533905933) + (3.12132034356,4.12132034356),(-1.12132034356,-0.12132034356) + (107.071067812,207.071067812),(92.9289321881,192.928932188) + (181.317279836,82.3172798365),(18.6827201635,-80.3172798365) + (3,5),(3,5) + (NaN,NaN),(NaN,NaN) +(8 rows) + +-- translation +SELECT b.f1 + p.f1 AS translation + FROM BOX_TBL b, POINT_TBL p; + translation +------------------------------------- + (2,2),(0,0) + (3,3),(1,1) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) + (-8,2),(-10,0) + (-7,3),(-9,1) + (-12,2),(-18,-10) + (-7.5,3.5),(-7.5,2.5) + (-7,3),(-7,3) + (-1,6),(-3,4) + (0,7),(-2,5) + (-5,6),(-11,-6) + (-0.5,7.5),(-0.5,6.5) + (0,7),(0,7) + (7.1,36.5),(5.1,34.5) + (8.1,37.5),(6.1,35.5) + (3.1,36.5),(-2.9,24.5) + (7.6,38),(7.6,37) + (8.1,37.5),(8.1,37.5) + (-3,-10),(-5,-12) + (-2,-9),(-4,-11) + (-7,-10),(-13,-22) + (-2.5,-8.5),(-2.5,-9.5) + (-2,-9),(-2,-9) + (2,2),(1e-300,-1e-300) + (3,3),(1,1) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) + 
(1e+300,Infinity),(1e+300,Infinity) + (1e+300,Infinity),(1e+300,Infinity) + (1e+300,Infinity),(1e+300,Infinity) + (1e+300,Infinity),(1e+300,Infinity) + (1e+300,Infinity),(1e+300,Infinity) + (Infinity,1e+300),(Infinity,1e+300) + (Infinity,1e+300),(Infinity,1e+300) + (Infinity,1e+300),(Infinity,1e+300) + (Infinity,1e+300),(Infinity,1e+300) + (Infinity,1e+300),(Infinity,1e+300) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (12,12),(10,10) + (13,13),(11,11) + (8,12),(2,0) + (12.5,13.5),(12.5,12.5) + (13,13),(13,13) +(50 rows) + +SELECT b.f1 - p.f1 AS translation + FROM BOX_TBL b, POINT_TBL p; + translation +----------------------------------------- + (2,2),(0,0) + (3,3),(1,1) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) + (12,2),(10,0) + (13,3),(11,1) + (8,2),(2,-10) + (12.5,3.5),(12.5,2.5) + (13,3),(13,3) + (5,-2),(3,-4) + (6,-1),(4,-3) + (1,-2),(-5,-14) + (5.5,-0.5),(5.5,-1.5) + (6,-1),(6,-1) + (-3.1,-32.5),(-5.1,-34.5) + (-2.1,-31.5),(-4.1,-33.5) + (-7.1,-32.5),(-13.1,-44.5) + (-2.6,-31),(-2.6,-32) + (-2.1,-31.5),(-2.1,-31.5) + (7,14),(5,12) + (8,15),(6,13) + (3,14),(-3,2) + (7.5,15.5),(7.5,14.5) + (8,15),(8,15) + (2,2),(-1e-300,1e-300) + (3,3),(1,1) + (-2,2),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3),(3,3) + (-1e+300,-Infinity),(-1e+300,-Infinity) + (-1e+300,-Infinity),(-1e+300,-Infinity) + (-1e+300,-Infinity),(-1e+300,-Infinity) + (-1e+300,-Infinity),(-1e+300,-Infinity) + (-1e+300,-Infinity),(-1e+300,-Infinity) + (-Infinity,-1e+300),(-Infinity,-1e+300) + (-Infinity,-1e+300),(-Infinity,-1e+300) + (-Infinity,-1e+300),(-Infinity,-1e+300) + (-Infinity,-1e+300),(-Infinity,-1e+300) + (-Infinity,-1e+300),(-Infinity,-1e+300) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (NaN,NaN),(NaN,NaN) + (-8,-8),(-10,-10) + (-7,-7),(-9,-9) + (-12,-8),(-18,-20) + (-7.5,-6.5),(-7.5,-7.5) + (-7,-7),(-7,-7) +(50 rows) + +-- Multiply with point +SELECT b.f1, p.f1, b.f1 * p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +---------------------+------------+----------------------------- + (2,2),(0,0) | (5.1,34.5) | (0,79.2),(-58.8,0) + (2,2),(0,0) | (10,10) | (0,40),(0,0) + (3,3),(1,1) | (5.1,34.5) | (-29.4,118.8),(-88.2,39.6) + (3,3),(1,1) | (10,10) | (0,60),(0,20) + (-2,2),(-8,-10) | (5.1,34.5) | (304.2,-58.8),(-79.2,-327) + (-2,2),(-8,-10) | (10,10) | (20,0),(-40,-180) + (2.5,3.5),(2.5,2.5) | (5.1,34.5) | (-73.5,104.1),(-108,99) + (2.5,3.5),(2.5,2.5) | (10,10) | (0,60),(-10,50) + (3,3),(3,3) | (5.1,34.5) | (-88.2,118.8),(-88.2,118.8) + (3,3),(3,3) | (10,10) | (0,60),(0,60) +(10 rows) + +-- Overflow error +SELECT b.f1, p.f1, b.f1 * p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] > 1000; + f1 | f1 | ?column? 
+---------------------+-------------------+-------------------------------------------- + (2,2),(0,0) | (1e+300,Infinity) | (NaN,NaN),(-Infinity,Infinity) + (2,2),(0,0) | (Infinity,1e+300) | (NaN,NaN),(Infinity,Infinity) + (2,2),(0,0) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (3,3),(1,1) | (1e+300,Infinity) | (-Infinity,Infinity),(-Infinity,Infinity) + (3,3),(1,1) | (Infinity,1e+300) | (Infinity,Infinity),(Infinity,Infinity) + (3,3),(1,1) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (-2,2),(-8,-10) | (1e+300,Infinity) | (Infinity,-Infinity),(-Infinity,-Infinity) + (-2,2),(-8,-10) | (Infinity,1e+300) | (-Infinity,Infinity),(-Infinity,-Infinity) + (-2,2),(-8,-10) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (2.5,3.5),(2.5,2.5) | (1e+300,Infinity) | (-Infinity,Infinity),(-Infinity,Infinity) + (2.5,3.5),(2.5,2.5) | (Infinity,1e+300) | (Infinity,Infinity),(Infinity,Infinity) + (2.5,3.5),(2.5,2.5) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) + (3,3),(3,3) | (1e+300,Infinity) | (-Infinity,Infinity),(-Infinity,Infinity) + (3,3),(3,3) | (Infinity,1e+300) | (Infinity,Infinity),(Infinity,Infinity) + (3,3),(3,3) | (NaN,NaN) | (NaN,NaN),(NaN,NaN) +(15 rows) + +-- Divide by point +SELECT b.f1, p.f1, b.f1 / p.f1 FROM BOX_TBL b, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +---------------------+------------+---------------------------------------------------------------------- + (2,2),(0,0) | (5.1,34.5) | (0.0651176557644,0),(0,-0.0483449262493) + (2,2),(0,0) | (10,10) | (0.2,0),(0,0) + (3,3),(1,1) | (5.1,34.5) | (0.0976764836466,-0.0241724631247),(0.0325588278822,-0.072517389374) + (3,3),(1,1) | (10,10) | (0.3,0),(0.1,0) + (-2,2),(-8,-10) | (5.1,34.5) | (0.0483449262493,0.18499334024),(-0.317201914064,0.0651176557644) + (-2,2),(-8,-10) | (10,10) | (0,0.2),(-0.9,-0.1) + (2.5,3.5),(2.5,2.5) | (5.1,34.5) | (0.109762715209,-0.0562379754329),(0.0813970697055,-0.0604311578117) + (2.5,3.5),(2.5,2.5) | (10,10) | (0.3,0.05),(0.25,0) + (3,3),(3,3) | (5.1,34.5) | (0.0976764836466,-0.072517389374),(0.0976764836466,-0.072517389374) + (3,3),(3,3) | (10,10) | (0.3,0),(0.3,0) +(10 rows) + +-- To box +SELECT f1::box + FROM POINT_TBL; + f1 +------------------------------------- + (0,0),(0,0) + (-10,0),(-10,0) + (-3,4),(-3,4) + (5.1,34.5),(5.1,34.5) + (-5,-12),(-5,-12) + (1e-300,-1e-300),(1e-300,-1e-300) + (1e+300,Infinity),(1e+300,Infinity) + (Infinity,1e+300),(Infinity,1e+300) + (NaN,NaN),(NaN,NaN) + (10,10),(10,10) +(10 rows) + +SELECT bound_box(a.f1, b.f1) + FROM BOX_TBL a, BOX_TBL b; + bound_box +--------------------- + (2,2),(0,0) + (3,3),(0,0) + (2,2),(-8,-10) + (2.5,3.5),(0,0) + (3,3),(0,0) + (3,3),(0,0) + (3,3),(1,1) + (3,3),(-8,-10) + (3,3.5),(1,1) + (3,3),(1,1) + (2,2),(-8,-10) + (3,3),(-8,-10) + (-2,2),(-8,-10) + (2.5,3.5),(-8,-10) + (3,3),(-8,-10) + (2.5,3.5),(0,0) + (3,3.5),(1,1) + (2.5,3.5),(-8,-10) + (2.5,3.5),(2.5,2.5) + (3,3.5),(2.5,2.5) + (3,3),(0,0) + (3,3),(1,1) + (3,3),(-8,-10) + (3,3.5),(2.5,2.5) + (3,3),(3,3) +(25 rows) + +-- Below box +SELECT b1.f1, b2.f1, b1.f1 <^ b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? 
+---------------------+---------------------+---------- + (2,2),(0,0) | (2,2),(0,0) | f + (2,2),(0,0) | (3,3),(1,1) | f + (2,2),(0,0) | (-2,2),(-8,-10) | f + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | t + (2,2),(0,0) | (3,3),(3,3) | t + (3,3),(1,1) | (2,2),(0,0) | f + (3,3),(1,1) | (3,3),(1,1) | f + (3,3),(1,1) | (-2,2),(-8,-10) | f + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | f + (3,3),(1,1) | (3,3),(3,3) | t + (-2,2),(-8,-10) | (2,2),(0,0) | f + (-2,2),(-8,-10) | (3,3),(1,1) | f + (-2,2),(-8,-10) | (-2,2),(-8,-10) | f + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | t + (-2,2),(-8,-10) | (3,3),(3,3) | t + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | f + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | f + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | f + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | f + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | f + (3,3),(3,3) | (2,2),(0,0) | f + (3,3),(3,3) | (3,3),(1,1) | f + (3,3),(3,3) | (-2,2),(-8,-10) | f + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | f + (3,3),(3,3) | (3,3),(3,3) | t +(25 rows) + +-- Above box +SELECT b1.f1, b2.f1, b1.f1 >^ b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? +---------------------+---------------------+---------- + (2,2),(0,0) | (2,2),(0,0) | f + (2,2),(0,0) | (3,3),(1,1) | f + (2,2),(0,0) | (-2,2),(-8,-10) | f + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | f + (2,2),(0,0) | (3,3),(3,3) | f + (3,3),(1,1) | (2,2),(0,0) | f + (3,3),(1,1) | (3,3),(1,1) | f + (3,3),(1,1) | (-2,2),(-8,-10) | f + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | f + (3,3),(1,1) | (3,3),(3,3) | f + (-2,2),(-8,-10) | (2,2),(0,0) | f + (-2,2),(-8,-10) | (3,3),(1,1) | f + (-2,2),(-8,-10) | (-2,2),(-8,-10) | f + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | f + (-2,2),(-8,-10) | (3,3),(3,3) | f + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | t + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | f + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | t + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | f + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | f + (3,3),(3,3) | (2,2),(0,0) | t + (3,3),(3,3) | (3,3),(1,1) | t + (3,3),(3,3) | (-2,2),(-8,-10) | t + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | f + (3,3),(3,3) | (3,3),(3,3) | t +(25 rows) + +-- Intersection point with box +SELECT b1.f1, b2.f1, b1.f1 # b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? 
+---------------------+---------------------+--------------------- + (2,2),(0,0) | (2,2),(0,0) | (2,2),(0,0) + (2,2),(0,0) | (3,3),(1,1) | (2,2),(1,1) + (2,2),(0,0) | (-2,2),(-8,-10) | + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | + (2,2),(0,0) | (3,3),(3,3) | + (3,3),(1,1) | (2,2),(0,0) | (2,2),(1,1) + (3,3),(1,1) | (3,3),(1,1) | (3,3),(1,1) + (3,3),(1,1) | (-2,2),(-8,-10) | + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | (2.5,3),(2.5,2.5) + (3,3),(1,1) | (3,3),(3,3) | (3,3),(3,3) + (-2,2),(-8,-10) | (2,2),(0,0) | + (-2,2),(-8,-10) | (3,3),(1,1) | + (-2,2),(-8,-10) | (-2,2),(-8,-10) | (-2,2),(-8,-10) + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | + (-2,2),(-8,-10) | (3,3),(3,3) | + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | (2.5,3),(2.5,2.5) + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | + (3,3),(3,3) | (2,2),(0,0) | + (3,3),(3,3) | (3,3),(1,1) | (3,3),(3,3) + (3,3),(3,3) | (-2,2),(-8,-10) | + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | + (3,3),(3,3) | (3,3),(3,3) | (3,3),(3,3) +(25 rows) + +-- Diagonal +SELECT f1, diagonal(f1) FROM BOX_TBL; + f1 | diagonal +---------------------+----------------------- + (2,2),(0,0) | [(2,2),(0,0)] + (3,3),(1,1) | [(3,3),(1,1)] + (-2,2),(-8,-10) | [(-2,2),(-8,-10)] + (2.5,3.5),(2.5,2.5) | [(2.5,3.5),(2.5,2.5)] + (3,3),(3,3) | [(3,3),(3,3)] +(5 rows) + +-- Distance to box +SELECT b1.f1, b2.f1, b1.f1 <-> b2.f1 FROM BOX_TBL b1, BOX_TBL b2; + f1 | f1 | ?column? +---------------------+---------------------+--------------- + (2,2),(0,0) | (2,2),(0,0) | 0 + (2,2),(0,0) | (3,3),(1,1) | 1.41421356237 + (2,2),(0,0) | (-2,2),(-8,-10) | 7.81024967591 + (2,2),(0,0) | (2.5,3.5),(2.5,2.5) | 2.5 + (2,2),(0,0) | (3,3),(3,3) | 2.82842712475 + (3,3),(1,1) | (2,2),(0,0) | 1.41421356237 + (3,3),(1,1) | (3,3),(1,1) | 0 + (3,3),(1,1) | (-2,2),(-8,-10) | 9.21954445729 + (3,3),(1,1) | (2.5,3.5),(2.5,2.5) | 1.11803398875 + (3,3),(1,1) | (3,3),(3,3) | 1.41421356237 + (-2,2),(-8,-10) | (2,2),(0,0) | 7.81024967591 + (-2,2),(-8,-10) | (3,3),(1,1) | 9.21954445729 + (-2,2),(-8,-10) | (-2,2),(-8,-10) | 0 + (-2,2),(-8,-10) | (2.5,3.5),(2.5,2.5) | 10.2591422643 + (-2,2),(-8,-10) | (3,3),(3,3) | 10.6301458127 + (2.5,3.5),(2.5,2.5) | (2,2),(0,0) | 2.5 + (2.5,3.5),(2.5,2.5) | (3,3),(1,1) | 1.11803398875 + (2.5,3.5),(2.5,2.5) | (-2,2),(-8,-10) | 10.2591422643 + (2.5,3.5),(2.5,2.5) | (2.5,3.5),(2.5,2.5) | 0 + (2.5,3.5),(2.5,2.5) | (3,3),(3,3) | 0.5 + (3,3),(3,3) | (2,2),(0,0) | 2.82842712475 + (3,3),(3,3) | (3,3),(1,1) | 1.41421356237 + (3,3),(3,3) | (-2,2),(-8,-10) | 10.6301458127 + (3,3),(3,3) | (2.5,3.5),(2.5,2.5) | 0.5 + (3,3),(3,3) | (3,3),(3,3) | 0 +(25 rows) + +-- +-- Paths +-- +-- Points +SELECT f1, npoints(f1) FROM PATH_TBL; + f1 | npoints +---------------------------+--------- + [(1,2),(3,4)] | 2 + ((1,2),(3,4)) | 2 + [(0,0),(3,0),(4,5),(1,6)] | 4 + ((1,2),(3,4)) | 2 + ((1,2),(3,4)) | 2 + [(1,2),(3,4)] | 2 + ((10,20)) | 1 + [(11,12),(13,14)] | 2 + ((11,12),(13,14)) | 2 +(9 rows) + +-- Area +SELECT f1, area(f1) FROM PATH_TBL; + f1 | area +---------------------------+------ + [(1,2),(3,4)] | + ((1,2),(3,4)) | 0 + [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | + ((10,20)) | 0 + [(11,12),(13,14)] | + ((11,12),(13,14)) | 0 +(9 rows) + +-- Length +SELECT f1, @-@ f1 FROM PATH_TBL; + f1 | ?column? 
+---------------------------+--------------- + [(1,2),(3,4)] | 2.82842712475 + ((1,2),(3,4)) | 5.65685424949 + [(0,0),(3,0),(4,5),(1,6)] | 11.2612971738 + ((1,2),(3,4)) | 5.65685424949 + ((1,2),(3,4)) | 5.65685424949 + [(1,2),(3,4)] | 2.82842712475 + ((10,20)) | 0 + [(11,12),(13,14)] | 2.82842712475 + ((11,12),(13,14)) | 5.65685424949 +(9 rows) + +-- To polygon +SELECT f1, f1::polygon FROM PATH_TBL WHERE isclosed(f1); + f1 | f1 +-------------------+------------------- + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((10,20)) | ((10,20)) + ((11,12),(13,14)) | ((11,12),(13,14)) +(5 rows) + +-- Open path cannot be converted to polygon error +SELECT f1, f1::polygon FROM PATH_TBL WHERE isopen(f1); +ERROR: open path cannot be converted to polygon +-- Has points less than path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 < p2.f1; + f1 | f1 +-------------------+--------------------------- + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | [(11,12),(13,14)] + ((10,20)) | ((11,12),(13,14)) + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] + ((11,12),(13,14)) | [(0,0),(3,0),(4,5),(1,6)] +(15 rows) + +-- Has points less than or equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 <= p2.f1; + f1 | f1 +---------------------------+--------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | ((1,2),(3,4)) + ((10,20)) | [(1,2),(3,4)] + ((10,20)) | ((10,20)) + ((10,20)) | [(11,12),(13,14)] + ((10,20)) | ((11,12),(13,14)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] + 
[(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | [(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(0,0),(3,0),(4,5),(1,6)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | [(11,12),(13,14)] + ((11,12),(13,14)) | ((11,12),(13,14)) +(66 rows) + +-- Has points equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 = p2.f1; + f1 | f1 +---------------------------+--------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((10,20)) | ((10,20)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | [(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | [(11,12),(13,14)] + ((11,12),(13,14)) | ((11,12),(13,14)) +(51 rows) + +-- Has points greater than or equal to path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 >= p2.f1; + f1 | f1 +---------------------------+--------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] + [(0,0),(3,0),(4,5),(1,6)] | 
((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | ((1,2),(3,4)) + ((1,2),(3,4)) | [(1,2),(3,4)] + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | [(11,12),(13,14)] + ((1,2),(3,4)) | ((11,12),(13,14)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(1,2),(3,4)] | [(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) + [(1,2),(3,4)] | [(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) + ((10,20)) | ((10,20)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((1,2),(3,4)) + [(11,12),(13,14)] | [(1,2),(3,4)] + [(11,12),(13,14)] | ((10,20)) + [(11,12),(13,14)] | [(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | ((1,2),(3,4)) + ((11,12),(13,14)) | [(1,2),(3,4)] + ((11,12),(13,14)) | ((10,20)) + ((11,12),(13,14)) | [(11,12),(13,14)] + ((11,12),(13,14)) | ((11,12),(13,14)) +(66 rows) + +-- Has points greater than path +SELECT p1.f1, p2.f1 FROM PATH_TBL p1, PATH_TBL p2 WHERE p1.f1 > p2.f1; + f1 | f1 +---------------------------+------------------- + [(1,2),(3,4)] | ((10,20)) + ((1,2),(3,4)) | ((10,20)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) + ((1,2),(3,4)) | ((10,20)) + ((1,2),(3,4)) | ((10,20)) + [(1,2),(3,4)] | ((10,20)) + [(11,12),(13,14)] | ((10,20)) + ((11,12),(13,14)) | ((10,20)) +(15 rows) + +-- Add path +SELECT p1.f1, p2.f1, p1.f1 + p2.f1 FROM PATH_TBL p1, PATH_TBL p2; + f1 | f1 | ?column? 
+---------------------------+---------------------------+--------------------------------------------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4),(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) | + [(1,2),(3,4)] | [(11,12),(13,14)] | [(1,2),(3,4),(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((10,20)) | + ((1,2),(3,4)) | [(11,12),(13,14)] | + ((1,2),(3,4)) | ((11,12),(13,14)) | + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6),(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6),(0,0),(3,0),(4,5),(1,6)] + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6),(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) | + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6),(11,12),(13,14)] + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((10,20)) | + ((1,2),(3,4)) | [(11,12),(13,14)] | + ((1,2),(3,4)) | ((11,12),(13,14)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | ((1,2),(3,4)) | + ((1,2),(3,4)) | [(1,2),(3,4)] | + ((1,2),(3,4)) | ((10,20)) | + ((1,2),(3,4)) | [(11,12),(13,14)] | + ((1,2),(3,4)) | ((11,12),(13,14)) | + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4),(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | ((1,2),(3,4)) | + [(1,2),(3,4)] | [(1,2),(3,4)] | [(1,2),(3,4),(1,2),(3,4)] + [(1,2),(3,4)] | ((10,20)) | + [(1,2),(3,4)] | [(11,12),(13,14)] | [(1,2),(3,4),(11,12),(13,14)] + [(1,2),(3,4)] | ((11,12),(13,14)) | + ((10,20)) | [(1,2),(3,4)] | + ((10,20)) | ((1,2),(3,4)) | + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] | + ((10,20)) | ((1,2),(3,4)) | + ((10,20)) | ((1,2),(3,4)) | + ((10,20)) | [(1,2),(3,4)] | + ((10,20)) | ((10,20)) | + ((10,20)) | [(11,12),(13,14)] | + ((10,20)) | ((11,12),(13,14)) | + [(11,12),(13,14)] | [(1,2),(3,4)] | [(11,12),(13,14),(1,2),(3,4)] + [(11,12),(13,14)] | ((1,2),(3,4)) | + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14),(0,0),(3,0),(4,5),(1,6)] + [(11,12),(13,14)] | ((1,2),(3,4)) | + [(11,12),(13,14)] | ((1,2),(3,4)) | + [(11,12),(13,14)] | [(1,2),(3,4)] | [(11,12),(13,14),(1,2),(3,4)] + [(11,12),(13,14)] | ((10,20)) | + [(11,12),(13,14)] | [(11,12),(13,14)] | [(11,12),(13,14),(11,12),(13,14)] + [(11,12),(13,14)] | ((11,12),(13,14)) | + ((11,12),(13,14)) | [(1,2),(3,4)] | + ((11,12),(13,14)) | ((1,2),(3,4)) | + ((11,12),(13,14)) | [(0,0),(3,0),(4,5),(1,6)] | + ((11,12),(13,14)) | ((1,2),(3,4)) | + ((11,12),(13,14)) | ((1,2),(3,4)) | + 
((11,12),(13,14)) | [(1,2),(3,4)] | + ((11,12),(13,14)) | ((10,20)) | + ((11,12),(13,14)) | [(11,12),(13,14)] | + ((11,12),(13,14)) | ((11,12),(13,14)) | +(81 rows) + +-- Add point +SELECT p.f1, p1.f1, p.f1 + p1.f1 FROM PATH_TBL p, POINT_TBL p1; + f1 | f1 | ?column? +---------------------------+-------------------+--------------------------------------------------------------------------- + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (0,0) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((10,20)) | (0,0) | ((10,20)) + [(11,12),(13,14)] | (0,0) | [(11,12),(13,14)] + ((11,12),(13,14)) | (0,0) | ((11,12),(13,14)) + [(1,2),(3,4)] | (-10,0) | [(-9,2),(-7,4)] + ((1,2),(3,4)) | (-10,0) | ((-9,2),(-7,4)) + [(0,0),(3,0),(4,5),(1,6)] | (-10,0) | [(-10,0),(-7,0),(-6,5),(-9,6)] + ((1,2),(3,4)) | (-10,0) | ((-9,2),(-7,4)) + ((1,2),(3,4)) | (-10,0) | ((-9,2),(-7,4)) + [(1,2),(3,4)] | (-10,0) | [(-9,2),(-7,4)] + ((10,20)) | (-10,0) | ((0,20)) + [(11,12),(13,14)] | (-10,0) | [(1,12),(3,14)] + ((11,12),(13,14)) | (-10,0) | ((1,12),(3,14)) + [(1,2),(3,4)] | (-3,4) | [(-2,6),(0,8)] + ((1,2),(3,4)) | (-3,4) | ((-2,6),(0,8)) + [(0,0),(3,0),(4,5),(1,6)] | (-3,4) | [(-3,4),(0,4),(1,9),(-2,10)] + ((1,2),(3,4)) | (-3,4) | ((-2,6),(0,8)) + ((1,2),(3,4)) | (-3,4) | ((-2,6),(0,8)) + [(1,2),(3,4)] | (-3,4) | [(-2,6),(0,8)] + ((10,20)) | (-3,4) | ((7,24)) + [(11,12),(13,14)] | (-3,4) | [(8,16),(10,18)] + ((11,12),(13,14)) | (-3,4) | ((8,16),(10,18)) + [(1,2),(3,4)] | (5.1,34.5) | [(6.1,36.5),(8.1,38.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((6.1,36.5),(8.1,38.5)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(5.1,34.5),(8.1,34.5),(9.1,39.5),(6.1,40.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((6.1,36.5),(8.1,38.5)) + ((1,2),(3,4)) | (5.1,34.5) | ((6.1,36.5),(8.1,38.5)) + [(1,2),(3,4)] | (5.1,34.5) | [(6.1,36.5),(8.1,38.5)] + ((10,20)) | (5.1,34.5) | ((15.1,54.5)) + [(11,12),(13,14)] | (5.1,34.5) | [(16.1,46.5),(18.1,48.5)] + ((11,12),(13,14)) | (5.1,34.5) | ((16.1,46.5),(18.1,48.5)) + [(1,2),(3,4)] | (-5,-12) | [(-4,-10),(-2,-8)] + ((1,2),(3,4)) | (-5,-12) | ((-4,-10),(-2,-8)) + [(0,0),(3,0),(4,5),(1,6)] | (-5,-12) | [(-5,-12),(-2,-12),(-1,-7),(-4,-6)] + ((1,2),(3,4)) | (-5,-12) | ((-4,-10),(-2,-8)) + ((1,2),(3,4)) | (-5,-12) | ((-4,-10),(-2,-8)) + [(1,2),(3,4)] | (-5,-12) | [(-4,-10),(-2,-8)] + ((10,20)) | (-5,-12) | ((5,8)) + [(11,12),(13,14)] | (-5,-12) | [(6,0),(8,2)] + ((11,12),(13,14)) | (-5,-12) | ((6,0),(8,2)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (1e-300,-1e-300) | [(1e-300,-1e-300),(3,-1e-300),(4,5),(1,6)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((10,20)) | (1e-300,-1e-300) | ((10,20)) + [(11,12),(13,14)] | (1e-300,-1e-300) | [(11,12),(13,14)] + ((11,12),(13,14)) | (1e-300,-1e-300) | ((11,12),(13,14)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(1e+300,Infinity),(1e+300,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (1e+300,Infinity) | [(1e+300,Infinity),(1e+300,Infinity),(1e+300,Infinity),(1e+300,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + ((1,2),(3,4)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + [(1,2),(3,4)] | (1e+300,Infinity) | 
[(1e+300,Infinity),(1e+300,Infinity)] + ((10,20)) | (1e+300,Infinity) | ((1e+300,Infinity)) + [(11,12),(13,14)] | (1e+300,Infinity) | [(1e+300,Infinity),(1e+300,Infinity)] + ((11,12),(13,14)) | (1e+300,Infinity) | ((1e+300,Infinity),(1e+300,Infinity)) + [(1,2),(3,4)] | (Infinity,1e+300) | [(Infinity,1e+300),(Infinity,1e+300)] + ((1,2),(3,4)) | (Infinity,1e+300) | ((Infinity,1e+300),(Infinity,1e+300)) + [(0,0),(3,0),(4,5),(1,6)] | (Infinity,1e+300) | [(Infinity,1e+300),(Infinity,1e+300),(Infinity,1e+300),(Infinity,1e+300)] + ((1,2),(3,4)) | (Infinity,1e+300) | ((Infinity,1e+300),(Infinity,1e+300)) + ((1,2),(3,4)) | (Infinity,1e+300) | ((Infinity,1e+300),(Infinity,1e+300)) + [(1,2),(3,4)] | (Infinity,1e+300) | [(Infinity,1e+300),(Infinity,1e+300)] + ((10,20)) | (Infinity,1e+300) | ((Infinity,1e+300)) + [(11,12),(13,14)] | (Infinity,1e+300) | [(Infinity,1e+300),(Infinity,1e+300)] + ((11,12),(13,14)) | (Infinity,1e+300) | ((Infinity,1e+300),(Infinity,1e+300)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(0,0),(3,0),(4,5),(1,6)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN),(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((10,20)) | (NaN,NaN) | ((NaN,NaN)) + [(11,12),(13,14)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((11,12),(13,14)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (10,10) | [(11,12),(13,14)] + ((1,2),(3,4)) | (10,10) | ((11,12),(13,14)) + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(10,10),(13,10),(14,15),(11,16)] + ((1,2),(3,4)) | (10,10) | ((11,12),(13,14)) + ((1,2),(3,4)) | (10,10) | ((11,12),(13,14)) + [(1,2),(3,4)] | (10,10) | [(11,12),(13,14)] + ((10,20)) | (10,10) | ((20,30)) + [(11,12),(13,14)] | (10,10) | [(21,22),(23,24)] + ((11,12),(13,14)) | (10,10) | ((21,22),(23,24)) +(90 rows) + +-- Subtract point +SELECT p.f1, p1.f1, p.f1 - p1.f1 FROM PATH_TBL p, POINT_TBL p1; + f1 | f1 | ?column? 
+---------------------------+-------------------+----------------------------------------------------------------------------------- + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (0,0) | [(0,0),(3,0),(4,5),(1,6)] + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + ((1,2),(3,4)) | (0,0) | ((1,2),(3,4)) + [(1,2),(3,4)] | (0,0) | [(1,2),(3,4)] + ((10,20)) | (0,0) | ((10,20)) + [(11,12),(13,14)] | (0,0) | [(11,12),(13,14)] + ((11,12),(13,14)) | (0,0) | ((11,12),(13,14)) + [(1,2),(3,4)] | (-10,0) | [(11,2),(13,4)] + ((1,2),(3,4)) | (-10,0) | ((11,2),(13,4)) + [(0,0),(3,0),(4,5),(1,6)] | (-10,0) | [(10,0),(13,0),(14,5),(11,6)] + ((1,2),(3,4)) | (-10,0) | ((11,2),(13,4)) + ((1,2),(3,4)) | (-10,0) | ((11,2),(13,4)) + [(1,2),(3,4)] | (-10,0) | [(11,2),(13,4)] + ((10,20)) | (-10,0) | ((20,20)) + [(11,12),(13,14)] | (-10,0) | [(21,12),(23,14)] + ((11,12),(13,14)) | (-10,0) | ((21,12),(23,14)) + [(1,2),(3,4)] | (-3,4) | [(4,-2),(6,0)] + ((1,2),(3,4)) | (-3,4) | ((4,-2),(6,0)) + [(0,0),(3,0),(4,5),(1,6)] | (-3,4) | [(3,-4),(6,-4),(7,1),(4,2)] + ((1,2),(3,4)) | (-3,4) | ((4,-2),(6,0)) + ((1,2),(3,4)) | (-3,4) | ((4,-2),(6,0)) + [(1,2),(3,4)] | (-3,4) | [(4,-2),(6,0)] + ((10,20)) | (-3,4) | ((13,16)) + [(11,12),(13,14)] | (-3,4) | [(14,8),(16,10)] + ((11,12),(13,14)) | (-3,4) | ((14,8),(16,10)) + [(1,2),(3,4)] | (5.1,34.5) | [(-4.1,-32.5),(-2.1,-30.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((-4.1,-32.5),(-2.1,-30.5)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(-5.1,-34.5),(-2.1,-34.5),(-1.1,-29.5),(-4.1,-28.5)] + ((1,2),(3,4)) | (5.1,34.5) | ((-4.1,-32.5),(-2.1,-30.5)) + ((1,2),(3,4)) | (5.1,34.5) | ((-4.1,-32.5),(-2.1,-30.5)) + [(1,2),(3,4)] | (5.1,34.5) | [(-4.1,-32.5),(-2.1,-30.5)] + ((10,20)) | (5.1,34.5) | ((4.9,-14.5)) + [(11,12),(13,14)] | (5.1,34.5) | [(5.9,-22.5),(7.9,-20.5)] + ((11,12),(13,14)) | (5.1,34.5) | ((5.9,-22.5),(7.9,-20.5)) + [(1,2),(3,4)] | (-5,-12) | [(6,14),(8,16)] + ((1,2),(3,4)) | (-5,-12) | ((6,14),(8,16)) + [(0,0),(3,0),(4,5),(1,6)] | (-5,-12) | [(5,12),(8,12),(9,17),(6,18)] + ((1,2),(3,4)) | (-5,-12) | ((6,14),(8,16)) + ((1,2),(3,4)) | (-5,-12) | ((6,14),(8,16)) + [(1,2),(3,4)] | (-5,-12) | [(6,14),(8,16)] + ((10,20)) | (-5,-12) | ((15,32)) + [(11,12),(13,14)] | (-5,-12) | [(16,24),(18,26)] + ((11,12),(13,14)) | (-5,-12) | ((16,24),(18,26)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | (1e-300,-1e-300) | [(-1e-300,1e-300),(3,1e-300),(4,5),(1,6)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + ((1,2),(3,4)) | (1e-300,-1e-300) | ((1,2),(3,4)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(1,2),(3,4)] + ((10,20)) | (1e-300,-1e-300) | ((10,20)) + [(11,12),(13,14)] | (1e-300,-1e-300) | [(11,12),(13,14)] + ((11,12),(13,14)) | (1e-300,-1e-300) | ((11,12),(13,14)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity),(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + ((1,2),(3,4)) | (1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((10,20)) | (1e+300,Infinity) | ((-1e+300,-Infinity)) + [(11,12),(13,14)] | (1e+300,Infinity) | [(-1e+300,-Infinity),(-1e+300,-Infinity)] + ((11,12),(13,14)) | 
(1e+300,Infinity) | ((-1e+300,-Infinity),(-1e+300,-Infinity)) + [(1,2),(3,4)] | (Infinity,1e+300) | [(-Infinity,-1e+300),(-Infinity,-1e+300)] + ((1,2),(3,4)) | (Infinity,1e+300) | ((-Infinity,-1e+300),(-Infinity,-1e+300)) + [(0,0),(3,0),(4,5),(1,6)] | (Infinity,1e+300) | [(-Infinity,-1e+300),(-Infinity,-1e+300),(-Infinity,-1e+300),(-Infinity,-1e+300)] + ((1,2),(3,4)) | (Infinity,1e+300) | ((-Infinity,-1e+300),(-Infinity,-1e+300)) + ((1,2),(3,4)) | (Infinity,1e+300) | ((-Infinity,-1e+300),(-Infinity,-1e+300)) + [(1,2),(3,4)] | (Infinity,1e+300) | [(-Infinity,-1e+300),(-Infinity,-1e+300)] + ((10,20)) | (Infinity,1e+300) | ((-Infinity,-1e+300)) + [(11,12),(13,14)] | (Infinity,1e+300) | [(-Infinity,-1e+300),(-Infinity,-1e+300)] + ((11,12),(13,14)) | (Infinity,1e+300) | ((-Infinity,-1e+300),(-Infinity,-1e+300)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(0,0),(3,0),(4,5),(1,6)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN),(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((10,20)) | (NaN,NaN) | ((NaN,NaN)) + [(11,12),(13,14)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((11,12),(13,14)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (10,10) | [(-9,-8),(-7,-6)] + ((1,2),(3,4)) | (10,10) | ((-9,-8),(-7,-6)) + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(-10,-10),(-7,-10),(-6,-5),(-9,-4)] + ((1,2),(3,4)) | (10,10) | ((-9,-8),(-7,-6)) + ((1,2),(3,4)) | (10,10) | ((-9,-8),(-7,-6)) + [(1,2),(3,4)] | (10,10) | [(-9,-8),(-7,-6)] + ((10,20)) | (10,10) | ((0,10)) + [(11,12),(13,14)] | (10,10) | [(1,2),(3,4)] + ((11,12),(13,14)) | (10,10) | ((1,2),(3,4)) +(90 rows) + +-- Multiply with point +SELECT p.f1, p1.f1, p.f1 * p1.f1 FROM PATH_TBL p, POINT_TBL p1; + f1 | f1 | ?column? 
+---------------------------+-------------------+---------------------------------------------------------------------- + [(1,2),(3,4)] | (0,0) | [(0,0),(0,0)] + ((1,2),(3,4)) | (0,0) | ((0,0),(0,0)) + [(0,0),(3,0),(4,5),(1,6)] | (0,0) | [(0,0),(0,0),(0,0),(0,0)] + ((1,2),(3,4)) | (0,0) | ((0,0),(0,0)) + ((1,2),(3,4)) | (0,0) | ((0,0),(0,0)) + [(1,2),(3,4)] | (0,0) | [(0,0),(0,0)] + ((10,20)) | (0,0) | ((0,0)) + [(11,12),(13,14)] | (0,0) | [(0,0),(0,0)] + ((11,12),(13,14)) | (0,0) | ((0,0),(0,0)) + [(1,2),(3,4)] | (-10,0) | [(-10,-20),(-30,-40)] + ((1,2),(3,4)) | (-10,0) | ((-10,-20),(-30,-40)) + [(0,0),(3,0),(4,5),(1,6)] | (-10,0) | [(-0,0),(-30,0),(-40,-50),(-10,-60)] + ((1,2),(3,4)) | (-10,0) | ((-10,-20),(-30,-40)) + ((1,2),(3,4)) | (-10,0) | ((-10,-20),(-30,-40)) + [(1,2),(3,4)] | (-10,0) | [(-10,-20),(-30,-40)] + ((10,20)) | (-10,0) | ((-100,-200)) + [(11,12),(13,14)] | (-10,0) | [(-110,-120),(-130,-140)] + ((11,12),(13,14)) | (-10,0) | ((-110,-120),(-130,-140)) + [(1,2),(3,4)] | (-3,4) | [(-11,-2),(-25,0)] + ((1,2),(3,4)) | (-3,4) | ((-11,-2),(-25,0)) + [(0,0),(3,0),(4,5),(1,6)] | (-3,4) | [(-0,0),(-9,12),(-32,1),(-27,-14)] + ((1,2),(3,4)) | (-3,4) | ((-11,-2),(-25,0)) + ((1,2),(3,4)) | (-3,4) | ((-11,-2),(-25,0)) + [(1,2),(3,4)] | (-3,4) | [(-11,-2),(-25,0)] + ((10,20)) | (-3,4) | ((-110,-20)) + [(11,12),(13,14)] | (-3,4) | [(-81,8),(-95,10)] + ((11,12),(13,14)) | (-3,4) | ((-81,8),(-95,10)) + [(1,2),(3,4)] | (5.1,34.5) | [(-63.9,44.7),(-122.7,123.9)] + ((1,2),(3,4)) | (5.1,34.5) | ((-63.9,44.7),(-122.7,123.9)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(0,0),(15.3,103.5),(-152.1,163.5),(-201.9,65.1)] + ((1,2),(3,4)) | (5.1,34.5) | ((-63.9,44.7),(-122.7,123.9)) + ((1,2),(3,4)) | (5.1,34.5) | ((-63.9,44.7),(-122.7,123.9)) + [(1,2),(3,4)] | (5.1,34.5) | [(-63.9,44.7),(-122.7,123.9)] + ((10,20)) | (5.1,34.5) | ((-639,447)) + [(11,12),(13,14)] | (5.1,34.5) | [(-357.9,440.7),(-416.7,519.9)] + ((11,12),(13,14)) | (5.1,34.5) | ((-357.9,440.7),(-416.7,519.9)) + [(1,2),(3,4)] | (-5,-12) | [(19,-22),(33,-56)] + ((1,2),(3,4)) | (-5,-12) | ((19,-22),(33,-56)) + [(0,0),(3,0),(4,5),(1,6)] | (-5,-12) | [(0,-0),(-15,-36),(40,-73),(67,-42)] + ((1,2),(3,4)) | (-5,-12) | ((19,-22),(33,-56)) + ((1,2),(3,4)) | (-5,-12) | ((19,-22),(33,-56)) + [(1,2),(3,4)] | (-5,-12) | [(19,-22),(33,-56)] + ((10,20)) | (-5,-12) | ((190,-220)) + [(11,12),(13,14)] | (-5,-12) | [(89,-192),(103,-226)] + ((11,12),(13,14)) | (-5,-12) | ((89,-192),(103,-226)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(3e-300,1e-300),(7e-300,1e-300)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((3e-300,1e-300),(7e-300,1e-300)) + [(0,0),(3,0),(4,5),(1,6)] | (1e-300,-1e-300) | [(0,0),(3e-300,-3e-300),(9e-300,1e-300),(7e-300,5e-300)] + ((1,2),(3,4)) | (1e-300,-1e-300) | ((3e-300,1e-300),(7e-300,1e-300)) + ((1,2),(3,4)) | (1e-300,-1e-300) | ((3e-300,1e-300),(7e-300,1e-300)) + [(1,2),(3,4)] | (1e-300,-1e-300) | [(3e-300,1e-300),(7e-300,1e-300)] + ((10,20)) | (1e-300,-1e-300) | ((3e-299,1e-299)) + [(11,12),(13,14)] | (1e-300,-1e-300) | [(2.3e-299,1e-300),(2.7e-299,1e-300)] + ((11,12),(13,14)) | (1e-300,-1e-300) | ((2.3e-299,1e-300),(2.7e-299,1e-300)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-Infinity,Infinity),(-Infinity,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-Infinity,Infinity),(-Infinity,Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (1e+300,Infinity) | [(NaN,NaN),(NaN,Infinity),(-Infinity,Infinity),(-Infinity,Infinity)] + ((1,2),(3,4)) | (1e+300,Infinity) | ((-Infinity,Infinity),(-Infinity,Infinity)) + ((1,2),(3,4)) | (1e+300,Infinity) | 
((-Infinity,Infinity),(-Infinity,Infinity)) + [(1,2),(3,4)] | (1e+300,Infinity) | [(-Infinity,Infinity),(-Infinity,Infinity)] + ((10,20)) | (1e+300,Infinity) | ((-Infinity,Infinity)) + [(11,12),(13,14)] | (1e+300,Infinity) | [(-Infinity,Infinity),(-Infinity,Infinity)] + ((11,12),(13,14)) | (1e+300,Infinity) | ((-Infinity,Infinity),(-Infinity,Infinity)) + [(1,2),(3,4)] | (Infinity,1e+300) | [(Infinity,Infinity),(Infinity,Infinity)] + ((1,2),(3,4)) | (Infinity,1e+300) | ((Infinity,Infinity),(Infinity,Infinity)) + [(0,0),(3,0),(4,5),(1,6)] | (Infinity,1e+300) | [(NaN,NaN),(Infinity,NaN),(Infinity,Infinity),(Infinity,Infinity)] + ((1,2),(3,4)) | (Infinity,1e+300) | ((Infinity,Infinity),(Infinity,Infinity)) + ((1,2),(3,4)) | (Infinity,1e+300) | ((Infinity,Infinity),(Infinity,Infinity)) + [(1,2),(3,4)] | (Infinity,1e+300) | [(Infinity,Infinity),(Infinity,Infinity)] + ((10,20)) | (Infinity,1e+300) | ((Infinity,Infinity)) + [(11,12),(13,14)] | (Infinity,1e+300) | [(Infinity,Infinity),(Infinity,Infinity)] + ((11,12),(13,14)) | (Infinity,1e+300) | ((Infinity,Infinity),(Infinity,Infinity)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(0,0),(3,0),(4,5),(1,6)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN),(NaN,NaN),(NaN,NaN)] + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + ((1,2),(3,4)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((10,20)) | (NaN,NaN) | ((NaN,NaN)) + [(11,12),(13,14)] | (NaN,NaN) | [(NaN,NaN),(NaN,NaN)] + ((11,12),(13,14)) | (NaN,NaN) | ((NaN,NaN),(NaN,NaN)) + [(1,2),(3,4)] | (10,10) | [(-10,30),(-10,70)] + ((1,2),(3,4)) | (10,10) | ((-10,30),(-10,70)) + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(0,0),(30,30),(-10,90),(-50,70)] + ((1,2),(3,4)) | (10,10) | ((-10,30),(-10,70)) + ((1,2),(3,4)) | (10,10) | ((-10,30),(-10,70)) + [(1,2),(3,4)] | (10,10) | [(-10,30),(-10,70)] + ((10,20)) | (10,10) | ((-100,300)) + [(11,12),(13,14)] | (10,10) | [(-10,230),(-10,270)] + ((11,12),(13,14)) | (10,10) | ((-10,230),(-10,270)) +(90 rows) + +-- Divide by point +SELECT p.f1, p1.f1, p.f1 / p1.f1 FROM PATH_TBL p, POINT_TBL p1 WHERE p1.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? 
+---------------------------+------------+----------------------------------------------------------------------------------------------------------------- + [(1,2),(3,4)] | (5.1,34.5) | [(0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)] + [(1,2),(3,4)] | (10,10) | [(0.15,0.05),(0.35,0.05)] + ((1,2),(3,4)) | (5.1,34.5) | ((0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)) + ((1,2),(3,4)) | (10,10) | ((0.15,0.05),(0.35,0.05)) + [(0,0),(3,0),(4,5),(1,6)] | (5.1,34.5) | [(0,0),(0.0125795471363,-0.0850969365103),(0.158600957032,-0.0924966701199),(0.174387055399,-0.00320655123082)] + [(0,0),(3,0),(4,5),(1,6)] | (10,10) | [(0,0),(0.15,-0.15),(0.45,0.05),(0.35,0.25)] + ((1,2),(3,4)) | (5.1,34.5) | ((0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)) + ((1,2),(3,4)) | (10,10) | ((0.15,0.05),(0.35,0.05)) + ((1,2),(3,4)) | (5.1,34.5) | ((0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)) + ((1,2),(3,4)) | (10,10) | ((0.15,0.05),(0.35,0.05)) + [(1,2),(3,4)] | (5.1,34.5) | [(0.0609244733856,-0.0199792807459),(0.12604212915,-0.0683242069952)] + [(1,2),(3,4)] | (10,10) | [(0.15,0.05),(0.35,0.05)] + ((10,20)) | (5.1,34.5) | ((0.609244733856,-0.199792807459)) + ((10,20)) | (10,10) | ((1.5,0.5)) + [(11,12),(13,14)] | (5.1,34.5) | [(0.386512752208,-0.261703911993),(0.451630407972,-0.310048838242)] + [(11,12),(13,14)] | (10,10) | [(1.15,0.05),(1.35,0.05)] + ((11,12),(13,14)) | (5.1,34.5) | ((0.386512752208,-0.261703911993),(0.451630407972,-0.310048838242)) + ((11,12),(13,14)) | (10,10) | ((1.15,0.05),(1.35,0.05)) +(18 rows) + +-- Division by 0 error +SELECT p.f1, p1.f1, p.f1 / p1.f1 FROM PATH_TBL p, POINT_TBL p1 WHERE p1.f1 ~= '(0,0)'::point; +ERROR: division by zero +-- Distance to path +SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM PATH_TBL p1, PATH_TBL p2; + f1 | f1 | ?column? 
+---------------------------+---------------------------+---------------- + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((10,20)) | 17.4642491966 + [(1,2),(3,4)] | [(11,12),(13,14)] | 11.313708499 + [(1,2),(3,4)] | ((11,12),(13,14)) | 11.313708499 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((10,20)) | 17.4642491966 + ((1,2),(3,4)) | [(11,12),(13,14)] | 11.313708499 + ((1,2),(3,4)) | ((11,12),(13,14)) | 11.313708499 + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | [(0,0),(3,0),(4,5),(1,6)] | 0 + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | ((1,2),(3,4)) | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | [(1,2),(3,4)] | 0.784464540553 + [(0,0),(3,0),(4,5),(1,6)] | ((10,20)) | 16.1554944214 + [(0,0),(3,0),(4,5),(1,6)] | [(11,12),(13,14)] | 9.89949493661 + [(0,0),(3,0),(4,5),(1,6)] | ((11,12),(13,14)) | 9.89949493661 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((10,20)) | 17.4642491966 + ((1,2),(3,4)) | [(11,12),(13,14)] | 11.313708499 + ((1,2),(3,4)) | ((11,12),(13,14)) | 11.313708499 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | ((1,2),(3,4)) | 0 + ((1,2),(3,4)) | [(1,2),(3,4)] | 0 + ((1,2),(3,4)) | ((10,20)) | 17.4642491966 + ((1,2),(3,4)) | [(11,12),(13,14)] | 11.313708499 + ((1,2),(3,4)) | ((11,12),(13,14)) | 11.313708499 + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(0,0),(3,0),(4,5),(1,6)] | 0.784464540553 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | ((1,2),(3,4)) | 0 + [(1,2),(3,4)] | [(1,2),(3,4)] | 0 + [(1,2),(3,4)] | ((10,20)) | 17.4642491966 + [(1,2),(3,4)] | [(11,12),(13,14)] | 11.313708499 + [(1,2),(3,4)] | ((11,12),(13,14)) | 11.313708499 + ((10,20)) | [(1,2),(3,4)] | 17.4642491966 + ((10,20)) | ((1,2),(3,4)) | 17.4642491966 + ((10,20)) | [(0,0),(3,0),(4,5),(1,6)] | 16.1554944214 + ((10,20)) | ((1,2),(3,4)) | 17.4642491966 + ((10,20)) | ((1,2),(3,4)) | 17.4642491966 + ((10,20)) | [(1,2),(3,4)] | 17.4642491966 + ((10,20)) | ((10,20)) | 0 + ((10,20)) | [(11,12),(13,14)] | 6.7082039325 + ((10,20)) | ((11,12),(13,14)) | 6.7082039325 + [(11,12),(13,14)] | [(1,2),(3,4)] | 11.313708499 + [(11,12),(13,14)] | ((1,2),(3,4)) | 11.313708499 + [(11,12),(13,14)] | [(0,0),(3,0),(4,5),(1,6)] | 9.89949493661 + [(11,12),(13,14)] | ((1,2),(3,4)) | 11.313708499 + [(11,12),(13,14)] | ((1,2),(3,4)) | 11.313708499 + [(11,12),(13,14)] | [(1,2),(3,4)] | 11.313708499 + [(11,12),(13,14)] | ((10,20)) | 6.7082039325 + [(11,12),(13,14)] | [(11,12),(13,14)] | 0 + [(11,12),(13,14)] | ((11,12),(13,14)) | 0 + ((11,12),(13,14)) | [(1,2),(3,4)] | 11.313708499 + ((11,12),(13,14)) | ((1,2),(3,4)) | 11.313708499 + ((11,12),(13,14)) | 
[(0,0),(3,0),(4,5),(1,6)] | 9.89949493661 + ((11,12),(13,14)) | ((1,2),(3,4)) | 11.313708499 + ((11,12),(13,14)) | ((1,2),(3,4)) | 11.313708499 + ((11,12),(13,14)) | [(1,2),(3,4)] | 11.313708499 + ((11,12),(13,14)) | ((10,20)) | 6.7082039325 + ((11,12),(13,14)) | [(11,12),(13,14)] | 0 + ((11,12),(13,14)) | ((11,12),(13,14)) | 0 +(81 rows) + +-- +-- Polygons +-- +-- containment +SELECT p.f1, poly.f1, poly.f1 @> p.f1 AS contains + FROM POLYGON_TBL poly, POINT_TBL p; + f1 | f1 | contains +-------------------+----------------------------+---------- + (0,0) | ((2,0),(2,4),(0,0)) | t + (0,0) | ((3,1),(3,3),(1,0)) | f + (0,0) | ((1,2),(3,4),(5,6),(7,8)) | f + (0,0) | ((7,8),(5,6),(3,4),(1,2)) | f + (0,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + (0,0) | ((0,0)) | t + (0,0) | ((0,1),(0,1)) | f + (-10,0) | ((2,0),(2,4),(0,0)) | f + (-10,0) | ((3,1),(3,3),(1,0)) | f + (-10,0) | ((1,2),(3,4),(5,6),(7,8)) | f + (-10,0) | ((7,8),(5,6),(3,4),(1,2)) | f + (-10,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + (-10,0) | ((0,0)) | f + (-10,0) | ((0,1),(0,1)) | f + (-3,4) | ((2,0),(2,4),(0,0)) | f + (-3,4) | ((3,1),(3,3),(1,0)) | f + (-3,4) | ((1,2),(3,4),(5,6),(7,8)) | f + (-3,4) | ((7,8),(5,6),(3,4),(1,2)) | f + (-3,4) | ((1,2),(7,8),(5,6),(3,-4)) | f + (-3,4) | ((0,0)) | f + (-3,4) | ((0,1),(0,1)) | f + (5.1,34.5) | ((2,0),(2,4),(0,0)) | f + (5.1,34.5) | ((3,1),(3,3),(1,0)) | f + (5.1,34.5) | ((1,2),(3,4),(5,6),(7,8)) | f + (5.1,34.5) | ((7,8),(5,6),(3,4),(1,2)) | f + (5.1,34.5) | ((1,2),(7,8),(5,6),(3,-4)) | f + (5.1,34.5) | ((0,0)) | f + (5.1,34.5) | ((0,1),(0,1)) | f + (-5,-12) | ((2,0),(2,4),(0,0)) | f + (-5,-12) | ((3,1),(3,3),(1,0)) | f + (-5,-12) | ((1,2),(3,4),(5,6),(7,8)) | f + (-5,-12) | ((7,8),(5,6),(3,4),(1,2)) | f + (-5,-12) | ((1,2),(7,8),(5,6),(3,-4)) | f + (-5,-12) | ((0,0)) | f + (-5,-12) | ((0,1),(0,1)) | f + (1e-300,-1e-300) | ((2,0),(2,4),(0,0)) | t + (1e-300,-1e-300) | ((3,1),(3,3),(1,0)) | f + (1e-300,-1e-300) | ((1,2),(3,4),(5,6),(7,8)) | f + (1e-300,-1e-300) | ((7,8),(5,6),(3,4),(1,2)) | f + (1e-300,-1e-300) | ((1,2),(7,8),(5,6),(3,-4)) | f + (1e-300,-1e-300) | ((0,0)) | t + (1e-300,-1e-300) | ((0,1),(0,1)) | f + (1e+300,Infinity) | ((2,0),(2,4),(0,0)) | f + (1e+300,Infinity) | ((3,1),(3,3),(1,0)) | f + (1e+300,Infinity) | ((1,2),(3,4),(5,6),(7,8)) | f + (1e+300,Infinity) | ((7,8),(5,6),(3,4),(1,2)) | f + (1e+300,Infinity) | ((1,2),(7,8),(5,6),(3,-4)) | f + (1e+300,Infinity) | ((0,0)) | f + (1e+300,Infinity) | ((0,1),(0,1)) | f + (Infinity,1e+300) | ((2,0),(2,4),(0,0)) | f + (Infinity,1e+300) | ((3,1),(3,3),(1,0)) | f + (Infinity,1e+300) | ((1,2),(3,4),(5,6),(7,8)) | f + (Infinity,1e+300) | ((7,8),(5,6),(3,4),(1,2)) | f + (Infinity,1e+300) | ((1,2),(7,8),(5,6),(3,-4)) | f + (Infinity,1e+300) | ((0,0)) | f + (Infinity,1e+300) | ((0,1),(0,1)) | f + (NaN,NaN) | ((2,0),(2,4),(0,0)) | t + (NaN,NaN) | ((3,1),(3,3),(1,0)) | t + (NaN,NaN) | ((1,2),(3,4),(5,6),(7,8)) | t + (NaN,NaN) | ((7,8),(5,6),(3,4),(1,2)) | t + (NaN,NaN) | ((1,2),(7,8),(5,6),(3,-4)) | t + (NaN,NaN) | ((0,0)) | t + (NaN,NaN) | ((0,1),(0,1)) | t + (10,10) | ((2,0),(2,4),(0,0)) | f + (10,10) | ((3,1),(3,3),(1,0)) | f + (10,10) | ((1,2),(3,4),(5,6),(7,8)) | f + (10,10) | ((7,8),(5,6),(3,4),(1,2)) | f + (10,10) | ((1,2),(7,8),(5,6),(3,-4)) | f + (10,10) | ((0,0)) | f + (10,10) | ((0,1),(0,1)) | f +(70 rows) + +SELECT p.f1, poly.f1, p.f1 <@ poly.f1 AS contained + FROM POLYGON_TBL poly, POINT_TBL p; + f1 | f1 | contained +-------------------+----------------------------+----------- + (0,0) | ((2,0),(2,4),(0,0)) | t + (0,0) | 
((3,1),(3,3),(1,0)) | f + (0,0) | ((1,2),(3,4),(5,6),(7,8)) | f + (0,0) | ((7,8),(5,6),(3,4),(1,2)) | f + (0,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + (0,0) | ((0,0)) | t + (0,0) | ((0,1),(0,1)) | f + (-10,0) | ((2,0),(2,4),(0,0)) | f + (-10,0) | ((3,1),(3,3),(1,0)) | f + (-10,0) | ((1,2),(3,4),(5,6),(7,8)) | f + (-10,0) | ((7,8),(5,6),(3,4),(1,2)) | f + (-10,0) | ((1,2),(7,8),(5,6),(3,-4)) | f + (-10,0) | ((0,0)) | f + (-10,0) | ((0,1),(0,1)) | f + (-3,4) | ((2,0),(2,4),(0,0)) | f + (-3,4) | ((3,1),(3,3),(1,0)) | f + (-3,4) | ((1,2),(3,4),(5,6),(7,8)) | f + (-3,4) | ((7,8),(5,6),(3,4),(1,2)) | f + (-3,4) | ((1,2),(7,8),(5,6),(3,-4)) | f + (-3,4) | ((0,0)) | f + (-3,4) | ((0,1),(0,1)) | f + (5.1,34.5) | ((2,0),(2,4),(0,0)) | f + (5.1,34.5) | ((3,1),(3,3),(1,0)) | f + (5.1,34.5) | ((1,2),(3,4),(5,6),(7,8)) | f + (5.1,34.5) | ((7,8),(5,6),(3,4),(1,2)) | f + (5.1,34.5) | ((1,2),(7,8),(5,6),(3,-4)) | f + (5.1,34.5) | ((0,0)) | f + (5.1,34.5) | ((0,1),(0,1)) | f + (-5,-12) | ((2,0),(2,4),(0,0)) | f + (-5,-12) | ((3,1),(3,3),(1,0)) | f + (-5,-12) | ((1,2),(3,4),(5,6),(7,8)) | f + (-5,-12) | ((7,8),(5,6),(3,4),(1,2)) | f + (-5,-12) | ((1,2),(7,8),(5,6),(3,-4)) | f + (-5,-12) | ((0,0)) | f + (-5,-12) | ((0,1),(0,1)) | f + (1e-300,-1e-300) | ((2,0),(2,4),(0,0)) | t + (1e-300,-1e-300) | ((3,1),(3,3),(1,0)) | f + (1e-300,-1e-300) | ((1,2),(3,4),(5,6),(7,8)) | f + (1e-300,-1e-300) | ((7,8),(5,6),(3,4),(1,2)) | f + (1e-300,-1e-300) | ((1,2),(7,8),(5,6),(3,-4)) | f + (1e-300,-1e-300) | ((0,0)) | t + (1e-300,-1e-300) | ((0,1),(0,1)) | f + (1e+300,Infinity) | ((2,0),(2,4),(0,0)) | f + (1e+300,Infinity) | ((3,1),(3,3),(1,0)) | f + (1e+300,Infinity) | ((1,2),(3,4),(5,6),(7,8)) | f + (1e+300,Infinity) | ((7,8),(5,6),(3,4),(1,2)) | f + (1e+300,Infinity) | ((1,2),(7,8),(5,6),(3,-4)) | f + (1e+300,Infinity) | ((0,0)) | f + (1e+300,Infinity) | ((0,1),(0,1)) | f + (Infinity,1e+300) | ((2,0),(2,4),(0,0)) | f + (Infinity,1e+300) | ((3,1),(3,3),(1,0)) | f + (Infinity,1e+300) | ((1,2),(3,4),(5,6),(7,8)) | f + (Infinity,1e+300) | ((7,8),(5,6),(3,4),(1,2)) | f + (Infinity,1e+300) | ((1,2),(7,8),(5,6),(3,-4)) | f + (Infinity,1e+300) | ((0,0)) | f + (Infinity,1e+300) | ((0,1),(0,1)) | f + (NaN,NaN) | ((2,0),(2,4),(0,0)) | t + (NaN,NaN) | ((3,1),(3,3),(1,0)) | t + (NaN,NaN) | ((1,2),(3,4),(5,6),(7,8)) | t + (NaN,NaN) | ((7,8),(5,6),(3,4),(1,2)) | t + (NaN,NaN) | ((1,2),(7,8),(5,6),(3,-4)) | t + (NaN,NaN) | ((0,0)) | t + (NaN,NaN) | ((0,1),(0,1)) | t + (10,10) | ((2,0),(2,4),(0,0)) | f + (10,10) | ((3,1),(3,3),(1,0)) | f + (10,10) | ((1,2),(3,4),(5,6),(7,8)) | f + (10,10) | ((7,8),(5,6),(3,4),(1,2)) | f + (10,10) | ((1,2),(7,8),(5,6),(3,-4)) | f + (10,10) | ((0,0)) | f + (10,10) | ((0,1),(0,1)) | f +(70 rows) + +SELECT npoints(f1) AS npoints, f1 AS polygon + FROM POLYGON_TBL; + npoints | polygon +---------+---------------------------- + 3 | ((2,0),(2,4),(0,0)) + 3 | ((3,1),(3,3),(1,0)) + 4 | ((1,2),(3,4),(5,6),(7,8)) + 4 | ((7,8),(5,6),(3,4),(1,2)) + 4 | ((1,2),(7,8),(5,6),(3,-4)) + 1 | ((0,0)) + 2 | ((0,1),(0,1)) +(7 rows) + +SELECT polygon(f1) + FROM BOX_TBL; + polygon +------------------------------------------- + ((0,0),(0,2),(2,2),(2,0)) + ((1,1),(1,3),(3,3),(3,1)) + ((-8,-10),(-8,2),(-2,2),(-2,-10)) + ((2.5,2.5),(2.5,3.5),(2.5,3.5),(2.5,2.5)) + ((3,3),(3,3),(3,3),(3,3)) +(5 rows) + +SELECT polygon(f1) + FROM PATH_TBL WHERE isclosed(f1); + polygon +------------------- + ((1,2),(3,4)) + ((1,2),(3,4)) + ((1,2),(3,4)) + ((10,20)) + ((11,12),(13,14)) +(5 rows) + +SELECT f1 AS open_path, polygon( pclose(f1)) AS polygon + FROM 
PATH_TBL + WHERE isopen(f1); + open_path | polygon +---------------------------+--------------------------- + [(1,2),(3,4)] | ((1,2),(3,4)) + [(0,0),(3,0),(4,5),(1,6)] | ((0,0),(3,0),(4,5),(1,6)) + [(1,2),(3,4)] | ((1,2),(3,4)) + [(11,12),(13,14)] | ((11,12),(13,14)) +(4 rows) + +-- To box +SELECT f1, f1::box FROM POLYGON_TBL; + f1 | f1 +----------------------------+-------------- + ((2,0),(2,4),(0,0)) | (2,4),(0,0) + ((3,1),(3,3),(1,0)) | (3,3),(1,0) + ((1,2),(3,4),(5,6),(7,8)) | (7,8),(1,2) + ((7,8),(5,6),(3,4),(1,2)) | (7,8),(1,2) + ((1,2),(7,8),(5,6),(3,-4)) | (7,8),(1,-4) + ((0,0)) | (0,0),(0,0) + ((0,1),(0,1)) | (0,1),(0,1) +(7 rows) + +-- To path +SELECT f1, f1::path FROM POLYGON_TBL; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(7 rows) + +-- Same as polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 ~= p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(9 rows) + +-- Contained by polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 <@ p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(12 rows) + +-- Contains polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 @> p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(12 rows) + +-- Overlap with polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 && p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((2,0),(2,4),(0,0)) | 
((7,8),(5,6),(3,4),(1,2)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((2,0),(2,4),(0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((3,1),(3,3),(1,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(25 rows) + +-- Left of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 << p2.f1; + f1 | f1 +---------------+---------------------------- + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) +(8 rows) + +-- Overlap of left of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &< p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((2,0),(2,4),(0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(32 rows) + +-- Right of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 >> p2.f1; + f1 | f1 +----------------------------+--------------- + ((3,1),(3,3),(1,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((0,1),(0,1)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | 
((0,1),(0,1)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,1),(0,1)) +(8 rows) + +-- Overlap of right of polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &> p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((2,0),(2,4),(0,0)) | ((0,1),(0,1)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((0,1),(0,1)) + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((3,1),(3,3),(1,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((1,2),(7,8),(5,6),(3,-4)) | ((2,0),(2,4),(0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((3,1),(3,3),(1,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,0)) + ((1,2),(7,8),(5,6),(3,-4)) | ((0,1),(0,1)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((0,0)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(37 rows) + +-- Below polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 <<| p2.f1; + f1 | f1 +---------------+--------------------------- + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) +(5 rows) + +-- Overlap or below polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 &<| p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((2,0),(2,4),(0,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,0)) | 
((7,8),(5,6),(3,4),(1,2)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,0)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(31 rows) + +-- Above polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 |>> p2.f1; + f1 | f1 +---------------------------+--------------- + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((0,1),(0,1)) | ((0,0)) +(5 rows) + +-- Overlap or above polygon +SELECT p1.f1, p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2 WHERE p1.f1 |&> p2.f1; + f1 | f1 +----------------------------+---------------------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((2,0),(2,4),(0,0)) | ((0,0)) + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((3,1),(3,3),(1,0)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((3,1),(3,3),(1,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((2,0),(2,4),(0,0)) + ((0,0)) | ((3,1),(3,3),(1,0)) + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) | ((0,0)) + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) + ((0,1),(0,1)) | ((0,0)) + ((0,1),(0,1)) | ((0,1),(0,1)) +(32 rows) + +-- Distance to polygon +SELECT p1.f1, p2.f1, p1.f1 <-> p2.f1 FROM POLYGON_TBL p1, POLYGON_TBL p2; + f1 | f1 | ?column? 
+----------------------------+----------------------------+---------------- + ((2,0),(2,4),(0,0)) | ((2,0),(2,4),(0,0)) | 0 + ((2,0),(2,4),(0,0)) | ((3,1),(3,3),(1,0)) | 0 + ((2,0),(2,4),(0,0)) | ((1,2),(3,4),(5,6),(7,8)) | 0 + ((2,0),(2,4),(0,0)) | ((7,8),(5,6),(3,4),(1,2)) | 0 + ((2,0),(2,4),(0,0)) | ((1,2),(7,8),(5,6),(3,-4)) | 0 + ((2,0),(2,4),(0,0)) | ((0,0)) | 0 + ((2,0),(2,4),(0,0)) | ((0,1),(0,1)) | 0.4472135955 + ((3,1),(3,3),(1,0)) | ((2,0),(2,4),(0,0)) | 0 + ((3,1),(3,3),(1,0)) | ((3,1),(3,3),(1,0)) | 0 + ((3,1),(3,3),(1,0)) | ((1,2),(3,4),(5,6),(7,8)) | 0.707106781187 + ((3,1),(3,3),(1,0)) | ((7,8),(5,6),(3,4),(1,2)) | 0.707106781187 + ((3,1),(3,3),(1,0)) | ((1,2),(7,8),(5,6),(3,-4)) | 0 + ((3,1),(3,3),(1,0)) | ((0,0)) | 1 + ((3,1),(3,3),(1,0)) | ((0,1),(0,1)) | 1.38675049056 + ((1,2),(3,4),(5,6),(7,8)) | ((2,0),(2,4),(0,0)) | 0 + ((1,2),(3,4),(5,6),(7,8)) | ((3,1),(3,3),(1,0)) | 0.707106781187 + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(3,4),(5,6),(7,8)) | 0 + ((1,2),(3,4),(5,6),(7,8)) | ((7,8),(5,6),(3,4),(1,2)) | 0 + ((1,2),(3,4),(5,6),(7,8)) | ((1,2),(7,8),(5,6),(3,-4)) | 0 + ((1,2),(3,4),(5,6),(7,8)) | ((0,0)) | 2.2360679775 + ((1,2),(3,4),(5,6),(7,8)) | ((0,1),(0,1)) | 1.41421356237 + ((7,8),(5,6),(3,4),(1,2)) | ((2,0),(2,4),(0,0)) | 0 + ((7,8),(5,6),(3,4),(1,2)) | ((3,1),(3,3),(1,0)) | 0.707106781187 + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(3,4),(5,6),(7,8)) | 0 + ((7,8),(5,6),(3,4),(1,2)) | ((7,8),(5,6),(3,4),(1,2)) | 0 + ((7,8),(5,6),(3,4),(1,2)) | ((1,2),(7,8),(5,6),(3,-4)) | 0 + ((7,8),(5,6),(3,4),(1,2)) | ((0,0)) | 2.2360679775 + ((7,8),(5,6),(3,4),(1,2)) | ((0,1),(0,1)) | 1.41421356237 + ((1,2),(7,8),(5,6),(3,-4)) | ((2,0),(2,4),(0,0)) | 0 + ((1,2),(7,8),(5,6),(3,-4)) | ((3,1),(3,3),(1,0)) | 0 + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(3,4),(5,6),(7,8)) | 0 + ((1,2),(7,8),(5,6),(3,-4)) | ((7,8),(5,6),(3,4),(1,2)) | 0 + ((1,2),(7,8),(5,6),(3,-4)) | ((1,2),(7,8),(5,6),(3,-4)) | 0 + ((1,2),(7,8),(5,6),(3,-4)) | ((0,0)) | 1.58113883008 + ((1,2),(7,8),(5,6),(3,-4)) | ((0,1),(0,1)) | 1.26491106407 + ((0,0)) | ((2,0),(2,4),(0,0)) | 0 + ((0,0)) | ((3,1),(3,3),(1,0)) | 1 + ((0,0)) | ((1,2),(3,4),(5,6),(7,8)) | 2.2360679775 + ((0,0)) | ((7,8),(5,6),(3,4),(1,2)) | 2.2360679775 + ((0,0)) | ((1,2),(7,8),(5,6),(3,-4)) | 1.58113883008 + ((0,0)) | ((0,0)) | 0 + ((0,0)) | ((0,1),(0,1)) | 1 + ((0,1),(0,1)) | ((2,0),(2,4),(0,0)) | 0.4472135955 + ((0,1),(0,1)) | ((3,1),(3,3),(1,0)) | 1.38675049056 + ((0,1),(0,1)) | ((1,2),(3,4),(5,6),(7,8)) | 1.41421356237 + ((0,1),(0,1)) | ((7,8),(5,6),(3,4),(1,2)) | 1.41421356237 + ((0,1),(0,1)) | ((1,2),(7,8),(5,6),(3,-4)) | 1.26491106407 + ((0,1),(0,1)) | ((0,0)) | 1 + ((0,1),(0,1)) | ((0,1),(0,1)) | 0 +(49 rows) + +-- +-- Circles +-- +SELECT circle(f1, 50.0) + FROM POINT_TBL; + circle +------------------------ + <(0,0),50> + <(-10,0),50> + <(-3,4),50> + <(5.1,34.5),50> + <(-5,-12),50> + <(1e-300,-1e-300),50> + <(1e+300,Infinity),50> + <(Infinity,1e+300),50> + <(NaN,NaN),50> + <(10,10),50> +(10 rows) + +SELECT circle(f1) + FROM BOX_TBL; + circle +------------------------ + <(1,1),1.41421356237> + <(2,2),1.41421356237> + <(-5,-4),6.7082039325> + <(2.5,3),0.5> + <(3,3),0> +(5 rows) + +SELECT circle(f1) + FROM POLYGON_TBL + WHERE (# f1) >= 3; + circle +----------------------------------------------- + <(1.33333333333,1.33333333333),2.04168905064> + <(2.33333333333,1.33333333333),1.47534300379> + <(4,5),2.82842712475> + <(4,5),2.82842712475> + <(4,3),4.80664375676> +(5 rows) + +SELECT c1.f1 AS circle, p1.f1 AS point, (p1.f1 <-> c1.f1) AS distance + FROM CIRCLE_TBL 
c1, POINT_TBL p1 + WHERE (p1.f1 <-> c1.f1) > 0 + ORDER BY distance, area(c1.f1), p1.f1[0]; + circle | point | distance +----------------+-------------------+--------------- + <(1,2),3> | (-3,4) | 1.472135955 + <(5,1),3> | (0,0) | 2.09901951359 + <(5,1),3> | (1e-300,-1e-300) | 2.09901951359 + <(5,1),3> | (-3,4) | 5.54400374532 + <(3,5),0> | (0,0) | 5.83095189485 + <(3,5),0> | (1e-300,-1e-300) | 5.83095189485 + <(3,5),0> | (-3,4) | 6.0827625303 + <(1,3),5> | (-10,0) | 6.40175425099 + <(1,3),5> | (10,10) | 6.40175425099 + <(5,1),3> | (10,10) | 7.29563014099 + <(1,2),3> | (-10,0) | 8.1803398875 + <(3,5),0> | (10,10) | 8.60232526704 + <(1,2),3> | (10,10) | 9.04159457879 + <(1,3),5> | (-5,-12) | 11.1554944214 + <(5,1),3> | (-10,0) | 12.0332963784 + <(1,2),3> | (-5,-12) | 12.2315462117 + <(5,1),3> | (-5,-12) | 13.4012194669 + <(3,5),0> | (-10,0) | 13.9283882772 + <(3,5),0> | (-5,-12) | 18.7882942281 + <(1,3),5> | (5.1,34.5) | 26.7657047773 + <(3,5),0> | (5.1,34.5) | 29.5746513082 + <(1,2),3> | (5.1,34.5) | 29.7575945393 + <(5,1),3> | (5.1,34.5) | 30.5001492534 + <(100,200),10> | (5.1,34.5) | 180.778038568 + <(100,200),10> | (10,10) | 200.237960416 + <(100,200),10> | (-3,4) | 211.415898255 + <(100,200),10> | (0,0) | 213.60679775 + <(100,200),10> | (1e-300,-1e-300) | 213.60679775 + <(100,200),10> | (-10,0) | 218.25424421 + <(100,200),10> | (-5,-12) | 226.577682802 + <(3,5),0> | (1e+300,Infinity) | Infinity + <(3,5),0> | (Infinity,1e+300) | Infinity + <(1,2),3> | (1e+300,Infinity) | Infinity + <(5,1),3> | (1e+300,Infinity) | Infinity + <(5,1),3> | (Infinity,1e+300) | Infinity + <(1,2),3> | (Infinity,1e+300) | Infinity + <(1,3),5> | (1e+300,Infinity) | Infinity + <(1,3),5> | (Infinity,1e+300) | Infinity + <(100,200),10> | (1e+300,Infinity) | Infinity + <(100,200),10> | (Infinity,1e+300) | Infinity + <(1,2),100> | (1e+300,Infinity) | Infinity + <(1,2),100> | (Infinity,1e+300) | Infinity + <(100,1),115> | (1e+300,Infinity) | Infinity + <(100,1),115> | (Infinity,1e+300) | Infinity + <(3,5),0> | (NaN,NaN) | NaN + <(1,2),3> | (NaN,NaN) | NaN + <(5,1),3> | (NaN,NaN) | NaN + <(1,3),5> | (NaN,NaN) | NaN + <(100,200),10> | (NaN,NaN) | NaN + <(1,2),100> | (NaN,NaN) | NaN + <(100,1),115> | (NaN,NaN) | NaN + <(3,5),NaN> | (-10,0) | NaN + <(3,5),NaN> | (-5,-12) | NaN + <(3,5),NaN> | (-3,4) | NaN + <(3,5),NaN> | (0,0) | NaN + <(3,5),NaN> | (1e-300,-1e-300) | NaN + <(3,5),NaN> | (5.1,34.5) | NaN + <(3,5),NaN> | (10,10) | NaN + <(3,5),NaN> | (1e+300,Infinity) | NaN + <(3,5),NaN> | (Infinity,1e+300) | NaN + <(3,5),NaN> | (NaN,NaN) | NaN +(61 rows) + +-- To polygon +SELECT f1, f1::polygon FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + f1 | f1 +----------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + <(5,1),3> | ((2,1),(2.40192378865,2.5),(3.5,3.59807621135),(5,4),(6.5,3.59807621135),(7.59807621135,2.5),(8,1),(7.59807621135,-0.5),(6.5,-1.59807621135),(5,-2),(3.5,-1.59807621135),(2.40192378865,-0.5)) + <(1,2),100> | ((-99,2),(-85.6025403784,52),(-49,88.6025403784),(1,102),(51,88.6025403784),(87.6025403784,52),(101,2),(87.6025403784,-48),(51,-84.6025403784),(1,-98),(-49,-84.6025403784),(-85.6025403784,-48)) + <(1,3),5> | ((-4,3),(-3.33012701892,5.5),(-1.5,7.33012701892),(1,8),(3.5,7.33012701892),(5.33012701892,5.5),(6,3),(5.33012701892,0.5),(3.5,-1.33012701892),(1,-2),(-1.5,-1.33012701892),(-3.33012701892,0.5)) + <(1,2),3> | 
((-2,2),(-1.59807621135,3.5),(-0.5,4.59807621135),(1,5),(2.5,4.59807621135),(3.59807621135,3.5),(4,2),(3.59807621135,0.5),(2.5,-0.598076211353),(1,-1),(-0.5,-0.598076211353),(-1.59807621135,0.5)) + <(100,200),10> | ((90,200),(91.3397459622,205),(95,208.660254038),(100,210),(105,208.660254038),(108.660254038,205),(110,200),(108.660254038,195),(105,191.339745962),(100,190),(95,191.339745962),(91.3397459622,195)) + <(100,1),115> | ((-15,1),(0.40707856479,58.5),(42.5,100.592921435),(100,116),(157.5,100.592921435),(199.592921435,58.5),(215,1),(199.592921435,-56.5),(157.5,-98.5929214352),(100,-114),(42.5,-98.5929214352),(0.40707856479,-56.5)) +(6 rows) + +-- To polygon with less points +SELECT f1, polygon(8, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; + f1 | polygon +----------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------ + <(5,1),3> | ((2,1),(2.87867965644,3.12132034356),(5,4),(7.12132034356,3.12132034356),(8,1),(7.12132034356,-1.12132034356),(5,-2),(2.87867965644,-1.12132034356)) + <(1,2),100> | ((-99,2),(-69.7106781187,72.7106781187),(1,102),(71.7106781187,72.7106781187),(101,2),(71.7106781187,-68.7106781187),(1,-98),(-69.7106781187,-68.7106781187)) + <(1,3),5> | ((-4,3),(-2.53553390593,6.53553390593),(1,8),(4.53553390593,6.53553390593),(6,3),(4.53553390593,-0.535533905933),(1,-2),(-2.53553390593,-0.535533905933)) + <(1,2),3> | ((-2,2),(-1.12132034356,4.12132034356),(1,5),(3.12132034356,4.12132034356),(4,2),(3.12132034356,-0.12132034356),(1,-1),(-1.12132034356,-0.12132034356)) + <(100,200),10> | ((90,200),(92.9289321881,207.071067812),(100,210),(107.071067812,207.071067812),(110,200),(107.071067812,192.928932188),(100,190),(92.9289321881,192.928932188)) + <(100,1),115> | ((-15,1),(18.6827201635,82.3172798365),(100,116),(181.317279836,82.3172798365),(215,1),(181.317279836,-80.3172798365),(100,-114),(18.6827201635,-80.3172798365)) +(6 rows) + +-- Error for insufficient number of points +SELECT f1, polygon(1, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>'; +ERROR: must request at least 2 points +-- Zero radius error +SELECT f1, polygon(10, f1) FROM CIRCLE_TBL WHERE f1 < '<(0,0),1>'; +ERROR: cannot convert circle with radius zero to polygon +-- Same as circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 ~= c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(3,5),0> + <(3,5),NaN> | <(3,5),NaN> +(8 rows) + +-- Overlap with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 && c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,1),115> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(100,1),115> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,1),115> 
+ <(100,1),115> | <(3,5),0> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(33 rows) + +-- Overlap or left of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &< c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(28 rows) + +-- Left of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 << c2.f1; + f1 | f1 +-----------+---------------- + <(5,1),3> | <(100,200),10> + <(1,3),5> | <(100,200),10> + <(1,2),3> | <(100,200),10> + <(3,5),0> | <(100,200),10> +(4 rows) + +-- Right of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 >> c2.f1; + f1 | f1 +----------------+----------- + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(3,5),0> +(4 rows) + +-- Overlap or right of circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &> c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(28 rows) + +-- Contained by circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <@ c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(17 rows) + +-- Contain by circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 @> c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(1,2),3> + <(100,200),10> | 
<(100,200),10> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,1),115> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(3,5),0> +(17 rows) + +-- Below circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <<| c2.f1; + f1 | f1 +---------------+---------------- + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(100,200),10> + <(1,3),5> | <(100,200),10> + <(1,2),3> | <(100,200),10> + <(100,1),115> | <(100,200),10> + <(3,5),0> | <(100,200),10> +(7 rows) + +-- Above circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 |>> c2.f1; + f1 | f1 +----------------+--------------- + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(3,5),0> | <(5,1),3> +(7 rows) + +-- Overlap or below circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 &<| c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Overlap or above circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 |&> c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Area equal with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 = c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),3> + <(1,2),100> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(9 rows) + +-- Area not equal with circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 != c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,3),5> + <(1,2),100> | 
<(1,2),3> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(100,1),115> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,1),115> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(40 rows) + +-- Area less than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(20 rows) + +-- Area greater than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 > c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(5,1),3> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(3,5),0> +(20 rows) + +-- Area less than or equal circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 <= c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(1,2),100> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(100,1),115> + <(100,1),115> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Area greater than or equal circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 >= c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(5,1),3> + <(5,1),3> | <(1,2),3> + <(5,1),3> | <(3,5),0> + <(1,2),100> | <(5,1),3> + <(1,2),100> | 
<(1,2),100> + <(1,2),100> | <(1,3),5> + <(1,2),100> | <(1,2),3> + <(1,2),100> | <(100,200),10> + <(1,2),100> | <(3,5),0> + <(1,3),5> | <(5,1),3> + <(1,3),5> | <(1,3),5> + <(1,3),5> | <(1,2),3> + <(1,3),5> | <(3,5),0> + <(1,2),3> | <(5,1),3> + <(1,2),3> | <(1,2),3> + <(1,2),3> | <(3,5),0> + <(100,200),10> | <(5,1),3> + <(100,200),10> | <(1,3),5> + <(100,200),10> | <(1,2),3> + <(100,200),10> | <(100,200),10> + <(100,200),10> | <(3,5),0> + <(100,1),115> | <(5,1),3> + <(100,1),115> | <(1,2),100> + <(100,1),115> | <(1,3),5> + <(100,1),115> | <(1,2),3> + <(100,1),115> | <(100,200),10> + <(100,1),115> | <(100,1),115> + <(100,1),115> | <(3,5),0> + <(3,5),0> | <(3,5),0> +(29 rows) + +-- Area less than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(20 rows) + +-- Area greater than circle +SELECT c1.f1, c2.f1 FROM CIRCLE_TBL c1, CIRCLE_TBL c2 WHERE c1.f1 < c2.f1; + f1 | f1 +----------------+---------------- + <(5,1),3> | <(1,2),100> + <(5,1),3> | <(1,3),5> + <(5,1),3> | <(100,200),10> + <(5,1),3> | <(100,1),115> + <(1,2),100> | <(100,1),115> + <(1,3),5> | <(1,2),100> + <(1,3),5> | <(100,200),10> + <(1,3),5> | <(100,1),115> + <(1,2),3> | <(1,2),100> + <(1,2),3> | <(1,3),5> + <(1,2),3> | <(100,200),10> + <(1,2),3> | <(100,1),115> + <(100,200),10> | <(1,2),100> + <(100,200),10> | <(100,1),115> + <(3,5),0> | <(5,1),3> + <(3,5),0> | <(1,2),100> + <(3,5),0> | <(1,3),5> + <(3,5),0> | <(1,2),3> + <(3,5),0> | <(100,200),10> + <(3,5),0> | <(100,1),115> +(20 rows) + +-- Add point +SELECT c.f1, p.f1, c.f1 + p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + f1 | f1 | ?column? 
+----------------+-------------------+------------------------- + <(5,1),3> | (0,0) | <(5,1),3> + <(1,2),100> | (0,0) | <(1,2),100> + <(1,3),5> | (0,0) | <(1,3),5> + <(1,2),3> | (0,0) | <(1,2),3> + <(100,200),10> | (0,0) | <(100,200),10> + <(100,1),115> | (0,0) | <(100,1),115> + <(3,5),0> | (0,0) | <(3,5),0> + <(3,5),NaN> | (0,0) | <(3,5),NaN> + <(5,1),3> | (-10,0) | <(-5,1),3> + <(1,2),100> | (-10,0) | <(-9,2),100> + <(1,3),5> | (-10,0) | <(-9,3),5> + <(1,2),3> | (-10,0) | <(-9,2),3> + <(100,200),10> | (-10,0) | <(90,200),10> + <(100,1),115> | (-10,0) | <(90,1),115> + <(3,5),0> | (-10,0) | <(-7,5),0> + <(3,5),NaN> | (-10,0) | <(-7,5),NaN> + <(5,1),3> | (-3,4) | <(2,5),3> + <(1,2),100> | (-3,4) | <(-2,6),100> + <(1,3),5> | (-3,4) | <(-2,7),5> + <(1,2),3> | (-3,4) | <(-2,6),3> + <(100,200),10> | (-3,4) | <(97,204),10> + <(100,1),115> | (-3,4) | <(97,5),115> + <(3,5),0> | (-3,4) | <(0,9),0> + <(3,5),NaN> | (-3,4) | <(0,9),NaN> + <(5,1),3> | (5.1,34.5) | <(10.1,35.5),3> + <(1,2),100> | (5.1,34.5) | <(6.1,36.5),100> + <(1,3),5> | (5.1,34.5) | <(6.1,37.5),5> + <(1,2),3> | (5.1,34.5) | <(6.1,36.5),3> + <(100,200),10> | (5.1,34.5) | <(105.1,234.5),10> + <(100,1),115> | (5.1,34.5) | <(105.1,35.5),115> + <(3,5),0> | (5.1,34.5) | <(8.1,39.5),0> + <(3,5),NaN> | (5.1,34.5) | <(8.1,39.5),NaN> + <(5,1),3> | (-5,-12) | <(0,-11),3> + <(1,2),100> | (-5,-12) | <(-4,-10),100> + <(1,3),5> | (-5,-12) | <(-4,-9),5> + <(1,2),3> | (-5,-12) | <(-4,-10),3> + <(100,200),10> | (-5,-12) | <(95,188),10> + <(100,1),115> | (-5,-12) | <(95,-11),115> + <(3,5),0> | (-5,-12) | <(-2,-7),0> + <(3,5),NaN> | (-5,-12) | <(-2,-7),NaN> + <(5,1),3> | (1e-300,-1e-300) | <(5,1),3> + <(1,2),100> | (1e-300,-1e-300) | <(1,2),100> + <(1,3),5> | (1e-300,-1e-300) | <(1,3),5> + <(1,2),3> | (1e-300,-1e-300) | <(1,2),3> + <(100,200),10> | (1e-300,-1e-300) | <(100,200),10> + <(100,1),115> | (1e-300,-1e-300) | <(100,1),115> + <(3,5),0> | (1e-300,-1e-300) | <(3,5),0> + <(3,5),NaN> | (1e-300,-1e-300) | <(3,5),NaN> + <(5,1),3> | (1e+300,Infinity) | <(1e+300,Infinity),3> + <(1,2),100> | (1e+300,Infinity) | <(1e+300,Infinity),100> + <(1,3),5> | (1e+300,Infinity) | <(1e+300,Infinity),5> + <(1,2),3> | (1e+300,Infinity) | <(1e+300,Infinity),3> + <(100,200),10> | (1e+300,Infinity) | <(1e+300,Infinity),10> + <(100,1),115> | (1e+300,Infinity) | <(1e+300,Infinity),115> + <(3,5),0> | (1e+300,Infinity) | <(1e+300,Infinity),0> + <(3,5),NaN> | (1e+300,Infinity) | <(1e+300,Infinity),NaN> + <(5,1),3> | (Infinity,1e+300) | <(Infinity,1e+300),3> + <(1,2),100> | (Infinity,1e+300) | <(Infinity,1e+300),100> + <(1,3),5> | (Infinity,1e+300) | <(Infinity,1e+300),5> + <(1,2),3> | (Infinity,1e+300) | <(Infinity,1e+300),3> + <(100,200),10> | (Infinity,1e+300) | <(Infinity,1e+300),10> + <(100,1),115> | (Infinity,1e+300) | <(Infinity,1e+300),115> + <(3,5),0> | (Infinity,1e+300) | <(Infinity,1e+300),0> + <(3,5),NaN> | (Infinity,1e+300) | <(Infinity,1e+300),NaN> + <(5,1),3> | (NaN,NaN) | <(NaN,NaN),3> + <(1,2),100> | (NaN,NaN) | <(NaN,NaN),100> + <(1,3),5> | (NaN,NaN) | <(NaN,NaN),5> + <(1,2),3> | (NaN,NaN) | <(NaN,NaN),3> + <(100,200),10> | (NaN,NaN) | <(NaN,NaN),10> + <(100,1),115> | (NaN,NaN) | <(NaN,NaN),115> + <(3,5),0> | (NaN,NaN) | <(NaN,NaN),0> + <(3,5),NaN> | (NaN,NaN) | <(NaN,NaN),NaN> + <(5,1),3> | (10,10) | <(15,11),3> + <(1,2),100> | (10,10) | <(11,12),100> + <(1,3),5> | (10,10) | <(11,13),5> + <(1,2),3> | (10,10) | <(11,12),3> + <(100,200),10> | (10,10) | <(110,210),10> + <(100,1),115> | (10,10) | <(110,11),115> + <(3,5),0> | (10,10) | <(13,15),0> + <(3,5),NaN> | 
(10,10) | <(13,15),NaN> +(80 rows) + +-- Subtract point +SELECT c.f1, p.f1, c.f1 - p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + f1 | f1 | ?column? +----------------+-------------------+--------------------------- + <(5,1),3> | (0,0) | <(5,1),3> + <(1,2),100> | (0,0) | <(1,2),100> + <(1,3),5> | (0,0) | <(1,3),5> + <(1,2),3> | (0,0) | <(1,2),3> + <(100,200),10> | (0,0) | <(100,200),10> + <(100,1),115> | (0,0) | <(100,1),115> + <(3,5),0> | (0,0) | <(3,5),0> + <(3,5),NaN> | (0,0) | <(3,5),NaN> + <(5,1),3> | (-10,0) | <(15,1),3> + <(1,2),100> | (-10,0) | <(11,2),100> + <(1,3),5> | (-10,0) | <(11,3),5> + <(1,2),3> | (-10,0) | <(11,2),3> + <(100,200),10> | (-10,0) | <(110,200),10> + <(100,1),115> | (-10,0) | <(110,1),115> + <(3,5),0> | (-10,0) | <(13,5),0> + <(3,5),NaN> | (-10,0) | <(13,5),NaN> + <(5,1),3> | (-3,4) | <(8,-3),3> + <(1,2),100> | (-3,4) | <(4,-2),100> + <(1,3),5> | (-3,4) | <(4,-1),5> + <(1,2),3> | (-3,4) | <(4,-2),3> + <(100,200),10> | (-3,4) | <(103,196),10> + <(100,1),115> | (-3,4) | <(103,-3),115> + <(3,5),0> | (-3,4) | <(6,1),0> + <(3,5),NaN> | (-3,4) | <(6,1),NaN> + <(5,1),3> | (5.1,34.5) | <(-0.1,-33.5),3> + <(1,2),100> | (5.1,34.5) | <(-4.1,-32.5),100> + <(1,3),5> | (5.1,34.5) | <(-4.1,-31.5),5> + <(1,2),3> | (5.1,34.5) | <(-4.1,-32.5),3> + <(100,200),10> | (5.1,34.5) | <(94.9,165.5),10> + <(100,1),115> | (5.1,34.5) | <(94.9,-33.5),115> + <(3,5),0> | (5.1,34.5) | <(-2.1,-29.5),0> + <(3,5),NaN> | (5.1,34.5) | <(-2.1,-29.5),NaN> + <(5,1),3> | (-5,-12) | <(10,13),3> + <(1,2),100> | (-5,-12) | <(6,14),100> + <(1,3),5> | (-5,-12) | <(6,15),5> + <(1,2),3> | (-5,-12) | <(6,14),3> + <(100,200),10> | (-5,-12) | <(105,212),10> + <(100,1),115> | (-5,-12) | <(105,13),115> + <(3,5),0> | (-5,-12) | <(8,17),0> + <(3,5),NaN> | (-5,-12) | <(8,17),NaN> + <(5,1),3> | (1e-300,-1e-300) | <(5,1),3> + <(1,2),100> | (1e-300,-1e-300) | <(1,2),100> + <(1,3),5> | (1e-300,-1e-300) | <(1,3),5> + <(1,2),3> | (1e-300,-1e-300) | <(1,2),3> + <(100,200),10> | (1e-300,-1e-300) | <(100,200),10> + <(100,1),115> | (1e-300,-1e-300) | <(100,1),115> + <(3,5),0> | (1e-300,-1e-300) | <(3,5),0> + <(3,5),NaN> | (1e-300,-1e-300) | <(3,5),NaN> + <(5,1),3> | (1e+300,Infinity) | <(-1e+300,-Infinity),3> + <(1,2),100> | (1e+300,Infinity) | <(-1e+300,-Infinity),100> + <(1,3),5> | (1e+300,Infinity) | <(-1e+300,-Infinity),5> + <(1,2),3> | (1e+300,Infinity) | <(-1e+300,-Infinity),3> + <(100,200),10> | (1e+300,Infinity) | <(-1e+300,-Infinity),10> + <(100,1),115> | (1e+300,Infinity) | <(-1e+300,-Infinity),115> + <(3,5),0> | (1e+300,Infinity) | <(-1e+300,-Infinity),0> + <(3,5),NaN> | (1e+300,Infinity) | <(-1e+300,-Infinity),NaN> + <(5,1),3> | (Infinity,1e+300) | <(-Infinity,-1e+300),3> + <(1,2),100> | (Infinity,1e+300) | <(-Infinity,-1e+300),100> + <(1,3),5> | (Infinity,1e+300) | <(-Infinity,-1e+300),5> + <(1,2),3> | (Infinity,1e+300) | <(-Infinity,-1e+300),3> + <(100,200),10> | (Infinity,1e+300) | <(-Infinity,-1e+300),10> + <(100,1),115> | (Infinity,1e+300) | <(-Infinity,-1e+300),115> + <(3,5),0> | (Infinity,1e+300) | <(-Infinity,-1e+300),0> + <(3,5),NaN> | (Infinity,1e+300) | <(-Infinity,-1e+300),NaN> + <(5,1),3> | (NaN,NaN) | <(NaN,NaN),3> + <(1,2),100> | (NaN,NaN) | <(NaN,NaN),100> + <(1,3),5> | (NaN,NaN) | <(NaN,NaN),5> + <(1,2),3> | (NaN,NaN) | <(NaN,NaN),3> + <(100,200),10> | (NaN,NaN) | <(NaN,NaN),10> + <(100,1),115> | (NaN,NaN) | <(NaN,NaN),115> + <(3,5),0> | (NaN,NaN) | <(NaN,NaN),0> + <(3,5),NaN> | (NaN,NaN) | <(NaN,NaN),NaN> + <(5,1),3> | (10,10) | <(-5,-9),3> + <(1,2),100> | (10,10) | <(-9,-8),100> + <(1,3),5> | (10,10) | 
<(-9,-7),5> + <(1,2),3> | (10,10) | <(-9,-8),3> + <(100,200),10> | (10,10) | <(90,190),10> + <(100,1),115> | (10,10) | <(90,-9),115> + <(3,5),0> | (10,10) | <(-7,-5),0> + <(3,5),NaN> | (10,10) | <(-7,-5),NaN> +(80 rows) + +-- Multiply with point +SELECT c.f1, p.f1, c.f1 * p.f1 FROM CIRCLE_TBL c, POINT_TBL p; + f1 | f1 | ?column? +----------------+-------------------+-------------------------------------------- + <(5,1),3> | (0,0) | <(0,0),0> + <(1,2),100> | (0,0) | <(0,0),0> + <(1,3),5> | (0,0) | <(0,0),0> + <(1,2),3> | (0,0) | <(0,0),0> + <(100,200),10> | (0,0) | <(0,0),0> + <(100,1),115> | (0,0) | <(0,0),0> + <(3,5),0> | (0,0) | <(0,0),0> + <(3,5),NaN> | (0,0) | <(0,0),NaN> + <(5,1),3> | (-10,0) | <(-50,-10),30> + <(1,2),100> | (-10,0) | <(-10,-20),1000> + <(1,3),5> | (-10,0) | <(-10,-30),50> + <(1,2),3> | (-10,0) | <(-10,-20),30> + <(100,200),10> | (-10,0) | <(-1000,-2000),100> + <(100,1),115> | (-10,0) | <(-1000,-10),1150> + <(3,5),0> | (-10,0) | <(-30,-50),0> + <(3,5),NaN> | (-10,0) | <(-30,-50),NaN> + <(5,1),3> | (-3,4) | <(-19,17),15> + <(1,2),100> | (-3,4) | <(-11,-2),500> + <(1,3),5> | (-3,4) | <(-15,-5),25> + <(1,2),3> | (-3,4) | <(-11,-2),15> + <(100,200),10> | (-3,4) | <(-1100,-200),50> + <(100,1),115> | (-3,4) | <(-304,397),575> + <(3,5),0> | (-3,4) | <(-29,-3),0> + <(3,5),NaN> | (-3,4) | <(-29,-3),NaN> + <(5,1),3> | (5.1,34.5) | <(-9,177.6),104.624758064> + <(1,2),100> | (5.1,34.5) | <(-63.9,44.7),3487.49193547> + <(1,3),5> | (5.1,34.5) | <(-98.4,49.8),174.374596774> + <(1,2),3> | (5.1,34.5) | <(-63.9,44.7),104.624758064> + <(100,200),10> | (5.1,34.5) | <(-6390,4470),348.749193547> + <(100,1),115> | (5.1,34.5) | <(475.5,3455.1),4010.6157258> + <(3,5),0> | (5.1,34.5) | <(-157.2,129),0> + <(3,5),NaN> | (5.1,34.5) | <(-157.2,129),NaN> + <(5,1),3> | (-5,-12) | <(-13,-65),39> + <(1,2),100> | (-5,-12) | <(19,-22),1300> + <(1,3),5> | (-5,-12) | <(31,-27),65> + <(1,2),3> | (-5,-12) | <(19,-22),39> + <(100,200),10> | (-5,-12) | <(1900,-2200),130> + <(100,1),115> | (-5,-12) | <(-488,-1205),1495> + <(3,5),0> | (-5,-12) | <(45,-61),0> + <(3,5),NaN> | (-5,-12) | <(45,-61),NaN> + <(5,1),3> | (1e-300,-1e-300) | <(6e-300,-4e-300),4.24264068712e-300> + <(1,2),100> | (1e-300,-1e-300) | <(3e-300,1e-300),1.41421356237e-298> + <(1,3),5> | (1e-300,-1e-300) | <(4e-300,2e-300),7.07106781187e-300> + <(1,2),3> | (1e-300,-1e-300) | <(3e-300,1e-300),4.24264068712e-300> + <(100,200),10> | (1e-300,-1e-300) | <(3e-298,1e-298),1.41421356237e-299> + <(100,1),115> | (1e-300,-1e-300) | <(1.01e-298,-9.9e-299),1.62634559673e-298> + <(3,5),0> | (1e-300,-1e-300) | <(8e-300,2e-300),0> + <(3,5),NaN> | (1e-300,-1e-300) | <(8e-300,2e-300),NaN> + <(5,1),3> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(1,2),100> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(1,3),5> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(1,2),3> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(100,200),10> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(100,1),115> | (1e+300,Infinity) | <(-Infinity,Infinity),Infinity> + <(3,5),0> | (1e+300,Infinity) | <(-Infinity,Infinity),NaN> + <(3,5),NaN> | (1e+300,Infinity) | <(-Infinity,Infinity),NaN> + <(5,1),3> | (Infinity,1e+300) | <(Infinity,Infinity),Infinity> + <(1,2),100> | (Infinity,1e+300) | <(Infinity,Infinity),Infinity> + <(1,3),5> | (Infinity,1e+300) | <(Infinity,Infinity),Infinity> + <(1,2),3> | (Infinity,1e+300) | <(Infinity,Infinity),Infinity> + <(100,200),10> | (Infinity,1e+300) | <(Infinity,Infinity),Infinity> + <(100,1),115> | 
(Infinity,1e+300) | <(Infinity,Infinity),Infinity> + <(3,5),0> | (Infinity,1e+300) | <(Infinity,Infinity),NaN> + <(3,5),NaN> | (Infinity,1e+300) | <(Infinity,Infinity),NaN> + <(5,1),3> | (NaN,NaN) | <(NaN,NaN),NaN> + <(1,2),100> | (NaN,NaN) | <(NaN,NaN),NaN> + <(1,3),5> | (NaN,NaN) | <(NaN,NaN),NaN> + <(1,2),3> | (NaN,NaN) | <(NaN,NaN),NaN> + <(100,200),10> | (NaN,NaN) | <(NaN,NaN),NaN> + <(100,1),115> | (NaN,NaN) | <(NaN,NaN),NaN> + <(3,5),0> | (NaN,NaN) | <(NaN,NaN),NaN> + <(3,5),NaN> | (NaN,NaN) | <(NaN,NaN),NaN> + <(5,1),3> | (10,10) | <(40,60),42.4264068712> + <(1,2),100> | (10,10) | <(-10,30),1414.21356237> + <(1,3),5> | (10,10) | <(-20,40),70.7106781187> + <(1,2),3> | (10,10) | <(-10,30),42.4264068712> + <(100,200),10> | (10,10) | <(-1000,3000),141.421356237> + <(100,1),115> | (10,10) | <(990,1010),1626.34559673> + <(3,5),0> | (10,10) | <(-20,80),0> + <(3,5),NaN> | (10,10) | <(-20,80),NaN> +(80 rows) + +-- Divide by point +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1[0] BETWEEN 1 AND 1000; + f1 | f1 | ?column? +----------------+------------+------------------------------------------------------ + <(5,1),3> | (5.1,34.5) | <(0.0493315573973,-0.137635045138),0.0860217042937> + <(5,1),3> | (10,10) | <(0.3,-0.2),0.212132034356> + <(1,2),100> | (5.1,34.5) | <(0.0609244733856,-0.0199792807459),2.86739014312> + <(1,2),100> | (10,10) | <(0.15,0.05),7.07106781187> + <(1,3),5> | (5.1,34.5) | <(0.0892901188891,-0.0157860983671),0.143369507156> + <(1,3),5> | (10,10) | <(0.2,0.1),0.353553390593> + <(1,2),3> | (5.1,34.5) | <(0.0609244733856,-0.0199792807459),0.0860217042937> + <(1,2),3> | (10,10) | <(0.15,0.05),0.212132034356> + <(100,200),10> | (5.1,34.5) | <(6.09244733856,-1.99792807459),0.286739014312> + <(100,200),10> | (10,10) | <(15,5),0.707106781187> + <(100,1),115> | (5.1,34.5) | <(0.44768388338,-2.83237136796),3.29749866459> + <(100,1),115> | (10,10) | <(5.05,-4.95),8.13172798365> + <(3,5),0> | (5.1,34.5) | <(0.154407774653,-0.0641310246164),0> + <(3,5),0> | (10,10) | <(0.4,0.1),0> + <(3,5),NaN> | (5.1,34.5) | <(0.154407774653,-0.0641310246164),NaN> + <(3,5),NaN> | (10,10) | <(0.4,0.1),NaN> +(16 rows) + +-- Overflow error +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1[0] > 1000; +ERROR: value out of range: overflow +-- Division by 0 error +SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1 ~= '(0,0)'::point; +ERROR: division by zero +-- Distance to polygon +SELECT c.f1, p.f1, c.f1 <-> p.f1 FROM CIRCLE_TBL c, POLYGON_TBL p; + f1 | f1 | ?column? 
+----------------+----------------------------+---------------- + <(5,1),3> | ((2,0),(2,4),(0,0)) | 0 + <(5,1),3> | ((3,1),(3,3),(1,0)) | 0 + <(5,1),3> | ((1,2),(3,4),(5,6),(7,8)) | 0.535533905933 + <(5,1),3> | ((7,8),(5,6),(3,4),(1,2)) | 0.535533905933 + <(5,1),3> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(5,1),3> | ((0,0)) | 2.09901951359 + <(5,1),3> | ((0,1),(0,1)) | 2 + <(1,2),100> | ((2,0),(2,4),(0,0)) | 0 + <(1,2),100> | ((3,1),(3,3),(1,0)) | 0 + <(1,2),100> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(1,2),100> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(1,2),100> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(1,2),100> | ((0,0)) | 0 + <(1,2),100> | ((0,1),(0,1)) | 0 + <(1,3),5> | ((2,0),(2,4),(0,0)) | 0 + <(1,3),5> | ((3,1),(3,3),(1,0)) | 0 + <(1,3),5> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(1,3),5> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(1,3),5> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(1,3),5> | ((0,0)) | 0 + <(1,3),5> | ((0,1),(0,1)) | 0 + <(1,2),3> | ((2,0),(2,4),(0,0)) | 0 + <(1,2),3> | ((3,1),(3,3),(1,0)) | 0 + <(1,2),3> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(1,2),3> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(1,2),3> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(1,2),3> | ((0,0)) | 0 + <(1,2),3> | ((0,1),(0,1)) | 0 + <(100,200),10> | ((2,0),(2,4),(0,0)) | 209.134661795 + <(100,200),10> | ((3,1),(3,3),(1,0)) | 209.585974051 + <(100,200),10> | ((1,2),(3,4),(5,6),(7,8)) | 203.337760371 + <(100,200),10> | ((7,8),(5,6),(3,4),(1,2)) | 203.337760371 + <(100,200),10> | ((1,2),(7,8),(5,6),(3,-4)) | 203.337760371 + <(100,200),10> | ((0,0)) | 213.60679775 + <(100,200),10> | ((0,1),(0,1)) | 212.712819568 + <(100,1),115> | ((2,0),(2,4),(0,0)) | 0 + <(100,1),115> | ((3,1),(3,3),(1,0)) | 0 + <(100,1),115> | ((1,2),(3,4),(5,6),(7,8)) | 0 + <(100,1),115> | ((7,8),(5,6),(3,4),(1,2)) | 0 + <(100,1),115> | ((1,2),(7,8),(5,6),(3,-4)) | 0 + <(100,1),115> | ((0,0)) | 0 + <(100,1),115> | ((0,1),(0,1)) | 0 + <(3,5),0> | ((2,0),(2,4),(0,0)) | 1.41421356237 + <(3,5),0> | ((3,1),(3,3),(1,0)) | 2 + <(3,5),0> | ((1,2),(3,4),(5,6),(7,8)) | 0.707106781187 + <(3,5),0> | ((7,8),(5,6),(3,4),(1,2)) | 0.707106781187 + <(3,5),0> | ((1,2),(7,8),(5,6),(3,-4)) | 0.707106781187 + <(3,5),0> | ((0,0)) | 5.83095189485 + <(3,5),0> | ((0,1),(0,1)) | 5 + <(3,5),NaN> | ((2,0),(2,4),(0,0)) | NaN + <(3,5),NaN> | ((3,1),(3,3),(1,0)) | NaN + <(3,5),NaN> | ((1,2),(3,4),(5,6),(7,8)) | NaN + <(3,5),NaN> | ((7,8),(5,6),(3,4),(1,2)) | NaN + <(3,5),NaN> | ((1,2),(7,8),(5,6),(3,-4)) | NaN + <(3,5),NaN> | ((0,0)) | NaN + <(3,5),NaN> | ((0,1),(0,1)) | NaN +(56 rows) + +-- Check index behavior for circles +CREATE INDEX gcircleind ON circle_tbl USING gist (f1); +SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) + ORDER BY area(f1); + f1 +--------------- + <(1,2),3> + <(1,3),5> + <(1,2),100> + <(100,1),115> +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) + ORDER BY area(f1); + QUERY PLAN +---------------------------------------------- + Sort + Sort Key: (area(f1)) + -> Seq Scan on circle_tbl + Filter: (f1 && '<(1,-2),1>'::circle) +(4 rows) + +SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1) + ORDER BY area(f1); + f1 +--------------- + <(1,2),3> + <(1,3),5> + <(1,2),100> + <(100,1),115> +(4 rows) + +-- Check index behavior for polygons +CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1); +SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon + ORDER BY (poly_center(f1))[0]; + f1 +--------------------- + ((2,0),(2,4),(0,0)) +(1 row) + +EXPLAIN (COSTS OFF) +SELECT * FROM polygon_tbl WHERE f1 @> 
'((1,1),(2,2),(2,1))'::polygon + ORDER BY (poly_center(f1))[0]; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: ((poly_center(f1))[0]) + -> Seq Scan on polygon_tbl + Filter: (f1 @> '((1,1),(2,2),(2,1))'::polygon) +(4 rows) + +SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon + ORDER BY (poly_center(f1))[0]; + f1 +--------------------- + ((2,0),(2,4),(0,0)) +(1 row) + +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('(1', 'circle'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1,', 'circle'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + invalid input syntax for type circle: "1," | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('(1,2),-1', 'circle'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(1,2),-1', 'circle'); + message | detail | hint | sql_error_code +--------------------------------------------------+--------+------+---------------- + invalid input syntax for type circle: "(1,2),-1" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/gin.out b/src/test/regress/expected/gin.out new file mode 100644 index 0000000..0af4643 --- /dev/null +++ b/src/test/regress/expected/gin.out @@ -0,0 +1,299 @@ +-- +-- Test GIN indexes. +-- +-- There are other tests to test different GIN opclasses. This is for testing +-- GIN itself. +-- Create and populate a test table with a GIN index. +create table gin_test_tbl(i int4[]) with (autovacuum_enabled = off); +create index gin_test_idx on gin_test_tbl using gin (i) + with (fastupdate = on, gin_pending_list_limit = 4096); +insert into gin_test_tbl select array[1, 2, g] from generate_series(1, 20000) g; +insert into gin_test_tbl select array[1, 3, g] from generate_series(1, 1000) g; +select gin_clean_pending_list('gin_test_idx')>10 as many; -- flush the fastupdate buffers + many +------ + t +(1 row) + +insert into gin_test_tbl select array[3, 1, g] from generate_series(1, 1000) g; +vacuum gin_test_tbl; -- flush the fastupdate buffers +select gin_clean_pending_list('gin_test_idx'); -- nothing to flush + gin_clean_pending_list +------------------------ + 0 +(1 row) + +-- Test vacuuming +delete from gin_test_tbl where i @> array[2]; +vacuum gin_test_tbl; +-- Disable fastupdate, and do more insertions. With fastupdate enabled, most +-- insertions (by flushing the list pages) cause page splits. Without +-- fastupdate, we get more churn in the GIN data leaf pages, and exercise the +-- recompression codepaths. 
+alter index gin_test_idx set (fastupdate = off); +insert into gin_test_tbl select array[1, 2, g] from generate_series(1, 1000) g; +insert into gin_test_tbl select array[1, 3, g] from generate_series(1, 1000) g; +delete from gin_test_tbl where i @> array[2]; +vacuum gin_test_tbl; +-- Test for "rare && frequent" searches +explain (costs off) +select count(*) from gin_test_tbl where i @> array[1, 999]; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on gin_test_tbl + Recheck Cond: (i @> '{1,999}'::integer[]) + -> Bitmap Index Scan on gin_test_idx + Index Cond: (i @> '{1,999}'::integer[]) +(5 rows) + +select count(*) from gin_test_tbl where i @> array[1, 999]; + count +------- + 3 +(1 row) + +-- Very weak test for gin_fuzzy_search_limit +set gin_fuzzy_search_limit = 1000; +explain (costs off) +select count(*) > 0 as ok from gin_test_tbl where i @> array[1]; + QUERY PLAN +--------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on gin_test_tbl + Recheck Cond: (i @> '{1}'::integer[]) + -> Bitmap Index Scan on gin_test_idx + Index Cond: (i @> '{1}'::integer[]) +(5 rows) + +select count(*) > 0 as ok from gin_test_tbl where i @> array[1]; + ok +---- + t +(1 row) + +reset gin_fuzzy_search_limit; +-- Test optimization of empty queries +create temp table t_gin_test_tbl(i int4[], j int4[]); +create index on t_gin_test_tbl using gin (i, j); +insert into t_gin_test_tbl +values + (null, null), + ('{}', null), + ('{1}', null), + ('{1,2}', null), + (null, '{}'), + (null, '{10}'), + ('{1,2}', '{10}'), + ('{2}', '{10}'), + ('{1,3}', '{}'), + ('{1,1}', '{10}'); +set enable_seqscan = off; +explain (costs off) +select * from t_gin_test_tbl where array[0] <@ i; + QUERY PLAN +--------------------------------------------------- + Bitmap Heap Scan on t_gin_test_tbl + Recheck Cond: ('{0}'::integer[] <@ i) + -> Bitmap Index Scan on t_gin_test_tbl_i_j_idx + Index Cond: (i @> '{0}'::integer[]) +(4 rows) + +select * from t_gin_test_tbl where array[0] <@ i; + i | j +---+--- +(0 rows) + +select * from t_gin_test_tbl where array[0] <@ i and '{}'::int4[] <@ j; + i | j +---+--- +(0 rows) + +explain (costs off) +select * from t_gin_test_tbl where i @> '{}'; + QUERY PLAN +--------------------------------------------------- + Bitmap Heap Scan on t_gin_test_tbl + Recheck Cond: (i @> '{}'::integer[]) + -> Bitmap Index Scan on t_gin_test_tbl_i_j_idx + Index Cond: (i @> '{}'::integer[]) +(4 rows) + +select * from t_gin_test_tbl where i @> '{}'; + i | j +-------+------ + {} | + {1} | + {1,2} | + {1,2} | {10} + {2} | {10} + {1,3} | {} + {1,1} | {10} +(7 rows) + +create function explain_query_json(query_sql text) +returns table (explain_line json) +language plpgsql as +$$ +begin + set enable_seqscan = off; + set enable_bitmapscan = on; + return query execute 'EXPLAIN (ANALYZE, FORMAT json) ' || query_sql; +end; +$$; +create function execute_text_query_index(query_sql text) +returns setof text +language plpgsql +as +$$ +begin + set enable_seqscan = off; + set enable_bitmapscan = on; + return query execute query_sql; +end; +$$; +create function execute_text_query_heap(query_sql text) +returns setof text +language plpgsql +as +$$ +begin + set enable_seqscan = on; + set enable_bitmapscan = off; + return query execute query_sql; +end; +$$; +-- check number of rows returned by index and removed by recheck +select + query, + js->0->'Plan'->'Plans'->0->'Actual Rows' as "return by index", + js->0->'Plan'->'Rows Removed by Index Recheck' as "removed by 
recheck", + (res_index = res_heap) as "match" +from + (values + ($$ i @> '{}' $$), + ($$ j @> '{}' $$), + ($$ i @> '{}' and j @> '{}' $$), + ($$ i @> '{1}' $$), + ($$ i @> '{1}' and j @> '{}' $$), + ($$ i @> '{1}' and i @> '{}' and j @> '{}' $$), + ($$ j @> '{10}' $$), + ($$ j @> '{10}' and i @> '{}' $$), + ($$ j @> '{10}' and j @> '{}' and i @> '{}' $$), + ($$ i @> '{1}' and j @> '{10}' $$) + ) q(query), + lateral explain_query_json($$select * from t_gin_test_tbl where $$ || query) js, + lateral execute_text_query_index($$select string_agg((i, j)::text, ' ') from t_gin_test_tbl where $$ || query) res_index, + lateral execute_text_query_heap($$select string_agg((i, j)::text, ' ') from t_gin_test_tbl where $$ || query) res_heap; + query | return by index | removed by recheck | match +-------------------------------------------+-----------------+--------------------+------- + i @> '{}' | 7 | 0 | t + j @> '{}' | 6 | 0 | t + i @> '{}' and j @> '{}' | 4 | 0 | t + i @> '{1}' | 5 | 0 | t + i @> '{1}' and j @> '{}' | 3 | 0 | t + i @> '{1}' and i @> '{}' and j @> '{}' | 3 | 0 | t + j @> '{10}' | 4 | 0 | t + j @> '{10}' and i @> '{}' | 3 | 0 | t + j @> '{10}' and j @> '{}' and i @> '{}' | 3 | 0 | t + i @> '{1}' and j @> '{10}' | 2 | 0 | t +(10 rows) + +reset enable_seqscan; +reset enable_bitmapscan; +-- re-purpose t_gin_test_tbl to test scans involving posting trees +insert into t_gin_test_tbl select array[1, g, g/10], array[2, g, g/10] + from generate_series(1, 20000) g; +select gin_clean_pending_list('t_gin_test_tbl_i_j_idx') is not null; + ?column? +---------- + t +(1 row) + +analyze t_gin_test_tbl; +set enable_seqscan = off; +set enable_bitmapscan = on; +explain (costs off) +select count(*) from t_gin_test_tbl where j @> array[50]; + QUERY PLAN +--------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on t_gin_test_tbl + Recheck Cond: (j @> '{50}'::integer[]) + -> Bitmap Index Scan on t_gin_test_tbl_i_j_idx + Index Cond: (j @> '{50}'::integer[]) +(5 rows) + +select count(*) from t_gin_test_tbl where j @> array[50]; + count +------- + 11 +(1 row) + +explain (costs off) +select count(*) from t_gin_test_tbl where j @> array[2]; + QUERY PLAN +--------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on t_gin_test_tbl + Recheck Cond: (j @> '{2}'::integer[]) + -> Bitmap Index Scan on t_gin_test_tbl_i_j_idx + Index Cond: (j @> '{2}'::integer[]) +(5 rows) + +select count(*) from t_gin_test_tbl where j @> array[2]; + count +------- + 20000 +(1 row) + +explain (costs off) +select count(*) from t_gin_test_tbl where j @> '{}'::int[]; + QUERY PLAN +--------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on t_gin_test_tbl + Recheck Cond: (j @> '{}'::integer[]) + -> Bitmap Index Scan on t_gin_test_tbl_i_j_idx + Index Cond: (j @> '{}'::integer[]) +(5 rows) + +select count(*) from t_gin_test_tbl where j @> '{}'::int[]; + count +------- + 20006 +(1 row) + +-- test vacuuming of posting trees +delete from t_gin_test_tbl where j @> array[2]; +vacuum t_gin_test_tbl; +select count(*) from t_gin_test_tbl where j @> array[50]; + count +------- + 0 +(1 row) + +select count(*) from t_gin_test_tbl where j @> array[2]; + count +------- + 0 +(1 row) + +select count(*) from t_gin_test_tbl where j @> '{}'::int[]; + count +------- + 6 +(1 row) + +reset enable_seqscan; +reset enable_bitmapscan; +drop table t_gin_test_tbl; +-- test an unlogged table, mostly to get coverage of ginbuildempty +create unlogged table 
t_gin_test_tbl(i int4[], j int4[]); +create index on t_gin_test_tbl using gin (i, j); +insert into t_gin_test_tbl +values + (null, null), + ('{}', null), + ('{1}', '{2,3}'); +drop table t_gin_test_tbl; diff --git a/src/test/regress/expected/gist.out b/src/test/regress/expected/gist.out new file mode 100644 index 0000000..c75bbb2 --- /dev/null +++ b/src/test/regress/expected/gist.out @@ -0,0 +1,403 @@ +-- +-- Test GiST indexes. +-- +-- There are other tests to test different GiST opclasses. This is for +-- testing GiST code itself. Vacuuming in particular. +create table gist_point_tbl(id int4, p point); +create index gist_pointidx on gist_point_tbl using gist(p); +-- Verify the fillfactor and buffering options +create index gist_pointidx2 on gist_point_tbl using gist(p) with (buffering = on, fillfactor=50); +create index gist_pointidx3 on gist_point_tbl using gist(p) with (buffering = off); +create index gist_pointidx4 on gist_point_tbl using gist(p) with (buffering = auto); +drop index gist_pointidx2, gist_pointidx3, gist_pointidx4; +-- Make sure bad values are refused +create index gist_pointidx5 on gist_point_tbl using gist(p) with (buffering = invalid_value); +ERROR: invalid value for enum option "buffering": invalid_value +DETAIL: Valid values are "on", "off", and "auto". +create index gist_pointidx5 on gist_point_tbl using gist(p) with (fillfactor=9); +ERROR: value 9 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +create index gist_pointidx5 on gist_point_tbl using gist(p) with (fillfactor=101); +ERROR: value 101 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +-- Insert enough data to create a tree that's a couple of levels deep. +insert into gist_point_tbl (id, p) +select g, point(g*10, g*10) from generate_series(1, 10000) g; +insert into gist_point_tbl (id, p) +select g+100000, point(g*10+1, g*10+1) from generate_series(1, 10000) g; +-- To test vacuum, delete some entries from all over the index. +delete from gist_point_tbl where id % 2 = 1; +-- And also delete some concentration of values. 
+delete from gist_point_tbl where id > 5000; +vacuum analyze gist_point_tbl; +-- rebuild the index with a different fillfactor +alter index gist_pointidx SET (fillfactor = 40); +reindex index gist_pointidx; +-- +-- Test Index-only plans on GiST indexes +-- +create table gist_tbl (b box, p point, c circle); +insert into gist_tbl +select box(point(0.05*i, 0.05*i), point(0.05*i, 0.05*i)), + point(0.05*i, 0.05*i), + circle(point(0.05*i, 0.05*i), 1.0) +from generate_series(0,10000) as i; +vacuum analyze gist_tbl; +set enable_seqscan=off; +set enable_bitmapscan=off; +set enable_indexonlyscan=on; +-- Test index-only scan with point opclass +create index gist_tbl_point_index on gist_tbl using gist (p); +-- check that the planner chooses an index-only scan +explain (costs off) +select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5)); + QUERY PLAN +-------------------------------------------------------- + Index Only Scan using gist_tbl_point_index on gist_tbl + Index Cond: (p <@ '(0.5,0.5),(0,0)'::box) +(2 rows) + +-- execute the same +select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5)); + p +------------- + (0,0) + (0.05,0.05) + (0.1,0.1) + (0.15,0.15) + (0.2,0.2) + (0.25,0.25) + (0.3,0.3) + (0.35,0.35) + (0.4,0.4) + (0.45,0.45) + (0.5,0.5) +(11 rows) + +-- Also test an index-only knn-search +explain (costs off) +select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5)) +order by p <-> point(0.201, 0.201); + QUERY PLAN +-------------------------------------------------------- + Index Only Scan using gist_tbl_point_index on gist_tbl + Index Cond: (p <@ '(0.5,0.5),(0,0)'::box) + Order By: (p <-> '(0.201,0.201)'::point) +(3 rows) + +select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5)) +order by p <-> point(0.201, 0.201); + p +------------- + (0.2,0.2) + (0.25,0.25) + (0.15,0.15) + (0.3,0.3) + (0.1,0.1) + (0.35,0.35) + (0.05,0.05) + (0.4,0.4) + (0,0) + (0.45,0.45) + (0.5,0.5) +(11 rows) + +-- Check commuted case as well +explain (costs off) +select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5)) +order by point(0.101, 0.101) <-> p; + QUERY PLAN +-------------------------------------------------------- + Index Only Scan using gist_tbl_point_index on gist_tbl + Index Cond: (p <@ '(0.5,0.5),(0,0)'::box) + Order By: (p <-> '(0.101,0.101)'::point) +(3 rows) + +select p from gist_tbl where p <@ box(point(0,0), point(0.5, 0.5)) +order by point(0.101, 0.101) <-> p; + p +------------- + (0.1,0.1) + (0.15,0.15) + (0.05,0.05) + (0.2,0.2) + (0,0) + (0.25,0.25) + (0.3,0.3) + (0.35,0.35) + (0.4,0.4) + (0.45,0.45) + (0.5,0.5) +(11 rows) + +-- Check case with multiple rescans (bug #14641) +explain (costs off) +select p from + (values (box(point(0,0), point(0.5,0.5))), + (box(point(0.5,0.5), point(0.75,0.75))), + (box(point(0.8,0.8), point(1.0,1.0)))) as v(bb) +cross join lateral + (select p from gist_tbl where p <@ bb order by p <-> bb[0] limit 2) ss; + QUERY PLAN +-------------------------------------------------------------------- + Nested Loop + -> Values Scan on "*VALUES*" + -> Limit + -> Index Only Scan using gist_tbl_point_index on gist_tbl + Index Cond: (p <@ "*VALUES*".column1) + Order By: (p <-> ("*VALUES*".column1)[0]) +(6 rows) + +select p from + (values (box(point(0,0), point(0.5,0.5))), + (box(point(0.5,0.5), point(0.75,0.75))), + (box(point(0.8,0.8), point(1.0,1.0)))) as v(bb) +cross join lateral + (select p from gist_tbl where p <@ bb order by p <-> bb[0] limit 2) ss; + p +------------- + (0.5,0.5) + (0.45,0.45) + (0.75,0.75) + (0.7,0.7) + 
(1,1) + (0.95,0.95) +(6 rows) + +drop index gist_tbl_point_index; +-- Test index-only scan with box opclass +create index gist_tbl_box_index on gist_tbl using gist (b); +-- check that the planner chooses an index-only scan +explain (costs off) +select b from gist_tbl where b <@ box(point(5,5), point(6,6)); + QUERY PLAN +------------------------------------------------------ + Index Only Scan using gist_tbl_box_index on gist_tbl + Index Cond: (b <@ '(6,6),(5,5)'::box) +(2 rows) + +-- execute the same +select b from gist_tbl where b <@ box(point(5,5), point(6,6)); + b +------------------------- + (5,5),(5,5) + (5.05,5.05),(5.05,5.05) + (5.1,5.1),(5.1,5.1) + (5.15,5.15),(5.15,5.15) + (5.2,5.2),(5.2,5.2) + (5.25,5.25),(5.25,5.25) + (5.3,5.3),(5.3,5.3) + (5.35,5.35),(5.35,5.35) + (5.4,5.4),(5.4,5.4) + (5.45,5.45),(5.45,5.45) + (5.5,5.5),(5.5,5.5) + (5.55,5.55),(5.55,5.55) + (5.6,5.6),(5.6,5.6) + (5.65,5.65),(5.65,5.65) + (5.7,5.7),(5.7,5.7) + (5.75,5.75),(5.75,5.75) + (5.8,5.8),(5.8,5.8) + (5.85,5.85),(5.85,5.85) + (5.9,5.9),(5.9,5.9) + (5.95,5.95),(5.95,5.95) + (6,6),(6,6) +(21 rows) + +-- Also test an index-only knn-search +explain (costs off) +select b from gist_tbl where b <@ box(point(5,5), point(6,6)) +order by b <-> point(5.2, 5.91); + QUERY PLAN +------------------------------------------------------ + Index Only Scan using gist_tbl_box_index on gist_tbl + Index Cond: (b <@ '(6,6),(5,5)'::box) + Order By: (b <-> '(5.2,5.91)'::point) +(3 rows) + +select b from gist_tbl where b <@ box(point(5,5), point(6,6)) +order by b <-> point(5.2, 5.91); + b +------------------------- + (5.55,5.55),(5.55,5.55) + (5.6,5.6),(5.6,5.6) + (5.5,5.5),(5.5,5.5) + (5.65,5.65),(5.65,5.65) + (5.45,5.45),(5.45,5.45) + (5.7,5.7),(5.7,5.7) + (5.4,5.4),(5.4,5.4) + (5.75,5.75),(5.75,5.75) + (5.35,5.35),(5.35,5.35) + (5.8,5.8),(5.8,5.8) + (5.3,5.3),(5.3,5.3) + (5.85,5.85),(5.85,5.85) + (5.25,5.25),(5.25,5.25) + (5.9,5.9),(5.9,5.9) + (5.2,5.2),(5.2,5.2) + (5.95,5.95),(5.95,5.95) + (5.15,5.15),(5.15,5.15) + (6,6),(6,6) + (5.1,5.1),(5.1,5.1) + (5.05,5.05),(5.05,5.05) + (5,5),(5,5) +(21 rows) + +-- Check commuted case as well +explain (costs off) +select b from gist_tbl where b <@ box(point(5,5), point(6,6)) +order by point(5.2, 5.91) <-> b; + QUERY PLAN +------------------------------------------------------ + Index Only Scan using gist_tbl_box_index on gist_tbl + Index Cond: (b <@ '(6,6),(5,5)'::box) + Order By: (b <-> '(5.2,5.91)'::point) +(3 rows) + +select b from gist_tbl where b <@ box(point(5,5), point(6,6)) +order by point(5.2, 5.91) <-> b; + b +------------------------- + (5.55,5.55),(5.55,5.55) + (5.6,5.6),(5.6,5.6) + (5.5,5.5),(5.5,5.5) + (5.65,5.65),(5.65,5.65) + (5.45,5.45),(5.45,5.45) + (5.7,5.7),(5.7,5.7) + (5.4,5.4),(5.4,5.4) + (5.75,5.75),(5.75,5.75) + (5.35,5.35),(5.35,5.35) + (5.8,5.8),(5.8,5.8) + (5.3,5.3),(5.3,5.3) + (5.85,5.85),(5.85,5.85) + (5.25,5.25),(5.25,5.25) + (5.9,5.9),(5.9,5.9) + (5.2,5.2),(5.2,5.2) + (5.95,5.95),(5.95,5.95) + (5.15,5.15),(5.15,5.15) + (6,6),(6,6) + (5.1,5.1),(5.1,5.1) + (5.05,5.05),(5.05,5.05) + (5,5),(5,5) +(21 rows) + +drop index gist_tbl_box_index; +-- Test that an index-only scan is not chosen, when the query involves the +-- circle column (the circle opclass does not support index-only scans). 
+create index gist_tbl_multi_index on gist_tbl using gist (p, c); +explain (costs off) +select p, c from gist_tbl +where p <@ box(point(5,5), point(6, 6)); + QUERY PLAN +--------------------------------------------------- + Index Scan using gist_tbl_multi_index on gist_tbl + Index Cond: (p <@ '(6,6),(5,5)'::box) +(2 rows) + +-- execute the same +select b, p from gist_tbl +where b <@ box(point(4.5, 4.5), point(5.5, 5.5)) +and p <@ box(point(5,5), point(6, 6)); + b | p +-------------------------+------------- + (5,5),(5,5) | (5,5) + (5.05,5.05),(5.05,5.05) | (5.05,5.05) + (5.1,5.1),(5.1,5.1) | (5.1,5.1) + (5.15,5.15),(5.15,5.15) | (5.15,5.15) + (5.2,5.2),(5.2,5.2) | (5.2,5.2) + (5.25,5.25),(5.25,5.25) | (5.25,5.25) + (5.3,5.3),(5.3,5.3) | (5.3,5.3) + (5.35,5.35),(5.35,5.35) | (5.35,5.35) + (5.4,5.4),(5.4,5.4) | (5.4,5.4) + (5.45,5.45),(5.45,5.45) | (5.45,5.45) + (5.5,5.5),(5.5,5.5) | (5.5,5.5) +(11 rows) + +drop index gist_tbl_multi_index; +-- Test that we don't try to return the value of a non-returnable +-- column in an index-only scan. (This isn't GIST-specific, but +-- it only applies to index AMs that can return some columns and not +-- others, so GIST with appropriate opclasses is a convenient test case.) +create index gist_tbl_multi_index on gist_tbl using gist (circle(p,1), p); +explain (verbose, costs off) +select circle(p,1) from gist_tbl +where p <@ box(point(5, 5), point(5.3, 5.3)); + QUERY PLAN +--------------------------------------------------------------- + Index Only Scan using gist_tbl_multi_index on public.gist_tbl + Output: circle(p, '1'::double precision) + Index Cond: (gist_tbl.p <@ '(5.3,5.3),(5,5)'::box) +(3 rows) + +select circle(p,1) from gist_tbl +where p <@ box(point(5, 5), point(5.3, 5.3)); + circle +----------------- + <(5,5),1> + <(5.05,5.05),1> + <(5.1,5.1),1> + <(5.15,5.15),1> + <(5.2,5.2),1> + <(5.25,5.25),1> + <(5.3,5.3),1> +(7 rows) + +-- Similarly, test that index rechecks involving a non-returnable column +-- are done correctly. +explain (verbose, costs off) +select p from gist_tbl where circle(p,1) @> circle(point(0,0),0.95); + QUERY PLAN +--------------------------------------------------------------------------------------- + Index Only Scan using gist_tbl_multi_index on public.gist_tbl + Output: p + Index Cond: ((circle(gist_tbl.p, '1'::double precision)) @> '<(0,0),0.95>'::circle) +(3 rows) + +select p from gist_tbl where circle(p,1) @> circle(point(0,0),0.95); + p +------- + (0,0) +(1 row) + +-- Also check that use_physical_tlist doesn't trigger in such cases. +explain (verbose, costs off) +select count(*) from gist_tbl; + QUERY PLAN +--------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Index Only Scan using gist_tbl_multi_index on public.gist_tbl +(3 rows) + +select count(*) from gist_tbl; + count +------- + 10001 +(1 row) + +-- This case isn't supported, but it should at least EXPLAIN correctly. 
+explain (verbose, costs off) +select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Limit + Output: p, ((circle(p, '1'::double precision) <-> '(0,0)'::point)) + -> Index Only Scan using gist_tbl_multi_index on public.gist_tbl + Output: p, (circle(p, '1'::double precision) <-> '(0,0)'::point) + Order By: ((circle(gist_tbl.p, '1'::double precision)) <-> '(0,0)'::point) +(5 rows) + +select p from gist_tbl order by circle(p,1) <-> point(0,0) limit 1; +ERROR: lossy distance functions are not supported in index-only scans +-- Force an index build using buffering. +create index gist_tbl_box_index_forcing_buffering on gist_tbl using gist (p) + with (buffering=on, fillfactor=50); +-- Clean up +reset enable_seqscan; +reset enable_bitmapscan; +reset enable_indexonlyscan; +drop table gist_tbl; +-- test an unlogged table, mostly to get coverage of gistbuildempty +create unlogged table gist_tbl (b box); +create index gist_tbl_box_index on gist_tbl using gist (b); +insert into gist_tbl + select box(point(0.05*i, 0.05*i)) from generate_series(0,10) as i; +drop table gist_tbl; diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out new file mode 100644 index 0000000..a3b9aac --- /dev/null +++ b/src/test/regress/expected/groupingsets.out @@ -0,0 +1,2153 @@ +-- +-- grouping sets +-- +-- test data sources +create temp view gstest1(a,b,v) + as values (1,1,10),(1,1,11),(1,2,12),(1,2,13),(1,3,14), + (2,3,15), + (3,3,16),(3,4,17), + (4,1,18),(4,1,19); +create temp table gstest2 (a integer, b integer, c integer, d integer, + e integer, f integer, g integer, h integer); +copy gstest2 from stdin; +create temp table gstest3 (a integer, b integer, c integer, d integer); +copy gstest3 from stdin; +alter table gstest3 add primary key (a); +create temp table gstest4(id integer, v integer, + unhashable_col bit(4), unsortable_col xid); +insert into gstest4 +values (1,1,b'0000','1'), (2,2,b'0001','1'), + (3,4,b'0010','2'), (4,8,b'0011','2'), + (5,16,b'0000','2'), (6,32,b'0001','2'), + (7,64,b'0010','1'), (8,128,b'0011','1'); +create temp table gstest_empty (a integer, b integer, v integer); +create function gstest_data(v integer, out a integer, out b integer) + returns setof record + as $f$ + begin + return query select v, i from generate_series(1,3) i; + end; + $f$ language plpgsql; +-- basic functionality +set enable_hashagg = false; -- test hashing explicitly later +-- simple rollup with multiple plain aggregates, with and without ordering +-- (and with ordering differing from grouping) +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by rollup (a,b); + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | 1 | 0 | 21 | 2 | 11 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 3 | 0 | 14 | 1 | 14 + 1 | | 1 | 60 | 5 | 14 + 2 | 3 | 0 | 15 | 1 | 15 + 2 | | 1 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 3 | 4 | 0 | 17 | 1 | 17 + 3 | | 1 | 33 | 2 | 17 + 4 | 1 | 0 | 37 | 2 | 19 + 4 | | 1 | 37 | 2 | 19 + | | 3 | 145 | 10 | 19 +(12 rows) + +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by rollup (a,b) order by a,b; + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | 1 | 0 | 21 | 2 | 11 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 3 | 0 | 14 | 1 | 14 + 1 | | 1 | 60 | 5 | 14 + 2 | 3 | 0 | 15 | 1 | 15 + 2 | | 1 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 3 | 4 | 0 | 17 | 1 | 17 + 3 | | 1 | 33 | 2 
| 17 + 4 | 1 | 0 | 37 | 2 | 19 + 4 | | 1 | 37 | 2 | 19 + | | 3 | 145 | 10 | 19 +(12 rows) + +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by rollup (a,b) order by b desc, a; + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | | 1 | 60 | 5 | 14 + 2 | | 1 | 15 | 1 | 15 + 3 | | 1 | 33 | 2 | 17 + 4 | | 1 | 37 | 2 | 19 + | | 3 | 145 | 10 | 19 + 3 | 4 | 0 | 17 | 1 | 17 + 1 | 3 | 0 | 14 | 1 | 14 + 2 | 3 | 0 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 1 | 0 | 21 | 2 | 11 + 4 | 1 | 0 | 37 | 2 | 19 +(12 rows) + +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by rollup (a,b) order by coalesce(a,0)+coalesce(b,0); + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + | | 3 | 145 | 10 | 19 + 1 | | 1 | 60 | 5 | 14 + 1 | 1 | 0 | 21 | 2 | 11 + 2 | | 1 | 15 | 1 | 15 + 3 | | 1 | 33 | 2 | 17 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 3 | 0 | 14 | 1 | 14 + 4 | | 1 | 37 | 2 | 19 + 4 | 1 | 0 | 37 | 2 | 19 + 2 | 3 | 0 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 3 | 4 | 0 | 17 | 1 | 17 +(12 rows) + +-- various types of ordered aggs +select a, b, grouping(a,b), + array_agg(v order by v), + string_agg(v::text, ':' order by v desc), + percentile_disc(0.5) within group (order by v), + rank(1,2,12) within group (order by a,b,v) + from gstest1 group by rollup (a,b) order by a,b; + a | b | grouping | array_agg | string_agg | percentile_disc | rank +---+---+----------+---------------------------------+-------------------------------+-----------------+------ + 1 | 1 | 0 | {10,11} | 11:10 | 10 | 3 + 1 | 2 | 0 | {12,13} | 13:12 | 12 | 1 + 1 | 3 | 0 | {14} | 14 | 14 | 1 + 1 | | 1 | {10,11,12,13,14} | 14:13:12:11:10 | 12 | 3 + 2 | 3 | 0 | {15} | 15 | 15 | 1 + 2 | | 1 | {15} | 15 | 15 | 1 + 3 | 3 | 0 | {16} | 16 | 16 | 1 + 3 | 4 | 0 | {17} | 17 | 17 | 1 + 3 | | 1 | {16,17} | 17:16 | 16 | 1 + 4 | 1 | 0 | {18,19} | 19:18 | 18 | 1 + 4 | | 1 | {18,19} | 19:18 | 18 | 1 + | | 3 | {10,11,12,13,14,15,16,17,18,19} | 19:18:17:16:15:14:13:12:11:10 | 14 | 3 +(12 rows) + +-- test usage of grouped columns in direct args of aggs +select grouping(a), a, array_agg(b), + rank(a) within group (order by b nulls first), + rank(a) within group (order by b nulls last) + from (values (1,1),(1,4),(1,5),(3,1),(3,2)) v(a,b) + group by rollup (a) order by a; + grouping | a | array_agg | rank | rank +----------+---+-------------+------+------ + 0 | 1 | {1,4,5} | 1 | 1 + 0 | 3 | {1,2} | 3 | 3 + 1 | | {1,4,5,1,2} | 1 | 6 +(3 rows) + +-- nesting with window functions +select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum + from gstest2 group by rollup (a,b) order by rsum, a, b; + a | b | sum | rsum +---+---+-----+------ + 1 | 1 | 8 | 8 + 1 | 2 | 2 | 10 + 1 | | 10 | 20 + 2 | 2 | 2 | 22 + 2 | | 2 | 24 + | | 12 | 36 +(6 rows) + +-- nesting with grouping sets +select sum(c) from gstest2 + group by grouping sets((), grouping sets((), grouping sets(()))) + order by 1 desc; + sum +----- + 12 + 12 + 12 +(3 rows) + +select sum(c) from gstest2 + group by grouping sets((), grouping sets((), grouping sets(((a, b))))) + order by 1 desc; + sum +----- + 12 + 12 + 8 + 2 + 2 +(5 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c)))) + order by 1 desc; + sum +----- + 12 + 12 + 6 + 6 + 6 + 6 +(6 rows) + +select sum(c) from gstest2 + group by grouping sets(a, grouping sets(a, cube(b))) + order by 1 desc; + sum +----- + 12 + 10 + 10 + 8 + 4 + 2 + 2 +(7 rows) + +select sum(c) from gstest2 + 
group by grouping sets(grouping sets((a, (b)))) + order by 1 desc; + sum +----- + 8 + 2 + 2 +(3 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets((a, b))) + order by 1 desc; + sum +----- + 8 + 2 + 2 +(3 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets(a, grouping sets(a), a)) + order by 1 desc; + sum +----- + 10 + 10 + 10 + 2 + 2 + 2 +(6 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a)) + order by 1 desc; + sum +----- + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 +(16 rows) + +select sum(c) from gstest2 + group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a)) + order by 1 desc; + sum +----- + 10 + 8 + 8 + 2 + 2 + 2 + 2 + 2 +(8 rows) + +-- empty input: first is 0 rows, second 1, third 3 etc. +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); + a | b | sum | count +---+---+-----+------- +(0 rows) + +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); + a | b | sum | count +---+---+-----+------- + | | | 0 +(1 row) + +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); + a | b | sum | count +---+---+-----+------- + | | | 0 + | | | 0 + | | | 0 +(3 rows) + +select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); + sum | count +-----+------- + | 0 + | 0 + | 0 +(3 rows) + +-- empty input with joins tests some important code paths +select t1.a, t2.b, sum(t1.v), count(*) from gstest_empty t1, gstest_empty t2 + group by grouping sets ((t1.a,t2.b),()); + a | b | sum | count +---+---+-----+------- + | | | 0 +(1 row) + +-- simple joins, var resolution, GROUPING on join vars +select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a) + from gstest1 t1, gstest2 t2 + group by grouping sets ((t1.a, t2.b), ()); + a | b | grouping | sum | max +---+---+----------+------+----- + 1 | 1 | 0 | 420 | 1 + 1 | 2 | 0 | 120 | 2 + 2 | 1 | 0 | 105 | 1 + 2 | 2 | 0 | 30 | 2 + 3 | 1 | 0 | 231 | 1 + 3 | 2 | 0 | 66 | 2 + 4 | 1 | 0 | 259 | 1 + 4 | 2 | 0 | 74 | 2 + | | 3 | 1305 | 2 +(9 rows) + +select t1.a, t2.b, grouping(t1.a, t2.b), sum(t1.v), max(t2.a) + from gstest1 t1 join gstest2 t2 on (t1.a=t2.a) + group by grouping sets ((t1.a, t2.b), ()); + a | b | grouping | sum | max +---+---+----------+-----+----- + 1 | 1 | 0 | 420 | 1 + 1 | 2 | 0 | 60 | 1 + 2 | 2 | 0 | 15 | 2 + | | 3 | 495 | 2 +(4 rows) + +select a, b, grouping(a, b), sum(t1.v), max(t2.c) + from gstest1 t1 join gstest2 t2 using (a,b) + group by grouping sets ((a, b), ()); + a | b | grouping | sum | max +---+---+----------+-----+----- + 1 | 1 | 0 | 147 | 2 + 1 | 2 | 0 | 25 | 2 + | | 3 | 172 | 2 +(3 rows) + +-- check that functionally dependent cols are not nulled +select a, d, grouping(a,b,c) + from gstest3 + group by grouping sets ((a,b), (a,c)); + a | d | grouping +---+---+---------- + 1 | 1 | 1 + 2 | 2 | 1 + 1 | 1 | 2 + 2 | 2 | 2 +(4 rows) + +-- check that distinct grouping columns are kept separate +-- even if they are equal() +explain (costs off) +select g as alias1, g as alias2 + from generate_series(1,3) g + group by alias1, rollup(alias2); + QUERY PLAN +------------------------------------------------ + GroupAggregate + Group Key: g, g + Group Key: g + -> Sort + Sort Key: g + -> Function Scan on generate_series g +(6 rows) + +select g as alias1, g as alias2 + from generate_series(1,3) g + group by alias1, rollup(alias2); + alias1 | alias2 
+--------+-------- + 1 | 1 + 1 | + 2 | 2 + 2 | + 3 | 3 + 3 | +(6 rows) + +-- check that pulled-up subquery outputs still go to null when appropriate +select four, x + from (select four, ten, 'foo'::text as x from tenk1) as t + group by grouping sets (four, x) + having x = 'foo'; + four | x +------+----- + | foo +(1 row) + +select four, x || 'x' + from (select four, ten, 'foo'::text as x from tenk1) as t + group by grouping sets (four, x) + order by four; + four | ?column? +------+---------- + 0 | + 1 | + 2 | + 3 | + | foox +(5 rows) + +select (x+y)*1, sum(z) + from (select 1 as x, 2 as y, 3 as z) s + group by grouping sets (x+y, x); + ?column? | sum +----------+----- + 3 | 3 + | 3 +(2 rows) + +select x, not x as not_x, q2 from + (select *, q1 = 1 as x from int8_tbl i1) as t + group by grouping sets(x, q2) + order by x, q2; + x | not_x | q2 +---+-------+------------------- + f | t | + | | -4567890123456789 + | | 123 + | | 456 + | | 4567890123456789 +(5 rows) + +-- check qual push-down rules for a subquery with grouping sets +explain (verbose, costs off) +select * from ( + select 1 as x, q1, sum(q2) + from int8_tbl i1 + group by grouping sets(1, 2) +) ss +where x = 1 and q1 = 123; + QUERY PLAN +-------------------------------------------- + Subquery Scan on ss + Output: ss.x, ss.q1, ss.sum + Filter: ((ss.x = 1) AND (ss.q1 = 123)) + -> GroupAggregate + Output: (1), i1.q1, sum(i1.q2) + Group Key: 1 + Sort Key: i1.q1 + Group Key: i1.q1 + -> Seq Scan on public.int8_tbl i1 + Output: 1, i1.q1, i1.q2 +(10 rows) + +select * from ( + select 1 as x, q1, sum(q2) + from int8_tbl i1 + group by grouping sets(1, 2) +) ss +where x = 1 and q1 = 123; + x | q1 | sum +---+----+----- +(0 rows) + +-- check handling of pulled-up SubPlan in GROUPING() argument (bug #17479) +explain (verbose, costs off) +select grouping(ss.x) +from int8_tbl i1 +cross join lateral (select (select i1.q1) as x) ss +group by ss.x; + QUERY PLAN +------------------------------------------------ + GroupAggregate + Output: GROUPING((SubPlan 1)), ((SubPlan 2)) + Group Key: ((SubPlan 2)) + -> Sort + Output: ((SubPlan 2)), i1.q1 + Sort Key: ((SubPlan 2)) + -> Seq Scan on public.int8_tbl i1 + Output: (SubPlan 2), i1.q1 + SubPlan 2 + -> Result + Output: i1.q1 +(11 rows) + +select grouping(ss.x) +from int8_tbl i1 +cross join lateral (select (select i1.q1) as x) ss +group by ss.x; + grouping +---------- + 0 + 0 +(2 rows) + +explain (verbose, costs off) +select (select grouping(ss.x)) +from int8_tbl i1 +cross join lateral (select (select i1.q1) as x) ss +group by ss.x; + QUERY PLAN +-------------------------------------------- + GroupAggregate + Output: (SubPlan 2), ((SubPlan 3)) + Group Key: ((SubPlan 3)) + -> Sort + Output: ((SubPlan 3)), i1.q1 + Sort Key: ((SubPlan 3)) + -> Seq Scan on public.int8_tbl i1 + Output: (SubPlan 3), i1.q1 + SubPlan 3 + -> Result + Output: i1.q1 + SubPlan 2 + -> Result + Output: GROUPING((SubPlan 1)) +(14 rows) + +select (select grouping(ss.x)) +from int8_tbl i1 +cross join lateral (select (select i1.q1) as x) ss +group by ss.x; + grouping +---------- + 0 + 0 +(2 rows) + +-- simple rescan tests +select a, b, sum(v.x) + from (values (1),(2)) v(x), gstest_data(v.x) + group by rollup (a,b); + a | b | sum +---+---+----- + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | | 3 + 2 | 1 | 2 + 2 | 2 | 2 + 2 | 3 | 2 + 2 | | 6 + | | 9 +(9 rows) + +select * + from (values (1),(2)) v(x), + lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s; +ERROR: aggregate functions are not allowed in FROM clause of their own 
query level +LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ... + ^ +-- min max optimization should still work with GROUP BY () +explain (costs off) + select min(unique1) from tenk1 GROUP BY (); + QUERY PLAN +------------------------------------------------------------ + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 IS NOT NULL) +(5 rows) + +-- Views with GROUPING SET queries +CREATE VIEW gstest_view AS select a, b, grouping(a,b), sum(c), count(*), max(c) + from gstest2 group by rollup ((a,b,c),(c,d)); +NOTICE: view "gstest_view" will be a temporary view +select pg_get_viewdef('gstest_view'::regclass, true); + pg_get_viewdef +--------------------------------------- + SELECT a, + + b, + + GROUPING(a, b) AS "grouping", + + sum(c) AS sum, + + count(*) AS count, + + max(c) AS max + + FROM gstest2 + + GROUP BY ROLLUP((a, b, c), (c, d)); +(1 row) + +-- Nested queries with 3 or more levels of nesting +select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f); + grouping +---------- + 0 + 0 + 0 +(3 rows) + +select(select (select grouping(e,f) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f); + grouping +---------- + 0 + 1 + 3 +(3 rows) + +select(select (select grouping(c) from (values (1)) v2(c) GROUP BY c) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP(e,f); + grouping +---------- + 0 + 0 + 0 +(3 rows) + +-- Combinations of operations +select a, b, c, d from gstest2 group by rollup(a,b),grouping sets(c,d); + a | b | c | d +---+---+---+--- + 1 | 1 | 1 | + 1 | | 1 | + | | 1 | + 1 | 1 | 2 | + 1 | 2 | 2 | + 1 | | 2 | + 2 | 2 | 2 | + 2 | | 2 | + | | 2 | + 1 | 1 | | 1 + 1 | | | 1 + | | | 1 + 1 | 1 | | 2 + 1 | 2 | | 2 + 1 | | | 2 + 2 | 2 | | 2 + 2 | | | 2 + | | | 2 +(18 rows) + +select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a); + a | b +---+--- + 1 | 2 + 2 | 3 +(2 rows) + +-- Tests for chained aggregates +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | 1 | 0 | 21 | 2 | 11 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 3 | 0 | 14 | 1 | 14 + 2 | 3 | 0 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 3 | 4 | 0 | 17 | 1 | 17 + 4 | 1 | 0 | 37 | 2 | 19 + | | 3 | 21 | 2 | 11 + | | 3 | 21 | 2 | 11 + | | 3 | 25 | 2 | 13 + | | 3 | 25 | 2 | 13 + | | 3 | 14 | 1 | 14 + | | 3 | 14 | 1 | 14 + | | 3 | 15 | 1 | 15 + | | 3 | 15 | 1 | 15 + | | 3 | 16 | 1 | 16 + | | 3 | 16 | 1 | 16 + | | 3 | 17 | 1 | 17 + | | 3 | 17 | 1 | 17 + | | 3 | 37 | 2 | 19 + | | 3 | 37 | 2 | 19 +(21 rows) + +select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP((e+1),(f+1)); + grouping +---------- + 0 + 0 + 0 +(3 rows) + +select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY CUBE((e+1),(f+1)) ORDER BY (e+1),(f+1); + grouping +---------- + 0 + 0 + 0 + 0 +(4 rows) + +select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum + from gstest2 group by cube (a,b) order by rsum, a, b; + a | b | sum | rsum +---+---+-----+------ + 1 | 1 | 8 | 8 + 1 | 2 | 2 | 10 + 1 | | 10 | 20 + 2 | 2 | 2 | 22 + 2 | | 2 | 24 + | 1 | 8 | 32 + 
| 2 | 4 | 36 + | | 12 | 48 +(8 rows) + +select a, b, sum(c) from (values (1,1,10),(1,1,11),(1,2,12),(1,2,13),(1,3,14),(2,3,15),(3,3,16),(3,4,17),(4,1,18),(4,1,19)) v(a,b,c) group by rollup (a,b); + a | b | sum +---+---+----- + 1 | 1 | 21 + 1 | 2 | 25 + 1 | 3 | 14 + 1 | | 60 + 2 | 3 | 15 + 2 | | 15 + 3 | 3 | 16 + 3 | 4 | 17 + 3 | | 33 + 4 | 1 | 37 + 4 | | 37 + | | 145 +(12 rows) + +select a, b, sum(v.x) + from (values (1),(2)) v(x), gstest_data(v.x) + group by cube (a,b) order by a,b; + a | b | sum +---+---+----- + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | | 3 + 2 | 1 | 2 + 2 | 2 | 2 + 2 | 3 | 2 + 2 | | 6 + | 1 | 3 + | 2 | 3 + | 3 | 3 + | | 9 +(12 rows) + +-- Test reordering of grouping sets +explain (costs off) +select * from gstest1 group by grouping sets((a,b,v),(v)) order by v,b,a; + QUERY PLAN +------------------------------------------------------------------------------ + GroupAggregate + Group Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1 + Group Key: "*VALUES*".column3 + -> Sort + Sort Key: "*VALUES*".column3, "*VALUES*".column2, "*VALUES*".column1 + -> Values Scan on "*VALUES*" +(6 rows) + +-- Agg level check. This query should error out. +select (select grouping(a,b) from gstest2) from gstest2 group by a,b; +ERROR: arguments to GROUPING must be grouping expressions of the associated query level +LINE 1: select (select grouping(a,b) from gstest2) from gstest2 grou... + ^ +--Nested queries +select a, b, sum(c), count(*) from gstest2 group by grouping sets (rollup(a,b),a); + a | b | sum | count +---+---+-----+------- + 1 | 1 | 8 | 7 + 1 | 2 | 2 | 1 + 1 | | 10 | 8 + 1 | | 10 | 8 + 2 | 2 | 2 | 1 + 2 | | 2 | 1 + 2 | | 2 | 1 + | | 12 | 9 +(8 rows) + +-- HAVING queries +select ten, sum(distinct four) from onek a +group by grouping sets((ten,four),(ten)) +having exists (select 1 from onek b where sum(distinct a.four) = b.four); + ten | sum +-----+----- + 0 | 0 + 0 | 2 + 0 | 2 + 1 | 1 + 1 | 3 + 2 | 0 + 2 | 2 + 2 | 2 + 3 | 1 + 3 | 3 + 4 | 0 + 4 | 2 + 4 | 2 + 5 | 1 + 5 | 3 + 6 | 0 + 6 | 2 + 6 | 2 + 7 | 1 + 7 | 3 + 8 | 0 + 8 | 2 + 8 | 2 + 9 | 1 + 9 | 3 +(25 rows) + +-- Tests around pushdown of HAVING clauses, partially testing against previous bugs +select a,count(*) from gstest2 group by rollup(a) order by a; + a | count +---+------- + 1 | 8 + 2 | 1 + | 9 +(3 rows) + +select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a; + a | count +---+------- + 2 | 1 + | 9 +(2 rows) + +explain (costs off) + select a,count(*) from gstest2 group by rollup(a) having a is distinct from 1 order by a; + QUERY PLAN +---------------------------------- + GroupAggregate + Group Key: a + Group Key: () + Filter: (a IS DISTINCT FROM 1) + -> Sort + Sort Key: a + -> Seq Scan on gstest2 +(7 rows) + +select v.c, (select count(*) from gstest2 group by () having v.c) + from (values (false),(true)) v(c) order by v.c; + c | count +---+------- + f | + t | 9 +(2 rows) + +explain (costs off) + select v.c, (select count(*) from gstest2 group by () having v.c) + from (values (false),(true)) v(c) order by v.c; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: "*VALUES*".column1 + -> Values Scan on "*VALUES*" + SubPlan 1 + -> Aggregate + Group Key: () + Filter: "*VALUES*".column1 + -> Result + One-Time Filter: "*VALUES*".column1 + -> Seq Scan on gstest2 +(10 rows) + +-- HAVING with GROUPING queries +select ten, grouping(ten) from onek +group by grouping sets(ten) having grouping(ten) >= 0 +order by 2,1; + ten | grouping +-----+---------- + 
0 | 0 + 1 | 0 + 2 | 0 + 3 | 0 + 4 | 0 + 5 | 0 + 6 | 0 + 7 | 0 + 8 | 0 + 9 | 0 +(10 rows) + +select ten, grouping(ten) from onek +group by grouping sets(ten, four) having grouping(ten) > 0 +order by 2,1; + ten | grouping +-----+---------- + | 1 + | 1 + | 1 + | 1 +(4 rows) + +select ten, grouping(ten) from onek +group by rollup(ten) having grouping(ten) > 0 +order by 2,1; + ten | grouping +-----+---------- + | 1 +(1 row) + +select ten, grouping(ten) from onek +group by cube(ten) having grouping(ten) > 0 +order by 2,1; + ten | grouping +-----+---------- + | 1 +(1 row) + +select ten, grouping(ten) from onek +group by (ten) having grouping(ten) >= 0 +order by 2,1; + ten | grouping +-----+---------- + 0 | 0 + 1 | 0 + 2 | 0 + 3 | 0 + 4 | 0 + 5 | 0 + 6 | 0 + 7 | 0 + 8 | 0 + 9 | 0 +(10 rows) + +-- FILTER queries +select ten, sum(distinct four) filter (where four::text ~ '123') from onek a +group by rollup(ten); + ten | sum +-----+----- + 0 | + 1 | + 2 | + 3 | + 4 | + 5 | + 6 | + 7 | + 8 | + 9 | + | +(11 rows) + +-- More rescan tests +select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; + a | a | four | ten | count +---+---+------+-----+------- + 1 | 1 | 0 | 0 | 50 + 1 | 1 | 0 | 2 | 50 + 1 | 1 | 0 | 4 | 50 + 1 | 1 | 0 | 6 | 50 + 1 | 1 | 0 | 8 | 50 + 1 | 1 | 0 | | 250 + 1 | 1 | 1 | 1 | 50 + 1 | 1 | 1 | 3 | 50 + 1 | 1 | 1 | 5 | 50 + 1 | 1 | 1 | 7 | 50 + 1 | 1 | 1 | 9 | 50 + 1 | 1 | 1 | | 250 + 1 | 1 | 2 | 0 | 50 + 1 | 1 | 2 | 2 | 50 + 1 | 1 | 2 | 4 | 50 + 1 | 1 | 2 | 6 | 50 + 1 | 1 | 2 | 8 | 50 + 1 | 1 | 2 | | 250 + 1 | 1 | 3 | 1 | 50 + 1 | 1 | 3 | 3 | 50 + 1 | 1 | 3 | 5 | 50 + 1 | 1 | 3 | 7 | 50 + 1 | 1 | 3 | 9 | 50 + 1 | 1 | 3 | | 250 + 1 | 1 | | 0 | 100 + 1 | 1 | | 1 | 100 + 1 | 1 | | 2 | 100 + 1 | 1 | | 3 | 100 + 1 | 1 | | 4 | 100 + 1 | 1 | | 5 | 100 + 1 | 1 | | 6 | 100 + 1 | 1 | | 7 | 100 + 1 | 1 | | 8 | 100 + 1 | 1 | | 9 | 100 + 1 | 1 | | | 1000 + 2 | 2 | 0 | 0 | 50 + 2 | 2 | 0 | 2 | 50 + 2 | 2 | 0 | 4 | 50 + 2 | 2 | 0 | 6 | 50 + 2 | 2 | 0 | 8 | 50 + 2 | 2 | 0 | | 250 + 2 | 2 | 1 | 1 | 50 + 2 | 2 | 1 | 3 | 50 + 2 | 2 | 1 | 5 | 50 + 2 | 2 | 1 | 7 | 50 + 2 | 2 | 1 | 9 | 50 + 2 | 2 | 1 | | 250 + 2 | 2 | 2 | 0 | 50 + 2 | 2 | 2 | 2 | 50 + 2 | 2 | 2 | 4 | 50 + 2 | 2 | 2 | 6 | 50 + 2 | 2 | 2 | 8 | 50 + 2 | 2 | 2 | | 250 + 2 | 2 | 3 | 1 | 50 + 2 | 2 | 3 | 3 | 50 + 2 | 2 | 3 | 5 | 50 + 2 | 2 | 3 | 7 | 50 + 2 | 2 | 3 | 9 | 50 + 2 | 2 | 3 | | 250 + 2 | 2 | | 0 | 100 + 2 | 2 | | 1 | 100 + 2 | 2 | | 2 | 100 + 2 | 2 | | 3 | 100 + 2 | 2 | | 4 | 100 + 2 | 2 | | 5 | 100 + 2 | 2 | | 6 | 100 + 2 | 2 | | 7 | 100 + 2 | 2 | | 8 | 100 + 2 | 2 | | 9 | 100 + 2 | 2 | | | 1000 +(70 rows) + +select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); + array +------------------------------------------------------------------------------------------------------------------------------------------------------ + {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"} + {"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"} +(2 rows) + +-- Grouping on text columns +select sum(ten) from onek group by two, rollup(four::text) order by 1; + sum +------ + 1000 + 1000 + 1250 + 1250 + 2000 + 2500 +(6 rows) + +select sum(ten) from onek group by 
rollup(four::text), two order by 1; + sum +------ + 1000 + 1000 + 1250 + 1250 + 2000 + 2500 +(6 rows) + +-- hashing support +set enable_hashagg = true; +-- failure cases +select count(*) from gstest4 group by rollup(unhashable_col,unsortable_col); +ERROR: could not implement GROUP BY +DETAIL: Some of the datatypes only support hashing, while others only support sorting. +select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id)); +ERROR: could not implement GROUP BY +DETAIL: Some of the datatypes only support hashing, while others only support sorting. +-- simple cases +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by grouping sets ((a),(b)) order by 3,1,2; + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | | 1 | 60 | 5 | 14 + 2 | | 1 | 15 | 1 | 15 + 3 | | 1 | 33 | 2 | 17 + 4 | | 1 | 37 | 2 | 19 + | 1 | 2 | 58 | 4 | 19 + | 2 | 2 | 25 | 2 | 13 + | 3 | 2 | 45 | 3 | 16 + | 4 | 2 | 17 | 1 | 17 +(8 rows) + +explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by grouping sets ((a),(b)) order by 3,1,2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2 + -> HashAggregate + Hash Key: "*VALUES*".column1 + Hash Key: "*VALUES*".column2 + -> Values Scan on "*VALUES*" +(6 rows) + +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by cube(a,b) order by 3,1,2; + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | 1 | 0 | 21 | 2 | 11 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 3 | 0 | 14 | 1 | 14 + 2 | 3 | 0 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 3 | 4 | 0 | 17 | 1 | 17 + 4 | 1 | 0 | 37 | 2 | 19 + 1 | | 1 | 60 | 5 | 14 + 2 | | 1 | 15 | 1 | 15 + 3 | | 1 | 33 | 2 | 17 + 4 | | 1 | 37 | 2 | 19 + | 1 | 2 | 58 | 4 | 19 + | 2 | 2 | 25 | 2 | 13 + | 3 | 2 | 45 | 3 | 16 + | 4 | 2 | 17 | 1 | 17 + | | 3 | 145 | 10 | 19 +(16 rows) + +explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by cube(a,b) order by 3,1,2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort + Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2 + -> MixedAggregate + Hash Key: "*VALUES*".column1, "*VALUES*".column2 + Hash Key: "*VALUES*".column1 + Hash Key: "*VALUES*".column2 + Group Key: () + -> Values Scan on "*VALUES*" +(8 rows) + +-- shouldn't try and hash +explain (costs off) + select a, b, grouping(a,b), array_agg(v order by v) + from gstest1 group by cube(a,b); + QUERY PLAN +---------------------------------------------------------- + GroupAggregate + Group Key: "*VALUES*".column1, "*VALUES*".column2 + Group Key: "*VALUES*".column1 + Group Key: () + Sort Key: "*VALUES*".column2 + Group Key: "*VALUES*".column2 + -> Sort + Sort Key: "*VALUES*".column1, "*VALUES*".column2 + -> Values Scan on "*VALUES*" +(9 rows) + +-- unsortable cases +select unsortable_col, count(*) + from gstest4 group by grouping sets ((unsortable_col),(unsortable_col)) + order by unsortable_col::text; + unsortable_col | count +----------------+------- + 1 | 4 + 1 | 4 + 2 | 4 + 2 | 4 +(4 rows) + +-- mixed hashable/sortable cases +select unhashable_col, unsortable_col, + grouping(unhashable_col, unsortable_col), + count(*), sum(v) + from gstest4 group by 
grouping sets ((unhashable_col),(unsortable_col)) + order by 3, 5; + unhashable_col | unsortable_col | grouping | count | sum +----------------+----------------+----------+-------+----- + 0000 | | 1 | 2 | 17 + 0001 | | 1 | 2 | 34 + 0010 | | 1 | 2 | 68 + 0011 | | 1 | 2 | 136 + | 2 | 2 | 4 | 60 + | 1 | 2 | 4 | 195 +(6 rows) + +explain (costs off) + select unhashable_col, unsortable_col, + grouping(unhashable_col, unsortable_col), + count(*), sum(v) + from gstest4 group by grouping sets ((unhashable_col),(unsortable_col)) + order by 3,5; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v)) + -> MixedAggregate + Hash Key: unsortable_col + Group Key: unhashable_col + -> Sort + Sort Key: unhashable_col + -> Seq Scan on gstest4 +(8 rows) + +select unhashable_col, unsortable_col, + grouping(unhashable_col, unsortable_col), + count(*), sum(v) + from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col)) + order by 3,5; + unhashable_col | unsortable_col | grouping | count | sum +----------------+----------------+----------+-------+----- + 0000 | | 1 | 1 | 1 + 0001 | | 1 | 1 | 2 + 0010 | | 1 | 1 | 4 + 0011 | | 1 | 1 | 8 + 0000 | | 1 | 1 | 16 + 0001 | | 1 | 1 | 32 + 0010 | | 1 | 1 | 64 + 0011 | | 1 | 1 | 128 + | 1 | 2 | 1 | 1 + | 1 | 2 | 1 | 2 + | 2 | 2 | 1 | 4 + | 2 | 2 | 1 | 8 + | 2 | 2 | 1 | 16 + | 2 | 2 | 1 | 32 + | 1 | 2 | 1 | 64 + | 1 | 2 | 1 | 128 +(16 rows) + +explain (costs off) + select unhashable_col, unsortable_col, + grouping(unhashable_col, unsortable_col), + count(*), sum(v) + from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col)) + order by 3,5; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v)) + -> MixedAggregate + Hash Key: v, unsortable_col + Group Key: v, unhashable_col + -> Sort + Sort Key: v, unhashable_col + -> Seq Scan on gstest4 +(8 rows) + +-- empty input: first is 0 rows, second 1, third 3 etc. 
+select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); + a | b | sum | count +---+---+-----+------- +(0 rows) + +explain (costs off) + select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); + QUERY PLAN +-------------------------------- + HashAggregate + Hash Key: a, b + Hash Key: a + -> Seq Scan on gstest_empty +(4 rows) + +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); + a | b | sum | count +---+---+-----+------- + | | | 0 +(1 row) + +select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); + a | b | sum | count +---+---+-----+------- + | | | 0 + | | | 0 + | | | 0 +(3 rows) + +explain (costs off) + select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),()); + QUERY PLAN +-------------------------------- + MixedAggregate + Hash Key: a, b + Group Key: () + Group Key: () + Group Key: () + -> Seq Scan on gstest_empty +(6 rows) + +select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); + sum | count +-----+------- + | 0 + | 0 + | 0 +(3 rows) + +explain (costs off) + select sum(v), count(*) from gstest_empty group by grouping sets ((),(),()); + QUERY PLAN +-------------------------------- + Aggregate + Group Key: () + Group Key: () + Group Key: () + -> Seq Scan on gstest_empty +(5 rows) + +-- check that functionally dependent cols are not nulled +select a, d, grouping(a,b,c) + from gstest3 + group by grouping sets ((a,b), (a,c)); + a | d | grouping +---+---+---------- + 1 | 1 | 1 + 2 | 2 | 1 + 1 | 1 | 2 + 2 | 2 | 2 +(4 rows) + +explain (costs off) + select a, d, grouping(a,b,c) + from gstest3 + group by grouping sets ((a,b), (a,c)); + QUERY PLAN +--------------------------- + HashAggregate + Hash Key: a, b + Hash Key: a, c + -> Seq Scan on gstest3 +(4 rows) + +-- simple rescan tests +select a, b, sum(v.x) + from (values (1),(2)) v(x), gstest_data(v.x) + group by grouping sets (a,b) + order by 1, 2, 3; + a | b | sum +---+---+----- + 1 | | 3 + 2 | | 6 + | 1 | 3 + | 2 | 3 + | 3 | 3 +(5 rows) + +explain (costs off) + select a, b, sum(v.x) + from (values (1),(2)) v(x), gstest_data(v.x) + group by grouping sets (a,b) + order by 3, 1, 2; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: (sum("*VALUES*".column1)), gstest_data.a, gstest_data.b + -> HashAggregate + Hash Key: gstest_data.a + Hash Key: gstest_data.b + -> Nested Loop + -> Values Scan on "*VALUES*" + -> Function Scan on gstest_data +(8 rows) + +select * + from (values (1),(2)) v(x), + lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; +ERROR: aggregate functions are not allowed in FROM clause of their own query level +LINE 3: lateral (select a, b, sum(v.x) from gstest_data(v.x) ... + ^ +explain (costs off) + select * + from (values (1),(2)) v(x), + lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s; +ERROR: aggregate functions are not allowed in FROM clause of their own query level +LINE 4: lateral (select a, b, sum(v.x) from gstest_data(v.x... 
+ ^ +-- Tests for chained aggregates +select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; + a | b | grouping | sum | count | max +---+---+----------+-----+-------+----- + 1 | 1 | 0 | 21 | 2 | 11 + 1 | 2 | 0 | 25 | 2 | 13 + 1 | 3 | 0 | 14 | 1 | 14 + 2 | 3 | 0 | 15 | 1 | 15 + 3 | 3 | 0 | 16 | 1 | 16 + 3 | 4 | 0 | 17 | 1 | 17 + 4 | 1 | 0 | 37 | 2 | 19 + | | 3 | 21 | 2 | 11 + | | 3 | 21 | 2 | 11 + | | 3 | 25 | 2 | 13 + | | 3 | 25 | 2 | 13 + | | 3 | 14 | 1 | 14 + | | 3 | 14 | 1 | 14 + | | 3 | 15 | 1 | 15 + | | 3 | 15 | 1 | 15 + | | 3 | 16 | 1 | 16 + | | 3 | 16 | 1 | 16 + | | 3 | 17 | 1 | 17 + | | 3 | 17 | 1 | 17 + | | 3 | 37 | 2 | 19 + | | 3 | 37 | 2 | 19 +(21 rows) + +explain (costs off) + select a, b, grouping(a,b), sum(v), count(*), max(v) + from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), (max("*VALUES*".column3)) + -> HashAggregate + Hash Key: "*VALUES*".column1, "*VALUES*".column2 + Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1) + Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2) + -> Values Scan on "*VALUES*" +(7 rows) + +select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum + from gstest2 group by cube (a,b) order by rsum, a, b; + a | b | sum | rsum +---+---+-----+------ + 1 | 1 | 8 | 8 + 1 | 2 | 2 | 10 + 1 | | 10 | 20 + 2 | 2 | 2 | 22 + 2 | | 2 | 24 + | 1 | 8 | 32 + | 2 | 4 | 36 + | | 12 | 48 +(8 rows) + +explain (costs off) + select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum + from gstest2 group by cube (a,b) order by rsum, a, b; + QUERY PLAN +--------------------------------------------- + Sort + Sort Key: (sum((sum(c))) OVER (?)), a, b + -> WindowAgg + -> Sort + Sort Key: a, b + -> MixedAggregate + Hash Key: a, b + Hash Key: a + Hash Key: b + Group Key: () + -> Seq Scan on gstest2 +(11 rows) + +select a, b, sum(v.x) + from (values (1),(2)) v(x), gstest_data(v.x) + group by cube (a,b) order by a,b; + a | b | sum +---+---+----- + 1 | 1 | 1 + 1 | 2 | 1 + 1 | 3 | 1 + 1 | | 3 + 2 | 1 | 2 + 2 | 2 | 2 + 2 | 3 | 2 + 2 | | 6 + | 1 | 3 + | 2 | 3 + | 3 | 3 + | | 9 +(12 rows) + +explain (costs off) + select a, b, sum(v.x) + from (values (1),(2)) v(x), gstest_data(v.x) + group by cube (a,b) order by a,b; + QUERY PLAN +------------------------------------------------ + Sort + Sort Key: gstest_data.a, gstest_data.b + -> MixedAggregate + Hash Key: gstest_data.a, gstest_data.b + Hash Key: gstest_data.a + Hash Key: gstest_data.b + Group Key: () + -> Nested Loop + -> Values Scan on "*VALUES*" + -> Function Scan on gstest_data +(10 rows) + +-- Verify that we correctly handle the child node returning a +-- non-minimal slot, which happens if the input is pre-sorted, +-- e.g. due to an index scan. 
+BEGIN; +SET LOCAL enable_hashagg = false; +EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; + QUERY PLAN +--------------------------------------- + Sort + Sort Key: a, b + -> GroupAggregate + Group Key: a + Group Key: () + Sort Key: b + Group Key: b + -> Sort + Sort Key: a + -> Seq Scan on gstest3 +(10 rows) + +SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; + a | b | count | max | max +---+---+-------+-----+----- + 1 | | 1 | 1 | 1 + 2 | | 1 | 2 | 2 + | 1 | 1 | 1 | 1 + | 2 | 1 | 2 | 2 + | | 2 | 2 | 2 +(5 rows) + +SET LOCAL enable_seqscan = false; +EXPLAIN (COSTS OFF) SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: a, b + -> GroupAggregate + Group Key: a + Group Key: () + Sort Key: b + Group Key: b + -> Index Scan using gstest3_pkey on gstest3 +(8 rows) + +SELECT a, b, count(*), max(a), max(b) FROM gstest3 GROUP BY GROUPING SETS(a, b,()) ORDER BY a, b; + a | b | count | max | max +---+---+-------+-----+----- + 1 | | 1 | 1 | 1 + 2 | | 1 | 2 | 2 + | 1 | 1 | 1 | 1 + | 2 | 1 | 2 | 2 + | | 2 | 2 | 2 +(5 rows) + +COMMIT; +-- More rescan tests +select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; + a | a | four | ten | count +---+---+------+-----+------- + 1 | 1 | 0 | 0 | 50 + 1 | 1 | 0 | 2 | 50 + 1 | 1 | 0 | 4 | 50 + 1 | 1 | 0 | 6 | 50 + 1 | 1 | 0 | 8 | 50 + 1 | 1 | 0 | | 250 + 1 | 1 | 1 | 1 | 50 + 1 | 1 | 1 | 3 | 50 + 1 | 1 | 1 | 5 | 50 + 1 | 1 | 1 | 7 | 50 + 1 | 1 | 1 | 9 | 50 + 1 | 1 | 1 | | 250 + 1 | 1 | 2 | 0 | 50 + 1 | 1 | 2 | 2 | 50 + 1 | 1 | 2 | 4 | 50 + 1 | 1 | 2 | 6 | 50 + 1 | 1 | 2 | 8 | 50 + 1 | 1 | 2 | | 250 + 1 | 1 | 3 | 1 | 50 + 1 | 1 | 3 | 3 | 50 + 1 | 1 | 3 | 5 | 50 + 1 | 1 | 3 | 7 | 50 + 1 | 1 | 3 | 9 | 50 + 1 | 1 | 3 | | 250 + 1 | 1 | | 0 | 100 + 1 | 1 | | 1 | 100 + 1 | 1 | | 2 | 100 + 1 | 1 | | 3 | 100 + 1 | 1 | | 4 | 100 + 1 | 1 | | 5 | 100 + 1 | 1 | | 6 | 100 + 1 | 1 | | 7 | 100 + 1 | 1 | | 8 | 100 + 1 | 1 | | 9 | 100 + 1 | 1 | | | 1000 + 2 | 2 | 0 | 0 | 50 + 2 | 2 | 0 | 2 | 50 + 2 | 2 | 0 | 4 | 50 + 2 | 2 | 0 | 6 | 50 + 2 | 2 | 0 | 8 | 50 + 2 | 2 | 0 | | 250 + 2 | 2 | 1 | 1 | 50 + 2 | 2 | 1 | 3 | 50 + 2 | 2 | 1 | 5 | 50 + 2 | 2 | 1 | 7 | 50 + 2 | 2 | 1 | 9 | 50 + 2 | 2 | 1 | | 250 + 2 | 2 | 2 | 0 | 50 + 2 | 2 | 2 | 2 | 50 + 2 | 2 | 2 | 4 | 50 + 2 | 2 | 2 | 6 | 50 + 2 | 2 | 2 | 8 | 50 + 2 | 2 | 2 | | 250 + 2 | 2 | 3 | 1 | 50 + 2 | 2 | 3 | 3 | 50 + 2 | 2 | 3 | 5 | 50 + 2 | 2 | 3 | 7 | 50 + 2 | 2 | 3 | 9 | 50 + 2 | 2 | 3 | | 250 + 2 | 2 | | 0 | 100 + 2 | 2 | | 1 | 100 + 2 | 2 | | 2 | 100 + 2 | 2 | | 3 | 100 + 2 | 2 | | 4 | 100 + 2 | 2 | | 5 | 100 + 2 | 2 | | 6 | 100 + 2 | 2 | | 7 | 100 + 2 | 2 | | 8 | 100 + 2 | 2 | | 9 | 100 + 2 | 2 | | | 1000 +(70 rows) + +select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); + array +------------------------------------------------------------------------------------------------------------------------------------------------------ + {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"} + 
{"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"} +(2 rows) + +-- Rescan logic changes when there are no empty grouping sets, so test +-- that too: +select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten; + a | a | four | ten | count +---+---+------+-----+------- + 1 | 1 | 0 | | 250 + 1 | 1 | 1 | | 250 + 1 | 1 | 2 | | 250 + 1 | 1 | 3 | | 250 + 1 | 1 | | 0 | 100 + 1 | 1 | | 1 | 100 + 1 | 1 | | 2 | 100 + 1 | 1 | | 3 | 100 + 1 | 1 | | 4 | 100 + 1 | 1 | | 5 | 100 + 1 | 1 | | 6 | 100 + 1 | 1 | | 7 | 100 + 1 | 1 | | 8 | 100 + 1 | 1 | | 9 | 100 + 2 | 2 | 0 | | 250 + 2 | 2 | 1 | | 250 + 2 | 2 | 2 | | 250 + 2 | 2 | 3 | | 250 + 2 | 2 | | 0 | 100 + 2 | 2 | | 1 | 100 + 2 | 2 | | 2 | 100 + 2 | 2 | | 3 | 100 + 2 | 2 | | 4 | 100 + 2 | 2 | | 5 | 100 + 2 | 2 | | 6 | 100 + 2 | 2 | | 7 | 100 + 2 | 2 | | 8 | 100 + 2 | 2 | | 9 | 100 +(28 rows) + +select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a); + array +--------------------------------------------------------------------------------- + {"(1,0,,500)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)"} + {"(2,0,,500)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)"} +(2 rows) + +-- test the knapsack +set enable_indexscan = false; +set hash_mem_multiplier = 1.0; +set work_mem = '64kB'; +explain (costs off) + select unique1, + count(two), count(four), count(ten), + count(hundred), count(thousand), count(twothousand), + count(*) + from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two); + QUERY PLAN +------------------------------- + MixedAggregate + Hash Key: two + Hash Key: four + Hash Key: ten + Hash Key: hundred + Group Key: unique1 + Sort Key: twothousand + Group Key: twothousand + Sort Key: thousand + Group Key: thousand + -> Sort + Sort Key: unique1 + -> Seq Scan on tenk1 +(13 rows) + +explain (costs off) + select unique1, + count(two), count(four), count(ten), + count(hundred), count(thousand), count(twothousand), + count(*) + from tenk1 group by grouping sets (unique1,hundred,ten,four,two); + QUERY PLAN +------------------------------- + MixedAggregate + Hash Key: two + Hash Key: four + Hash Key: ten + Hash Key: hundred + Group Key: unique1 + -> Sort + Sort Key: unique1 + -> Seq Scan on tenk1 +(9 rows) + +set work_mem = '384kB'; +explain (costs off) + select unique1, + count(two), count(four), count(ten), + count(hundred), count(thousand), count(twothousand), + count(*) + from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two); + QUERY PLAN +------------------------------- + MixedAggregate + Hash Key: two + Hash Key: four + Hash Key: ten + Hash Key: hundred + Hash Key: thousand + Group Key: unique1 + Sort Key: twothousand + Group Key: twothousand + -> Sort + Sort Key: unique1 + -> Seq Scan on tenk1 +(12 rows) + +-- check collation-sensitive matching between grouping expressions +-- (similar to a check for aggregates, but there are additional code +-- paths for GROUPING, so check again here) +select v||'a', case grouping(v||'a') when 1 then 1 else 0 end, count(*) + from unnest(array[1,1], array['a','b']) u(i,v) + group by rollup(i, v||'a') order by 1,3; + ?column? 
| case | count +----------+------+------- + aa | 0 | 1 + ba | 0 | 1 + | 1 | 2 + | 1 | 2 +(4 rows) + +select v||'a', case when grouping(v||'a') = 1 then 1 else 0 end, count(*) + from unnest(array[1,1], array['a','b']) u(i,v) + group by rollup(i, v||'a') order by 1,3; + ?column? | case | count +----------+------+------- + aa | 0 | 1 + ba | 0 | 1 + | 1 | 2 + | 1 | 2 +(4 rows) + +-- Bug #16784 +create table bug_16784(i int, j int); +analyze bug_16784; +alter table bug_16784 set (autovacuum_enabled = 'false'); +update pg_class set reltuples = 10 where relname='bug_16784'; +insert into bug_16784 select g/10, g from generate_series(1,40) g; +set work_mem='64kB'; +set enable_sort = false; +select * from + (values (1),(2)) v(a), + lateral (select a, i, j, count(*) from + bug_16784 group by cube(i,j)) s + order by v.a, i, j; + a | a | i | j | count +---+---+---+----+------- + 1 | 1 | 0 | 1 | 1 + 1 | 1 | 0 | 2 | 1 + 1 | 1 | 0 | 3 | 1 + 1 | 1 | 0 | 4 | 1 + 1 | 1 | 0 | 5 | 1 + 1 | 1 | 0 | 6 | 1 + 1 | 1 | 0 | 7 | 1 + 1 | 1 | 0 | 8 | 1 + 1 | 1 | 0 | 9 | 1 + 1 | 1 | 0 | | 9 + 1 | 1 | 1 | 10 | 1 + 1 | 1 | 1 | 11 | 1 + 1 | 1 | 1 | 12 | 1 + 1 | 1 | 1 | 13 | 1 + 1 | 1 | 1 | 14 | 1 + 1 | 1 | 1 | 15 | 1 + 1 | 1 | 1 | 16 | 1 + 1 | 1 | 1 | 17 | 1 + 1 | 1 | 1 | 18 | 1 + 1 | 1 | 1 | 19 | 1 + 1 | 1 | 1 | | 10 + 1 | 1 | 2 | 20 | 1 + 1 | 1 | 2 | 21 | 1 + 1 | 1 | 2 | 22 | 1 + 1 | 1 | 2 | 23 | 1 + 1 | 1 | 2 | 24 | 1 + 1 | 1 | 2 | 25 | 1 + 1 | 1 | 2 | 26 | 1 + 1 | 1 | 2 | 27 | 1 + 1 | 1 | 2 | 28 | 1 + 1 | 1 | 2 | 29 | 1 + 1 | 1 | 2 | | 10 + 1 | 1 | 3 | 30 | 1 + 1 | 1 | 3 | 31 | 1 + 1 | 1 | 3 | 32 | 1 + 1 | 1 | 3 | 33 | 1 + 1 | 1 | 3 | 34 | 1 + 1 | 1 | 3 | 35 | 1 + 1 | 1 | 3 | 36 | 1 + 1 | 1 | 3 | 37 | 1 + 1 | 1 | 3 | 38 | 1 + 1 | 1 | 3 | 39 | 1 + 1 | 1 | 3 | | 10 + 1 | 1 | 4 | 40 | 1 + 1 | 1 | 4 | | 1 + 1 | 1 | | 1 | 1 + 1 | 1 | | 2 | 1 + 1 | 1 | | 3 | 1 + 1 | 1 | | 4 | 1 + 1 | 1 | | 5 | 1 + 1 | 1 | | 6 | 1 + 1 | 1 | | 7 | 1 + 1 | 1 | | 8 | 1 + 1 | 1 | | 9 | 1 + 1 | 1 | | 10 | 1 + 1 | 1 | | 11 | 1 + 1 | 1 | | 12 | 1 + 1 | 1 | | 13 | 1 + 1 | 1 | | 14 | 1 + 1 | 1 | | 15 | 1 + 1 | 1 | | 16 | 1 + 1 | 1 | | 17 | 1 + 1 | 1 | | 18 | 1 + 1 | 1 | | 19 | 1 + 1 | 1 | | 20 | 1 + 1 | 1 | | 21 | 1 + 1 | 1 | | 22 | 1 + 1 | 1 | | 23 | 1 + 1 | 1 | | 24 | 1 + 1 | 1 | | 25 | 1 + 1 | 1 | | 26 | 1 + 1 | 1 | | 27 | 1 + 1 | 1 | | 28 | 1 + 1 | 1 | | 29 | 1 + 1 | 1 | | 30 | 1 + 1 | 1 | | 31 | 1 + 1 | 1 | | 32 | 1 + 1 | 1 | | 33 | 1 + 1 | 1 | | 34 | 1 + 1 | 1 | | 35 | 1 + 1 | 1 | | 36 | 1 + 1 | 1 | | 37 | 1 + 1 | 1 | | 38 | 1 + 1 | 1 | | 39 | 1 + 1 | 1 | | 40 | 1 + 1 | 1 | | | 40 + 2 | 2 | 0 | 1 | 1 + 2 | 2 | 0 | 2 | 1 + 2 | 2 | 0 | 3 | 1 + 2 | 2 | 0 | 4 | 1 + 2 | 2 | 0 | 5 | 1 + 2 | 2 | 0 | 6 | 1 + 2 | 2 | 0 | 7 | 1 + 2 | 2 | 0 | 8 | 1 + 2 | 2 | 0 | 9 | 1 + 2 | 2 | 0 | | 9 + 2 | 2 | 1 | 10 | 1 + 2 | 2 | 1 | 11 | 1 + 2 | 2 | 1 | 12 | 1 + 2 | 2 | 1 | 13 | 1 + 2 | 2 | 1 | 14 | 1 + 2 | 2 | 1 | 15 | 1 + 2 | 2 | 1 | 16 | 1 + 2 | 2 | 1 | 17 | 1 + 2 | 2 | 1 | 18 | 1 + 2 | 2 | 1 | 19 | 1 + 2 | 2 | 1 | | 10 + 2 | 2 | 2 | 20 | 1 + 2 | 2 | 2 | 21 | 1 + 2 | 2 | 2 | 22 | 1 + 2 | 2 | 2 | 23 | 1 + 2 | 2 | 2 | 24 | 1 + 2 | 2 | 2 | 25 | 1 + 2 | 2 | 2 | 26 | 1 + 2 | 2 | 2 | 27 | 1 + 2 | 2 | 2 | 28 | 1 + 2 | 2 | 2 | 29 | 1 + 2 | 2 | 2 | | 10 + 2 | 2 | 3 | 30 | 1 + 2 | 2 | 3 | 31 | 1 + 2 | 2 | 3 | 32 | 1 + 2 | 2 | 3 | 33 | 1 + 2 | 2 | 3 | 34 | 1 + 2 | 2 | 3 | 35 | 1 + 2 | 2 | 3 | 36 | 1 + 2 | 2 | 3 | 37 | 1 + 2 | 2 | 3 | 38 | 1 + 2 | 2 | 3 | 39 | 1 + 2 | 2 | 3 | | 10 + 2 | 2 | 4 | 40 | 1 + 2 | 2 | 4 | | 1 + 2 | 2 | | 1 | 1 + 2 | 2 | | 2 | 1 + 2 | 2 | | 3 | 1 + 2 | 2 
| | 4 | 1 + 2 | 2 | | 5 | 1 + 2 | 2 | | 6 | 1 + 2 | 2 | | 7 | 1 + 2 | 2 | | 8 | 1 + 2 | 2 | | 9 | 1 + 2 | 2 | | 10 | 1 + 2 | 2 | | 11 | 1 + 2 | 2 | | 12 | 1 + 2 | 2 | | 13 | 1 + 2 | 2 | | 14 | 1 + 2 | 2 | | 15 | 1 + 2 | 2 | | 16 | 1 + 2 | 2 | | 17 | 1 + 2 | 2 | | 18 | 1 + 2 | 2 | | 19 | 1 + 2 | 2 | | 20 | 1 + 2 | 2 | | 21 | 1 + 2 | 2 | | 22 | 1 + 2 | 2 | | 23 | 1 + 2 | 2 | | 24 | 1 + 2 | 2 | | 25 | 1 + 2 | 2 | | 26 | 1 + 2 | 2 | | 27 | 1 + 2 | 2 | | 28 | 1 + 2 | 2 | | 29 | 1 + 2 | 2 | | 30 | 1 + 2 | 2 | | 31 | 1 + 2 | 2 | | 32 | 1 + 2 | 2 | | 33 | 1 + 2 | 2 | | 34 | 1 + 2 | 2 | | 35 | 1 + 2 | 2 | | 36 | 1 + 2 | 2 | | 37 | 1 + 2 | 2 | | 38 | 1 + 2 | 2 | | 39 | 1 + 2 | 2 | | 40 | 1 + 2 | 2 | | | 40 +(172 rows) + +-- +-- Compare results between plans using sorting and plans using hash +-- aggregation. Force spilling in both cases by setting work_mem low +-- and altering the statistics. +-- +create table gs_data_1 as +select g%1000 as g1000, g%100 as g100, g%10 as g10, g + from generate_series(0,1999) g; +analyze gs_data_1; +alter table gs_data_1 set (autovacuum_enabled = 'false'); +update pg_class set reltuples = 10 where relname='gs_data_1'; +set work_mem='64kB'; +-- Produce results with sorting. +set enable_sort = true; +set enable_hashagg = false; +set jit_above_cost = 0; +explain (costs off) +select g100, g10, sum(g::numeric), count(*), max(g::text) +from gs_data_1 group by cube (g1000, g100,g10); + QUERY PLAN +------------------------------------ + GroupAggregate + Group Key: g1000, g100, g10 + Group Key: g1000, g100 + Group Key: g1000 + Group Key: () + Sort Key: g100, g10 + Group Key: g100, g10 + Group Key: g100 + Sort Key: g10, g1000 + Group Key: g10, g1000 + Group Key: g10 + -> Sort + Sort Key: g1000, g100, g10 + -> Seq Scan on gs_data_1 +(14 rows) + +create table gs_group_1 as +select g100, g10, sum(g::numeric), count(*), max(g::text) +from gs_data_1 group by cube (g1000, g100,g10); +-- Produce results with hash aggregation. +set enable_hashagg = true; +set enable_sort = false; +explain (costs off) +select g100, g10, sum(g::numeric), count(*), max(g::text) +from gs_data_1 group by cube (g1000, g100,g10); + QUERY PLAN +------------------------------ + MixedAggregate + Hash Key: g1000, g100, g10 + Hash Key: g1000, g100 + Hash Key: g1000 + Hash Key: g100, g10 + Hash Key: g100 + Hash Key: g10, g1000 + Hash Key: g10 + Group Key: () + -> Seq Scan on gs_data_1 +(10 rows) + +create table gs_hash_1 as +select g100, g10, sum(g::numeric), count(*), max(g::text) +from gs_data_1 group by cube (g1000, g100,g10); +set enable_sort = true; +set work_mem to default; +set hash_mem_multiplier to default; +-- Compare results +(select * from gs_hash_1 except select * from gs_group_1) + union all +(select * from gs_group_1 except select * from gs_hash_1); + g100 | g10 | sum | count | max +------+-----+-----+-------+----- +(0 rows) + +drop table gs_group_1; +drop table gs_hash_1; +-- GROUP BY DISTINCT +-- "normal" behavior... 
+select a, b, c +from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) +group by all rollup(a, b), rollup(a, c) +order by a, b, c; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | + 1 | 2 | + 1 | | 3 + 1 | | 3 + 1 | | + 1 | | + 1 | | + 4 | | 6 + 4 | | 6 + 4 | | 6 + 4 | | + 4 | | + 4 | | + 4 | | + 4 | | + 7 | 8 | 9 + 7 | 8 | + 7 | 8 | + 7 | | 9 + 7 | | 9 + 7 | | + 7 | | + 7 | | + | | +(25 rows) + +-- ...which is also the default +select a, b, c +from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) +group by rollup(a, b), rollup(a, c) +order by a, b, c; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | + 1 | 2 | + 1 | | 3 + 1 | | 3 + 1 | | + 1 | | + 1 | | + 4 | | 6 + 4 | | 6 + 4 | | 6 + 4 | | + 4 | | + 4 | | + 4 | | + 4 | | + 7 | 8 | 9 + 7 | 8 | + 7 | 8 | + 7 | | 9 + 7 | | 9 + 7 | | + 7 | | + 7 | | + | | +(25 rows) + +-- "group by distinct" behavior... +select a, b, c +from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) +group by distinct rollup(a, b), rollup(a, c) +order by a, b, c; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | + 1 | | 3 + 1 | | + 4 | | 6 + 4 | | 6 + 4 | | + 4 | | + 7 | 8 | 9 + 7 | 8 | + 7 | | 9 + 7 | | + | | +(13 rows) + +-- ...which is not the same as "select distinct" +select distinct a, b, c +from (values (1, 2, 3), (4, null, 6), (7, 8, 9)) as t (a, b, c) +group by rollup(a, b), rollup(a, c) +order by a, b, c; + a | b | c +---+---+--- + 1 | 2 | 3 + 1 | 2 | + 1 | | 3 + 1 | | + 4 | | 6 + 4 | | + 7 | 8 | 9 + 7 | 8 | + 7 | | 9 + 7 | | + | | +(11 rows) + +-- test handling of outer GroupingFunc within subqueries +explain (costs off) +select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); + QUERY PLAN +--------------------------- + MixedAggregate + Hash Key: $2 + Group Key: () + InitPlan 1 (returns $1) + -> Result + InitPlan 3 (returns $2) + -> Result + -> Result + SubPlan 2 + -> Result +(10 rows) + +select (select grouping(v1)) from (values ((select 1))) v(v1) group by cube(v1); + grouping +---------- + 1 + 0 +(2 rows) + +explain (costs off) +select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1; + QUERY PLAN +--------------------------- + GroupAggregate + InitPlan 1 (returns $1) + -> Result + InitPlan 3 (returns $2) + -> Result + -> Result + SubPlan 2 + -> Result +(8 rows) + +select (select grouping(v1)) from (values ((select 1))) v(v1) group by v1; + grouping +---------- + 0 +(1 row) + +-- end diff --git a/src/test/regress/expected/guc.out b/src/test/regress/expected/guc.out new file mode 100644 index 0000000..127c953 --- /dev/null +++ b/src/test/regress/expected/guc.out @@ -0,0 +1,890 @@ +-- pg_regress should ensure that this default value applies; however +-- we can't rely on any specific default value of vacuum_cost_delay +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +-- SET to some nondefault value +SET vacuum_cost_delay TO 40; +SET datestyle = 'ISO, YMD'; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- SET LOCAL has no effect outside of a transaction +SET LOCAL vacuum_cost_delay TO 50; +WARNING: SET LOCAL can only be used in transaction blocks +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SET LOCAL datestyle = 'SQL'; +WARNING: SET LOCAL can only be used in transaction blocks +SHOW datestyle; + DateStyle 
+----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- SET LOCAL within a transaction that commits +BEGIN; +SET LOCAL vacuum_cost_delay TO 50; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 50ms +(1 row) + +SET LOCAL datestyle = 'SQL'; +SHOW datestyle; + DateStyle +----------- + SQL, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------- + 08/13/2006 12:34:56 PDT +(1 row) + +COMMIT; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- SET should be reverted after ROLLBACK +BEGIN; +SET vacuum_cost_delay TO 60; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 60ms +(1 row) + +SET datestyle = 'German'; +SHOW datestyle; + DateStyle +------------- + German, DMY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------- + 13.08.2006 12:34:56 PDT +(1 row) + +ROLLBACK; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- Some tests with subtransactions +BEGIN; +SET vacuum_cost_delay TO 70; +SET datestyle = 'MDY'; +SHOW datestyle; + DateStyle +----------- + ISO, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +SAVEPOINT first_sp; +SET vacuum_cost_delay TO 80.1; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 80100us +(1 row) + +SET datestyle = 'German, DMY'; +SHOW datestyle; + DateStyle +------------- + German, DMY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------- + 13.08.2006 12:34:56 PDT +(1 row) + +ROLLBACK TO first_sp; +SHOW datestyle; + DateStyle +----------- + ISO, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +SAVEPOINT second_sp; +SET vacuum_cost_delay TO '900us'; +SET datestyle = 'SQL, YMD'; +SHOW datestyle; + DateStyle +----------- + SQL, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------- + 08/13/2006 12:34:56 PDT +(1 row) + +SAVEPOINT third_sp; +SET vacuum_cost_delay TO 100; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 100ms +(1 row) + +SET datestyle = 'Postgres, MDY'; +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------------ + Sun Aug 13 12:34:56 2006 PDT +(1 row) + +ROLLBACK TO third_sp; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 900us +(1 row) + +SHOW datestyle; + DateStyle +----------- + SQL, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------- + 08/13/2006 12:34:56 PDT +(1 row) + +ROLLBACK TO second_sp; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 70ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 
12:34:56-07 +(1 row) + +ROLLBACK; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- SET LOCAL with Savepoints +BEGIN; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +SAVEPOINT sp; +SET LOCAL vacuum_cost_delay TO 30; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 30ms +(1 row) + +SET LOCAL datestyle = 'Postgres, MDY'; +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------------ + Sun Aug 13 12:34:56 2006 PDT +(1 row) + +ROLLBACK TO sp; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +ROLLBACK; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- SET LOCAL persists through RELEASE (which was not true in 8.0-8.2) +BEGIN; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +SAVEPOINT sp; +SET LOCAL vacuum_cost_delay TO 30; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 30ms +(1 row) + +SET LOCAL datestyle = 'Postgres, MDY'; +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------------ + Sun Aug 13 12:34:56 2006 PDT +(1 row) + +RELEASE SAVEPOINT sp; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 30ms +(1 row) + +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------------ + Sun Aug 13 12:34:56 2006 PDT +(1 row) + +ROLLBACK; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- SET followed by SET LOCAL +BEGIN; +SET vacuum_cost_delay TO 40; +SET LOCAL vacuum_cost_delay TO 50; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 50ms +(1 row) + +SET datestyle = 'ISO, DMY'; +SET LOCAL datestyle = 'Postgres, MDY'; +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------------ + Sun Aug 13 12:34:56 2006 PDT +(1 row) + +COMMIT; +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 40ms +(1 row) + +SHOW datestyle; + DateStyle +----------- + ISO, DMY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz 
+------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +-- +-- Test RESET. We use datestyle because the reset value is forced by +-- pg_regress, so it doesn't depend on the installation's configuration. +-- +SET datestyle = iso, ymd; +SHOW datestyle; + DateStyle +----------- + ISO, YMD +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------ + 2006-08-13 12:34:56-07 +(1 row) + +RESET datestyle; +SHOW datestyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT '2006-08-13 12:34:56'::timestamptz; + timestamptz +------------------------------ + Sun Aug 13 12:34:56 2006 PDT +(1 row) + +-- Test some simple error cases +SET seq_page_cost TO 'NaN'; +ERROR: invalid value for parameter "seq_page_cost": "NaN" +SET vacuum_cost_delay TO '10s'; +ERROR: 10000 ms is outside the valid range for parameter "vacuum_cost_delay" (0 .. 100) +SET no_such_variable TO 42; +ERROR: unrecognized configuration parameter "no_such_variable" +-- Test "custom" GUCs created on the fly (which aren't really an +-- intended feature, but many people use them). +SHOW custom.my_guc; -- error, not known yet +ERROR: unrecognized configuration parameter "custom.my_guc" +SET custom.my_guc = 42; +SHOW custom.my_guc; + custom.my_guc +--------------- + 42 +(1 row) + +RESET custom.my_guc; -- this makes it go to empty, not become unknown again +SHOW custom.my_guc; + custom.my_guc +--------------- + +(1 row) + +SET custom.my.qualified.guc = 'foo'; +SHOW custom.my.qualified.guc; + custom.my.qualified.guc +------------------------- + foo +(1 row) + +SET custom."bad-guc" = 42; -- disallowed because -c cannot set this name +ERROR: invalid configuration parameter name "custom.bad-guc" +DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. +SHOW custom."bad-guc"; +ERROR: unrecognized configuration parameter "custom.bad-guc" +SET special."weird name" = 'foo'; -- could be allowed, but we choose not to +ERROR: invalid configuration parameter name "special.weird name" +DETAIL: Custom parameter names must be two or more simple identifiers separated by dots. +SHOW special."weird name"; +ERROR: unrecognized configuration parameter "special.weird name" +-- Check what happens when you try to set a "custom" GUC within the +-- namespace of an extension. +SET plpgsql.extra_foo_warnings = true; -- allowed if plpgsql is not loaded yet +LOAD 'plpgsql'; -- this will throw a warning and delete the variable +WARNING: invalid configuration parameter name "plpgsql.extra_foo_warnings", removing it +DETAIL: "plpgsql" is now a reserved prefix. +SET plpgsql.extra_foo_warnings = true; -- now, it's an error +ERROR: invalid configuration parameter name "plpgsql.extra_foo_warnings" +DETAIL: "plpgsql" is a reserved prefix. 
+SHOW plpgsql.extra_foo_warnings; +ERROR: unrecognized configuration parameter "plpgsql.extra_foo_warnings" +-- +-- Test DISCARD TEMP +-- +CREATE TEMP TABLE reset_test ( data text ) ON COMMIT DELETE ROWS; +SELECT relname FROM pg_class WHERE relname = 'reset_test'; + relname +------------ + reset_test +(1 row) + +DISCARD TEMP; +SELECT relname FROM pg_class WHERE relname = 'reset_test'; + relname +--------- +(0 rows) + +-- +-- Test DISCARD ALL +-- +-- do changes +DECLARE foo CURSOR WITH HOLD FOR SELECT 1; +PREPARE foo AS SELECT 1; +LISTEN foo_event; +SET vacuum_cost_delay = 13; +CREATE TEMP TABLE tmp_foo (data text) ON COMMIT DELETE ROWS; +CREATE ROLE regress_guc_user; +SET SESSION AUTHORIZATION regress_guc_user; +-- look changes +SELECT pg_listening_channels(); + pg_listening_channels +----------------------- + foo_event +(1 row) + +SELECT name FROM pg_prepared_statements; + name +------ + foo +(1 row) + +SELECT name FROM pg_cursors; + name +------ + foo +(1 row) + +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 13ms +(1 row) + +SELECT relname from pg_class where relname = 'tmp_foo'; + relname +--------- + tmp_foo +(1 row) + +SELECT current_user = 'regress_guc_user'; + ?column? +---------- + t +(1 row) + +-- discard everything +DISCARD ALL; +-- look again +SELECT pg_listening_channels(); + pg_listening_channels +----------------------- +(0 rows) + +SELECT name FROM pg_prepared_statements; + name +------ +(0 rows) + +SELECT name FROM pg_cursors; + name +------ +(0 rows) + +SHOW vacuum_cost_delay; + vacuum_cost_delay +------------------- + 0 +(1 row) + +SELECT relname from pg_class where relname = 'tmp_foo'; + relname +--------- +(0 rows) + +SELECT current_user = 'regress_guc_user'; + ?column? +---------- + f +(1 row) + +DROP ROLE regress_guc_user; +-- +-- search_path should react to changes in pg_namespace +-- +set search_path = foo, public, not_there_initially; +select current_schemas(false); + current_schemas +----------------- + {public} +(1 row) + +create schema not_there_initially; +select current_schemas(false); + current_schemas +------------------------------ + {public,not_there_initially} +(1 row) + +drop schema not_there_initially; +select current_schemas(false); + current_schemas +----------------- + {public} +(1 row) + +reset search_path; +-- +-- Tests for function-local GUC settings +-- +set work_mem = '3MB'; +create function report_guc(text) returns text as +$$ select current_setting($1) $$ language sql +set work_mem = '1MB'; +select report_guc('work_mem'), current_setting('work_mem'); + report_guc | current_setting +------------+----------------- + 1MB | 3MB +(1 row) + +alter function report_guc(text) set work_mem = '2MB'; +select report_guc('work_mem'), current_setting('work_mem'); + report_guc | current_setting +------------+----------------- + 2MB | 3MB +(1 row) + +alter function report_guc(text) reset all; +select report_guc('work_mem'), current_setting('work_mem'); + report_guc | current_setting +------------+----------------- + 3MB | 3MB +(1 row) + +-- SET LOCAL is restricted by a function SET option +create or replace function myfunc(int) returns text as $$ +begin + set local work_mem = '2MB'; + return current_setting('work_mem'); +end $$ +language plpgsql +set work_mem = '1MB'; +select myfunc(0), current_setting('work_mem'); + myfunc | current_setting +--------+----------------- + 2MB | 3MB +(1 row) + +alter function myfunc(int) reset all; +select myfunc(0), current_setting('work_mem'); + myfunc | current_setting +--------+----------------- + 2MB | 
2MB +(1 row) + +set work_mem = '3MB'; +-- but SET isn't +create or replace function myfunc(int) returns text as $$ +begin + set work_mem = '2MB'; + return current_setting('work_mem'); +end $$ +language plpgsql +set work_mem = '1MB'; +select myfunc(0), current_setting('work_mem'); + myfunc | current_setting +--------+----------------- + 2MB | 2MB +(1 row) + +set work_mem = '3MB'; +-- it should roll back on error, though +create or replace function myfunc(int) returns text as $$ +begin + set work_mem = '2MB'; + perform 1/$1; + return current_setting('work_mem'); +end $$ +language plpgsql +set work_mem = '1MB'; +select myfunc(0); +ERROR: division by zero +CONTEXT: SQL statement "SELECT 1/$1" +PL/pgSQL function myfunc(integer) line 4 at PERFORM +select current_setting('work_mem'); + current_setting +----------------- + 3MB +(1 row) + +select myfunc(1), current_setting('work_mem'); + myfunc | current_setting +--------+----------------- + 2MB | 2MB +(1 row) + +-- check current_setting()'s behavior with invalid setting name +select current_setting('nosuch.setting'); -- FAIL +ERROR: unrecognized configuration parameter "nosuch.setting" +select current_setting('nosuch.setting', false); -- FAIL +ERROR: unrecognized configuration parameter "nosuch.setting" +select current_setting('nosuch.setting', true) is null; + ?column? +---------- + t +(1 row) + +-- after this, all three cases should yield 'nada' +set nosuch.setting = 'nada'; +select current_setting('nosuch.setting'); + current_setting +----------------- + nada +(1 row) + +select current_setting('nosuch.setting', false); + current_setting +----------------- + nada +(1 row) + +select current_setting('nosuch.setting', true); + current_setting +----------------- + nada +(1 row) + +-- Normally, CREATE FUNCTION should complain about invalid values in +-- function SET options; but not if check_function_bodies is off, +-- because that creates ordering hazards for pg_dump +create function func_with_bad_set() returns int as $$ select 1 $$ +language sql +set default_text_search_config = no_such_config; +NOTICE: text search configuration "no_such_config" does not exist +ERROR: invalid value for parameter "default_text_search_config": "no_such_config" +set check_function_bodies = off; +create function func_with_bad_set() returns int as $$ select 1 $$ +language sql +set default_text_search_config = no_such_config; +NOTICE: text search configuration "no_such_config" does not exist +select func_with_bad_set(); +ERROR: invalid value for parameter "default_text_search_config": "no_such_config" +reset check_function_bodies; +set default_with_oids to f; +-- Should not allow to set it to true. 
+set default_with_oids to t; +ERROR: tables declared WITH OIDS are not supported +-- Test GUC categories and flag patterns +SELECT pg_settings_get_flags(NULL); + pg_settings_get_flags +----------------------- + +(1 row) + +SELECT pg_settings_get_flags('does_not_exist'); + pg_settings_get_flags +----------------------- + +(1 row) + +CREATE TABLE tab_settings_flags AS SELECT name, category, + 'EXPLAIN' = ANY(flags) AS explain, + 'NO_RESET' = ANY(flags) AS no_reset, + 'NO_RESET_ALL' = ANY(flags) AS no_reset_all, + 'NOT_IN_SAMPLE' = ANY(flags) AS not_in_sample, + 'RUNTIME_COMPUTED' = ANY(flags) AS runtime_computed + FROM pg_show_all_settings() AS psas, + pg_settings_get_flags(psas.name) AS flags; +-- Developer GUCs should be flagged with GUC_NOT_IN_SAMPLE: +SELECT name FROM tab_settings_flags + WHERE category = 'Developer Options' AND NOT not_in_sample + ORDER BY 1; + name +------ +(0 rows) + +-- Most query-tuning GUCs are flagged as valid for EXPLAIN. +-- default_statistics_target is an exception. +SELECT name FROM tab_settings_flags + WHERE category ~ '^Query Tuning' AND NOT explain + ORDER BY 1; + name +--------------------------- + default_statistics_target +(1 row) + +-- Runtime-computed GUCs should be part of the preset category. +SELECT name FROM tab_settings_flags + WHERE NOT category = 'Preset Options' AND runtime_computed + ORDER BY 1; + name +------ +(0 rows) + +-- Preset GUCs are flagged as NOT_IN_SAMPLE. +SELECT name FROM tab_settings_flags + WHERE category = 'Preset Options' AND NOT not_in_sample + ORDER BY 1; + name +------ +(0 rows) + +-- NO_RESET implies NO_RESET_ALL. +SELECT name FROM tab_settings_flags + WHERE no_reset AND NOT no_reset_all + ORDER BY 1; + name +------ +(0 rows) + +DROP TABLE tab_settings_flags; diff --git a/src/test/regress/expected/hash_func.out b/src/test/regress/expected/hash_func.out new file mode 100644 index 0000000..8e23dc3 --- /dev/null +++ b/src/test/regress/expected/hash_func.out @@ -0,0 +1,374 @@ +-- +-- Test hash functions +-- +-- When the salt is 0, the extended hash function should produce a result +-- whose low 32 bits match the standard hash function. When the salt is +-- not 0, we should get a different result. 
+-- +SELECT v as value, hashint2(v)::bit(32) as standard, + hashint2extended(v, 0)::bit(32) as extended0, + hashint2extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0::int2), (1::int2), (17::int2), (42::int2)) x(v) +WHERE hashint2(v)::bit(32) != hashint2extended(v, 0)::bit(32) + OR hashint2(v)::bit(32) = hashint2extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashint4(v)::bit(32) as standard, + hashint4extended(v, 0)::bit(32) as extended0, + hashint4extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashint4(v)::bit(32) != hashint4extended(v, 0)::bit(32) + OR hashint4(v)::bit(32) = hashint4extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashint8(v)::bit(32) as standard, + hashint8extended(v, 0)::bit(32) as extended0, + hashint8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashint8(v)::bit(32) != hashint8extended(v, 0)::bit(32) + OR hashint8(v)::bit(32) = hashint8extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashfloat4(v)::bit(32) as standard, + hashfloat4extended(v, 0)::bit(32) as extended0, + hashfloat4extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashfloat4(v)::bit(32) != hashfloat4extended(v, 0)::bit(32) + OR hashfloat4(v)::bit(32) = hashfloat4extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashfloat8(v)::bit(32) as standard, + hashfloat8extended(v, 0)::bit(32) as extended0, + hashfloat8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashfloat8(v)::bit(32) != hashfloat8extended(v, 0)::bit(32) + OR hashfloat8(v)::bit(32) = hashfloat8extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashoid(v)::bit(32) as standard, + hashoidextended(v, 0)::bit(32) as extended0, + hashoidextended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1), (17), (42), (550273), (207112489)) x(v) +WHERE hashoid(v)::bit(32) != hashoidextended(v, 0)::bit(32) + OR hashoid(v)::bit(32) = hashoidextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashchar(v)::bit(32) as standard, + hashcharextended(v, 0)::bit(32) as extended0, + hashcharextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::"char"), ('1'), ('x'), ('X'), ('p'), ('N')) x(v) +WHERE hashchar(v)::bit(32) != hashcharextended(v, 0)::bit(32) + OR hashchar(v)::bit(32) = hashcharextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashname(v)::bit(32) as standard, + hashnameextended(v, 0)::bit(32) as extended0, + hashnameextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashname(v)::bit(32) != hashnameextended(v, 0)::bit(32) + OR hashname(v)::bit(32) = hashnameextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 
rows) + +SELECT v as value, hashtext(v)::bit(32) as standard, + hashtextextended(v, 0)::bit(32) as extended0, + hashtextextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashtext(v)::bit(32) != hashtextextended(v, 0)::bit(32) + OR hashtext(v)::bit(32) = hashtextextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashoidvector(v)::bit(32) as standard, + hashoidvectorextended(v, 0)::bit(32) as extended0, + hashoidvectorextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::oidvector), ('0 1 2 3 4'), ('17 18 19 20'), + ('42 43 42 45'), ('550273 550273 570274'), + ('207112489 207112499 21512 2155 372325 1363252')) x(v) +WHERE hashoidvector(v)::bit(32) != hashoidvectorextended(v, 0)::bit(32) + OR hashoidvector(v)::bit(32) = hashoidvectorextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_aclitem(v)::bit(32) as standard, + hash_aclitem_extended(v, 0)::bit(32) as extended0, + hash_aclitem_extended(v, 1)::bit(32) as extended1 +FROM (SELECT DISTINCT(relacl[1]) FROM pg_class LIMIT 10) x(v) +WHERE hash_aclitem(v)::bit(32) != hash_aclitem_extended(v, 0)::bit(32) + OR hash_aclitem(v)::bit(32) = hash_aclitem_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashmacaddr(v)::bit(32) as standard, + hashmacaddrextended(v, 0)::bit(32) as extended0, + hashmacaddrextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::macaddr), ('08:00:2b:01:02:04'), ('08:00:2b:01:02:04'), + ('e2:7f:51:3e:70:49'), ('d6:a9:4a:78:1c:d5'), + ('ea:29:b1:5e:1f:a5')) x(v) +WHERE hashmacaddr(v)::bit(32) != hashmacaddrextended(v, 0)::bit(32) + OR hashmacaddr(v)::bit(32) = hashmacaddrextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashinet(v)::bit(32) as standard, + hashinetextended(v, 0)::bit(32) as extended0, + hashinetextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::inet), ('192.168.100.128/25'), ('192.168.100.0/8'), + ('172.168.10.126/16'), ('172.18.103.126/24'), ('192.188.13.16/32')) x(v) +WHERE hashinet(v)::bit(32) != hashinetextended(v, 0)::bit(32) + OR hashinet(v)::bit(32) = hashinetextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_numeric(v)::bit(32) as standard, + hash_numeric_extended(v, 0)::bit(32) as extended0, + hash_numeric_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (0), (1.149484958), (17.149484958), (42.149484958), + (149484958.550273), (2071124898672)) x(v) +WHERE hash_numeric(v)::bit(32) != hash_numeric_extended(v, 0)::bit(32) + OR hash_numeric(v)::bit(32) = hash_numeric_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hashmacaddr8(v)::bit(32) as standard, + hashmacaddr8extended(v, 0)::bit(32) as extended0, + hashmacaddr8extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::macaddr8), ('08:00:2b:01:02:04:36:49'), + ('08:00:2b:01:02:04:f0:e8'), ('e2:7f:51:3e:70:49:16:29'), + ('d6:a9:4a:78:1c:d5:47:32'), ('ea:29:b1:5e:1f:a5')) x(v) +WHERE hashmacaddr8(v)::bit(32) != hashmacaddr8extended(v, 0)::bit(32) + OR hashmacaddr8(v)::bit(32) = 
hashmacaddr8extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_array(v)::bit(32) as standard, + hash_array_extended(v, 0)::bit(32) as extended0, + hash_array_extended(v, 1)::bit(32) as extended1 +FROM (VALUES ('{0}'::int4[]), ('{0,1,2,3,4}'), ('{17,18,19,20}'), + ('{42,34,65,98}'), ('{550273,590027, 870273}'), + ('{207112489, 807112489}')) x(v) +WHERE hash_array(v)::bit(32) != hash_array_extended(v, 0)::bit(32) + OR hash_array(v)::bit(32) = hash_array_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +-- array hashing with non-hashable element type +SELECT v as value, hash_array(v)::bit(32) as standard +FROM (VALUES ('{0}'::money[])) x(v); +ERROR: could not identify a hash function for type money +SELECT v as value, hash_array_extended(v, 0)::bit(32) as extended0 +FROM (VALUES ('{0}'::money[])) x(v); +ERROR: could not identify an extended hash function for type money +SELECT v as value, hashbpchar(v)::bit(32) as standard, + hashbpcharextended(v, 0)::bit(32) as extended0, + hashbpcharextended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL), ('PostgreSQL'), ('eIpUEtqmY89'), ('AXKEJBTK'), + ('muop28x03'), ('yi3nm0d73')) x(v) +WHERE hashbpchar(v)::bit(32) != hashbpcharextended(v, 0)::bit(32) + OR hashbpchar(v)::bit(32) = hashbpcharextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, time_hash(v)::bit(32) as standard, + time_hash_extended(v, 0)::bit(32) as extended0, + time_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::time), ('11:09:59'), ('1:09:59'), ('11:59:59'), + ('7:9:59'), ('5:15:59')) x(v) +WHERE time_hash(v)::bit(32) != time_hash_extended(v, 0)::bit(32) + OR time_hash(v)::bit(32) = time_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, timetz_hash(v)::bit(32) as standard, + timetz_hash_extended(v, 0)::bit(32) as extended0, + timetz_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::timetz), ('00:11:52.518762-07'), ('00:11:52.51762-08'), + ('00:11:52.62-01'), ('00:11:52.62+01'), ('11:59:59+04')) x(v) +WHERE timetz_hash(v)::bit(32) != timetz_hash_extended(v, 0)::bit(32) + OR timetz_hash(v)::bit(32) = timetz_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, interval_hash(v)::bit(32) as standard, + interval_hash_extended(v, 0)::bit(32) as extended0, + interval_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::interval), + ('5 month 7 day 46 minutes'), ('1 year 7 day 46 minutes'), + ('1 year 7 month 20 day 46 minutes'), ('5 month'), + ('17 year 11 month 7 day 9 hours 46 minutes 5 seconds')) x(v) +WHERE interval_hash(v)::bit(32) != interval_hash_extended(v, 0)::bit(32) + OR interval_hash(v)::bit(32) = interval_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, timestamp_hash(v)::bit(32) as standard, + timestamp_hash_extended(v, 0)::bit(32) as extended0, + timestamp_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::timestamp), ('2017-08-22 00:09:59.518762'), + ('2015-08-20 00:11:52.51762-08'), + ('2017-05-22 00:11:52.62-01'), + ('2013-08-22 00:11:52.62+01'), ('2013-08-22 11:59:59+04')) 
x(v) +WHERE timestamp_hash(v)::bit(32) != timestamp_hash_extended(v, 0)::bit(32) + OR timestamp_hash(v)::bit(32) = timestamp_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, uuid_hash(v)::bit(32) as standard, + uuid_hash_extended(v, 0)::bit(32) as extended0, + uuid_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::uuid), ('a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'), + ('5a9ba4ac-8d6f-11e7-bb31-be2e44b06b34'), + ('99c6705c-d939-461c-a3c9-1690ad64ed7b'), + ('7deed3ca-8d6f-11e7-bb31-be2e44b06b34'), + ('9ad46d4f-6f2a-4edd-aadb-745993928e1e')) x(v) +WHERE uuid_hash(v)::bit(32) != uuid_hash_extended(v, 0)::bit(32) + OR uuid_hash(v)::bit(32) = uuid_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, pg_lsn_hash(v)::bit(32) as standard, + pg_lsn_hash_extended(v, 0)::bit(32) as extended0, + pg_lsn_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::pg_lsn), ('16/B374D84'), ('30/B374D84'), + ('255/B374D84'), ('25/B379D90'), ('900/F37FD90')) x(v) +WHERE pg_lsn_hash(v)::bit(32) != pg_lsn_hash_extended(v, 0)::bit(32) + OR pg_lsn_hash(v)::bit(32) = pg_lsn_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy'); +SELECT v as value, hashenum(v)::bit(32) as standard, + hashenumextended(v, 0)::bit(32) as extended0, + hashenumextended(v, 1)::bit(32) as extended1 +FROM (VALUES ('sad'::mood), ('ok'), ('happy')) x(v) +WHERE hashenum(v)::bit(32) != hashenumextended(v, 0)::bit(32) + OR hashenum(v)::bit(32) = hashenumextended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +DROP TYPE mood; +SELECT v as value, jsonb_hash(v)::bit(32) as standard, + jsonb_hash_extended(v, 0)::bit(32) as extended0, + jsonb_hash_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (NULL::jsonb), + ('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'), + ('{"foo": [true, "bar"], "tags": {"e": 1, "f": null}}'), + ('{"g": {"h": "value"}}')) x(v) +WHERE jsonb_hash(v)::bit(32) != jsonb_hash_extended(v, 0)::bit(32) + OR jsonb_hash(v)::bit(32) = jsonb_hash_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_range(v)::bit(32) as standard, + hash_range_extended(v, 0)::bit(32) as extended0, + hash_range_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (int4range(10, 20)), (int4range(23, 43)), + (int4range(5675, 550273)), + (int4range(550274, 1550274)), (int4range(1550275, 208112489))) x(v) +WHERE hash_range(v)::bit(32) != hash_range_extended(v, 0)::bit(32) + OR hash_range(v)::bit(32) = hash_range_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +SELECT v as value, hash_multirange(v)::bit(32) as standard, + hash_multirange_extended(v, 0)::bit(32) as extended0, + hash_multirange_extended(v, 1)::bit(32) as extended1 +FROM (VALUES ('{[10,20)}'::int4multirange), ('{[23, 43]}'::int4multirange), + ('{[5675, 550273)}'::int4multirange), + ('{[550274, 1550274)}'::int4multirange), + ('{[1550275, 208112489)}'::int4multirange)) x(v) +WHERE hash_multirange(v)::bit(32) != hash_multirange_extended(v, 0)::bit(32) + OR hash_multirange(v)::bit(32) = hash_multirange_extended(v, 
1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +CREATE TYPE hash_test_t1 AS (a int, b text); +SELECT v as value, hash_record(v)::bit(32) as standard, + hash_record_extended(v, 0)::bit(32) as extended0, + hash_record_extended(v, 1)::bit(32) as extended1 +FROM (VALUES (row(1, 'aaa')::hash_test_t1, row(2, 'bbb'), row(-1, 'ccc'))) x(v) +WHERE hash_record(v)::bit(32) != hash_record_extended(v, 0)::bit(32) + OR hash_record(v)::bit(32) = hash_record_extended(v, 1)::bit(32); + value | standard | extended0 | extended1 +-------+----------+-----------+----------- +(0 rows) + +DROP TYPE hash_test_t1; +-- record hashing with non-hashable field type +CREATE TYPE hash_test_t2 AS (a money, b text); +SELECT v as value, hash_record(v)::bit(32) as standard +FROM (VALUES (row(1, 'aaa')::hash_test_t2)) x(v); +ERROR: could not identify a hash function for type money +SELECT v as value, hash_record_extended(v, 0)::bit(32) as extended0 +FROM (VALUES (row(1, 'aaa')::hash_test_t2)) x(v); +ERROR: could not identify an extended hash function for type money +DROP TYPE hash_test_t2; +-- +-- Check special cases for specific data types +-- +SELECT hashfloat4('0'::float4) = hashfloat4('-0'::float4) AS t; + t +--- + t +(1 row) + +SELECT hashfloat4('NaN'::float4) = hashfloat4(-'NaN'::float4) AS t; + t +--- + t +(1 row) + +SELECT hashfloat8('0'::float8) = hashfloat8('-0'::float8) AS t; + t +--- + t +(1 row) + +SELECT hashfloat8('NaN'::float8) = hashfloat8(-'NaN'::float8) AS t; + t +--- + t +(1 row) + +SELECT hashfloat4('NaN'::float4) = hashfloat8('NaN'::float8) AS t; + t +--- + t +(1 row) + diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out new file mode 100644 index 0000000..a2036a1 --- /dev/null +++ b/src/test/regress/expected/hash_index.out @@ -0,0 +1,292 @@ +-- +-- HASH_INDEX +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +CREATE TABLE hash_i4_heap ( + seqno int4, + random int4 +); +CREATE TABLE hash_name_heap ( + seqno int4, + random name +); +CREATE TABLE hash_txt_heap ( + seqno int4, + random text +); +CREATE TABLE hash_f8_heap ( + seqno int4, + random float8 +); +\set filename :abs_srcdir '/data/hash.data' +COPY hash_i4_heap FROM :'filename'; +COPY hash_name_heap FROM :'filename'; +COPY hash_txt_heap FROM :'filename'; +COPY hash_f8_heap FROM :'filename'; +-- the data in this file has a lot of duplicates in the index key +-- fields, leading to long bucket chains and lots of table expansion. +-- this is therefore a stress test of the bucket overflow code (unlike +-- the data in hash.data, which has unique index keys). +-- +-- \set filename :abs_srcdir '/data/hashovfl.data' +-- COPY hash_ovfl_heap FROM :'filename'; +ANALYZE hash_i4_heap; +ANALYZE hash_name_heap; +ANALYZE hash_txt_heap; +ANALYZE hash_f8_heap; +CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops); +CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops); +CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops); +CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=60); +-- +-- Also try building functional, expressional, and partial indexes on +-- tables that already contain data. 
+-- +create unique index hash_f8_index_1 on hash_f8_heap(abs(random)); +create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random); +create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000; +-- +-- hash index +-- grep 843938989 hash.data +-- +SELECT * FROM hash_i4_heap + WHERE hash_i4_heap.random = 843938989; + seqno | random +-------+----------- + 15 | 843938989 +(1 row) + +-- +-- hash index +-- grep 66766766 hash.data +-- +SELECT * FROM hash_i4_heap + WHERE hash_i4_heap.random = 66766766; + seqno | random +-------+-------- +(0 rows) + +-- +-- hash index +-- grep 1505703298 hash.data +-- +SELECT * FROM hash_name_heap + WHERE hash_name_heap.random = '1505703298'::name; + seqno | random +-------+------------ + 9838 | 1505703298 +(1 row) + +-- +-- hash index +-- grep 7777777 hash.data +-- +SELECT * FROM hash_name_heap + WHERE hash_name_heap.random = '7777777'::name; + seqno | random +-------+-------- +(0 rows) + +-- +-- hash index +-- grep 1351610853 hash.data +-- +SELECT * FROM hash_txt_heap + WHERE hash_txt_heap.random = '1351610853'::text; + seqno | random +-------+------------ + 5677 | 1351610853 +(1 row) + +-- +-- hash index +-- grep 111111112222222233333333 hash.data +-- +SELECT * FROM hash_txt_heap + WHERE hash_txt_heap.random = '111111112222222233333333'::text; + seqno | random +-------+-------- +(0 rows) + +-- +-- hash index +-- grep 444705537 hash.data +-- +SELECT * FROM hash_f8_heap + WHERE hash_f8_heap.random = '444705537'::float8; + seqno | random +-------+----------- + 7853 | 444705537 +(1 row) + +-- +-- hash index +-- grep 88888888 hash.data +-- +SELECT * FROM hash_f8_heap + WHERE hash_f8_heap.random = '88888888'::float8; + seqno | random +-------+-------- +(0 rows) + +-- +-- hash index +-- grep '^90[^0-9]' hashovfl.data +-- +-- SELECT count(*) AS i988 FROM hash_ovfl_heap +-- WHERE x = 90; +-- +-- hash index +-- grep '^1000[^0-9]' hashovfl.data +-- +-- SELECT count(*) AS i0 FROM hash_ovfl_heap +-- WHERE x = 1000; +-- +-- HASH +-- +UPDATE hash_i4_heap + SET random = 1 + WHERE hash_i4_heap.seqno = 1492; +SELECT h.seqno AS i1492, h.random AS i1 + FROM hash_i4_heap h + WHERE h.random = 1; + i1492 | i1 +-------+---- + 1492 | 1 +(1 row) + +UPDATE hash_i4_heap + SET seqno = 20000 + WHERE hash_i4_heap.random = 1492795354; +SELECT h.seqno AS i20000 + FROM hash_i4_heap h + WHERE h.random = 1492795354; + i20000 +-------- + 20000 +(1 row) + +UPDATE hash_name_heap + SET random = '0123456789abcdef'::name + WHERE hash_name_heap.seqno = 6543; +SELECT h.seqno AS i6543, h.random AS c0_to_f + FROM hash_name_heap h + WHERE h.random = '0123456789abcdef'::name; + i6543 | c0_to_f +-------+------------------ + 6543 | 0123456789abcdef +(1 row) + +UPDATE hash_name_heap + SET seqno = 20000 + WHERE hash_name_heap.random = '76652222'::name; +-- +-- this is the row we just replaced; index scan should return zero rows +-- +SELECT h.seqno AS emptyset + FROM hash_name_heap h + WHERE h.random = '76652222'::name; + emptyset +---------- +(0 rows) + +UPDATE hash_txt_heap + SET random = '0123456789abcdefghijklmnop'::text + WHERE hash_txt_heap.seqno = 4002; +SELECT h.seqno AS i4002, h.random AS c0_to_p + FROM hash_txt_heap h + WHERE h.random = '0123456789abcdefghijklmnop'::text; + i4002 | c0_to_p +-------+---------------------------- + 4002 | 0123456789abcdefghijklmnop +(1 row) + +UPDATE hash_txt_heap + SET seqno = 20000 + WHERE hash_txt_heap.random = '959363399'::text; +SELECT h.seqno AS t20000 + FROM hash_txt_heap h + WHERE h.random = '959363399'::text; + t20000 +-------- + 
20000 +(1 row) + +UPDATE hash_f8_heap + SET random = '-1234.1234'::float8 + WHERE hash_f8_heap.seqno = 8906; +SELECT h.seqno AS i8096, h.random AS f1234_1234 + FROM hash_f8_heap h + WHERE h.random = '-1234.1234'::float8; + i8096 | f1234_1234 +-------+------------ + 8906 | -1234.1234 +(1 row) + +UPDATE hash_f8_heap + SET seqno = 20000 + WHERE hash_f8_heap.random = '488912369'::float8; +SELECT h.seqno AS f20000 + FROM hash_f8_heap h + WHERE h.random = '488912369'::float8; + f20000 +-------- + 20000 +(1 row) + +-- UPDATE hash_ovfl_heap +-- SET x = 1000 +-- WHERE x = 90; +-- this vacuums the index as well +-- VACUUM hash_ovfl_heap; +-- SELECT count(*) AS i0 FROM hash_ovfl_heap +-- WHERE x = 90; +-- SELECT count(*) AS i988 FROM hash_ovfl_heap +-- WHERE x = 1000; +-- +-- Cause some overflow insert and splits. +-- +CREATE TABLE hash_split_heap (keycol INT); +INSERT INTO hash_split_heap SELECT 1 FROM generate_series(1, 500) a; +CREATE INDEX hash_split_index on hash_split_heap USING HASH (keycol); +INSERT INTO hash_split_heap SELECT 1 FROM generate_series(1, 5000) a; +-- Let's do a backward scan. +BEGIN; +SET enable_seqscan = OFF; +SET enable_bitmapscan = OFF; +DECLARE c CURSOR FOR SELECT * from hash_split_heap WHERE keycol = 1; +MOVE FORWARD ALL FROM c; +MOVE BACKWARD 10000 FROM c; +MOVE BACKWARD ALL FROM c; +CLOSE c; +END; +-- DELETE, INSERT, VACUUM. +DELETE FROM hash_split_heap WHERE keycol = 1; +INSERT INTO hash_split_heap SELECT a/2 FROM generate_series(1, 25000) a; +VACUUM hash_split_heap; +-- Rebuild the index using a different fillfactor +ALTER INDEX hash_split_index SET (fillfactor = 10); +REINDEX INDEX hash_split_index; +-- Clean up. +DROP TABLE hash_split_heap; +-- Index on temp table. +CREATE TEMP TABLE hash_temp_heap (x int, y int); +INSERT INTO hash_temp_heap VALUES (1,1); +CREATE INDEX hash_idx ON hash_temp_heap USING hash (x); +DROP TABLE hash_temp_heap CASCADE; +-- Float4 type. +CREATE TABLE hash_heap_float4 (x float4, y int); +INSERT INTO hash_heap_float4 VALUES (1.1,1); +CREATE INDEX hash_idx ON hash_heap_float4 USING hash (x); +DROP TABLE hash_heap_float4 CASCADE; +-- Test out-of-range fillfactor values +CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=9); +ERROR: value 9 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +CREATE INDEX hash_f8_index2 ON hash_f8_heap USING hash (random float8_ops) + WITH (fillfactor=101); +ERROR: value 101 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". diff --git a/src/test/regress/expected/hash_part.out b/src/test/regress/expected/hash_part.out new file mode 100644 index 0000000..ac3aabe --- /dev/null +++ b/src/test/regress/expected/hash_part.out @@ -0,0 +1,114 @@ +-- +-- Hash partitioning. +-- +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different machines. See the definitions of +-- part_part_test_int4_ops and part_test_text_ops in insert.sql. 
+CREATE TABLE mchash (a int, b text, c jsonb) + PARTITION BY HASH (a part_test_int4_ops, b part_test_text_ops); +CREATE TABLE mchash1 + PARTITION OF mchash FOR VALUES WITH (MODULUS 4, REMAINDER 0); +-- invalid OID, no such table +SELECT satisfies_hash_partition(0, 4, 0, NULL); +ERROR: could not open relation with OID 0 +-- not partitioned +SELECT satisfies_hash_partition('tenk1'::regclass, 4, 0, NULL); +ERROR: "tenk1" is not a hash partitioned table +-- partition rather than the parent +SELECT satisfies_hash_partition('mchash1'::regclass, 4, 0, NULL); +ERROR: "mchash1" is not a hash partitioned table +-- invalid modulus +SELECT satisfies_hash_partition('mchash'::regclass, 0, 0, NULL); +ERROR: modulus for hash partition must be an integer value greater than zero +-- remainder too small +SELECT satisfies_hash_partition('mchash'::regclass, 1, -1, NULL); +ERROR: remainder for hash partition must be an integer value greater than or equal to zero +-- remainder too large +SELECT satisfies_hash_partition('mchash'::regclass, 1, 1, NULL); +ERROR: remainder for hash partition must be less than modulus +-- modulus is null +SELECT satisfies_hash_partition('mchash'::regclass, NULL, 0, NULL); + satisfies_hash_partition +-------------------------- + f +(1 row) + +-- remainder is null +SELECT satisfies_hash_partition('mchash'::regclass, 4, NULL, NULL); + satisfies_hash_partition +-------------------------- + f +(1 row) + +-- too many arguments +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, NULL::int, NULL::text, NULL::json); +ERROR: number of partitioning columns (2) does not match number of partition keys provided (3) +-- too few arguments +SELECT satisfies_hash_partition('mchash'::regclass, 3, 1, NULL::int); +ERROR: number of partitioning columns (2) does not match number of partition keys provided (1) +-- wrong argument type +SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, NULL::int, NULL::int); +ERROR: column 2 of the partition key has type text, but supplied value is of type integer +-- ok, should be false +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 0, ''::text); + satisfies_hash_partition +-------------------------- + f +(1 row) + +-- ok, should be true +SELECT satisfies_hash_partition('mchash'::regclass, 4, 0, 2, ''::text); + satisfies_hash_partition +-------------------------- + t +(1 row) + +-- argument via variadic syntax, should fail because not all partitioning +-- columns are of the correct type +SELECT satisfies_hash_partition('mchash'::regclass, 2, 1, + variadic array[1,2]::int[]); +ERROR: column 2 of the partition key has type "text", but supplied value is of type "integer" +-- multiple partitioning columns of the same type +CREATE TABLE mcinthash (a int, b int, c jsonb) + PARTITION BY HASH (a part_test_int4_ops, b part_test_int4_ops); +-- now variadic should work, should be false +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[0, 0]); + satisfies_hash_partition +-------------------------- + f +(1 row) + +-- should be true +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[0, 1]); + satisfies_hash_partition +-------------------------- + t +(1 row) + +-- wrong length +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[]::int[]); +ERROR: number of partitioning columns (2) does not match number of partition keys provided (0) +-- wrong type +SELECT satisfies_hash_partition('mcinthash'::regclass, 4, 0, + variadic array[now(), now()]); +ERROR: column 1 of the partition key 
has type "integer", but supplied value is of type "timestamp with time zone" +-- check satisfies_hash_partition passes correct collation +create table text_hashp (a text) partition by hash (a); +create table text_hashp0 partition of text_hashp for values with (modulus 2, remainder 0); +create table text_hashp1 partition of text_hashp for values with (modulus 2, remainder 1); +-- The result here should always be true, because 'xxx' must belong to +-- one of the two defined partitions +select satisfies_hash_partition('text_hashp'::regclass, 2, 0, 'xxx'::text) OR + satisfies_hash_partition('text_hashp'::regclass, 2, 1, 'xxx'::text) AS satisfies; + satisfies +----------- + t +(1 row) + +-- cleanup +DROP TABLE mchash; +DROP TABLE mcinthash; +DROP TABLE text_hashp; diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out new file mode 100644 index 0000000..f3cda4a --- /dev/null +++ b/src/test/regress/expected/horology.out @@ -0,0 +1,3528 @@ +-- +-- HOROLOGY +-- +SET DateStyle = 'Postgres, MDY'; +SHOW TimeZone; -- Many of these tests depend on the prevailing setting + TimeZone +---------- + PST8PDT +(1 row) + +-- +-- Test various input formats +-- +SELECT timestamp with time zone '20011227 040506+08'; + timestamptz +------------------------------ + Wed Dec 26 12:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227 040506-08'; + timestamptz +------------------------------ + Thu Dec 27 04:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227 040506.789+08'; + timestamptz +---------------------------------- + Wed Dec 26 12:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227 040506.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227T040506+08'; + timestamptz +------------------------------ + Wed Dec 26 12:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227T040506-08'; + timestamptz +------------------------------ + Thu Dec 27 04:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227T040506.789+08'; + timestamptz +---------------------------------- + Wed Dec 26 12:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '20011227T040506.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '2001-12-27 04:05:06.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '2001.12.27 04:05:06.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '2001/12/27 04:05:06.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '12/27/2001 04:05:06.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '2001-12-27 04:05:06.789 MET DST'; + timestamptz +---------------------------------- + Wed Dec 26 18:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '2001-12-27 allballs'; + timestamptz +------------------------------ + Wed Dec 26 16:00:00 2001 PST +(1 row) + +-- should fail in mdy mode: +SELECT timestamp with time zone '27/12/2001 04:05:06.789-08'; +ERROR: date/time field value out of range: "27/12/2001 04:05:06.789-08" +LINE 1: SELECT timestamp with time zone '27/12/2001 
04:05:06.789-08'... + ^ +HINT: Perhaps you need a different "datestyle" setting. +set datestyle to dmy; +SELECT timestamp with time zone '27/12/2001 04:05:06.789-08'; + timestamptz +---------------------------------- + Thu 27 Dec 04:05:06.789 2001 PST +(1 row) + +reset datestyle; +SELECT timestamp with time zone 'J2452271+08'; + timestamptz +------------------------------ + Wed Dec 26 08:00:00 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271-08'; + timestamptz +------------------------------ + Thu Dec 27 00:00:00 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271.5+08'; + timestamptz +------------------------------ + Wed Dec 26 20:00:00 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271.5-08'; + timestamptz +------------------------------ + Thu Dec 27 12:00:00 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271 04:05:06+08'; + timestamptz +------------------------------ + Wed Dec 26 12:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271 04:05:06-08'; + timestamptz +------------------------------ + Thu Dec 27 04:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271T040506+08'; + timestamptz +------------------------------ + Wed Dec 26 12:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271T040506-08'; + timestamptz +------------------------------ + Thu Dec 27 04:05:06 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271T040506.789+08'; + timestamptz +---------------------------------- + Wed Dec 26 12:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone 'J2452271T040506.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +-- German/European-style dates with periods as delimiters +SELECT timestamp with time zone '12.27.2001 04:05:06.789+08'; + timestamptz +---------------------------------- + Wed Dec 26 12:05:06.789 2001 PST +(1 row) + +SELECT timestamp with time zone '12.27.2001 04:05:06.789-08'; + timestamptz +---------------------------------- + Thu Dec 27 04:05:06.789 2001 PST +(1 row) + +SET DateStyle = 'German'; +SELECT timestamp with time zone '27.12.2001 04:05:06.789+08'; + timestamptz +----------------------------- + 26.12.2001 12:05:06.789 PST +(1 row) + +SELECT timestamp with time zone '27.12.2001 04:05:06.789-08'; + timestamptz +----------------------------- + 27.12.2001 04:05:06.789 PST +(1 row) + +SET DateStyle = 'ISO'; +-- As of 7.4, allow time without time zone having a time zone specified +SELECT time without time zone '040506.789+08'; + time +-------------- + 04:05:06.789 +(1 row) + +SELECT time without time zone '040506.789-08'; + time +-------------- + 04:05:06.789 +(1 row) + +SELECT time without time zone 'T040506.789+08'; + time +-------------- + 04:05:06.789 +(1 row) + +SELECT time without time zone 'T040506.789-08'; + time +-------------- + 04:05:06.789 +(1 row) + +SELECT time with time zone '040506.789+08'; + timetz +----------------- + 04:05:06.789+08 +(1 row) + +SELECT time with time zone '040506.789-08'; + timetz +----------------- + 04:05:06.789-08 +(1 row) + +SELECT time with time zone 'T040506.789+08'; + timetz +----------------- + 04:05:06.789+08 +(1 row) + +SELECT time with time zone 'T040506.789-08'; + timetz +----------------- + 04:05:06.789-08 +(1 row) + +SELECT time with time zone 'T040506.789 +08'; + timetz +----------------- + 04:05:06.789+08 +(1 row) + +SELECT time with time zone 'T040506.789 -08'; + timetz +----------------- + 04:05:06.789-08 +(1 row) + +-- time with time zone should 
accept a date for DST resolution purposes +SELECT time with time zone 'T040506.789 America/Los_Angeles'; +ERROR: invalid input syntax for type time with time zone: "T040506.789 America/Los_Angeles" +LINE 1: SELECT time with time zone 'T040506.789 America/Los_Angeles'... + ^ +SELECT time with time zone '2001-12-27 T040506.789 America/Los_Angeles'; + timetz +----------------- + 04:05:06.789-08 +(1 row) + +SELECT time with time zone 'J2452271 T040506.789 America/Los_Angeles'; + timetz +----------------- + 04:05:06.789-08 +(1 row) + +SET DateStyle = 'Postgres, MDY'; +-- Check Julian dates BC +SELECT date 'J1520447' AS "Confucius' Birthday"; + Confucius' Birthday +--------------------- + 09-28-0551 BC +(1 row) + +SELECT date 'J0' AS "Julian Epoch"; + Julian Epoch +--------------- + 11-24-4714 BC +(1 row) + +-- test error on dangling Julian units +SELECT date '1995-08-06 J J J'; +ERROR: invalid input syntax for type date: "1995-08-06 J J J" +LINE 1: SELECT date '1995-08-06 J J J'; + ^ +SELECT date 'J J 1520447'; +ERROR: invalid input syntax for type date: "J J 1520447" +LINE 1: SELECT date 'J J 1520447'; + ^ +-- We used to accept this input style, but it was based on a misreading +-- of ISO8601, and it was never documented anyway +SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08'; +ERROR: invalid input syntax for type timestamp with time zone: "Y2001M12D27H04M05S06.789+08" +LINE 1: SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08... + ^ +SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08'; +ERROR: invalid input syntax for type timestamp with time zone: "Y2001M12D27H04MM05S06.789-08" +LINE 1: SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-0... + ^ +-- conflicting fields should throw errors +SELECT date '1995-08-06 epoch'; +ERROR: invalid input syntax for type date: "1995-08-06 epoch" +LINE 1: SELECT date '1995-08-06 epoch'; + ^ +SELECT date '1995-08-06 infinity'; +ERROR: invalid input syntax for type date: "1995-08-06 infinity" +LINE 1: SELECT date '1995-08-06 infinity'; + ^ +SELECT date '1995-08-06 -infinity'; +ERROR: invalid input syntax for type date: "1995-08-06 -infinity" +LINE 1: SELECT date '1995-08-06 -infinity'; + ^ +SELECT date 'today infinity'; +ERROR: invalid input syntax for type date: "today infinity" +LINE 1: SELECT date 'today infinity'; + ^ +SELECT date '-infinity infinity'; +ERROR: invalid input syntax for type date: "-infinity infinity" +LINE 1: SELECT date '-infinity infinity'; + ^ +SELECT timestamp '1995-08-06 epoch'; +ERROR: invalid input syntax for type timestamp: "1995-08-06 epoch" +LINE 1: SELECT timestamp '1995-08-06 epoch'; + ^ +SELECT timestamp '1995-08-06 infinity'; +ERROR: invalid input syntax for type timestamp: "1995-08-06 infinity" +LINE 1: SELECT timestamp '1995-08-06 infinity'; + ^ +SELECT timestamp '1995-08-06 -infinity'; +ERROR: invalid input syntax for type timestamp: "1995-08-06 -infinity" +LINE 1: SELECT timestamp '1995-08-06 -infinity'; + ^ +SELECT timestamp 'epoch 01:01:01'; +ERROR: invalid input syntax for type timestamp: "epoch 01:01:01" +LINE 1: SELECT timestamp 'epoch 01:01:01'; + ^ +SELECT timestamp 'infinity 01:01:01'; +ERROR: invalid input syntax for type timestamp: "infinity 01:01:01" +LINE 1: SELECT timestamp 'infinity 01:01:01'; + ^ +SELECT timestamp '-infinity 01:01:01'; +ERROR: invalid input syntax for type timestamp: "-infinity 01:01:01" +LINE 1: SELECT timestamp '-infinity 01:01:01'; + ^ +SELECT timestamp 'now epoch'; +ERROR: invalid input syntax for type timestamp: "now epoch" +LINE 1: SELECT 
timestamp 'now epoch'; + ^ +SELECT timestamp '-infinity infinity'; +ERROR: invalid input syntax for type timestamp: "-infinity infinity" +LINE 1: SELECT timestamp '-infinity infinity'; + ^ +SELECT timestamptz '1995-08-06 epoch'; +ERROR: invalid input syntax for type timestamp with time zone: "1995-08-06 epoch" +LINE 1: SELECT timestamptz '1995-08-06 epoch'; + ^ +SELECT timestamptz '1995-08-06 infinity'; +ERROR: invalid input syntax for type timestamp with time zone: "1995-08-06 infinity" +LINE 1: SELECT timestamptz '1995-08-06 infinity'; + ^ +SELECT timestamptz '1995-08-06 -infinity'; +ERROR: invalid input syntax for type timestamp with time zone: "1995-08-06 -infinity" +LINE 1: SELECT timestamptz '1995-08-06 -infinity'; + ^ +SELECT timestamptz 'epoch 01:01:01'; +ERROR: invalid input syntax for type timestamp with time zone: "epoch 01:01:01" +LINE 1: SELECT timestamptz 'epoch 01:01:01'; + ^ +SELECT timestamptz 'infinity 01:01:01'; +ERROR: invalid input syntax for type timestamp with time zone: "infinity 01:01:01" +LINE 1: SELECT timestamptz 'infinity 01:01:01'; + ^ +SELECT timestamptz '-infinity 01:01:01'; +ERROR: invalid input syntax for type timestamp with time zone: "-infinity 01:01:01" +LINE 1: SELECT timestamptz '-infinity 01:01:01'; + ^ +SELECT timestamptz 'now epoch'; +ERROR: invalid input syntax for type timestamp with time zone: "now epoch" +LINE 1: SELECT timestamptz 'now epoch'; + ^ +SELECT timestamptz '-infinity infinity'; +ERROR: invalid input syntax for type timestamp with time zone: "-infinity infinity" +LINE 1: SELECT timestamptz '-infinity infinity'; + ^ +-- +-- date, time arithmetic +-- +SELECT date '1981-02-03' + time '04:05:06' AS "Date + Time"; + Date + Time +-------------------------- + Tue Feb 03 04:05:06 1981 +(1 row) + +SELECT date '1991-02-03' + time with time zone '04:05:06 PST' AS "Date + Time PST"; + Date + Time PST +------------------------------ + Sun Feb 03 04:05:06 1991 PST +(1 row) + +SELECT date '2001-02-03' + time with time zone '04:05:06 UTC' AS "Date + Time UTC"; + Date + Time UTC +------------------------------ + Fri Feb 02 20:05:06 2001 PST +(1 row) + +SELECT date '1991-02-03' + interval '2 years' AS "Add Two Years"; + Add Two Years +-------------------------- + Wed Feb 03 00:00:00 1993 +(1 row) + +SELECT date '2001-12-13' - interval '2 years' AS "Subtract Two Years"; + Subtract Two Years +-------------------------- + Mon Dec 13 00:00:00 1999 +(1 row) + +-- subtract time from date should not make sense; use interval instead +SELECT date '1991-02-03' - time '04:05:06' AS "Subtract Time"; + Subtract Time +-------------------------- + Sat Feb 02 19:54:54 1991 +(1 row) + +SELECT date '1991-02-03' - time with time zone '04:05:06 UTC' AS "Subtract Time UTC"; +ERROR: operator does not exist: date - time with time zone +LINE 1: SELECT date '1991-02-03' - time with time zone '04:05:06 UTC... + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
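As the hint suggests, an explicit cast makes the subtraction legal; a minimal illustration (not part of the expected output, and note that the cast simply discards the time zone rather than converting it):

-- Illustration only, not part of horology.out: cast timetz down to time first.
SELECT date '1991-02-03' - CAST(time with time zone '04:05:06 UTC' AS time)
  AS "Subtract Time (zone discarded)";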
+-- +-- timestamp, interval arithmetic +-- +SELECT timestamp without time zone '1996-03-01' - interval '1 second' AS "Feb 29"; + Feb 29 +-------------------------- + Thu Feb 29 23:59:59 1996 +(1 row) + +SELECT timestamp without time zone '1999-03-01' - interval '1 second' AS "Feb 28"; + Feb 28 +-------------------------- + Sun Feb 28 23:59:59 1999 +(1 row) + +SELECT timestamp without time zone '2000-03-01' - interval '1 second' AS "Feb 29"; + Feb 29 +-------------------------- + Tue Feb 29 23:59:59 2000 +(1 row) + +SELECT timestamp without time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31"; + Dec 31 +-------------------------- + Fri Dec 31 23:59:59 1999 +(1 row) + +SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '106000000 days' AS "Feb 23, 285506"; + Feb 23, 285506 +---------------------------- + Fri Feb 23 00:00:00 285506 +(1 row) + +SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '107000000 days' AS "Jan 20, 288244"; + Jan 20, 288244 +---------------------------- + Sat Jan 20 00:00:00 288244 +(1 row) + +SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days' AS "Dec 31, 294276"; + Dec 31, 294276 +---------------------------- + Sun Dec 31 00:00:00 294276 +(1 row) + +SELECT timestamp without time zone '2000-01-01' - interval '2483590 days' AS "out of range"; +ERROR: timestamp out of range +SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days"; + 106751991 Days +------------------ + @ 106751991 days +(1 row) + +-- Shorthand values +-- Not directly usable for regression testing since these are not constants. +-- So, just try to test parser and hope for the best - thomas 97/04/26 +SELECT (timestamp without time zone 'today' = (timestamp without time zone 'yesterday' + interval '1 day')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone 'today' = (timestamp without time zone 'tomorrow' - interval '1 day')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone 'today 10:30' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone '10:30 today' = (timestamp without time zone 'yesterday' + interval '1 day 10 hr 30 min')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone 'tomorrow' = (timestamp without time zone 'yesterday' + interval '2 days')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone 'tomorrow 16:00:00' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone '16:00:00 tomorrow' = (timestamp without time zone 'today' + interval '1 day 16 hours')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone 'yesterday 12:34:56' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone '12:34:56 yesterday' = (timestamp without time zone 'tomorrow' - interval '2 days - 12:34:56')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone 'tomorrow' > 'now') as "True"; + True +------ + t +(1 row) + +-- Convert from date and time to timestamp +-- This test used to be timestamp(date,time) but no longer allowed by grammar +-- to enable support for SQL99 timestamp type syntax. 
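Besides the date-plus-time additions tested next, the constructor function make_timestamp() offers an equivalent to the retired timestamp(date, time) call; a hedged aside, reusing values from the tests below:

-- Aside only, not part of the expected output: make_timestamp() builds the same
-- value as the date + time addition tested immediately below.
SELECT make_timestamp(1994, 1, 1, 11, 0, 0) = (date '1994-01-01' + time '11:00') AS same;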
+SELECT date '1994-01-01' + time '11:00' AS "Jan_01_1994_11am"; + Jan_01_1994_11am +-------------------------- + Sat Jan 01 11:00:00 1994 +(1 row) + +SELECT date '1994-01-01' + time '10:00' AS "Jan_01_1994_10am"; + Jan_01_1994_10am +-------------------------- + Sat Jan 01 10:00:00 1994 +(1 row) + +SELECT date '1994-01-01' + timetz '11:00-5' AS "Jan_01_1994_8am"; + Jan_01_1994_8am +------------------------------ + Sat Jan 01 08:00:00 1994 PST +(1 row) + +SELECT timestamptz(date '1994-01-01', time with time zone '11:00-5') AS "Jan_01_1994_8am"; + Jan_01_1994_8am +------------------------------ + Sat Jan 01 08:00:00 1994 PST +(1 row) + +SELECT d1 + interval '1 year' AS one_year FROM TIMESTAMP_TBL; + one_year +----------------------------- + -infinity + infinity + Fri Jan 01 00:00:00 1971 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:02 1998 + Tue Feb 10 17:32:01.4 1998 + Tue Feb 10 17:32:01.5 1998 + Tue Feb 10 17:32:01.6 1998 + Fri Jan 02 00:00:00 1998 + Fri Jan 02 03:04:05 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Wed Jun 10 17:32:01 1998 + Sun Sep 22 18:19:20 2002 + Thu Mar 15 08:14:01 2001 + Thu Mar 15 13:14:02 2001 + Thu Mar 15 12:14:03 2001 + Thu Mar 15 03:14:04 2001 + Thu Mar 15 02:14:05 2001 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:00 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Tue Feb 10 17:32:01 1998 + Wed Jun 10 18:32:01 1998 + Tue Feb 10 17:32:01 1998 + Wed Feb 11 17:32:01 1998 + Thu Feb 12 17:32:01 1998 + Fri Feb 13 17:32:01 1998 + Sat Feb 14 17:32:01 1998 + Sun Feb 15 17:32:01 1998 + Mon Feb 16 17:32:01 1998 + Thu Feb 16 17:32:01 0096 BC + Sun Feb 16 17:32:01 0098 + Fri Feb 16 17:32:01 0598 + Wed Feb 16 17:32:01 1098 + Sun Feb 16 17:32:01 1698 + Fri Feb 16 17:32:01 1798 + Wed Feb 16 17:32:01 1898 + Mon Feb 16 17:32:01 1998 + Sun Feb 16 17:32:01 2098 + Fri Feb 28 17:32:01 1997 + Fri Feb 28 17:32:01 1997 + Sat Mar 01 17:32:01 1997 + Tue Dec 30 17:32:01 1997 + Wed Dec 31 17:32:01 1997 + Thu Jan 01 17:32:01 1998 + Sat Feb 28 17:32:01 1998 + Sun Mar 01 17:32:01 1998 + Wed Dec 30 17:32:01 1998 + Thu Dec 31 17:32:01 1998 + Sun Dec 31 17:32:01 2000 + Mon Jan 01 17:32:01 2001 + Mon Dec 31 17:32:01 2001 + Tue Jan 01 17:32:01 2002 +(65 rows) + +SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMP_TBL; + one_year +----------------------------- + -infinity + infinity + Wed Jan 01 00:00:00 1969 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:02 1996 + Sat Feb 10 17:32:01.4 1996 + Sat Feb 10 17:32:01.5 1996 + Sat Feb 10 17:32:01.6 1996 + Tue Jan 02 00:00:00 1996 + Tue Jan 02 03:04:05 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Mon Jun 10 17:32:01 1996 + Fri Sep 22 18:19:20 2000 + Mon Mar 15 08:14:01 1999 + Mon Mar 15 13:14:02 1999 + Mon Mar 15 12:14:03 1999 + Mon Mar 15 03:14:04 1999 + Mon Mar 15 02:14:05 1999 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:00 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sat Feb 10 17:32:01 1996 + Mon Jun 10 18:32:01 1996 + Sat Feb 10 17:32:01 1996 + Sun Feb 
11 17:32:01 1996 + Mon Feb 12 17:32:01 1996 + Tue Feb 13 17:32:01 1996 + Wed Feb 14 17:32:01 1996 + Thu Feb 15 17:32:01 1996 + Fri Feb 16 17:32:01 1996 + Mon Feb 16 17:32:01 0098 BC + Thu Feb 16 17:32:01 0096 + Tue Feb 16 17:32:01 0596 + Sun Feb 16 17:32:01 1096 + Thu Feb 16 17:32:01 1696 + Tue Feb 16 17:32:01 1796 + Sun Feb 16 17:32:01 1896 + Fri Feb 16 17:32:01 1996 + Thu Feb 16 17:32:01 2096 + Tue Feb 28 17:32:01 1995 + Tue Feb 28 17:32:01 1995 + Wed Mar 01 17:32:01 1995 + Sat Dec 30 17:32:01 1995 + Sun Dec 31 17:32:01 1995 + Mon Jan 01 17:32:01 1996 + Wed Feb 28 17:32:01 1996 + Fri Mar 01 17:32:01 1996 + Mon Dec 30 17:32:01 1996 + Tue Dec 31 17:32:01 1996 + Thu Dec 31 17:32:01 1998 + Fri Jan 01 17:32:01 1999 + Fri Dec 31 17:32:01 1999 + Sat Jan 01 17:32:01 2000 +(65 rows) + +SELECT timestamp with time zone '1996-03-01' - interval '1 second' AS "Feb 29"; + Feb 29 +------------------------------ + Thu Feb 29 23:59:59 1996 PST +(1 row) + +SELECT timestamp with time zone '1999-03-01' - interval '1 second' AS "Feb 28"; + Feb 28 +------------------------------ + Sun Feb 28 23:59:59 1999 PST +(1 row) + +SELECT timestamp with time zone '2000-03-01' - interval '1 second' AS "Feb 29"; + Feb 29 +------------------------------ + Tue Feb 29 23:59:59 2000 PST +(1 row) + +SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31"; + Dec 31 +------------------------------ + Fri Dec 31 23:59:59 1999 PST +(1 row) + +SELECT timestamp with time zone '2000-01-01' - interval '2483590 days' AS "out of range"; +ERROR: timestamp out of range +SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp with time zone 'tomorrow' = (timestamp with time zone 'yesterday' + interval '2 days')) as "True"; + True +------ + t +(1 row) + +SELECT (timestamp with time zone 'tomorrow' > 'now') as "True"; + True +------ + t +(1 row) + +-- timestamp with time zone, interval arithmetic around DST change +-- (just for fun, let's use an intentionally nonstandard POSIX zone spec) +SET TIME ZONE 'CST7CDT,M4.1.0,M10.5.0'; +SELECT timestamp with time zone '2005-04-02 12:00-07' + interval '1 day' as "Apr 3, 12:00"; + Apr 3, 12:00 +------------------------------ + Sun Apr 03 12:00:00 2005 CDT +(1 row) + +SELECT timestamp with time zone '2005-04-02 12:00-07' + interval '24 hours' as "Apr 3, 13:00"; + Apr 3, 13:00 +------------------------------ + Sun Apr 03 13:00:00 2005 CDT +(1 row) + +SELECT timestamp with time zone '2005-04-03 12:00-06' - interval '1 day' as "Apr 2, 12:00"; + Apr 2, 12:00 +------------------------------ + Sat Apr 02 12:00:00 2005 CST +(1 row) + +SELECT timestamp with time zone '2005-04-03 12:00-06' - interval '24 hours' as "Apr 2, 11:00"; + Apr 2, 11:00 +------------------------------ + Sat Apr 02 11:00:00 2005 CST +(1 row) + +RESET TIME ZONE; +SELECT timestamptz(date '1994-01-01', time '11:00') AS "Jan_01_1994_10am"; + Jan_01_1994_10am +------------------------------ + Sat Jan 01 11:00:00 1994 PST +(1 row) + +SELECT timestamptz(date '1994-01-01', time '10:00') AS "Jan_01_1994_9am"; + Jan_01_1994_9am +------------------------------ + Sat Jan 01 10:00:00 1994 PST +(1 row) + +SELECT timestamptz(date '1994-01-01', time with time zone '11:00-8') AS "Jan_01_1994_11am"; + Jan_01_1994_11am +------------------------------ + Sat Jan 01 11:00:00 1994 PST 
+(1 row) + +SELECT timestamptz(date '1994-01-01', time with time zone '10:00-8') AS "Jan_01_1994_10am"; + Jan_01_1994_10am +------------------------------ + Sat Jan 01 10:00:00 1994 PST +(1 row) + +SELECT timestamptz(date '1994-01-01', time with time zone '11:00-5') AS "Jan_01_1994_8am"; + Jan_01_1994_8am +------------------------------ + Sat Jan 01 08:00:00 1994 PST +(1 row) + +SELECT d1 + interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL; + one_year +--------------------------------- + -infinity + infinity + Thu Dec 31 16:00:00 1970 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:02 1998 PST + Tue Feb 10 17:32:01.4 1998 PST + Tue Feb 10 17:32:01.5 1998 PST + Tue Feb 10 17:32:01.6 1998 PST + Fri Jan 02 00:00:00 1998 PST + Fri Jan 02 03:04:05 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Wed Jun 10 17:32:01 1998 PDT + Sun Sep 22 18:19:20 2002 PDT + Thu Mar 15 08:14:01 2001 PST + Thu Mar 15 04:14:02 2001 PST + Thu Mar 15 02:14:03 2001 PST + Thu Mar 15 03:14:04 2001 PST + Thu Mar 15 01:14:05 2001 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:00 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 17:32:01 1998 PST + Tue Feb 10 09:32:01 1998 PST + Tue Feb 10 09:32:01 1998 PST + Tue Feb 10 09:32:01 1998 PST + Tue Feb 10 14:32:01 1998 PST + Fri Jul 10 14:32:01 1998 PDT + Wed Jun 10 18:32:01 1998 PDT + Tue Feb 10 17:32:01 1998 PST + Wed Feb 11 17:32:01 1998 PST + Thu Feb 12 17:32:01 1998 PST + Fri Feb 13 17:32:01 1998 PST + Sat Feb 14 17:32:01 1998 PST + Sun Feb 15 17:32:01 1998 PST + Mon Feb 16 17:32:01 1998 PST + Thu Feb 16 17:32:01 0096 PST BC + Sun Feb 16 17:32:01 0098 PST + Fri Feb 16 17:32:01 0598 PST + Wed Feb 16 17:32:01 1098 PST + Sun Feb 16 17:32:01 1698 PST + Fri Feb 16 17:32:01 1798 PST + Wed Feb 16 17:32:01 1898 PST + Mon Feb 16 17:32:01 1998 PST + Sun Feb 16 17:32:01 2098 PST + Fri Feb 28 17:32:01 1997 PST + Fri Feb 28 17:32:01 1997 PST + Sat Mar 01 17:32:01 1997 PST + Tue Dec 30 17:32:01 1997 PST + Wed Dec 31 17:32:01 1997 PST + Thu Jan 01 17:32:01 1998 PST + Sat Feb 28 17:32:01 1998 PST + Sun Mar 01 17:32:01 1998 PST + Wed Dec 30 17:32:01 1998 PST + Thu Dec 31 17:32:01 1998 PST + Sun Dec 31 17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST + Mon Dec 31 17:32:01 2001 PST + Tue Jan 01 17:32:01 2002 PST +(66 rows) + +SELECT d1 - interval '1 year' AS one_year FROM TIMESTAMPTZ_TBL; + one_year +--------------------------------- + -infinity + infinity + Tue Dec 31 16:00:00 1968 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:02 1996 PST + Sat Feb 10 17:32:01.4 1996 PST + Sat Feb 10 17:32:01.5 1996 PST + Sat Feb 10 17:32:01.6 1996 PST + Tue Jan 02 00:00:00 1996 PST + Tue Jan 02 03:04:05 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Mon Jun 10 17:32:01 1996 PDT + Fri Sep 22 18:19:20 2000 PDT + Mon Mar 15 08:14:01 1999 PST + Mon Mar 15 04:14:02 1999 PST + Mon Mar 15 02:14:03 1999 PST + Mon Mar 15 03:14:04 1999 PST + Mon Mar 15 01:14:05 1999 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:00 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 17:32:01 1996 PST + Sat Feb 10 09:32:01 
1996 PST + Sat Feb 10 09:32:01 1996 PST + Sat Feb 10 09:32:01 1996 PST + Sat Feb 10 14:32:01 1996 PST + Wed Jul 10 14:32:01 1996 PDT + Mon Jun 10 18:32:01 1996 PDT + Sat Feb 10 17:32:01 1996 PST + Sun Feb 11 17:32:01 1996 PST + Mon Feb 12 17:32:01 1996 PST + Tue Feb 13 17:32:01 1996 PST + Wed Feb 14 17:32:01 1996 PST + Thu Feb 15 17:32:01 1996 PST + Fri Feb 16 17:32:01 1996 PST + Mon Feb 16 17:32:01 0098 PST BC + Thu Feb 16 17:32:01 0096 PST + Tue Feb 16 17:32:01 0596 PST + Sun Feb 16 17:32:01 1096 PST + Thu Feb 16 17:32:01 1696 PST + Tue Feb 16 17:32:01 1796 PST + Sun Feb 16 17:32:01 1896 PST + Fri Feb 16 17:32:01 1996 PST + Thu Feb 16 17:32:01 2096 PST + Tue Feb 28 17:32:01 1995 PST + Tue Feb 28 17:32:01 1995 PST + Wed Mar 01 17:32:01 1995 PST + Sat Dec 30 17:32:01 1995 PST + Sun Dec 31 17:32:01 1995 PST + Mon Jan 01 17:32:01 1996 PST + Wed Feb 28 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST + Thu Dec 31 17:32:01 1998 PST + Fri Jan 01 17:32:01 1999 PST + Fri Dec 31 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST +(66 rows) + +-- +-- time, interval arithmetic +-- +SELECT CAST(time '01:02' AS interval) AS "+01:02"; + +01:02 +----------------- + @ 1 hour 2 mins +(1 row) + +SELECT CAST(interval '02:03' AS time) AS "02:03:00"; + 02:03:00 +---------- + 02:03:00 +(1 row) + +SELECT time '01:30' + interval '02:01' AS "03:31:00"; + 03:31:00 +---------- + 03:31:00 +(1 row) + +SELECT time '01:30' - interval '02:01' AS "23:29:00"; + 23:29:00 +---------- + 23:29:00 +(1 row) + +SELECT time '02:30' + interval '36:01' AS "14:31:00"; + 14:31:00 +---------- + 14:31:00 +(1 row) + +SELECT time '03:30' + interval '1 month 04:01' AS "07:31:00"; + 07:31:00 +---------- + 07:31:00 +(1 row) + +SELECT CAST(time with time zone '01:02-08' AS interval) AS "+00:01"; +ERROR: cannot cast type time with time zone to interval +LINE 1: SELECT CAST(time with time zone '01:02-08' AS interval) AS "... + ^ +SELECT CAST(interval '02:03' AS time with time zone) AS "02:03:00-08"; +ERROR: cannot cast type interval to time with time zone +LINE 1: SELECT CAST(interval '02:03' AS time with time zone) AS "02:... + ^ +SELECT time with time zone '01:30-08' - interval '02:01' AS "23:29:00-08"; + 23:29:00-08 +------------- + 23:29:00-08 +(1 row) + +SELECT time with time zone '02:30-08' + interval '36:01' AS "14:31:00-08"; + 14:31:00-08 +------------- + 14:31:00-08 +(1 row) + +-- These two tests cannot be used because they default to current timezone, +-- which may be either -08 or -07 depending on the time of year. 
+-- SELECT time with time zone '01:30' + interval '02:01' AS "03:31:00-08"; +-- SELECT time with time zone '03:30' + interval '1 month 04:01' AS "07:31:00-08"; +-- Try the following two tests instead, as a poor substitute +SELECT CAST(CAST(date 'today' + time with time zone '05:30' + + interval '02:01' AS time with time zone) AS time) AS "07:31:00"; + 07:31:00 +---------- + 07:31:00 +(1 row) + +SELECT CAST(cast(date 'today' + time with time zone '03:30' + + interval '1 month 04:01' as timestamp without time zone) AS time) AS "07:31:00"; + 07:31:00 +---------- + 07:31:00 +(1 row) + +SELECT t.d1 AS t, i.f1 AS i, t.d1 + i.f1 AS "add", t.d1 - i.f1 AS "subtract" + FROM TIMESTAMP_TBL t, INTERVAL_TBL i + WHERE t.d1 BETWEEN '1990-01-01' AND '2001-01-01' + AND i.f1 BETWEEN '00:00' AND '23:00' + ORDER BY 1,2; + t | i | add | subtract +----------------------------+-----------+----------------------------+---------------------------- + Wed Feb 28 17:32:01 1996 | @ 1 min | Wed Feb 28 17:33:01 1996 | Wed Feb 28 17:31:01 1996 + Wed Feb 28 17:32:01 1996 | @ 5 hours | Wed Feb 28 22:32:01 1996 | Wed Feb 28 12:32:01 1996 + Thu Feb 29 17:32:01 1996 | @ 1 min | Thu Feb 29 17:33:01 1996 | Thu Feb 29 17:31:01 1996 + Thu Feb 29 17:32:01 1996 | @ 5 hours | Thu Feb 29 22:32:01 1996 | Thu Feb 29 12:32:01 1996 + Fri Mar 01 17:32:01 1996 | @ 1 min | Fri Mar 01 17:33:01 1996 | Fri Mar 01 17:31:01 1996 + Fri Mar 01 17:32:01 1996 | @ 5 hours | Fri Mar 01 22:32:01 1996 | Fri Mar 01 12:32:01 1996 + Mon Dec 30 17:32:01 1996 | @ 1 min | Mon Dec 30 17:33:01 1996 | Mon Dec 30 17:31:01 1996 + Mon Dec 30 17:32:01 1996 | @ 5 hours | Mon Dec 30 22:32:01 1996 | Mon Dec 30 12:32:01 1996 + Tue Dec 31 17:32:01 1996 | @ 1 min | Tue Dec 31 17:33:01 1996 | Tue Dec 31 17:31:01 1996 + Tue Dec 31 17:32:01 1996 | @ 5 hours | Tue Dec 31 22:32:01 1996 | Tue Dec 31 12:32:01 1996 + Wed Jan 01 17:32:01 1997 | @ 1 min | Wed Jan 01 17:33:01 1997 | Wed Jan 01 17:31:01 1997 + Wed Jan 01 17:32:01 1997 | @ 5 hours | Wed Jan 01 22:32:01 1997 | Wed Jan 01 12:32:01 1997 + Thu Jan 02 00:00:00 1997 | @ 1 min | Thu Jan 02 00:01:00 1997 | Wed Jan 01 23:59:00 1997 + Thu Jan 02 00:00:00 1997 | @ 5 hours | Thu Jan 02 05:00:00 1997 | Wed Jan 01 19:00:00 1997 + Thu Jan 02 03:04:05 1997 | @ 1 min | Thu Jan 02 03:05:05 1997 | Thu Jan 02 03:03:05 1997 + Thu Jan 02 03:04:05 1997 | @ 5 hours | Thu Jan 02 08:04:05 1997 | Wed Jan 01 22:04:05 1997 + Mon Feb 10 17:32:00 1997 | @ 1 min | Mon Feb 10 17:33:00 1997 | Mon Feb 10 17:31:00 1997 + Mon Feb 10 17:32:00 1997 | @ 5 hours | Mon Feb 10 22:32:00 1997 | Mon Feb 10 12:32:00 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 
17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 1 min | Mon Feb 10 17:33:01 1997 | Mon Feb 10 17:31:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01 1997 | @ 5 hours | Mon Feb 10 22:32:01 1997 | Mon Feb 10 12:32:01 1997 + Mon Feb 10 17:32:01.4 1997 | @ 1 min | Mon Feb 10 17:33:01.4 1997 | Mon Feb 10 17:31:01.4 1997 + Mon Feb 10 17:32:01.4 1997 | @ 5 hours | Mon Feb 10 22:32:01.4 1997 | Mon Feb 10 12:32:01.4 1997 + Mon Feb 10 17:32:01.5 1997 | @ 1 min | Mon Feb 10 17:33:01.5 1997 | Mon Feb 10 17:31:01.5 1997 + Mon Feb 10 17:32:01.5 1997 | @ 5 hours | Mon Feb 10 22:32:01.5 1997 | Mon Feb 10 12:32:01.5 1997 + Mon Feb 10 17:32:01.6 1997 | @ 1 min | Mon Feb 10 17:33:01.6 1997 | Mon Feb 10 17:31:01.6 1997 + Mon Feb 10 17:32:01.6 1997 | @ 5 hours | Mon Feb 10 22:32:01.6 1997 | Mon Feb 10 12:32:01.6 1997 + Mon Feb 10 17:32:02 1997 | @ 1 min | Mon Feb 10 17:33:02 1997 | Mon Feb 10 17:31:02 1997 + Mon Feb 10 17:32:02 1997 | @ 5 hours | Mon Feb 10 22:32:02 1997 | Mon Feb 10 12:32:02 1997 + Tue Feb 11 17:32:01 1997 | @ 1 min | Tue Feb 11 17:33:01 1997 | Tue Feb 11 17:31:01 1997 + Tue Feb 11 17:32:01 1997 | @ 5 hours | Tue Feb 11 22:32:01 1997 | Tue Feb 11 12:32:01 1997 + Wed Feb 12 17:32:01 1997 | @ 1 min | Wed Feb 12 17:33:01 1997 | Wed Feb 12 17:31:01 1997 + Wed Feb 12 17:32:01 1997 | @ 5 hours | Wed Feb 12 22:32:01 1997 | Wed Feb 12 12:32:01 1997 + Thu Feb 13 17:32:01 1997 | @ 1 min | Thu Feb 13 17:33:01 
1997 | Thu Feb 13 17:31:01 1997 + Thu Feb 13 17:32:01 1997 | @ 5 hours | Thu Feb 13 22:32:01 1997 | Thu Feb 13 12:32:01 1997 + Fri Feb 14 17:32:01 1997 | @ 1 min | Fri Feb 14 17:33:01 1997 | Fri Feb 14 17:31:01 1997 + Fri Feb 14 17:32:01 1997 | @ 5 hours | Fri Feb 14 22:32:01 1997 | Fri Feb 14 12:32:01 1997 + Sat Feb 15 17:32:01 1997 | @ 1 min | Sat Feb 15 17:33:01 1997 | Sat Feb 15 17:31:01 1997 + Sat Feb 15 17:32:01 1997 | @ 5 hours | Sat Feb 15 22:32:01 1997 | Sat Feb 15 12:32:01 1997 + Sun Feb 16 17:32:01 1997 | @ 1 min | Sun Feb 16 17:33:01 1997 | Sun Feb 16 17:31:01 1997 + Sun Feb 16 17:32:01 1997 | @ 1 min | Sun Feb 16 17:33:01 1997 | Sun Feb 16 17:31:01 1997 + Sun Feb 16 17:32:01 1997 | @ 5 hours | Sun Feb 16 22:32:01 1997 | Sun Feb 16 12:32:01 1997 + Sun Feb 16 17:32:01 1997 | @ 5 hours | Sun Feb 16 22:32:01 1997 | Sun Feb 16 12:32:01 1997 + Fri Feb 28 17:32:01 1997 | @ 1 min | Fri Feb 28 17:33:01 1997 | Fri Feb 28 17:31:01 1997 + Fri Feb 28 17:32:01 1997 | @ 5 hours | Fri Feb 28 22:32:01 1997 | Fri Feb 28 12:32:01 1997 + Sat Mar 01 17:32:01 1997 | @ 1 min | Sat Mar 01 17:33:01 1997 | Sat Mar 01 17:31:01 1997 + Sat Mar 01 17:32:01 1997 | @ 5 hours | Sat Mar 01 22:32:01 1997 | Sat Mar 01 12:32:01 1997 + Tue Jun 10 17:32:01 1997 | @ 1 min | Tue Jun 10 17:33:01 1997 | Tue Jun 10 17:31:01 1997 + Tue Jun 10 17:32:01 1997 | @ 5 hours | Tue Jun 10 22:32:01 1997 | Tue Jun 10 12:32:01 1997 + Tue Jun 10 18:32:01 1997 | @ 1 min | Tue Jun 10 18:33:01 1997 | Tue Jun 10 18:31:01 1997 + Tue Jun 10 18:32:01 1997 | @ 5 hours | Tue Jun 10 23:32:01 1997 | Tue Jun 10 13:32:01 1997 + Tue Dec 30 17:32:01 1997 | @ 1 min | Tue Dec 30 17:33:01 1997 | Tue Dec 30 17:31:01 1997 + Tue Dec 30 17:32:01 1997 | @ 5 hours | Tue Dec 30 22:32:01 1997 | Tue Dec 30 12:32:01 1997 + Wed Dec 31 17:32:01 1997 | @ 1 min | Wed Dec 31 17:33:01 1997 | Wed Dec 31 17:31:01 1997 + Wed Dec 31 17:32:01 1997 | @ 5 hours | Wed Dec 31 22:32:01 1997 | Wed Dec 31 12:32:01 1997 + Fri Dec 31 17:32:01 1999 | @ 1 min | Fri Dec 31 17:33:01 1999 | Fri Dec 31 17:31:01 1999 + Fri Dec 31 17:32:01 1999 | @ 5 hours | Fri Dec 31 22:32:01 1999 | Fri Dec 31 12:32:01 1999 + Sat Jan 01 17:32:01 2000 | @ 1 min | Sat Jan 01 17:33:01 2000 | Sat Jan 01 17:31:01 2000 + Sat Jan 01 17:32:01 2000 | @ 5 hours | Sat Jan 01 22:32:01 2000 | Sat Jan 01 12:32:01 2000 + Wed Mar 15 02:14:05 2000 | @ 1 min | Wed Mar 15 02:15:05 2000 | Wed Mar 15 02:13:05 2000 + Wed Mar 15 02:14:05 2000 | @ 5 hours | Wed Mar 15 07:14:05 2000 | Tue Mar 14 21:14:05 2000 + Wed Mar 15 03:14:04 2000 | @ 1 min | Wed Mar 15 03:15:04 2000 | Wed Mar 15 03:13:04 2000 + Wed Mar 15 03:14:04 2000 | @ 5 hours | Wed Mar 15 08:14:04 2000 | Tue Mar 14 22:14:04 2000 + Wed Mar 15 08:14:01 2000 | @ 1 min | Wed Mar 15 08:15:01 2000 | Wed Mar 15 08:13:01 2000 + Wed Mar 15 08:14:01 2000 | @ 5 hours | Wed Mar 15 13:14:01 2000 | Wed Mar 15 03:14:01 2000 + Wed Mar 15 12:14:03 2000 | @ 1 min | Wed Mar 15 12:15:03 2000 | Wed Mar 15 12:13:03 2000 + Wed Mar 15 12:14:03 2000 | @ 5 hours | Wed Mar 15 17:14:03 2000 | Wed Mar 15 07:14:03 2000 + Wed Mar 15 13:14:02 2000 | @ 1 min | Wed Mar 15 13:15:02 2000 | Wed Mar 15 13:13:02 2000 + Wed Mar 15 13:14:02 2000 | @ 5 hours | Wed Mar 15 18:14:02 2000 | Wed Mar 15 08:14:02 2000 + Sun Dec 31 17:32:01 2000 | @ 1 min | Sun Dec 31 17:33:01 2000 | Sun Dec 31 17:31:01 2000 + Sun Dec 31 17:32:01 2000 | @ 5 hours | Sun Dec 31 22:32:01 2000 | Sun Dec 31 12:32:01 2000 +(104 rows) + +SELECT t.f1 AS t, i.f1 AS i, t.f1 + i.f1 AS "add", t.f1 - i.f1 AS "subtract" + FROM TIME_TBL t, 
INTERVAL_TBL i + ORDER BY 1,2; + t | i | add | subtract +-------------+-------------------------------+-------------+------------- + 00:00:00 | @ 14 secs ago | 23:59:46 | 00:00:14 + 00:00:00 | @ 1 min | 00:01:00 | 23:59:00 + 00:00:00 | @ 5 hours | 05:00:00 | 19:00:00 + 00:00:00 | @ 1 day 2 hours 3 mins 4 secs | 02:03:04 | 21:56:56 + 00:00:00 | @ 10 days | 00:00:00 | 00:00:00 + 00:00:00 | @ 3 mons | 00:00:00 | 00:00:00 + 00:00:00 | @ 5 mons | 00:00:00 | 00:00:00 + 00:00:00 | @ 5 mons 12 hours | 12:00:00 | 12:00:00 + 00:00:00 | @ 6 years | 00:00:00 | 00:00:00 + 00:00:00 | @ 34 years | 00:00:00 | 00:00:00 + 01:00:00 | @ 14 secs ago | 00:59:46 | 01:00:14 + 01:00:00 | @ 1 min | 01:01:00 | 00:59:00 + 01:00:00 | @ 5 hours | 06:00:00 | 20:00:00 + 01:00:00 | @ 1 day 2 hours 3 mins 4 secs | 03:03:04 | 22:56:56 + 01:00:00 | @ 10 days | 01:00:00 | 01:00:00 + 01:00:00 | @ 3 mons | 01:00:00 | 01:00:00 + 01:00:00 | @ 5 mons | 01:00:00 | 01:00:00 + 01:00:00 | @ 5 mons 12 hours | 13:00:00 | 13:00:00 + 01:00:00 | @ 6 years | 01:00:00 | 01:00:00 + 01:00:00 | @ 34 years | 01:00:00 | 01:00:00 + 02:03:00 | @ 14 secs ago | 02:02:46 | 02:03:14 + 02:03:00 | @ 1 min | 02:04:00 | 02:02:00 + 02:03:00 | @ 5 hours | 07:03:00 | 21:03:00 + 02:03:00 | @ 1 day 2 hours 3 mins 4 secs | 04:06:04 | 23:59:56 + 02:03:00 | @ 10 days | 02:03:00 | 02:03:00 + 02:03:00 | @ 3 mons | 02:03:00 | 02:03:00 + 02:03:00 | @ 5 mons | 02:03:00 | 02:03:00 + 02:03:00 | @ 5 mons 12 hours | 14:03:00 | 14:03:00 + 02:03:00 | @ 6 years | 02:03:00 | 02:03:00 + 02:03:00 | @ 34 years | 02:03:00 | 02:03:00 + 11:59:00 | @ 14 secs ago | 11:58:46 | 11:59:14 + 11:59:00 | @ 1 min | 12:00:00 | 11:58:00 + 11:59:00 | @ 5 hours | 16:59:00 | 06:59:00 + 11:59:00 | @ 1 day 2 hours 3 mins 4 secs | 14:02:04 | 09:55:56 + 11:59:00 | @ 10 days | 11:59:00 | 11:59:00 + 11:59:00 | @ 3 mons | 11:59:00 | 11:59:00 + 11:59:00 | @ 5 mons | 11:59:00 | 11:59:00 + 11:59:00 | @ 5 mons 12 hours | 23:59:00 | 23:59:00 + 11:59:00 | @ 6 years | 11:59:00 | 11:59:00 + 11:59:00 | @ 34 years | 11:59:00 | 11:59:00 + 12:00:00 | @ 14 secs ago | 11:59:46 | 12:00:14 + 12:00:00 | @ 1 min | 12:01:00 | 11:59:00 + 12:00:00 | @ 5 hours | 17:00:00 | 07:00:00 + 12:00:00 | @ 1 day 2 hours 3 mins 4 secs | 14:03:04 | 09:56:56 + 12:00:00 | @ 10 days | 12:00:00 | 12:00:00 + 12:00:00 | @ 3 mons | 12:00:00 | 12:00:00 + 12:00:00 | @ 5 mons | 12:00:00 | 12:00:00 + 12:00:00 | @ 5 mons 12 hours | 00:00:00 | 00:00:00 + 12:00:00 | @ 6 years | 12:00:00 | 12:00:00 + 12:00:00 | @ 34 years | 12:00:00 | 12:00:00 + 12:01:00 | @ 14 secs ago | 12:00:46 | 12:01:14 + 12:01:00 | @ 1 min | 12:02:00 | 12:00:00 + 12:01:00 | @ 5 hours | 17:01:00 | 07:01:00 + 12:01:00 | @ 1 day 2 hours 3 mins 4 secs | 14:04:04 | 09:57:56 + 12:01:00 | @ 10 days | 12:01:00 | 12:01:00 + 12:01:00 | @ 3 mons | 12:01:00 | 12:01:00 + 12:01:00 | @ 5 mons | 12:01:00 | 12:01:00 + 12:01:00 | @ 5 mons 12 hours | 00:01:00 | 00:01:00 + 12:01:00 | @ 6 years | 12:01:00 | 12:01:00 + 12:01:00 | @ 34 years | 12:01:00 | 12:01:00 + 15:36:39 | @ 14 secs ago | 15:36:25 | 15:36:53 + 15:36:39 | @ 14 secs ago | 15:36:25 | 15:36:53 + 15:36:39 | @ 1 min | 15:37:39 | 15:35:39 + 15:36:39 | @ 1 min | 15:37:39 | 15:35:39 + 15:36:39 | @ 5 hours | 20:36:39 | 10:36:39 + 15:36:39 | @ 5 hours | 20:36:39 | 10:36:39 + 15:36:39 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43 | 13:33:35 + 15:36:39 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43 | 13:33:35 + 15:36:39 | @ 10 days | 15:36:39 | 15:36:39 + 15:36:39 | @ 10 days | 15:36:39 | 15:36:39 + 15:36:39 | @ 3 mons | 15:36:39 | 15:36:39 + 15:36:39 
| @ 3 mons | 15:36:39 | 15:36:39 + 15:36:39 | @ 5 mons | 15:36:39 | 15:36:39 + 15:36:39 | @ 5 mons | 15:36:39 | 15:36:39 + 15:36:39 | @ 5 mons 12 hours | 03:36:39 | 03:36:39 + 15:36:39 | @ 5 mons 12 hours | 03:36:39 | 03:36:39 + 15:36:39 | @ 6 years | 15:36:39 | 15:36:39 + 15:36:39 | @ 6 years | 15:36:39 | 15:36:39 + 15:36:39 | @ 34 years | 15:36:39 | 15:36:39 + 15:36:39 | @ 34 years | 15:36:39 | 15:36:39 + 23:59:00 | @ 14 secs ago | 23:58:46 | 23:59:14 + 23:59:00 | @ 1 min | 00:00:00 | 23:58:00 + 23:59:00 | @ 5 hours | 04:59:00 | 18:59:00 + 23:59:00 | @ 1 day 2 hours 3 mins 4 secs | 02:02:04 | 21:55:56 + 23:59:00 | @ 10 days | 23:59:00 | 23:59:00 + 23:59:00 | @ 3 mons | 23:59:00 | 23:59:00 + 23:59:00 | @ 5 mons | 23:59:00 | 23:59:00 + 23:59:00 | @ 5 mons 12 hours | 11:59:00 | 11:59:00 + 23:59:00 | @ 6 years | 23:59:00 | 23:59:00 + 23:59:00 | @ 34 years | 23:59:00 | 23:59:00 + 23:59:59.99 | @ 14 secs ago | 23:59:45.99 | 00:00:13.99 + 23:59:59.99 | @ 1 min | 00:00:59.99 | 23:58:59.99 + 23:59:59.99 | @ 5 hours | 04:59:59.99 | 18:59:59.99 + 23:59:59.99 | @ 1 day 2 hours 3 mins 4 secs | 02:03:03.99 | 21:56:55.99 + 23:59:59.99 | @ 10 days | 23:59:59.99 | 23:59:59.99 + 23:59:59.99 | @ 3 mons | 23:59:59.99 | 23:59:59.99 + 23:59:59.99 | @ 5 mons | 23:59:59.99 | 23:59:59.99 + 23:59:59.99 | @ 5 mons 12 hours | 11:59:59.99 | 11:59:59.99 + 23:59:59.99 | @ 6 years | 23:59:59.99 | 23:59:59.99 + 23:59:59.99 | @ 34 years | 23:59:59.99 | 23:59:59.99 +(100 rows) + +SELECT t.f1 AS t, i.f1 AS i, t.f1 + i.f1 AS "add", t.f1 - i.f1 AS "subtract" + FROM TIMETZ_TBL t, INTERVAL_TBL i + ORDER BY 1,2; + t | i | add | subtract +----------------+-------------------------------+----------------+---------------- + 00:01:00-07 | @ 14 secs ago | 00:00:46-07 | 00:01:14-07 + 00:01:00-07 | @ 1 min | 00:02:00-07 | 00:00:00-07 + 00:01:00-07 | @ 5 hours | 05:01:00-07 | 19:01:00-07 + 00:01:00-07 | @ 1 day 2 hours 3 mins 4 secs | 02:04:04-07 | 21:57:56-07 + 00:01:00-07 | @ 10 days | 00:01:00-07 | 00:01:00-07 + 00:01:00-07 | @ 3 mons | 00:01:00-07 | 00:01:00-07 + 00:01:00-07 | @ 5 mons | 00:01:00-07 | 00:01:00-07 + 00:01:00-07 | @ 5 mons 12 hours | 12:01:00-07 | 12:01:00-07 + 00:01:00-07 | @ 6 years | 00:01:00-07 | 00:01:00-07 + 00:01:00-07 | @ 34 years | 00:01:00-07 | 00:01:00-07 + 01:00:00-07 | @ 14 secs ago | 00:59:46-07 | 01:00:14-07 + 01:00:00-07 | @ 1 min | 01:01:00-07 | 00:59:00-07 + 01:00:00-07 | @ 5 hours | 06:00:00-07 | 20:00:00-07 + 01:00:00-07 | @ 1 day 2 hours 3 mins 4 secs | 03:03:04-07 | 22:56:56-07 + 01:00:00-07 | @ 10 days | 01:00:00-07 | 01:00:00-07 + 01:00:00-07 | @ 3 mons | 01:00:00-07 | 01:00:00-07 + 01:00:00-07 | @ 5 mons | 01:00:00-07 | 01:00:00-07 + 01:00:00-07 | @ 5 mons 12 hours | 13:00:00-07 | 13:00:00-07 + 01:00:00-07 | @ 6 years | 01:00:00-07 | 01:00:00-07 + 01:00:00-07 | @ 34 years | 01:00:00-07 | 01:00:00-07 + 02:03:00-07 | @ 14 secs ago | 02:02:46-07 | 02:03:14-07 + 02:03:00-07 | @ 1 min | 02:04:00-07 | 02:02:00-07 + 02:03:00-07 | @ 5 hours | 07:03:00-07 | 21:03:00-07 + 02:03:00-07 | @ 1 day 2 hours 3 mins 4 secs | 04:06:04-07 | 23:59:56-07 + 02:03:00-07 | @ 10 days | 02:03:00-07 | 02:03:00-07 + 02:03:00-07 | @ 3 mons | 02:03:00-07 | 02:03:00-07 + 02:03:00-07 | @ 5 mons | 02:03:00-07 | 02:03:00-07 + 02:03:00-07 | @ 5 mons 12 hours | 14:03:00-07 | 14:03:00-07 + 02:03:00-07 | @ 6 years | 02:03:00-07 | 02:03:00-07 + 02:03:00-07 | @ 34 years | 02:03:00-07 | 02:03:00-07 + 08:08:00-04 | @ 14 secs ago | 08:07:46-04 | 08:08:14-04 + 08:08:00-04 | @ 1 min | 08:09:00-04 | 08:07:00-04 + 08:08:00-04 | @ 5 hours | 
13:08:00-04 | 03:08:00-04 + 08:08:00-04 | @ 1 day 2 hours 3 mins 4 secs | 10:11:04-04 | 06:04:56-04 + 08:08:00-04 | @ 10 days | 08:08:00-04 | 08:08:00-04 + 08:08:00-04 | @ 3 mons | 08:08:00-04 | 08:08:00-04 + 08:08:00-04 | @ 5 mons | 08:08:00-04 | 08:08:00-04 + 08:08:00-04 | @ 5 mons 12 hours | 20:08:00-04 | 20:08:00-04 + 08:08:00-04 | @ 6 years | 08:08:00-04 | 08:08:00-04 + 08:08:00-04 | @ 34 years | 08:08:00-04 | 08:08:00-04 + 07:07:00-08 | @ 14 secs ago | 07:06:46-08 | 07:07:14-08 + 07:07:00-08 | @ 1 min | 07:08:00-08 | 07:06:00-08 + 07:07:00-08 | @ 5 hours | 12:07:00-08 | 02:07:00-08 + 07:07:00-08 | @ 1 day 2 hours 3 mins 4 secs | 09:10:04-08 | 05:03:56-08 + 07:07:00-08 | @ 10 days | 07:07:00-08 | 07:07:00-08 + 07:07:00-08 | @ 3 mons | 07:07:00-08 | 07:07:00-08 + 07:07:00-08 | @ 5 mons | 07:07:00-08 | 07:07:00-08 + 07:07:00-08 | @ 5 mons 12 hours | 19:07:00-08 | 19:07:00-08 + 07:07:00-08 | @ 6 years | 07:07:00-08 | 07:07:00-08 + 07:07:00-08 | @ 34 years | 07:07:00-08 | 07:07:00-08 + 11:59:00-07 | @ 14 secs ago | 11:58:46-07 | 11:59:14-07 + 11:59:00-07 | @ 1 min | 12:00:00-07 | 11:58:00-07 + 11:59:00-07 | @ 5 hours | 16:59:00-07 | 06:59:00-07 + 11:59:00-07 | @ 1 day 2 hours 3 mins 4 secs | 14:02:04-07 | 09:55:56-07 + 11:59:00-07 | @ 10 days | 11:59:00-07 | 11:59:00-07 + 11:59:00-07 | @ 3 mons | 11:59:00-07 | 11:59:00-07 + 11:59:00-07 | @ 5 mons | 11:59:00-07 | 11:59:00-07 + 11:59:00-07 | @ 5 mons 12 hours | 23:59:00-07 | 23:59:00-07 + 11:59:00-07 | @ 6 years | 11:59:00-07 | 11:59:00-07 + 11:59:00-07 | @ 34 years | 11:59:00-07 | 11:59:00-07 + 12:00:00-07 | @ 14 secs ago | 11:59:46-07 | 12:00:14-07 + 12:00:00-07 | @ 1 min | 12:01:00-07 | 11:59:00-07 + 12:00:00-07 | @ 5 hours | 17:00:00-07 | 07:00:00-07 + 12:00:00-07 | @ 1 day 2 hours 3 mins 4 secs | 14:03:04-07 | 09:56:56-07 + 12:00:00-07 | @ 10 days | 12:00:00-07 | 12:00:00-07 + 12:00:00-07 | @ 3 mons | 12:00:00-07 | 12:00:00-07 + 12:00:00-07 | @ 5 mons | 12:00:00-07 | 12:00:00-07 + 12:00:00-07 | @ 5 mons 12 hours | 00:00:00-07 | 00:00:00-07 + 12:00:00-07 | @ 6 years | 12:00:00-07 | 12:00:00-07 + 12:00:00-07 | @ 34 years | 12:00:00-07 | 12:00:00-07 + 12:01:00-07 | @ 14 secs ago | 12:00:46-07 | 12:01:14-07 + 12:01:00-07 | @ 1 min | 12:02:00-07 | 12:00:00-07 + 12:01:00-07 | @ 5 hours | 17:01:00-07 | 07:01:00-07 + 12:01:00-07 | @ 1 day 2 hours 3 mins 4 secs | 14:04:04-07 | 09:57:56-07 + 12:01:00-07 | @ 10 days | 12:01:00-07 | 12:01:00-07 + 12:01:00-07 | @ 3 mons | 12:01:00-07 | 12:01:00-07 + 12:01:00-07 | @ 5 mons | 12:01:00-07 | 12:01:00-07 + 12:01:00-07 | @ 5 mons 12 hours | 00:01:00-07 | 00:01:00-07 + 12:01:00-07 | @ 6 years | 12:01:00-07 | 12:01:00-07 + 12:01:00-07 | @ 34 years | 12:01:00-07 | 12:01:00-07 + 15:36:39-04 | @ 14 secs ago | 15:36:25-04 | 15:36:53-04 + 15:36:39-04 | @ 1 min | 15:37:39-04 | 15:35:39-04 + 15:36:39-04 | @ 5 hours | 20:36:39-04 | 10:36:39-04 + 15:36:39-04 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43-04 | 13:33:35-04 + 15:36:39-04 | @ 10 days | 15:36:39-04 | 15:36:39-04 + 15:36:39-04 | @ 3 mons | 15:36:39-04 | 15:36:39-04 + 15:36:39-04 | @ 5 mons | 15:36:39-04 | 15:36:39-04 + 15:36:39-04 | @ 5 mons 12 hours | 03:36:39-04 | 03:36:39-04 + 15:36:39-04 | @ 6 years | 15:36:39-04 | 15:36:39-04 + 15:36:39-04 | @ 34 years | 15:36:39-04 | 15:36:39-04 + 15:36:39-05 | @ 14 secs ago | 15:36:25-05 | 15:36:53-05 + 15:36:39-05 | @ 1 min | 15:37:39-05 | 15:35:39-05 + 15:36:39-05 | @ 5 hours | 20:36:39-05 | 10:36:39-05 + 15:36:39-05 | @ 1 day 2 hours 3 mins 4 secs | 17:39:43-05 | 13:33:35-05 + 15:36:39-05 | @ 10 days | 15:36:39-05 
| 15:36:39-05 + 15:36:39-05 | @ 3 mons | 15:36:39-05 | 15:36:39-05 + 15:36:39-05 | @ 5 mons | 15:36:39-05 | 15:36:39-05 + 15:36:39-05 | @ 5 mons 12 hours | 03:36:39-05 | 03:36:39-05 + 15:36:39-05 | @ 6 years | 15:36:39-05 | 15:36:39-05 + 15:36:39-05 | @ 34 years | 15:36:39-05 | 15:36:39-05 + 23:59:00-07 | @ 14 secs ago | 23:58:46-07 | 23:59:14-07 + 23:59:00-07 | @ 1 min | 00:00:00-07 | 23:58:00-07 + 23:59:00-07 | @ 5 hours | 04:59:00-07 | 18:59:00-07 + 23:59:00-07 | @ 1 day 2 hours 3 mins 4 secs | 02:02:04-07 | 21:55:56-07 + 23:59:00-07 | @ 10 days | 23:59:00-07 | 23:59:00-07 + 23:59:00-07 | @ 3 mons | 23:59:00-07 | 23:59:00-07 + 23:59:00-07 | @ 5 mons | 23:59:00-07 | 23:59:00-07 + 23:59:00-07 | @ 5 mons 12 hours | 11:59:00-07 | 11:59:00-07 + 23:59:00-07 | @ 6 years | 23:59:00-07 | 23:59:00-07 + 23:59:00-07 | @ 34 years | 23:59:00-07 | 23:59:00-07 + 23:59:59.99-07 | @ 14 secs ago | 23:59:45.99-07 | 00:00:13.99-07 + 23:59:59.99-07 | @ 1 min | 00:00:59.99-07 | 23:58:59.99-07 + 23:59:59.99-07 | @ 5 hours | 04:59:59.99-07 | 18:59:59.99-07 + 23:59:59.99-07 | @ 1 day 2 hours 3 mins 4 secs | 02:03:03.99-07 | 21:56:55.99-07 + 23:59:59.99-07 | @ 10 days | 23:59:59.99-07 | 23:59:59.99-07 + 23:59:59.99-07 | @ 3 mons | 23:59:59.99-07 | 23:59:59.99-07 + 23:59:59.99-07 | @ 5 mons | 23:59:59.99-07 | 23:59:59.99-07 + 23:59:59.99-07 | @ 5 mons 12 hours | 11:59:59.99-07 | 11:59:59.99-07 + 23:59:59.99-07 | @ 6 years | 23:59:59.99-07 | 23:59:59.99-07 + 23:59:59.99-07 | @ 34 years | 23:59:59.99-07 | 23:59:59.99-07 +(120 rows) + +-- SQL9x OVERLAPS operator +-- test with time zone +SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "True"; + True +------ + t +(1 row) + +SELECT (timestamp with time zone '2000-11-26', timestamp with time zone '2000-11-27') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False"; + False +------- + f +(1 row) + +SELECT (timestamp with time zone '2000-11-27', timestamp with time zone '2000-11-28') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '1 day') AS "True"; + True +------ + t +(1 row) + +SELECT (timestamp with time zone '2000-11-27', interval '12 hours') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', timestamp with time zone '2000-11-30') AS "False"; + False +------- + f +(1 row) + +SELECT (timestamp with time zone '2000-11-27', interval '12 hours') + OVERLAPS (timestamp with time zone '2000-11-27', interval '12 hours') AS "True"; + True +------ + t +(1 row) + +SELECT (timestamp with time zone '2000-11-27', interval '12 hours') + OVERLAPS (timestamp with time zone '2000-11-27 12:00', interval '12 hours') AS "False"; + False +------- + f +(1 row) + +-- test without time zone +SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28') + OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone '2000-11-26', timestamp without time zone '2000-11-27') + OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False"; + False +------- + f +(1 row) + +SELECT (timestamp without time zone '2000-11-27', timestamp without time zone '2000-11-28') + OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '1 day') AS "True"; + True +------ + t +(1 row) + +SELECT (timestamp 
without time zone '2000-11-27', interval '12 hours') + OVERLAPS (timestamp without time zone '2000-11-27 12:00', timestamp without time zone '2000-11-30') AS "False"; + False +------- + f +(1 row) + +SELECT (timestamp without time zone '2000-11-27', interval '12 hours') + OVERLAPS (timestamp without time zone '2000-11-27', interval '12 hours') AS "True"; + True +------ + t +(1 row) + +SELECT (timestamp without time zone '2000-11-27', interval '12 hours') + OVERLAPS (timestamp without time zone '2000-11-27 12:00', interval '12 hours') AS "False"; + False +------- + f +(1 row) + +-- test time and interval +SELECT (time '00:00', time '01:00') + OVERLAPS (time '00:30', time '01:30') AS "True"; + True +------ + t +(1 row) + +SELECT (time '00:00', interval '1 hour') + OVERLAPS (time '00:30', interval '1 hour') AS "True"; + True +------ + t +(1 row) + +SELECT (time '00:00', interval '1 hour') + OVERLAPS (time '01:30', interval '1 hour') AS "False"; + False +------- + f +(1 row) + +-- SQL99 seems to want this to be false (and we conform to the spec). +-- istm that this *should* return true, on the theory that time +-- intervals can wrap around the day boundary - thomas 2001-09-25 +SELECT (time '00:00', interval '1 hour') + OVERLAPS (time '01:30', interval '1 day') AS "False"; + False +------- + f +(1 row) + +CREATE TABLE TEMP_TIMESTAMP (f1 timestamp with time zone); +-- get some candidate input values +INSERT INTO TEMP_TIMESTAMP (f1) + SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 BETWEEN '13-jun-1957' AND '1-jan-1997' + OR d1 BETWEEN '1-jan-1999' AND '1-jan-2010'; +SELECT f1 AS "timestamp" + FROM TEMP_TIMESTAMP + ORDER BY "timestamp"; + timestamp +------------------------------ + Thu Jan 01 00:00:00 1970 PST + Wed Feb 28 17:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST + Fri Dec 31 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST + Wed Mar 15 02:14:05 2000 PST + Wed Mar 15 03:14:04 2000 PST + Wed Mar 15 08:14:01 2000 PST + Wed Mar 15 12:14:03 2000 PST + Wed Mar 15 13:14:02 2000 PST + Sun Dec 31 17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST + Sat Sep 22 18:19:20 2001 PDT +(16 rows) + +SELECT d.f1 AS "timestamp", t.f1 AS "interval", d.f1 + t.f1 AS plus + FROM TEMP_TIMESTAMP d, INTERVAL_TBL t + ORDER BY plus, "timestamp", "interval"; + timestamp | interval | plus +------------------------------+-------------------------------+------------------------------ + Thu Jan 01 00:00:00 1970 PST | @ 14 secs ago | Wed Dec 31 23:59:46 1969 PST + Thu Jan 01 00:00:00 1970 PST | @ 1 min | Thu Jan 01 00:01:00 1970 PST + Thu Jan 01 00:00:00 1970 PST | @ 5 hours | Thu Jan 01 05:00:00 1970 PST + Thu Jan 01 00:00:00 1970 PST | @ 1 day 2 hours 3 mins 4 secs | Fri Jan 02 02:03:04 1970 PST + Thu Jan 01 00:00:00 1970 PST | @ 10 days | Sun Jan 11 00:00:00 1970 PST + Thu Jan 01 00:00:00 1970 PST | @ 3 mons | Wed Apr 01 00:00:00 1970 PST + Thu Jan 01 00:00:00 1970 PST | @ 5 mons | Mon Jun 01 00:00:00 1970 PDT + Thu Jan 01 00:00:00 1970 PST | @ 5 mons 12 hours | Mon Jun 01 12:00:00 1970 PDT + Thu Jan 01 00:00:00 1970 PST | @ 6 years | Thu Jan 01 00:00:00 1976 PST + Wed Feb 28 17:32:01 1996 PST | @ 14 secs ago | Wed Feb 28 17:31:47 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 1 min | Wed Feb 28 17:33:01 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 5 hours | Wed Feb 28 22:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 14 secs ago | Thu Feb 29 17:31:47 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 1 min | Thu Feb 29 17:33:01 1996 PST + Wed 
Feb 28 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Feb 29 19:35:05 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 5 hours | Thu Feb 29 22:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 14 secs ago | Fri Mar 01 17:31:47 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 1 min | Fri Mar 01 17:33:01 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Fri Mar 01 19:35:05 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 5 hours | Fri Mar 01 22:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Sat Mar 02 19:35:05 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 10 days | Sat Mar 09 17:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 10 days | Sun Mar 10 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 10 days | Mon Mar 11 17:32:01 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 3 mons | Tue May 28 17:32:01 1996 PDT + Thu Feb 29 17:32:01 1996 PST | @ 3 mons | Wed May 29 17:32:01 1996 PDT + Fri Mar 01 17:32:01 1996 PST | @ 3 mons | Sat Jun 01 17:32:01 1996 PDT + Wed Feb 28 17:32:01 1996 PST | @ 5 mons | Sun Jul 28 17:32:01 1996 PDT + Wed Feb 28 17:32:01 1996 PST | @ 5 mons 12 hours | Mon Jul 29 05:32:01 1996 PDT + Thu Feb 29 17:32:01 1996 PST | @ 5 mons | Mon Jul 29 17:32:01 1996 PDT + Thu Feb 29 17:32:01 1996 PST | @ 5 mons 12 hours | Tue Jul 30 05:32:01 1996 PDT + Fri Mar 01 17:32:01 1996 PST | @ 5 mons | Thu Aug 01 17:32:01 1996 PDT + Fri Mar 01 17:32:01 1996 PST | @ 5 mons 12 hours | Fri Aug 02 05:32:01 1996 PDT + Mon Dec 30 17:32:01 1996 PST | @ 14 secs ago | Mon Dec 30 17:31:47 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 1 min | Mon Dec 30 17:33:01 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 5 hours | Mon Dec 30 22:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 14 secs ago | Tue Dec 31 17:31:47 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 1 min | Tue Dec 31 17:33:01 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Dec 31 19:35:05 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 5 hours | Tue Dec 31 22:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Wed Jan 01 19:35:05 1997 PST + Mon Dec 30 17:32:01 1996 PST | @ 10 days | Thu Jan 09 17:32:01 1997 PST + Tue Dec 31 17:32:01 1996 PST | @ 10 days | Fri Jan 10 17:32:01 1997 PST + Mon Dec 30 17:32:01 1996 PST | @ 3 mons | Sun Mar 30 17:32:01 1997 PST + Tue Dec 31 17:32:01 1996 PST | @ 3 mons | Mon Mar 31 17:32:01 1997 PST + Mon Dec 30 17:32:01 1996 PST | @ 5 mons | Fri May 30 17:32:01 1997 PDT + Mon Dec 30 17:32:01 1996 PST | @ 5 mons 12 hours | Sat May 31 05:32:01 1997 PDT + Tue Dec 31 17:32:01 1996 PST | @ 5 mons | Sat May 31 17:32:01 1997 PDT + Tue Dec 31 17:32:01 1996 PST | @ 5 mons 12 hours | Sun Jun 01 05:32:01 1997 PDT + Fri Dec 31 17:32:01 1999 PST | @ 14 secs ago | Fri Dec 31 17:31:47 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 1 min | Fri Dec 31 17:33:01 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 5 hours | Fri Dec 31 22:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST | @ 14 secs ago | Sat Jan 01 17:31:47 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 1 min | Sat Jan 01 17:33:01 2000 PST + Fri Dec 31 17:32:01 1999 PST | @ 1 day 2 hours 3 mins 4 secs | Sat Jan 01 19:35:05 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 5 hours | Sat Jan 01 22:32:01 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Sun Jan 02 19:35:05 2000 PST + Fri Dec 31 17:32:01 1999 PST | @ 10 days | Mon Jan 10 17:32:01 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 10 days | Tue Jan 11 17:32:01 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 14 secs ago | 
Wed Mar 15 02:13:51 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 1 min | Wed Mar 15 02:15:05 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 14 secs ago | Wed Mar 15 03:13:50 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 1 min | Wed Mar 15 03:15:04 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 5 hours | Wed Mar 15 07:14:05 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 14 secs ago | Wed Mar 15 08:13:47 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 5 hours | Wed Mar 15 08:14:04 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 1 min | Wed Mar 15 08:15:01 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 14 secs ago | Wed Mar 15 12:13:49 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 1 min | Wed Mar 15 12:15:03 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 14 secs ago | Wed Mar 15 13:13:48 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 5 hours | Wed Mar 15 13:14:01 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 1 min | Wed Mar 15 13:15:02 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 5 hours | Wed Mar 15 17:14:03 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 5 hours | Wed Mar 15 18:14:02 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 04:17:09 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 05:17:08 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 10:17:05 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 14:17:07 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Mar 16 15:17:06 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 10 days | Sat Mar 25 02:14:05 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 10 days | Sat Mar 25 03:14:04 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 10 days | Sat Mar 25 08:14:01 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 10 days | Sat Mar 25 12:14:03 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 10 days | Sat Mar 25 13:14:02 2000 PST + Fri Dec 31 17:32:01 1999 PST | @ 3 mons | Fri Mar 31 17:32:01 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 3 mons | Sat Apr 01 17:32:01 2000 PST + Fri Dec 31 17:32:01 1999 PST | @ 5 mons | Wed May 31 17:32:01 2000 PDT + Fri Dec 31 17:32:01 1999 PST | @ 5 mons 12 hours | Thu Jun 01 05:32:01 2000 PDT + Sat Jan 01 17:32:01 2000 PST | @ 5 mons | Thu Jun 01 17:32:01 2000 PDT + Sat Jan 01 17:32:01 2000 PST | @ 5 mons 12 hours | Fri Jun 02 05:32:01 2000 PDT + Wed Mar 15 02:14:05 2000 PST | @ 3 mons | Thu Jun 15 02:14:05 2000 PDT + Wed Mar 15 03:14:04 2000 PST | @ 3 mons | Thu Jun 15 03:14:04 2000 PDT + Wed Mar 15 08:14:01 2000 PST | @ 3 mons | Thu Jun 15 08:14:01 2000 PDT + Wed Mar 15 12:14:03 2000 PST | @ 3 mons | Thu Jun 15 12:14:03 2000 PDT + Wed Mar 15 13:14:02 2000 PST | @ 3 mons | Thu Jun 15 13:14:02 2000 PDT + Wed Mar 15 02:14:05 2000 PST | @ 5 mons | Tue Aug 15 02:14:05 2000 PDT + Wed Mar 15 03:14:04 2000 PST | @ 5 mons | Tue Aug 15 03:14:04 2000 PDT + Wed Mar 15 08:14:01 2000 PST | @ 5 mons | Tue Aug 15 08:14:01 2000 PDT + Wed Mar 15 12:14:03 2000 PST | @ 5 mons | Tue Aug 15 12:14:03 2000 PDT + Wed Mar 15 13:14:02 2000 PST | @ 5 mons | Tue Aug 15 13:14:02 2000 PDT + Wed Mar 15 02:14:05 2000 PST | @ 5 mons 12 hours | Tue Aug 15 14:14:05 2000 PDT + Wed Mar 15 03:14:04 2000 PST | @ 5 mons 12 hours | Tue Aug 15 15:14:04 2000 PDT + Wed Mar 15 08:14:01 2000 PST | @ 5 mons 12 hours | Tue Aug 15 20:14:01 2000 PDT + Wed Mar 15 12:14:03 2000 PST | @ 5 mons 12 hours | Wed Aug 16 00:14:03 2000 PDT + Wed Mar 15 13:14:02 2000 PST | @ 5 mons 12 hours | Wed Aug 16 01:14:02 2000 PDT + Sun Dec 31 17:32:01 2000 PST | @ 14 secs ago 
| Sun Dec 31 17:31:47 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 1 min | Sun Dec 31 17:33:01 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 5 hours | Sun Dec 31 22:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST | @ 14 secs ago | Mon Jan 01 17:31:47 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 1 min | Mon Jan 01 17:33:01 2001 PST + Sun Dec 31 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Mon Jan 01 19:35:05 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 5 hours | Mon Jan 01 22:32:01 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Jan 02 19:35:05 2001 PST + Sun Dec 31 17:32:01 2000 PST | @ 10 days | Wed Jan 10 17:32:01 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 10 days | Thu Jan 11 17:32:01 2001 PST + Sun Dec 31 17:32:01 2000 PST | @ 3 mons | Sat Mar 31 17:32:01 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 3 mons | Sun Apr 01 17:32:01 2001 PDT + Sun Dec 31 17:32:01 2000 PST | @ 5 mons | Thu May 31 17:32:01 2001 PDT + Sun Dec 31 17:32:01 2000 PST | @ 5 mons 12 hours | Fri Jun 01 05:32:01 2001 PDT + Mon Jan 01 17:32:01 2001 PST | @ 5 mons | Fri Jun 01 17:32:01 2001 PDT + Mon Jan 01 17:32:01 2001 PST | @ 5 mons 12 hours | Sat Jun 02 05:32:01 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 14 secs ago | Sat Sep 22 18:19:06 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 1 min | Sat Sep 22 18:20:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 5 hours | Sat Sep 22 23:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 1 day 2 hours 3 mins 4 secs | Sun Sep 23 20:22:24 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 10 days | Tue Oct 02 18:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 3 mons | Sat Dec 22 18:19:20 2001 PST + Sat Sep 22 18:19:20 2001 PDT | @ 5 mons | Fri Feb 22 18:19:20 2002 PST + Sat Sep 22 18:19:20 2001 PDT | @ 5 mons 12 hours | Sat Feb 23 06:19:20 2002 PST + Wed Feb 28 17:32:01 1996 PST | @ 6 years | Thu Feb 28 17:32:01 2002 PST + Thu Feb 29 17:32:01 1996 PST | @ 6 years | Thu Feb 28 17:32:01 2002 PST + Fri Mar 01 17:32:01 1996 PST | @ 6 years | Fri Mar 01 17:32:01 2002 PST + Mon Dec 30 17:32:01 1996 PST | @ 6 years | Mon Dec 30 17:32:01 2002 PST + Tue Dec 31 17:32:01 1996 PST | @ 6 years | Tue Dec 31 17:32:01 2002 PST + Thu Jan 01 00:00:00 1970 PST | @ 34 years | Thu Jan 01 00:00:00 2004 PST + Fri Dec 31 17:32:01 1999 PST | @ 6 years | Sat Dec 31 17:32:01 2005 PST + Sat Jan 01 17:32:01 2000 PST | @ 6 years | Sun Jan 01 17:32:01 2006 PST + Wed Mar 15 02:14:05 2000 PST | @ 6 years | Wed Mar 15 02:14:05 2006 PST + Wed Mar 15 03:14:04 2000 PST | @ 6 years | Wed Mar 15 03:14:04 2006 PST + Wed Mar 15 08:14:01 2000 PST | @ 6 years | Wed Mar 15 08:14:01 2006 PST + Wed Mar 15 12:14:03 2000 PST | @ 6 years | Wed Mar 15 12:14:03 2006 PST + Wed Mar 15 13:14:02 2000 PST | @ 6 years | Wed Mar 15 13:14:02 2006 PST + Sun Dec 31 17:32:01 2000 PST | @ 6 years | Sun Dec 31 17:32:01 2006 PST + Mon Jan 01 17:32:01 2001 PST | @ 6 years | Mon Jan 01 17:32:01 2007 PST + Sat Sep 22 18:19:20 2001 PDT | @ 6 years | Sat Sep 22 18:19:20 2007 PDT + Wed Feb 28 17:32:01 1996 PST | @ 34 years | Thu Feb 28 17:32:01 2030 PST + Thu Feb 29 17:32:01 1996 PST | @ 34 years | Thu Feb 28 17:32:01 2030 PST + Fri Mar 01 17:32:01 1996 PST | @ 34 years | Fri Mar 01 17:32:01 2030 PST + Mon Dec 30 17:32:01 1996 PST | @ 34 years | Mon Dec 30 17:32:01 2030 PST + Tue Dec 31 17:32:01 1996 PST | @ 34 years | Tue Dec 31 17:32:01 2030 PST + Fri Dec 31 17:32:01 1999 PST | @ 34 years | Sat Dec 31 17:32:01 2033 PST + Sat Jan 01 17:32:01 2000 PST | @ 34 years | Sun Jan 01 17:32:01 2034 PST + Wed Mar 15 02:14:05 2000 PST 
| @ 34 years | Wed Mar 15 02:14:05 2034 PDT + Wed Mar 15 03:14:04 2000 PST | @ 34 years | Wed Mar 15 03:14:04 2034 PDT + Wed Mar 15 08:14:01 2000 PST | @ 34 years | Wed Mar 15 08:14:01 2034 PDT + Wed Mar 15 12:14:03 2000 PST | @ 34 years | Wed Mar 15 12:14:03 2034 PDT + Wed Mar 15 13:14:02 2000 PST | @ 34 years | Wed Mar 15 13:14:02 2034 PDT + Sun Dec 31 17:32:01 2000 PST | @ 34 years | Sun Dec 31 17:32:01 2034 PST + Mon Jan 01 17:32:01 2001 PST | @ 34 years | Mon Jan 01 17:32:01 2035 PST + Sat Sep 22 18:19:20 2001 PDT | @ 34 years | Sat Sep 22 18:19:20 2035 PDT +(160 rows) + +SELECT d.f1 AS "timestamp", t.f1 AS "interval", d.f1 - t.f1 AS minus + FROM TEMP_TIMESTAMP d, INTERVAL_TBL t + WHERE isfinite(d.f1) + ORDER BY minus, "timestamp", "interval"; + timestamp | interval | minus +------------------------------+-------------------------------+------------------------------ + Thu Jan 01 00:00:00 1970 PST | @ 34 years | Wed Jan 01 00:00:00 1936 PST + Wed Feb 28 17:32:01 1996 PST | @ 34 years | Wed Feb 28 17:32:01 1962 PST + Thu Feb 29 17:32:01 1996 PST | @ 34 years | Wed Feb 28 17:32:01 1962 PST + Fri Mar 01 17:32:01 1996 PST | @ 34 years | Thu Mar 01 17:32:01 1962 PST + Mon Dec 30 17:32:01 1996 PST | @ 34 years | Sun Dec 30 17:32:01 1962 PST + Tue Dec 31 17:32:01 1996 PST | @ 34 years | Mon Dec 31 17:32:01 1962 PST + Thu Jan 01 00:00:00 1970 PST | @ 6 years | Wed Jan 01 00:00:00 1964 PST + Fri Dec 31 17:32:01 1999 PST | @ 34 years | Fri Dec 31 17:32:01 1965 PST + Sat Jan 01 17:32:01 2000 PST | @ 34 years | Sat Jan 01 17:32:01 1966 PST + Wed Mar 15 02:14:05 2000 PST | @ 34 years | Tue Mar 15 02:14:05 1966 PST + Wed Mar 15 03:14:04 2000 PST | @ 34 years | Tue Mar 15 03:14:04 1966 PST + Wed Mar 15 08:14:01 2000 PST | @ 34 years | Tue Mar 15 08:14:01 1966 PST + Wed Mar 15 12:14:03 2000 PST | @ 34 years | Tue Mar 15 12:14:03 1966 PST + Wed Mar 15 13:14:02 2000 PST | @ 34 years | Tue Mar 15 13:14:02 1966 PST + Sun Dec 31 17:32:01 2000 PST | @ 34 years | Sat Dec 31 17:32:01 1966 PST + Mon Jan 01 17:32:01 2001 PST | @ 34 years | Sun Jan 01 17:32:01 1967 PST + Sat Sep 22 18:19:20 2001 PDT | @ 34 years | Fri Sep 22 18:19:20 1967 PDT + Thu Jan 01 00:00:00 1970 PST | @ 5 mons 12 hours | Thu Jul 31 12:00:00 1969 PDT + Thu Jan 01 00:00:00 1970 PST | @ 5 mons | Fri Aug 01 00:00:00 1969 PDT + Thu Jan 01 00:00:00 1970 PST | @ 3 mons | Wed Oct 01 00:00:00 1969 PDT + Thu Jan 01 00:00:00 1970 PST | @ 10 days | Mon Dec 22 00:00:00 1969 PST + Thu Jan 01 00:00:00 1970 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Dec 30 21:56:56 1969 PST + Thu Jan 01 00:00:00 1970 PST | @ 5 hours | Wed Dec 31 19:00:00 1969 PST + Thu Jan 01 00:00:00 1970 PST | @ 1 min | Wed Dec 31 23:59:00 1969 PST + Thu Jan 01 00:00:00 1970 PST | @ 14 secs ago | Thu Jan 01 00:00:14 1970 PST + Wed Feb 28 17:32:01 1996 PST | @ 6 years | Wed Feb 28 17:32:01 1990 PST + Thu Feb 29 17:32:01 1996 PST | @ 6 years | Wed Feb 28 17:32:01 1990 PST + Fri Mar 01 17:32:01 1996 PST | @ 6 years | Thu Mar 01 17:32:01 1990 PST + Mon Dec 30 17:32:01 1996 PST | @ 6 years | Sun Dec 30 17:32:01 1990 PST + Tue Dec 31 17:32:01 1996 PST | @ 6 years | Mon Dec 31 17:32:01 1990 PST + Fri Dec 31 17:32:01 1999 PST | @ 6 years | Fri Dec 31 17:32:01 1993 PST + Sat Jan 01 17:32:01 2000 PST | @ 6 years | Sat Jan 01 17:32:01 1994 PST + Wed Mar 15 02:14:05 2000 PST | @ 6 years | Tue Mar 15 02:14:05 1994 PST + Wed Mar 15 03:14:04 2000 PST | @ 6 years | Tue Mar 15 03:14:04 1994 PST + Wed Mar 15 08:14:01 2000 PST | @ 6 years | Tue Mar 15 08:14:01 1994 PST + Wed Mar 15 12:14:03 2000 PST | @ 
6 years | Tue Mar 15 12:14:03 1994 PST + Wed Mar 15 13:14:02 2000 PST | @ 6 years | Tue Mar 15 13:14:02 1994 PST + Sun Dec 31 17:32:01 2000 PST | @ 6 years | Sat Dec 31 17:32:01 1994 PST + Mon Jan 01 17:32:01 2001 PST | @ 6 years | Sun Jan 01 17:32:01 1995 PST + Sat Sep 22 18:19:20 2001 PDT | @ 6 years | Fri Sep 22 18:19:20 1995 PDT + Wed Feb 28 17:32:01 1996 PST | @ 5 mons 12 hours | Thu Sep 28 05:32:01 1995 PDT + Wed Feb 28 17:32:01 1996 PST | @ 5 mons | Thu Sep 28 17:32:01 1995 PDT + Thu Feb 29 17:32:01 1996 PST | @ 5 mons 12 hours | Fri Sep 29 05:32:01 1995 PDT + Thu Feb 29 17:32:01 1996 PST | @ 5 mons | Fri Sep 29 17:32:01 1995 PDT + Fri Mar 01 17:32:01 1996 PST | @ 5 mons 12 hours | Sun Oct 01 05:32:01 1995 PDT + Fri Mar 01 17:32:01 1996 PST | @ 5 mons | Sun Oct 01 17:32:01 1995 PDT + Wed Feb 28 17:32:01 1996 PST | @ 3 mons | Tue Nov 28 17:32:01 1995 PST + Thu Feb 29 17:32:01 1996 PST | @ 3 mons | Wed Nov 29 17:32:01 1995 PST + Fri Mar 01 17:32:01 1996 PST | @ 3 mons | Fri Dec 01 17:32:01 1995 PST + Wed Feb 28 17:32:01 1996 PST | @ 10 days | Sun Feb 18 17:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 10 days | Mon Feb 19 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 10 days | Tue Feb 20 17:32:01 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Feb 27 15:28:57 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 5 hours | Wed Feb 28 12:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Wed Feb 28 15:28:57 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 1 min | Wed Feb 28 17:31:01 1996 PST + Wed Feb 28 17:32:01 1996 PST | @ 14 secs ago | Wed Feb 28 17:32:15 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 5 hours | Thu Feb 29 12:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Feb 29 15:28:57 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 1 min | Thu Feb 29 17:31:01 1996 PST + Thu Feb 29 17:32:01 1996 PST | @ 14 secs ago | Thu Feb 29 17:32:15 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 5 hours | Fri Mar 01 12:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 1 min | Fri Mar 01 17:31:01 1996 PST + Fri Mar 01 17:32:01 1996 PST | @ 14 secs ago | Fri Mar 01 17:32:15 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 5 mons 12 hours | Tue Jul 30 05:32:01 1996 PDT + Mon Dec 30 17:32:01 1996 PST | @ 5 mons | Tue Jul 30 17:32:01 1996 PDT + Tue Dec 31 17:32:01 1996 PST | @ 5 mons 12 hours | Wed Jul 31 05:32:01 1996 PDT + Tue Dec 31 17:32:01 1996 PST | @ 5 mons | Wed Jul 31 17:32:01 1996 PDT + Mon Dec 30 17:32:01 1996 PST | @ 3 mons | Mon Sep 30 17:32:01 1996 PDT + Tue Dec 31 17:32:01 1996 PST | @ 3 mons | Mon Sep 30 17:32:01 1996 PDT + Mon Dec 30 17:32:01 1996 PST | @ 10 days | Fri Dec 20 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 10 days | Sat Dec 21 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Sun Dec 29 15:28:57 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 5 hours | Mon Dec 30 12:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 1 day 2 hours 3 mins 4 secs | Mon Dec 30 15:28:57 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 1 min | Mon Dec 30 17:31:01 1996 PST + Mon Dec 30 17:32:01 1996 PST | @ 14 secs ago | Mon Dec 30 17:32:15 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 5 hours | Tue Dec 31 12:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 1 min | Tue Dec 31 17:31:01 1996 PST + Tue Dec 31 17:32:01 1996 PST | @ 14 secs ago | Tue Dec 31 17:32:15 1996 PST + Fri Dec 31 17:32:01 1999 PST | @ 5 mons 12 hours | Sat Jul 31 05:32:01 1999 PDT + Fri Dec 31 17:32:01 1999 PST | @ 5 
mons | Sat Jul 31 17:32:01 1999 PDT + Sat Jan 01 17:32:01 2000 PST | @ 5 mons 12 hours | Sun Aug 01 05:32:01 1999 PDT + Sat Jan 01 17:32:01 2000 PST | @ 5 mons | Sun Aug 01 17:32:01 1999 PDT + Fri Dec 31 17:32:01 1999 PST | @ 3 mons | Thu Sep 30 17:32:01 1999 PDT + Sat Jan 01 17:32:01 2000 PST | @ 3 mons | Fri Oct 01 17:32:01 1999 PDT + Wed Mar 15 02:14:05 2000 PST | @ 5 mons 12 hours | Thu Oct 14 14:14:05 1999 PDT + Wed Mar 15 03:14:04 2000 PST | @ 5 mons 12 hours | Thu Oct 14 15:14:04 1999 PDT + Wed Mar 15 08:14:01 2000 PST | @ 5 mons 12 hours | Thu Oct 14 20:14:01 1999 PDT + Wed Mar 15 12:14:03 2000 PST | @ 5 mons 12 hours | Fri Oct 15 00:14:03 1999 PDT + Wed Mar 15 13:14:02 2000 PST | @ 5 mons 12 hours | Fri Oct 15 01:14:02 1999 PDT + Wed Mar 15 02:14:05 2000 PST | @ 5 mons | Fri Oct 15 02:14:05 1999 PDT + Wed Mar 15 03:14:04 2000 PST | @ 5 mons | Fri Oct 15 03:14:04 1999 PDT + Wed Mar 15 08:14:01 2000 PST | @ 5 mons | Fri Oct 15 08:14:01 1999 PDT + Wed Mar 15 12:14:03 2000 PST | @ 5 mons | Fri Oct 15 12:14:03 1999 PDT + Wed Mar 15 13:14:02 2000 PST | @ 5 mons | Fri Oct 15 13:14:02 1999 PDT + Wed Mar 15 02:14:05 2000 PST | @ 3 mons | Wed Dec 15 02:14:05 1999 PST + Wed Mar 15 03:14:04 2000 PST | @ 3 mons | Wed Dec 15 03:14:04 1999 PST + Wed Mar 15 08:14:01 2000 PST | @ 3 mons | Wed Dec 15 08:14:01 1999 PST + Wed Mar 15 12:14:03 2000 PST | @ 3 mons | Wed Dec 15 12:14:03 1999 PST + Wed Mar 15 13:14:02 2000 PST | @ 3 mons | Wed Dec 15 13:14:02 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 10 days | Tue Dec 21 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST | @ 10 days | Wed Dec 22 17:32:01 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 1 day 2 hours 3 mins 4 secs | Thu Dec 30 15:28:57 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 5 hours | Fri Dec 31 12:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Fri Dec 31 15:28:57 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 1 min | Fri Dec 31 17:31:01 1999 PST + Fri Dec 31 17:32:01 1999 PST | @ 14 secs ago | Fri Dec 31 17:32:15 1999 PST + Sat Jan 01 17:32:01 2000 PST | @ 5 hours | Sat Jan 01 12:32:01 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 1 min | Sat Jan 01 17:31:01 2000 PST + Sat Jan 01 17:32:01 2000 PST | @ 14 secs ago | Sat Jan 01 17:32:15 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 10 days | Sun Mar 05 02:14:05 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 10 days | Sun Mar 05 03:14:04 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 10 days | Sun Mar 05 08:14:01 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 10 days | Sun Mar 05 12:14:03 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 10 days | Sun Mar 05 13:14:02 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 00:11:01 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 01:11:00 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 06:10:57 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 10:10:59 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Tue Mar 14 11:10:58 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 5 hours | Tue Mar 14 21:14:05 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 5 hours | Tue Mar 14 22:14:04 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 1 min | Wed Mar 15 02:13:05 2000 PST + Wed Mar 15 02:14:05 2000 PST | @ 14 secs ago | Wed Mar 15 02:14:19 2000 PST + Wed Mar 15 03:14:04 2000 PST | @ 1 min | Wed Mar 15 03:13:04 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 5 hours | Wed Mar 15 03:14:01 2000 PST + Wed Mar 
15 03:14:04 2000 PST | @ 14 secs ago | Wed Mar 15 03:14:18 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 5 hours | Wed Mar 15 07:14:03 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 1 min | Wed Mar 15 08:13:01 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 5 hours | Wed Mar 15 08:14:02 2000 PST + Wed Mar 15 08:14:01 2000 PST | @ 14 secs ago | Wed Mar 15 08:14:15 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 1 min | Wed Mar 15 12:13:03 2000 PST + Wed Mar 15 12:14:03 2000 PST | @ 14 secs ago | Wed Mar 15 12:14:17 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 1 min | Wed Mar 15 13:13:02 2000 PST + Wed Mar 15 13:14:02 2000 PST | @ 14 secs ago | Wed Mar 15 13:14:16 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 5 mons 12 hours | Mon Jul 31 05:32:01 2000 PDT + Sun Dec 31 17:32:01 2000 PST | @ 5 mons | Mon Jul 31 17:32:01 2000 PDT + Mon Jan 01 17:32:01 2001 PST | @ 5 mons 12 hours | Tue Aug 01 05:32:01 2000 PDT + Mon Jan 01 17:32:01 2001 PST | @ 5 mons | Tue Aug 01 17:32:01 2000 PDT + Sun Dec 31 17:32:01 2000 PST | @ 3 mons | Sat Sep 30 17:32:01 2000 PDT + Mon Jan 01 17:32:01 2001 PST | @ 3 mons | Sun Oct 01 17:32:01 2000 PDT + Sun Dec 31 17:32:01 2000 PST | @ 10 days | Thu Dec 21 17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST | @ 10 days | Fri Dec 22 17:32:01 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 1 day 2 hours 3 mins 4 secs | Sat Dec 30 15:28:57 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 5 hours | Sun Dec 31 12:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST | @ 1 day 2 hours 3 mins 4 secs | Sun Dec 31 15:28:57 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 1 min | Sun Dec 31 17:31:01 2000 PST + Sun Dec 31 17:32:01 2000 PST | @ 14 secs ago | Sun Dec 31 17:32:15 2000 PST + Mon Jan 01 17:32:01 2001 PST | @ 5 hours | Mon Jan 01 12:32:01 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 1 min | Mon Jan 01 17:31:01 2001 PST + Mon Jan 01 17:32:01 2001 PST | @ 14 secs ago | Mon Jan 01 17:32:15 2001 PST + Sat Sep 22 18:19:20 2001 PDT | @ 5 mons 12 hours | Sun Apr 22 06:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 5 mons | Sun Apr 22 18:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 3 mons | Fri Jun 22 18:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 10 days | Wed Sep 12 18:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 1 day 2 hours 3 mins 4 secs | Fri Sep 21 16:16:16 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 5 hours | Sat Sep 22 13:19:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 1 min | Sat Sep 22 18:18:20 2001 PDT + Sat Sep 22 18:19:20 2001 PDT | @ 14 secs ago | Sat Sep 22 18:19:34 2001 PDT +(160 rows) + +SELECT d.f1 AS "timestamp", + timestamp with time zone '1980-01-06 00:00 GMT' AS gpstime_zero, + d.f1 - timestamp with time zone '1980-01-06 00:00 GMT' AS difference + FROM TEMP_TIMESTAMP d + ORDER BY difference; + timestamp | gpstime_zero | difference +------------------------------+------------------------------+------------------------------------- + Thu Jan 01 00:00:00 1970 PST | Sat Jan 05 16:00:00 1980 PST | @ 3656 days 16 hours ago + Wed Feb 28 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 5898 days 1 hour 32 mins 1 sec + Thu Feb 29 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 5899 days 1 hour 32 mins 1 sec + Fri Mar 01 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 5900 days 1 hour 32 mins 1 sec + Mon Dec 30 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 6204 days 1 hour 32 mins 1 sec + Tue Dec 31 17:32:01 1996 PST | Sat Jan 05 16:00:00 1980 PST | @ 6205 days 1 hour 32 mins 1 sec + Fri Dec 31 17:32:01 1999 PST | Sat Jan 05 16:00:00 1980 PST | @ 7300 days 1 hour 32 
mins 1 sec + Sat Jan 01 17:32:01 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7301 days 1 hour 32 mins 1 sec + Wed Mar 15 02:14:05 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 10 hours 14 mins 5 secs + Wed Mar 15 03:14:04 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 11 hours 14 mins 4 secs + Wed Mar 15 08:14:01 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 16 hours 14 mins 1 sec + Wed Mar 15 12:14:03 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 20 hours 14 mins 3 secs + Wed Mar 15 13:14:02 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7374 days 21 hours 14 mins 2 secs + Sun Dec 31 17:32:01 2000 PST | Sat Jan 05 16:00:00 1980 PST | @ 7666 days 1 hour 32 mins 1 sec + Mon Jan 01 17:32:01 2001 PST | Sat Jan 05 16:00:00 1980 PST | @ 7667 days 1 hour 32 mins 1 sec + Sat Sep 22 18:19:20 2001 PDT | Sat Jan 05 16:00:00 1980 PST | @ 7931 days 1 hour 19 mins 20 secs +(16 rows) + +SELECT d1.f1 AS timestamp1, d2.f1 AS timestamp2, d1.f1 - d2.f1 AS difference + FROM TEMP_TIMESTAMP d1, TEMP_TIMESTAMP d2 + ORDER BY timestamp1, timestamp2, difference; + timestamp1 | timestamp2 | difference +------------------------------+------------------------------+------------------------------------------- + Thu Jan 01 00:00:00 1970 PST | Thu Jan 01 00:00:00 1970 PST | @ 0 + Thu Jan 01 00:00:00 1970 PST | Wed Feb 28 17:32:01 1996 PST | @ 9554 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Thu Feb 29 17:32:01 1996 PST | @ 9555 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Fri Mar 01 17:32:01 1996 PST | @ 9556 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Mon Dec 30 17:32:01 1996 PST | @ 9860 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Tue Dec 31 17:32:01 1996 PST | @ 9861 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Fri Dec 31 17:32:01 1999 PST | @ 10956 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Sat Jan 01 17:32:01 2000 PST | @ 10957 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 02:14:05 2000 PST | @ 11031 days 2 hours 14 mins 5 secs ago + Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 03:14:04 2000 PST | @ 11031 days 3 hours 14 mins 4 secs ago + Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 08:14:01 2000 PST | @ 11031 days 8 hours 14 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 12:14:03 2000 PST | @ 11031 days 12 hours 14 mins 3 secs ago + Thu Jan 01 00:00:00 1970 PST | Wed Mar 15 13:14:02 2000 PST | @ 11031 days 13 hours 14 mins 2 secs ago + Thu Jan 01 00:00:00 1970 PST | Sun Dec 31 17:32:01 2000 PST | @ 11322 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Mon Jan 01 17:32:01 2001 PST | @ 11323 days 17 hours 32 mins 1 sec ago + Thu Jan 01 00:00:00 1970 PST | Sat Sep 22 18:19:20 2001 PDT | @ 11587 days 17 hours 19 mins 20 secs ago + Wed Feb 28 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9554 days 17 hours 32 mins 1 sec + Wed Feb 28 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 0 + Wed Feb 28 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 1 day ago + Wed Feb 28 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 2 days ago + Wed Feb 28 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 306 days ago + Wed Feb 28 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 307 days ago + Wed Feb 28 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1402 days ago + Wed Feb 28 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1403 days ago + Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST 
| @ 1476 days 8 hours 42 mins 4 secs ago + Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1476 days 9 hours 42 mins 3 secs ago + Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1476 days 14 hours 42 mins ago + Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1476 days 18 hours 42 mins 2 secs ago + Wed Feb 28 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1476 days 19 hours 42 mins 1 sec ago + Wed Feb 28 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1768 days ago + Wed Feb 28 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1769 days ago + Wed Feb 28 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 2032 days 23 hours 47 mins 19 secs ago + Thu Feb 29 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9555 days 17 hours 32 mins 1 sec + Thu Feb 29 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 1 day + Thu Feb 29 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 0 + Thu Feb 29 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 1 day ago + Thu Feb 29 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 305 days ago + Thu Feb 29 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 306 days ago + Thu Feb 29 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1401 days ago + Thu Feb 29 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1402 days ago + Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1475 days 8 hours 42 mins 4 secs ago + Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1475 days 9 hours 42 mins 3 secs ago + Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1475 days 14 hours 42 mins ago + Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1475 days 18 hours 42 mins 2 secs ago + Thu Feb 29 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1475 days 19 hours 42 mins 1 sec ago + Thu Feb 29 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1767 days ago + Thu Feb 29 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1768 days ago + Thu Feb 29 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 2031 days 23 hours 47 mins 19 secs ago + Fri Mar 01 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9556 days 17 hours 32 mins 1 sec + Fri Mar 01 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 2 days + Fri Mar 01 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 1 day + Fri Mar 01 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 0 + Fri Mar 01 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 304 days ago + Fri Mar 01 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 305 days ago + Fri Mar 01 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1400 days ago + Fri Mar 01 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1401 days ago + Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1474 days 8 hours 42 mins 4 secs ago + Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1474 days 9 hours 42 mins 3 secs ago + Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1474 days 14 hours 42 mins ago + Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1474 days 18 hours 42 mins 2 secs ago + Fri Mar 01 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1474 days 19 hours 42 mins 1 sec ago + Fri Mar 01 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1766 days ago + Fri Mar 01 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1767 days ago + Fri Mar 01 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 2030 days 23 hours 47 mins 19 secs ago + Mon Dec 30 17:32:01 1996 
PST | Thu Jan 01 00:00:00 1970 PST | @ 9860 days 17 hours 32 mins 1 sec + Mon Dec 30 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 306 days + Mon Dec 30 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 305 days + Mon Dec 30 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 304 days + Mon Dec 30 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 0 + Mon Dec 30 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 1 day ago + Mon Dec 30 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1096 days ago + Mon Dec 30 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1097 days ago + Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1170 days 8 hours 42 mins 4 secs ago + Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1170 days 9 hours 42 mins 3 secs ago + Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1170 days 14 hours 42 mins ago + Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1170 days 18 hours 42 mins 2 secs ago + Mon Dec 30 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1170 days 19 hours 42 mins 1 sec ago + Mon Dec 30 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1462 days ago + Mon Dec 30 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1463 days ago + Mon Dec 30 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 1726 days 23 hours 47 mins 19 secs ago + Tue Dec 31 17:32:01 1996 PST | Thu Jan 01 00:00:00 1970 PST | @ 9861 days 17 hours 32 mins 1 sec + Tue Dec 31 17:32:01 1996 PST | Wed Feb 28 17:32:01 1996 PST | @ 307 days + Tue Dec 31 17:32:01 1996 PST | Thu Feb 29 17:32:01 1996 PST | @ 306 days + Tue Dec 31 17:32:01 1996 PST | Fri Mar 01 17:32:01 1996 PST | @ 305 days + Tue Dec 31 17:32:01 1996 PST | Mon Dec 30 17:32:01 1996 PST | @ 1 day + Tue Dec 31 17:32:01 1996 PST | Tue Dec 31 17:32:01 1996 PST | @ 0 + Tue Dec 31 17:32:01 1996 PST | Fri Dec 31 17:32:01 1999 PST | @ 1095 days ago + Tue Dec 31 17:32:01 1996 PST | Sat Jan 01 17:32:01 2000 PST | @ 1096 days ago + Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 02:14:05 2000 PST | @ 1169 days 8 hours 42 mins 4 secs ago + Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 03:14:04 2000 PST | @ 1169 days 9 hours 42 mins 3 secs ago + Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 08:14:01 2000 PST | @ 1169 days 14 hours 42 mins ago + Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 12:14:03 2000 PST | @ 1169 days 18 hours 42 mins 2 secs ago + Tue Dec 31 17:32:01 1996 PST | Wed Mar 15 13:14:02 2000 PST | @ 1169 days 19 hours 42 mins 1 sec ago + Tue Dec 31 17:32:01 1996 PST | Sun Dec 31 17:32:01 2000 PST | @ 1461 days ago + Tue Dec 31 17:32:01 1996 PST | Mon Jan 01 17:32:01 2001 PST | @ 1462 days ago + Tue Dec 31 17:32:01 1996 PST | Sat Sep 22 18:19:20 2001 PDT | @ 1725 days 23 hours 47 mins 19 secs ago + Fri Dec 31 17:32:01 1999 PST | Thu Jan 01 00:00:00 1970 PST | @ 10956 days 17 hours 32 mins 1 sec + Fri Dec 31 17:32:01 1999 PST | Wed Feb 28 17:32:01 1996 PST | @ 1402 days + Fri Dec 31 17:32:01 1999 PST | Thu Feb 29 17:32:01 1996 PST | @ 1401 days + Fri Dec 31 17:32:01 1999 PST | Fri Mar 01 17:32:01 1996 PST | @ 1400 days + Fri Dec 31 17:32:01 1999 PST | Mon Dec 30 17:32:01 1996 PST | @ 1096 days + Fri Dec 31 17:32:01 1999 PST | Tue Dec 31 17:32:01 1996 PST | @ 1095 days + Fri Dec 31 17:32:01 1999 PST | Fri Dec 31 17:32:01 1999 PST | @ 0 + Fri Dec 31 17:32:01 1999 PST | Sat Jan 01 17:32:01 2000 PST | @ 1 day ago + Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 02:14:05 2000 PST | @ 74 days 8 hours 42 mins 4 secs ago + Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 03:14:04 
2000 PST | @ 74 days 9 hours 42 mins 3 secs ago + Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 08:14:01 2000 PST | @ 74 days 14 hours 42 mins ago + Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 12:14:03 2000 PST | @ 74 days 18 hours 42 mins 2 secs ago + Fri Dec 31 17:32:01 1999 PST | Wed Mar 15 13:14:02 2000 PST | @ 74 days 19 hours 42 mins 1 sec ago + Fri Dec 31 17:32:01 1999 PST | Sun Dec 31 17:32:01 2000 PST | @ 366 days ago + Fri Dec 31 17:32:01 1999 PST | Mon Jan 01 17:32:01 2001 PST | @ 367 days ago + Fri Dec 31 17:32:01 1999 PST | Sat Sep 22 18:19:20 2001 PDT | @ 630 days 23 hours 47 mins 19 secs ago + Sat Jan 01 17:32:01 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 10957 days 17 hours 32 mins 1 sec + Sat Jan 01 17:32:01 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1403 days + Sat Jan 01 17:32:01 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1402 days + Sat Jan 01 17:32:01 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1401 days + Sat Jan 01 17:32:01 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1097 days + Sat Jan 01 17:32:01 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1096 days + Sat Jan 01 17:32:01 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 1 day + Sat Jan 01 17:32:01 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 0 + Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 73 days 8 hours 42 mins 4 secs ago + Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 73 days 9 hours 42 mins 3 secs ago + Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 73 days 14 hours 42 mins ago + Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 73 days 18 hours 42 mins 2 secs ago + Sat Jan 01 17:32:01 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 73 days 19 hours 42 mins 1 sec ago + Sat Jan 01 17:32:01 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 365 days ago + Sat Jan 01 17:32:01 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 366 days ago + Sat Jan 01 17:32:01 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 629 days 23 hours 47 mins 19 secs ago + Wed Mar 15 02:14:05 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 2 hours 14 mins 5 secs + Wed Mar 15 02:14:05 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 8 hours 42 mins 4 secs + Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 0 + Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 59 mins 59 secs ago + Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 5 hours 59 mins 56 secs ago + Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 9 hours 59 mins 58 secs ago + Wed Mar 15 02:14:05 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 10 hours 59 mins 57 secs ago + Wed Mar 15 02:14:05 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 15 hours 17 mins 56 secs ago + Wed Mar 15 02:14:05 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 15 hours 17 mins 56 secs ago + Wed Mar 15 02:14:05 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 15 hours 5 mins 15 secs ago + Wed Mar 15 03:14:04 2000 
PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 3 hours 14 mins 4 secs + Wed Mar 15 03:14:04 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 9 hours 42 mins 3 secs + Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 59 mins 59 secs + Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 0 + Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 4 hours 59 mins 57 secs ago + Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 8 hours 59 mins 59 secs ago + Wed Mar 15 03:14:04 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 9 hours 59 mins 58 secs ago + Wed Mar 15 03:14:04 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 14 hours 17 mins 57 secs ago + Wed Mar 15 03:14:04 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 14 hours 17 mins 57 secs ago + Wed Mar 15 03:14:04 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 14 hours 5 mins 16 secs ago + Wed Mar 15 08:14:01 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 8 hours 14 mins 1 sec + Wed Mar 15 08:14:01 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 14 hours 42 mins + Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 5 hours 59 mins 56 secs + Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 4 hours 59 mins 57 secs + Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 0 + Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 4 hours 2 secs ago + Wed Mar 15 08:14:01 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 5 hours 1 sec ago + Wed Mar 15 08:14:01 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 9 hours 18 mins ago + Wed Mar 15 08:14:01 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 9 hours 18 mins ago + Wed Mar 15 08:14:01 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 9 hours 5 mins 19 secs ago + Wed Mar 15 12:14:03 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 12 hours 14 mins 3 secs + Wed Mar 15 12:14:03 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 18 hours 42 mins 2 secs + Wed Mar 15 12:14:03 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 18 hours 42 mins 2 secs + Wed Mar 15 12:14:03 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 18 hours 42 mins 2 secs + Wed Mar 15 12:14:03 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 18 hours 42 mins 2 secs + Wed Mar 15 12:14:03 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 18 hours 42 mins 
2 secs + Wed Mar 15 12:14:03 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 18 hours 42 mins 2 secs + Wed Mar 15 12:14:03 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 18 hours 42 mins 2 secs + Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 9 hours 59 mins 58 secs + Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 8 hours 59 mins 59 secs + Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 4 hours 2 secs + Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 0 + Wed Mar 15 12:14:03 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 59 mins 59 secs ago + Wed Mar 15 12:14:03 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 5 hours 17 mins 58 secs ago + Wed Mar 15 12:14:03 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 5 hours 17 mins 58 secs ago + Wed Mar 15 12:14:03 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 5 hours 5 mins 17 secs ago + Wed Mar 15 13:14:02 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11031 days 13 hours 14 mins 2 secs + Wed Mar 15 13:14:02 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1476 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1475 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1474 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1170 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1169 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 74 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 73 days 19 hours 42 mins 1 sec + Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 10 hours 59 mins 57 secs + Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 9 hours 59 mins 58 secs + Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 5 hours 1 sec + Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 59 mins 59 secs + Wed Mar 15 13:14:02 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 0 + Wed Mar 15 13:14:02 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 291 days 4 hours 17 mins 59 secs ago + Wed Mar 15 13:14:02 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 292 days 4 hours 17 mins 59 secs ago + Wed Mar 15 13:14:02 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 556 days 4 hours 5 mins 18 secs ago + Sun Dec 31 17:32:01 2000 PST | Thu Jan 01 00:00:00 1970 PST | @ 11322 days 17 hours 32 mins 1 sec + Sun Dec 31 17:32:01 2000 PST | Wed Feb 28 17:32:01 1996 PST | @ 1768 days + Sun Dec 31 17:32:01 2000 PST | Thu Feb 29 17:32:01 1996 PST | @ 1767 days + Sun Dec 31 17:32:01 2000 PST | Fri Mar 01 17:32:01 1996 PST | @ 1766 days + Sun Dec 31 17:32:01 2000 PST | Mon Dec 30 17:32:01 1996 PST | @ 1462 days + Sun Dec 31 17:32:01 2000 PST | Tue Dec 31 17:32:01 1996 PST | @ 1461 days + Sun Dec 31 17:32:01 2000 PST | Fri Dec 31 17:32:01 1999 PST | @ 366 days + Sun Dec 31 17:32:01 2000 PST | Sat Jan 01 17:32:01 2000 PST | @ 365 days + Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 02:14:05 2000 PST | @ 291 days 15 hours 17 mins 56 secs + Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 03:14:04 2000 PST | @ 291 days 14 hours 17 mins 57 secs + Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 08:14:01 2000 PST | @ 291 days 9 hours 18 mins + Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 12:14:03 2000 PST | @ 291 days 5 hours 17 mins 58 secs + Sun Dec 31 17:32:01 2000 PST | Wed Mar 15 13:14:02 2000 PST | @ 291 days 4 hours 17 mins 59 secs 
+ Sun Dec 31 17:32:01 2000 PST | Sun Dec 31 17:32:01 2000 PST | @ 0 + Sun Dec 31 17:32:01 2000 PST | Mon Jan 01 17:32:01 2001 PST | @ 1 day ago + Sun Dec 31 17:32:01 2000 PST | Sat Sep 22 18:19:20 2001 PDT | @ 264 days 23 hours 47 mins 19 secs ago + Mon Jan 01 17:32:01 2001 PST | Thu Jan 01 00:00:00 1970 PST | @ 11323 days 17 hours 32 mins 1 sec + Mon Jan 01 17:32:01 2001 PST | Wed Feb 28 17:32:01 1996 PST | @ 1769 days + Mon Jan 01 17:32:01 2001 PST | Thu Feb 29 17:32:01 1996 PST | @ 1768 days + Mon Jan 01 17:32:01 2001 PST | Fri Mar 01 17:32:01 1996 PST | @ 1767 days + Mon Jan 01 17:32:01 2001 PST | Mon Dec 30 17:32:01 1996 PST | @ 1463 days + Mon Jan 01 17:32:01 2001 PST | Tue Dec 31 17:32:01 1996 PST | @ 1462 days + Mon Jan 01 17:32:01 2001 PST | Fri Dec 31 17:32:01 1999 PST | @ 367 days + Mon Jan 01 17:32:01 2001 PST | Sat Jan 01 17:32:01 2000 PST | @ 366 days + Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 02:14:05 2000 PST | @ 292 days 15 hours 17 mins 56 secs + Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 03:14:04 2000 PST | @ 292 days 14 hours 17 mins 57 secs + Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 08:14:01 2000 PST | @ 292 days 9 hours 18 mins + Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 12:14:03 2000 PST | @ 292 days 5 hours 17 mins 58 secs + Mon Jan 01 17:32:01 2001 PST | Wed Mar 15 13:14:02 2000 PST | @ 292 days 4 hours 17 mins 59 secs + Mon Jan 01 17:32:01 2001 PST | Sun Dec 31 17:32:01 2000 PST | @ 1 day + Mon Jan 01 17:32:01 2001 PST | Mon Jan 01 17:32:01 2001 PST | @ 0 + Mon Jan 01 17:32:01 2001 PST | Sat Sep 22 18:19:20 2001 PDT | @ 263 days 23 hours 47 mins 19 secs ago + Sat Sep 22 18:19:20 2001 PDT | Thu Jan 01 00:00:00 1970 PST | @ 11587 days 17 hours 19 mins 20 secs + Sat Sep 22 18:19:20 2001 PDT | Wed Feb 28 17:32:01 1996 PST | @ 2032 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Thu Feb 29 17:32:01 1996 PST | @ 2031 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Fri Mar 01 17:32:01 1996 PST | @ 2030 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Mon Dec 30 17:32:01 1996 PST | @ 1726 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Tue Dec 31 17:32:01 1996 PST | @ 1725 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Fri Dec 31 17:32:01 1999 PST | @ 630 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Sat Jan 01 17:32:01 2000 PST | @ 629 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 02:14:05 2000 PST | @ 556 days 15 hours 5 mins 15 secs + Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 03:14:04 2000 PST | @ 556 days 14 hours 5 mins 16 secs + Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 08:14:01 2000 PST | @ 556 days 9 hours 5 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 12:14:03 2000 PST | @ 556 days 5 hours 5 mins 17 secs + Sat Sep 22 18:19:20 2001 PDT | Wed Mar 15 13:14:02 2000 PST | @ 556 days 4 hours 5 mins 18 secs + Sat Sep 22 18:19:20 2001 PDT | Sun Dec 31 17:32:01 2000 PST | @ 264 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Mon Jan 01 17:32:01 2001 PST | @ 263 days 23 hours 47 mins 19 secs + Sat Sep 22 18:19:20 2001 PDT | Sat Sep 22 18:19:20 2001 PDT | @ 0 +(256 rows) + +-- +-- Conversions +-- +SELECT f1 AS "timestamp", date(f1) AS date + FROM TEMP_TIMESTAMP + WHERE f1 <> timestamp 'now' + ORDER BY date, "timestamp"; + timestamp | date +------------------------------+------------ + Thu Jan 01 00:00:00 1970 PST | 01-01-1970 + Wed Feb 28 17:32:01 1996 PST | 02-28-1996 + Thu Feb 29 17:32:01 1996 PST | 02-29-1996 + Fri Mar 01 
17:32:01 1996 PST | 03-01-1996 + Mon Dec 30 17:32:01 1996 PST | 12-30-1996 + Tue Dec 31 17:32:01 1996 PST | 12-31-1996 + Fri Dec 31 17:32:01 1999 PST | 12-31-1999 + Sat Jan 01 17:32:01 2000 PST | 01-01-2000 + Wed Mar 15 02:14:05 2000 PST | 03-15-2000 + Wed Mar 15 03:14:04 2000 PST | 03-15-2000 + Wed Mar 15 08:14:01 2000 PST | 03-15-2000 + Wed Mar 15 12:14:03 2000 PST | 03-15-2000 + Wed Mar 15 13:14:02 2000 PST | 03-15-2000 + Sun Dec 31 17:32:01 2000 PST | 12-31-2000 + Mon Jan 01 17:32:01 2001 PST | 01-01-2001 + Sat Sep 22 18:19:20 2001 PDT | 09-22-2001 +(16 rows) + +DROP TABLE TEMP_TIMESTAMP; +-- +-- Comparisons between datetime types, especially overflow cases +--- +SELECT '2202020-10-05'::date::timestamp; -- fail +ERROR: date out of range for timestamp +SELECT '2202020-10-05'::date > '2020-10-05'::timestamp as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamp > '2202020-10-05'::date as f; + f +--- + f +(1 row) + +SELECT '2202020-10-05'::date::timestamptz; -- fail +ERROR: date out of range for timestamp +SELECT '2202020-10-05'::date > '2020-10-05'::timestamptz as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamptz > '2202020-10-05'::date as f; + f +--- + f +(1 row) + +-- This conversion may work depending on timezone +SELECT '4714-11-24 BC'::date::timestamptz; + timestamptz +--------------------------------- + Mon Nov 24 00:00:00 4714 PST BC +(1 row) + +SET TimeZone = 'UTC-2'; +SELECT '4714-11-24 BC'::date::timestamptz; -- fail +ERROR: date out of range for timestamp +SELECT '4714-11-24 BC'::date < '2020-10-05'::timestamptz as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::date as t; + t +--- + t +(1 row) + +SELECT '4714-11-24 BC'::timestamp < '2020-10-05'::timestamptz as t; + t +--- + t +(1 row) + +SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t; + t +--- + t +(1 row) + +RESET TimeZone; +-- +-- Tests for BETWEEN +-- +explain (costs off) +select count(*) from date_tbl + where f1 between '1997-01-01' and '1998-01-01'; + QUERY PLAN +----------------------------------------------------------------------------- + Aggregate + -> Seq Scan on date_tbl + Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) +(3 rows) + +select count(*) from date_tbl + where f1 between '1997-01-01' and '1998-01-01'; + count +------- + 3 +(1 row) + +explain (costs off) +select count(*) from date_tbl + where f1 not between '1997-01-01' and '1998-01-01'; + QUERY PLAN +-------------------------------------------------------------------------- + Aggregate + -> Seq Scan on date_tbl + Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) +(3 rows) + +select count(*) from date_tbl + where f1 not between '1997-01-01' and '1998-01-01'; + count +------- + 13 +(1 row) + +explain (costs off) +select count(*) from date_tbl + where f1 between symmetric '1997-01-01' and '1998-01-01'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Seq Scan on date_tbl + Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date))) +(3 rows) + +select count(*) from date_tbl + where f1 between symmetric '1997-01-01' and '1998-01-01'; + count +------- + 3 +(1 row) + +explain (costs off) +select count(*) from date_tbl + where f1 not between symmetric '1997-01-01' and '1998-01-01'; + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Seq Scan on date_tbl + Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date))) +(3 rows) + +select count(*) from date_tbl + where f1 not between symmetric '1997-01-01' and '1998-01-01'; + count +------- + 13 +(1 row) + +-- +-- Formats +-- +SET DateStyle TO 'US,Postgres'; +SHOW DateStyle; + DateStyle +--------------- + Postgres, MDY +(1 row) + +SELECT d1 AS us_postgres FROM TIMESTAMP_TBL; + us_postgres +----------------------------- + -infinity + infinity + Thu Jan 01 00:00:00 1970 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:02 1997 + Mon Feb 10 17:32:01.4 1997 + Mon Feb 10 17:32:01.5 1997 + Mon Feb 10 17:32:01.6 1997 + Thu Jan 02 00:00:00 1997 + Thu Jan 02 03:04:05 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 17:32:01 1997 + Sat Sep 22 18:19:20 2001 + Wed Mar 15 08:14:01 2000 + Wed Mar 15 13:14:02 2000 + Wed Mar 15 12:14:03 2000 + Wed Mar 15 03:14:04 2000 + Wed Mar 15 02:14:05 2000 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:00 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 18:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Feb 11 17:32:01 1997 + Wed Feb 12 17:32:01 1997 + Thu Feb 13 17:32:01 1997 + Fri Feb 14 17:32:01 1997 + Sat Feb 15 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Tue Feb 16 17:32:01 0097 BC + Sat Feb 16 17:32:01 0097 + Thu Feb 16 17:32:01 0597 + Tue Feb 16 17:32:01 1097 + Sat Feb 16 17:32:01 1697 + Thu Feb 16 17:32:01 1797 + Tue Feb 16 17:32:01 1897 + Sun Feb 16 17:32:01 1997 + Sat Feb 16 17:32:01 2097 + Wed Feb 28 17:32:01 1996 + Thu Feb 29 17:32:01 1996 + Fri Mar 01 17:32:01 1996 + Mon Dec 30 17:32:01 1996 + Tue Dec 31 17:32:01 1996 + Wed Jan 01 17:32:01 1997 + Fri Feb 28 17:32:01 1997 + Sat Mar 01 17:32:01 1997 + Tue Dec 30 17:32:01 1997 + Wed Dec 31 17:32:01 1997 + Fri Dec 31 17:32:01 1999 + Sat Jan 01 17:32:01 2000 + Sun Dec 31 17:32:01 2000 + Mon Jan 01 17:32:01 2001 +(65 rows) + +SET DateStyle TO 'US,ISO'; +SELECT d1 AS us_iso FROM TIMESTAMP_TBL; + us_iso +------------------------ + -infinity + infinity + 1970-01-01 00:00:00 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:02 + 1997-02-10 17:32:01.4 + 1997-02-10 17:32:01.5 + 1997-02-10 17:32:01.6 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-06-10 17:32:01 + 2001-09-22 18:19:20 + 2000-03-15 08:14:01 + 2000-03-15 13:14:02 + 2000-03-15 12:14:03 + 2000-03-15 03:14:04 + 2000-03-15 02:14:05 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:00 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-06-10 18:32:01 + 1997-02-10 17:32:01 + 1997-02-11 17:32:01 + 1997-02-12 17:32:01 + 1997-02-13 17:32:01 + 1997-02-14 17:32:01 + 1997-02-15 17:32:01 + 1997-02-16 17:32:01 + 0097-02-16 17:32:01 BC + 0097-02-16 17:32:01 + 0597-02-16 17:32:01 + 1097-02-16 17:32:01 + 1697-02-16 17:32:01 + 1797-02-16 
17:32:01 + 1897-02-16 17:32:01 + 1997-02-16 17:32:01 + 2097-02-16 17:32:01 + 1996-02-28 17:32:01 + 1996-02-29 17:32:01 + 1996-03-01 17:32:01 + 1996-12-30 17:32:01 + 1996-12-31 17:32:01 + 1997-01-01 17:32:01 + 1997-02-28 17:32:01 + 1997-03-01 17:32:01 + 1997-12-30 17:32:01 + 1997-12-31 17:32:01 + 1999-12-31 17:32:01 + 2000-01-01 17:32:01 + 2000-12-31 17:32:01 + 2001-01-01 17:32:01 +(65 rows) + +SET DateStyle TO 'US,SQL'; +SHOW DateStyle; + DateStyle +----------- + SQL, MDY +(1 row) + +SELECT d1 AS us_sql FROM TIMESTAMP_TBL; + us_sql +------------------------ + -infinity + infinity + 01/01/1970 00:00:00 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:02 + 02/10/1997 17:32:01.4 + 02/10/1997 17:32:01.5 + 02/10/1997 17:32:01.6 + 01/02/1997 00:00:00 + 01/02/1997 03:04:05 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 06/10/1997 17:32:01 + 09/22/2001 18:19:20 + 03/15/2000 08:14:01 + 03/15/2000 13:14:02 + 03/15/2000 12:14:03 + 03/15/2000 03:14:04 + 03/15/2000 02:14:05 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:00 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 02/10/1997 17:32:01 + 06/10/1997 18:32:01 + 02/10/1997 17:32:01 + 02/11/1997 17:32:01 + 02/12/1997 17:32:01 + 02/13/1997 17:32:01 + 02/14/1997 17:32:01 + 02/15/1997 17:32:01 + 02/16/1997 17:32:01 + 02/16/0097 17:32:01 BC + 02/16/0097 17:32:01 + 02/16/0597 17:32:01 + 02/16/1097 17:32:01 + 02/16/1697 17:32:01 + 02/16/1797 17:32:01 + 02/16/1897 17:32:01 + 02/16/1997 17:32:01 + 02/16/2097 17:32:01 + 02/28/1996 17:32:01 + 02/29/1996 17:32:01 + 03/01/1996 17:32:01 + 12/30/1996 17:32:01 + 12/31/1996 17:32:01 + 01/01/1997 17:32:01 + 02/28/1997 17:32:01 + 03/01/1997 17:32:01 + 12/30/1997 17:32:01 + 12/31/1997 17:32:01 + 12/31/1999 17:32:01 + 01/01/2000 17:32:01 + 12/31/2000 17:32:01 + 01/01/2001 17:32:01 +(65 rows) + +SET DateStyle TO 'European,Postgres'; +SHOW DateStyle; + DateStyle +--------------- + Postgres, DMY +(1 row) + +INSERT INTO TIMESTAMP_TBL VALUES('13/06/1957'); +SELECT count(*) as one FROM TIMESTAMP_TBL WHERE d1 = 'Jun 13 1957'; + one +----- + 1 +(1 row) + +SELECT d1 AS european_postgres FROM TIMESTAMP_TBL; + european_postgres +----------------------------- + -infinity + infinity + Thu 01 Jan 00:00:00 1970 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:02 1997 + Mon 10 Feb 17:32:01.4 1997 + Mon 10 Feb 17:32:01.5 1997 + Mon 10 Feb 17:32:01.6 1997 + Thu 02 Jan 00:00:00 1997 + Thu 02 Jan 03:04:05 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Tue 10 Jun 17:32:01 1997 + Sat 22 Sep 18:19:20 2001 + Wed 15 Mar 08:14:01 2000 + Wed 15 Mar 13:14:02 2000 + Wed 15 Mar 12:14:03 2000 + Wed 15 Mar 03:14:04 2000 + Wed 15 Mar 02:14:05 2000 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:00 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Tue 10 Jun 18:32:01 1997 + Mon 10 Feb 17:32:01 1997 + Tue 11 Feb 17:32:01 1997 + Wed 12 Feb 17:32:01 1997 + Thu 13 Feb 17:32:01 1997 + Fri 14 Feb 17:32:01 1997 + Sat 15 Feb 17:32:01 1997 + Sun 16 Feb 17:32:01 1997 + Tue 16 Feb 17:32:01 0097 BC + Sat 16 Feb 17:32:01 0097 + Thu 16 Feb 17:32:01 0597 + Tue 
16 Feb 17:32:01 1097 + Sat 16 Feb 17:32:01 1697 + Thu 16 Feb 17:32:01 1797 + Tue 16 Feb 17:32:01 1897 + Sun 16 Feb 17:32:01 1997 + Sat 16 Feb 17:32:01 2097 + Wed 28 Feb 17:32:01 1996 + Thu 29 Feb 17:32:01 1996 + Fri 01 Mar 17:32:01 1996 + Mon 30 Dec 17:32:01 1996 + Tue 31 Dec 17:32:01 1996 + Wed 01 Jan 17:32:01 1997 + Fri 28 Feb 17:32:01 1997 + Sat 01 Mar 17:32:01 1997 + Tue 30 Dec 17:32:01 1997 + Wed 31 Dec 17:32:01 1997 + Fri 31 Dec 17:32:01 1999 + Sat 01 Jan 17:32:01 2000 + Sun 31 Dec 17:32:01 2000 + Mon 01 Jan 17:32:01 2001 + Thu 13 Jun 00:00:00 1957 +(66 rows) + +SET DateStyle TO 'European,ISO'; +SHOW DateStyle; + DateStyle +----------- + ISO, DMY +(1 row) + +SELECT d1 AS european_iso FROM TIMESTAMP_TBL; + european_iso +------------------------ + -infinity + infinity + 1970-01-01 00:00:00 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:02 + 1997-02-10 17:32:01.4 + 1997-02-10 17:32:01.5 + 1997-02-10 17:32:01.6 + 1997-01-02 00:00:00 + 1997-01-02 03:04:05 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-06-10 17:32:01 + 2001-09-22 18:19:20 + 2000-03-15 08:14:01 + 2000-03-15 13:14:02 + 2000-03-15 12:14:03 + 2000-03-15 03:14:04 + 2000-03-15 02:14:05 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:00 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-02-10 17:32:01 + 1997-06-10 18:32:01 + 1997-02-10 17:32:01 + 1997-02-11 17:32:01 + 1997-02-12 17:32:01 + 1997-02-13 17:32:01 + 1997-02-14 17:32:01 + 1997-02-15 17:32:01 + 1997-02-16 17:32:01 + 0097-02-16 17:32:01 BC + 0097-02-16 17:32:01 + 0597-02-16 17:32:01 + 1097-02-16 17:32:01 + 1697-02-16 17:32:01 + 1797-02-16 17:32:01 + 1897-02-16 17:32:01 + 1997-02-16 17:32:01 + 2097-02-16 17:32:01 + 1996-02-28 17:32:01 + 1996-02-29 17:32:01 + 1996-03-01 17:32:01 + 1996-12-30 17:32:01 + 1996-12-31 17:32:01 + 1997-01-01 17:32:01 + 1997-02-28 17:32:01 + 1997-03-01 17:32:01 + 1997-12-30 17:32:01 + 1997-12-31 17:32:01 + 1999-12-31 17:32:01 + 2000-01-01 17:32:01 + 2000-12-31 17:32:01 + 2001-01-01 17:32:01 + 1957-06-13 00:00:00 +(66 rows) + +SET DateStyle TO 'European,SQL'; +SHOW DateStyle; + DateStyle +----------- + SQL, DMY +(1 row) + +SELECT d1 AS european_sql FROM TIMESTAMP_TBL; + european_sql +------------------------ + -infinity + infinity + 01/01/1970 00:00:00 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:02 + 10/02/1997 17:32:01.4 + 10/02/1997 17:32:01.5 + 10/02/1997 17:32:01.6 + 02/01/1997 00:00:00 + 02/01/1997 03:04:05 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/06/1997 17:32:01 + 22/09/2001 18:19:20 + 15/03/2000 08:14:01 + 15/03/2000 13:14:02 + 15/03/2000 12:14:03 + 15/03/2000 03:14:04 + 15/03/2000 02:14:05 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:00 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/02/1997 17:32:01 + 10/06/1997 18:32:01 + 10/02/1997 17:32:01 + 11/02/1997 17:32:01 + 12/02/1997 17:32:01 + 13/02/1997 17:32:01 + 14/02/1997 17:32:01 + 15/02/1997 17:32:01 + 16/02/1997 17:32:01 + 16/02/0097 17:32:01 BC + 16/02/0097 17:32:01 + 16/02/0597 17:32:01 + 16/02/1097 17:32:01 + 16/02/1697 17:32:01 + 16/02/1797 17:32:01 + 16/02/1897 17:32:01 + 16/02/1997 17:32:01 + 16/02/2097 17:32:01 + 28/02/1996 17:32:01 + 29/02/1996 17:32:01 + 
01/03/1996 17:32:01 + 30/12/1996 17:32:01 + 31/12/1996 17:32:01 + 01/01/1997 17:32:01 + 28/02/1997 17:32:01 + 01/03/1997 17:32:01 + 30/12/1997 17:32:01 + 31/12/1997 17:32:01 + 31/12/1999 17:32:01 + 01/01/2000 17:32:01 + 31/12/2000 17:32:01 + 01/01/2001 17:32:01 + 13/06/1957 00:00:00 +(66 rows) + +RESET DateStyle; +-- +-- to_timestamp() +-- +SELECT to_timestamp('0097/Feb/16 --> 08:14:30', 'YYYY/Mon/DD --> HH:MI:SS'); + to_timestamp +------------------------------ + Sat Feb 16 08:14:30 0097 PST +(1 row) + +SELECT to_timestamp('97/2/16 8:14:30', 'FMYYYY/FMMM/FMDD FMHH:FMMI:FMSS'); + to_timestamp +------------------------------ + Sat Feb 16 08:14:30 0097 PST +(1 row) + +SELECT to_timestamp('2011$03!18 23_38_15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Fri Mar 18 23:38:15 2011 PDT +(1 row) + +SELECT to_timestamp('1985 January 12', 'YYYY FMMonth DD'); + to_timestamp +------------------------------ + Sat Jan 12 00:00:00 1985 PST +(1 row) + +SELECT to_timestamp('1985 FMMonth 12', 'YYYY "FMMonth" DD'); + to_timestamp +------------------------------ + Sat Jan 12 00:00:00 1985 PST +(1 row) + +SELECT to_timestamp('1985 \ 12', 'YYYY \\ DD'); + to_timestamp +------------------------------ + Sat Jan 12 00:00:00 1985 PST +(1 row) + +SELECT to_timestamp('My birthday-> Year: 1976, Month: May, Day: 16', + '"My birthday-> Year:" YYYY, "Month:" FMMonth, "Day:" DD'); + to_timestamp +------------------------------ + Sun May 16 00:00:00 1976 PDT +(1 row) + +SELECT to_timestamp('1,582nd VIII 21', 'Y,YYYth FMRM DD'); + to_timestamp +------------------------------ + Sat Aug 21 00:00:00 1582 PST +(1 row) + +SELECT to_timestamp('15 "text between quote marks" 98 54 45', + E'HH24 "\\"text between quote marks\\"" YY MI SS'); + to_timestamp +------------------------------ + Thu Jan 01 15:54:45 1998 PST +(1 row) + +SELECT to_timestamp('05121445482000', 'MMDDHH24MISSYYYY'); + to_timestamp +------------------------------ + Fri May 12 14:45:48 2000 PDT +(1 row) + +SELECT to_timestamp('2000January09Sunday', 'YYYYFMMonthDDFMDay'); + to_timestamp +------------------------------ + Sun Jan 09 00:00:00 2000 PST +(1 row) + +SELECT to_timestamp('97/Feb/16', 'YYMonDD'); +ERROR: invalid value "/Feb/16" for "Mon" +DETAIL: The given value did not match any of the allowed values for this field. +SELECT to_timestamp('97/Feb/16', 'YY:Mon:DD'); + to_timestamp +------------------------------ + Sun Feb 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('97/Feb/16', 'FXYY:Mon:DD'); + to_timestamp +------------------------------ + Sun Feb 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('97/Feb/16', 'FXYY/Mon/DD'); + to_timestamp +------------------------------ + Sun Feb 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('19971116', 'YYYYMMDD'); + to_timestamp +------------------------------ + Sun Nov 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('20000-1116', 'YYYY-MMDD'); + to_timestamp +------------------------------- + Thu Nov 16 00:00:00 20000 PST +(1 row) + +SELECT to_timestamp('1997 AD 11 16', 'YYYY BC MM DD'); + to_timestamp +------------------------------ + Sun Nov 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('1997 BC 11 16', 'YYYY BC MM DD'); + to_timestamp +--------------------------------- + Tue Nov 16 00:00:00 1997 PST BC +(1 row) + +SELECT to_timestamp('1997 A.D. 11 16', 'YYYY B.C. MM DD'); + to_timestamp +------------------------------ + Sun Nov 16 00:00:00 1997 PST +(1 row) + +SELECT to_timestamp('1997 B.C. 11 16', 'YYYY B.C. 
MM DD'); + to_timestamp +--------------------------------- + Tue Nov 16 00:00:00 1997 PST BC +(1 row) + +SELECT to_timestamp('9-1116', 'Y-MMDD'); + to_timestamp +------------------------------ + Mon Nov 16 00:00:00 2009 PST +(1 row) + +SELECT to_timestamp('95-1116', 'YY-MMDD'); + to_timestamp +------------------------------ + Thu Nov 16 00:00:00 1995 PST +(1 row) + +SELECT to_timestamp('995-1116', 'YYY-MMDD'); + to_timestamp +------------------------------ + Thu Nov 16 00:00:00 1995 PST +(1 row) + +SELECT to_timestamp('2005426', 'YYYYWWD'); + to_timestamp +------------------------------ + Sat Oct 15 00:00:00 2005 PDT +(1 row) + +SELECT to_timestamp('2005300', 'YYYYDDD'); + to_timestamp +------------------------------ + Thu Oct 27 00:00:00 2005 PDT +(1 row) + +SELECT to_timestamp('2005527', 'IYYYIWID'); + to_timestamp +------------------------------ + Sun Jan 01 00:00:00 2006 PST +(1 row) + +SELECT to_timestamp('005527', 'IYYIWID'); + to_timestamp +------------------------------ + Sun Jan 01 00:00:00 2006 PST +(1 row) + +SELECT to_timestamp('05527', 'IYIWID'); + to_timestamp +------------------------------ + Sun Jan 01 00:00:00 2006 PST +(1 row) + +SELECT to_timestamp('5527', 'IIWID'); + to_timestamp +------------------------------ + Sun Jan 01 00:00:00 2006 PST +(1 row) + +SELECT to_timestamp('2005364', 'IYYYIDDD'); + to_timestamp +------------------------------ + Sun Jan 01 00:00:00 2006 PST +(1 row) + +SELECT to_timestamp('20050302', 'YYYYMMDD'); + to_timestamp +------------------------------ + Wed Mar 02 00:00:00 2005 PST +(1 row) + +SELECT to_timestamp('2005 03 02', 'YYYYMMDD'); + to_timestamp +------------------------------ + Wed Mar 02 00:00:00 2005 PST +(1 row) + +SELECT to_timestamp(' 2005 03 02', 'YYYYMMDD'); + to_timestamp +------------------------------ + Wed Mar 02 00:00:00 2005 PST +(1 row) + +SELECT to_timestamp(' 20050302', 'YYYYMMDD'); + to_timestamp +------------------------------ + Wed Mar 02 00:00:00 2005 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 AM', 'YYYY-MM-DD HH12:MI PM'); + to_timestamp +------------------------------ + Sun Dec 18 11:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 PM', 'YYYY-MM-DD HH12:MI PM'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 A.M.', 'YYYY-MM-DD HH12:MI P.M.'); + to_timestamp +------------------------------ + Sun Dec 18 11:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 P.M.', 'YYYY-MM-DD HH12:MI P.M.'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 +05', 'YYYY-MM-DD HH12:MI TZH'); + to_timestamp +------------------------------ + Sat Dec 17 22:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 -05', 'YYYY-MM-DD HH12:MI TZH'); + to_timestamp +------------------------------ + Sun Dec 18 08:38:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 +05:20', 'YYYY-MM-DD HH12:MI TZH:TZM'); + to_timestamp +------------------------------ + Sat Dec 17 22:18:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 -05:20', 'YYYY-MM-DD HH12:MI TZH:TZM'); + to_timestamp +------------------------------ + Sun Dec 18 08:58:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 20', 'YYYY-MM-DD HH12:MI TZM'); + to_timestamp +------------------------------ + Sun Dec 18 03:18:00 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 11:38 PST', 'YYYY-MM-DD HH12:MI TZ'); -- NYI +ERROR: formatting field "TZ" is only 
supported in to_char +SELECT to_timestamp('2018-11-02 12:34:56.025', 'YYYY-MM-DD HH24:MI:SS.MS'); + to_timestamp +---------------------------------- + Fri Nov 02 12:34:56.025 2018 PDT +(1 row) + +SELECT i, to_timestamp('2018-11-02 12:34:56', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+------------------------------ + 1 | Fri Nov 02 12:34:56 2018 PDT + 2 | Fri Nov 02 12:34:56 2018 PDT + 3 | Fri Nov 02 12:34:56 2018 PDT + 4 | Fri Nov 02 12:34:56 2018 PDT + 5 | Fri Nov 02 12:34:56 2018 PDT + 6 | Fri Nov 02 12:34:56 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.1', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+-------------------------------- + 1 | Fri Nov 02 12:34:56.1 2018 PDT + 2 | Fri Nov 02 12:34:56.1 2018 PDT + 3 | Fri Nov 02 12:34:56.1 2018 PDT + 4 | Fri Nov 02 12:34:56.1 2018 PDT + 5 | Fri Nov 02 12:34:56.1 2018 PDT + 6 | Fri Nov 02 12:34:56.1 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.12', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+--------------------------------- + 1 | Fri Nov 02 12:34:56.1 2018 PDT + 2 | Fri Nov 02 12:34:56.12 2018 PDT + 3 | Fri Nov 02 12:34:56.12 2018 PDT + 4 | Fri Nov 02 12:34:56.12 2018 PDT + 5 | Fri Nov 02 12:34:56.12 2018 PDT + 6 | Fri Nov 02 12:34:56.12 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.123', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+---------------------------------- + 1 | Fri Nov 02 12:34:56.1 2018 PDT + 2 | Fri Nov 02 12:34:56.12 2018 PDT + 3 | Fri Nov 02 12:34:56.123 2018 PDT + 4 | Fri Nov 02 12:34:56.123 2018 PDT + 5 | Fri Nov 02 12:34:56.123 2018 PDT + 6 | Fri Nov 02 12:34:56.123 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.1234', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+----------------------------------- + 1 | Fri Nov 02 12:34:56.1 2018 PDT + 2 | Fri Nov 02 12:34:56.12 2018 PDT + 3 | Fri Nov 02 12:34:56.123 2018 PDT + 4 | Fri Nov 02 12:34:56.1234 2018 PDT + 5 | Fri Nov 02 12:34:56.1234 2018 PDT + 6 | Fri Nov 02 12:34:56.1234 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.12345', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+------------------------------------ + 1 | Fri Nov 02 12:34:56.1 2018 PDT + 2 | Fri Nov 02 12:34:56.12 2018 PDT + 3 | Fri Nov 02 12:34:56.123 2018 PDT + 4 | Fri Nov 02 12:34:56.1235 2018 PDT + 5 | Fri Nov 02 12:34:56.12345 2018 PDT + 6 | Fri Nov 02 12:34:56.12345 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.123456', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; + i | to_timestamp +---+------------------------------------- + 1 | Fri Nov 02 12:34:56.1 2018 PDT + 2 | Fri Nov 02 12:34:56.12 2018 PDT + 3 | Fri Nov 02 12:34:56.123 2018 PDT + 4 | Fri Nov 02 12:34:56.1235 2018 PDT + 5 | Fri Nov 02 12:34:56.12346 2018 PDT + 6 | Fri Nov 02 12:34:56.123456 2018 PDT +(6 rows) + +SELECT i, to_timestamp('2018-11-02 12:34:56.123456789', 'YYYY-MM-DD HH24:MI:SS.FF' || i) FROM generate_series(1, 6) i; +ERROR: date/time field value out of range: "2018-11-02 12:34:56.123456789" +SELECT to_date('1 4 1902', 'Q MM YYYY'); -- Q is ignored + to_date +------------ + 04-01-1902 +(1 row) + +SELECT to_date('3 4 21 01', 'W MM CC YY'); + to_date +------------ + 04-15-2001 +(1 row) + +SELECT to_date('2458872', 'J'); + to_date +------------ + 01-23-2020 +(1 
row) + +-- +-- Check handling of BC dates +-- +SELECT to_date('44-02-01 BC','YYYY-MM-DD BC'); + to_date +--------------- + 02-01-0044 BC +(1 row) + +SELECT to_date('-44-02-01','YYYY-MM-DD'); + to_date +--------------- + 02-01-0044 BC +(1 row) + +SELECT to_date('-44-02-01 BC','YYYY-MM-DD BC'); + to_date +------------ + 02-01-0044 +(1 row) + +SELECT to_timestamp('44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC'); + to_timestamp +--------------------------------- + Fri Feb 01 11:12:13 0044 PST BC +(1 row) + +SELECT to_timestamp('-44-02-01 11:12:13','YYYY-MM-DD HH24:MI:SS'); + to_timestamp +--------------------------------- + Fri Feb 01 11:12:13 0044 PST BC +(1 row) + +SELECT to_timestamp('-44-02-01 11:12:13 BC','YYYY-MM-DD HH24:MI:SS BC'); + to_timestamp +------------------------------ + Mon Feb 01 11:12:13 0044 PST +(1 row) + +-- +-- Check handling of multiple spaces in format and/or input +-- +SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2011-12-18 23:38:15', 'YYYY-MM-DD HH24:MI:SS'); + to_timestamp +------------------------------ + Sun Dec 18 23:38:15 2011 PST +(1 row) + +SELECT to_timestamp('2000+ JUN', 'YYYY/MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp(' 2000 +JUN', 'YYYY/MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp(' 2000 +JUN', 'YYYY//MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 +JUN', 'YYYY//MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 + JUN', 'YYYY MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 ++ JUN', 'YYYY MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 + + JUN', 'YYYY MON'); +ERROR: invalid value "+" for "MON" +DETAIL: The given value did not match any of the allowed values for this field. 
+SELECT to_timestamp('2000 + + JUN', 'YYYY MON'); + to_timestamp +------------------------------ + Thu Jun 01 00:00:00 2000 PDT +(1 row) + +SELECT to_timestamp('2000 -10', 'YYYY TZH'); + to_timestamp +------------------------------ + Sat Jan 01 02:00:00 2000 PST +(1 row) + +SELECT to_timestamp('2000 -10', 'YYYY TZH'); + to_timestamp +------------------------------ + Fri Dec 31 06:00:00 1999 PST +(1 row) + +SELECT to_date('2011 12 18', 'YYYY MM DD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 12 18', 'YYYY MM DD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 12 18', 'YYYY MM DD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 12 18', 'YYYY MM DD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 12 18', 'YYYY MM DD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 12 18', 'YYYY MM DD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 12 18', 'YYYYxMMxDD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011x 12x 18', 'YYYYxMMxDD'); + to_date +------------ + 12-18-2011 +(1 row) + +SELECT to_date('2011 x12 x18', 'YYYYxMMxDD'); +ERROR: invalid value "x1" for "MM" +DETAIL: Value must be an integer. +-- +-- Check errors for some incorrect usages of to_timestamp() and to_date() +-- +-- Mixture of date conventions (ISO week and Gregorian): +SELECT to_timestamp('2005527', 'YYYYIWID'); +ERROR: invalid combination of date conventions +HINT: Do not mix Gregorian and ISO week date conventions in a formatting template. +-- Insufficient characters in the source string: +SELECT to_timestamp('19971', 'YYYYMMDD'); +ERROR: source string too short for "MM" formatting field +DETAIL: Field requires 2 characters, but only 1 remain. +HINT: If your source string is not fixed-width, try using the "FM" modifier. +-- Insufficient digit characters for a single node: +SELECT to_timestamp('19971)24', 'YYYYMMDD'); +ERROR: invalid value "1)" for "MM" +DETAIL: Field requires 2 characters, but only 1 could be parsed. +HINT: If your source string is not fixed-width, try using the "FM" modifier. +-- We don't accept full-length day or month names if short form is specified: +SELECT to_timestamp('Friday 1-January-1999', 'DY DD MON YYYY'); +ERROR: invalid value "da" for "DD" +DETAIL: Value must be an integer. +SELECT to_timestamp('Fri 1-January-1999', 'DY DD MON YYYY'); +ERROR: invalid value "uary" for "YYYY" +DETAIL: Value must be an integer. +SELECT to_timestamp('Fri 1-Jan-1999', 'DY DD MON YYYY'); -- ok + to_timestamp +------------------------------ + Fri Jan 01 00:00:00 1999 PST +(1 row) + +-- Value clobbering: +SELECT to_timestamp('1997-11-Jan-16', 'YYYY-MM-Mon-DD'); +ERROR: conflicting values for "Mon" field in formatting string +DETAIL: This value contradicts a previous setting for the same field type. +-- Non-numeric input: +SELECT to_timestamp('199711xy', 'YYYYMMDD'); +ERROR: invalid value "xy" for "DD" +DETAIL: Value must be an integer. +-- Input that doesn't fit in an int: +SELECT to_timestamp('10000000000', 'FMYYYY'); +ERROR: value for "YYYY" in source string is out of range +DETAIL: Value must be in the range -2147483648 to 2147483647. 
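Illustrative sketch (not part of the regression output above; the query is hypothetical): the "FM" modifier recommended by the error hints relaxes fixed-width parsing, so under-width fields parse instead of failing.

    -- each FM-prefixed item accepts fewer characters than its template width
    SELECT to_timestamp('1997-2-5 8:3:2', 'FMYYYY-FMMM-FMDD FMHH24:FMMI:FMSS');
    -- without FM, "MM" would demand two digits and raise the
    -- "source string too short" error shown earlier in this file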
+-- Out-of-range and not-quite-out-of-range fields: +SELECT to_timestamp('2016-06-13 25:00:00', 'YYYY-MM-DD HH24:MI:SS'); +ERROR: date/time field value out of range: "2016-06-13 25:00:00" +SELECT to_timestamp('2016-06-13 15:60:00', 'YYYY-MM-DD HH24:MI:SS'); +ERROR: date/time field value out of range: "2016-06-13 15:60:00" +SELECT to_timestamp('2016-06-13 15:50:60', 'YYYY-MM-DD HH24:MI:SS'); +ERROR: date/time field value out of range: "2016-06-13 15:50:60" +SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); -- ok + to_timestamp +------------------------------ + Mon Jun 13 15:50:55 2016 PDT +(1 row) + +SELECT to_timestamp('2016-06-13 15:50:55', 'YYYY-MM-DD HH:MI:SS'); +ERROR: hour "15" is invalid for the 12-hour clock +HINT: Use the 24-hour clock, or give an hour between 1 and 12. +SELECT to_timestamp('2016-13-01 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); +ERROR: date/time field value out of range: "2016-13-01 15:50:55" +SELECT to_timestamp('2016-02-30 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); +ERROR: date/time field value out of range: "2016-02-30 15:50:55" +SELECT to_timestamp('2016-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); -- ok + to_timestamp +------------------------------ + Mon Feb 29 15:50:55 2016 PST +(1 row) + +SELECT to_timestamp('2015-02-29 15:50:55', 'YYYY-MM-DD HH24:MI:SS'); +ERROR: date/time field value out of range: "2015-02-29 15:50:55" +SELECT to_timestamp('2015-02-11 86000', 'YYYY-MM-DD SSSS'); -- ok + to_timestamp +------------------------------ + Wed Feb 11 23:53:20 2015 PST +(1 row) + +SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSS'); +ERROR: date/time field value out of range: "2015-02-11 86400" +SELECT to_timestamp('2015-02-11 86000', 'YYYY-MM-DD SSSSS'); -- ok + to_timestamp +------------------------------ + Wed Feb 11 23:53:20 2015 PST +(1 row) + +SELECT to_timestamp('2015-02-11 86400', 'YYYY-MM-DD SSSSS'); +ERROR: date/time field value out of range: "2015-02-11 86400" +SELECT to_date('2016-13-10', 'YYYY-MM-DD'); +ERROR: date/time field value out of range: "2016-13-10" +SELECT to_date('2016-02-30', 'YYYY-MM-DD'); +ERROR: date/time field value out of range: "2016-02-30" +SELECT to_date('2016-02-29', 'YYYY-MM-DD'); -- ok + to_date +------------ + 02-29-2016 +(1 row) + +SELECT to_date('2015-02-29', 'YYYY-MM-DD'); +ERROR: date/time field value out of range: "2015-02-29" +SELECT to_date('2015 365', 'YYYY DDD'); -- ok + to_date +------------ + 12-31-2015 +(1 row) + +SELECT to_date('2015 366', 'YYYY DDD'); +ERROR: date/time field value out of range: "2015 366" +SELECT to_date('2016 365', 'YYYY DDD'); -- ok + to_date +------------ + 12-30-2016 +(1 row) + +SELECT to_date('2016 366', 'YYYY DDD'); -- ok + to_date +------------ + 12-31-2016 +(1 row) + +SELECT to_date('2016 367', 'YYYY DDD'); +ERROR: date/time field value out of range: "2016 367" +SELECT to_date('0000-02-01','YYYY-MM-DD'); -- allowed, though it shouldn't be + to_date +--------------- + 02-01-0001 BC +(1 row) + +-- +-- Check behavior with SQL-style fixed-GMT-offset time zone (cf bug #8572) +-- +SET TIME ZONE 'America/New_York'; +SET TIME ZONE '-1.5'; +SHOW TIME ZONE; + TimeZone +---------------- + <-01:30>+01:30 +(1 row) + +SELECT '2012-12-12 12:00'::timestamptz; + timestamptz +--------------------------------- + Wed Dec 12 12:00:00 2012 -01:30 +(1 row) + +SELECT '2012-12-12 12:00 America/New_York'::timestamptz; + timestamptz +--------------------------------- + Wed Dec 12 15:30:00 2012 -01:30 +(1 row) + +SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD HH:MI:SS TZ'); + to_char 
+---------------------------- + 2012-12-12 12:00:00 -01:30 +(1 row) + +SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSS'); + to_char +------------------ + 2012-12-12 43200 +(1 row) + +SELECT to_char('2012-12-12 12:00'::timestamptz, 'YYYY-MM-DD SSSSS'); + to_char +------------------ + 2012-12-12 43200 +(1 row) + +RESET TIME ZONE; diff --git a/src/test/regress/expected/identity.out b/src/test/regress/expected/identity.out new file mode 100644 index 0000000..5f03d8e --- /dev/null +++ b/src/test/regress/expected/identity.out @@ -0,0 +1,616 @@ +-- sanity check of system catalog +SELECT attrelid, attname, attidentity FROM pg_attribute WHERE attidentity NOT IN ('', 'a', 'd'); + attrelid | attname | attidentity +----------+---------+------------- +(0 rows) + +CREATE TABLE itest1 (a int generated by default as identity, b text); +CREATE TABLE itest2 (a bigint generated always as identity, b text); +CREATE TABLE itest3 (a smallint generated by default as identity (start with 7 increment by 5), b text); +ALTER TABLE itest3 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error +ERROR: column "a" of relation "itest3" is already an identity column +SELECT table_name, column_name, column_default, is_nullable, is_identity, identity_generation, identity_start, identity_increment, identity_maximum, identity_minimum, identity_cycle FROM information_schema.columns WHERE table_name LIKE 'itest_' ORDER BY 1, 2; + table_name | column_name | column_default | is_nullable | is_identity | identity_generation | identity_start | identity_increment | identity_maximum | identity_minimum | identity_cycle +------------+-------------+----------------+-------------+-------------+---------------------+----------------+--------------------+---------------------+------------------+---------------- + itest1 | a | | NO | YES | BY DEFAULT | 1 | 1 | 2147483647 | 1 | NO + itest1 | b | | YES | NO | | | | | | NO + itest2 | a | | NO | YES | ALWAYS | 1 | 1 | 9223372036854775807 | 1 | NO + itest2 | b | | YES | NO | | | | | | NO + itest3 | a | | NO | YES | BY DEFAULT | 7 | 5 | 32767 | 1 | NO + itest3 | b | | YES | NO | | | | | | NO +(6 rows) + +-- internal sequences should not be shown here +SELECT sequence_name FROM information_schema.sequences WHERE sequence_name LIKE 'itest%'; + sequence_name +--------------- +(0 rows) + +SELECT pg_get_serial_sequence('itest1', 'a'); + pg_get_serial_sequence +------------------------ + public.itest1_a_seq +(1 row) + +\d itest1_a_seq + Sequence "public.itest1_a_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? 
| Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Sequence for identity column: public.itest1.a + +CREATE TABLE itest4 (a int, b text); +ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error, requires NOT NULL +ERROR: column "a" of relation "itest4" must be declared NOT NULL before identity can be added +ALTER TABLE itest4 ALTER COLUMN a SET NOT NULL; +ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- ok +ALTER TABLE itest4 ALTER COLUMN a DROP NOT NULL; -- error, disallowed +ERROR: column "a" of relation "itest4" is an identity column +ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error, already set +ERROR: column "a" of relation "itest4" is already an identity column +ALTER TABLE itest4 ALTER COLUMN b ADD GENERATED ALWAYS AS IDENTITY; -- error, wrong data type +ERROR: identity column type must be smallint, integer, or bigint +-- for later +ALTER TABLE itest4 ALTER COLUMN b SET DEFAULT ''; +-- invalid column type +CREATE TABLE itest_err_1 (a text generated by default as identity); +ERROR: identity column type must be smallint, integer, or bigint +-- duplicate identity +CREATE TABLE itest_err_2 (a int generated always as identity generated by default as identity); +ERROR: multiple identity specifications for column "a" of table "itest_err_2" +LINE 1: ...E itest_err_2 (a int generated always as identity generated ... + ^ +-- cannot have default and identity +CREATE TABLE itest_err_3 (a int default 5 generated by default as identity); +ERROR: both default and identity specified for column "a" of table "itest_err_3" +LINE 1: CREATE TABLE itest_err_3 (a int default 5 generated by defau... + ^ +-- cannot combine serial and identity +CREATE TABLE itest_err_4 (a serial generated by default as identity); +ERROR: both default and identity specified for column "a" of table "itest_err_4" +INSERT INTO itest1 DEFAULT VALUES; +INSERT INTO itest1 DEFAULT VALUES; +INSERT INTO itest2 DEFAULT VALUES; +INSERT INTO itest2 DEFAULT VALUES; +INSERT INTO itest3 DEFAULT VALUES; +INSERT INTO itest3 DEFAULT VALUES; +INSERT INTO itest4 DEFAULT VALUES; +INSERT INTO itest4 DEFAULT VALUES; +SELECT * FROM itest1; + a | b +---+--- + 1 | + 2 | +(2 rows) + +SELECT * FROM itest2; + a | b +---+--- + 1 | + 2 | +(2 rows) + +SELECT * FROM itest3; + a | b +----+--- + 7 | + 12 | +(2 rows) + +SELECT * FROM itest4; + a | b +---+--- + 1 | + 2 | +(2 rows) + +-- VALUES RTEs +CREATE TABLE itest5 (a int generated always as identity, b text); +INSERT INTO itest5 VALUES (1, 'a'); -- error +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. +HINT: Use OVERRIDING SYSTEM VALUE to override. +INSERT INTO itest5 VALUES (DEFAULT, 'a'); -- ok +INSERT INTO itest5 VALUES (2, 'b'), (3, 'c'); -- error +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. +HINT: Use OVERRIDING SYSTEM VALUE to override. +INSERT INTO itest5 VALUES (DEFAULT, 'b'), (3, 'c'); -- error +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. +HINT: Use OVERRIDING SYSTEM VALUE to override. +INSERT INTO itest5 VALUES (2, 'b'), (DEFAULT, 'c'); -- error +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. 
+HINT: Use OVERRIDING SYSTEM VALUE to override. +INSERT INTO itest5 VALUES (DEFAULT, 'b'), (DEFAULT, 'c'); -- ok +INSERT INTO itest5 OVERRIDING SYSTEM VALUE VALUES (-1, 'aa'); +INSERT INTO itest5 OVERRIDING SYSTEM VALUE VALUES (-2, 'bb'), (-3, 'cc'); +INSERT INTO itest5 OVERRIDING SYSTEM VALUE VALUES (DEFAULT, 'dd'), (-4, 'ee'); +INSERT INTO itest5 OVERRIDING SYSTEM VALUE VALUES (-5, 'ff'), (DEFAULT, 'gg'); +INSERT INTO itest5 OVERRIDING SYSTEM VALUE VALUES (DEFAULT, 'hh'), (DEFAULT, 'ii'); +INSERT INTO itest5 OVERRIDING USER VALUE VALUES (-1, 'aaa'); +INSERT INTO itest5 OVERRIDING USER VALUE VALUES (-2, 'bbb'), (-3, 'ccc'); +INSERT INTO itest5 OVERRIDING USER VALUE VALUES (DEFAULT, 'ddd'), (-4, 'eee'); +INSERT INTO itest5 OVERRIDING USER VALUE VALUES (-5, 'fff'), (DEFAULT, 'ggg'); +INSERT INTO itest5 OVERRIDING USER VALUE VALUES (DEFAULT, 'hhh'), (DEFAULT, 'iii'); +SELECT * FROM itest5; + a | b +----+----- + 1 | a + 2 | b + 3 | c + -1 | aa + -2 | bb + -3 | cc + 4 | dd + -4 | ee + -5 | ff + 5 | gg + 6 | hh + 7 | ii + 8 | aaa + 9 | bbb + 10 | ccc + 11 | ddd + 12 | eee + 13 | fff + 14 | ggg + 15 | hhh + 16 | iii +(21 rows) + +DROP TABLE itest5; +INSERT INTO itest3 VALUES (DEFAULT, 'a'); +INSERT INTO itest3 VALUES (DEFAULT, 'b'), (DEFAULT, 'c'); +SELECT * FROM itest3; + a | b +----+--- + 7 | + 12 | + 17 | a + 22 | b + 27 | c +(5 rows) + +-- OVERRIDING tests +-- GENERATED BY DEFAULT +-- This inserts the row as presented: +INSERT INTO itest1 VALUES (10, 'xyz'); +-- With GENERATED BY DEFAULT, OVERRIDING SYSTEM VALUE is not allowed +-- by the standard, but we allow it as a no-op, since it is of use if +-- there are multiple identity columns in a table, which is also an +-- extension. +INSERT INTO itest1 OVERRIDING SYSTEM VALUE VALUES (20, 'xyz'); +-- This ignores the 30 and uses the sequence value instead: +INSERT INTO itest1 OVERRIDING USER VALUE VALUES (30, 'xyz'); +SELECT * FROM itest1; + a | b +----+----- + 1 | + 2 | + 10 | xyz + 20 | xyz + 3 | xyz +(5 rows) + +-- GENERATED ALWAYS +-- This is an error: +INSERT INTO itest2 VALUES (10, 'xyz'); +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. +HINT: Use OVERRIDING SYSTEM VALUE to override. +-- This inserts the row as presented: +INSERT INTO itest2 OVERRIDING SYSTEM VALUE VALUES (20, 'xyz'); +-- This ignores the 30 and uses the sequence value instead: +INSERT INTO itest2 OVERRIDING USER VALUE VALUES (30, 'xyz'); +SELECT * FROM itest2; + a | b +----+----- + 1 | + 2 | + 20 | xyz + 3 | xyz +(4 rows) + +-- UPDATE tests +-- GENERATED BY DEFAULT is not restricted. +UPDATE itest1 SET a = 101 WHERE a = 1; +UPDATE itest1 SET a = DEFAULT WHERE a = 2; +SELECT * FROM itest1; + a | b +-----+----- + 10 | xyz + 20 | xyz + 3 | xyz + 101 | + 4 | +(5 rows) + +-- GENERATED ALWAYS allows only DEFAULT. +UPDATE itest2 SET a = 101 WHERE a = 1; -- error +ERROR: column "a" can only be updated to DEFAULT +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. 
+UPDATE itest2 SET a = DEFAULT WHERE a = 2; -- ok +SELECT * FROM itest2; + a | b +----+----- + 1 | + 20 | xyz + 3 | xyz + 4 | +(4 rows) + +-- COPY tests +CREATE TABLE itest9 (a int GENERATED ALWAYS AS IDENTITY, b text, c bigint); +COPY itest9 FROM stdin; +COPY itest9 (b, c) FROM stdin; +SELECT * FROM itest9 ORDER BY c; + a | b | c +-----+------+----- + 100 | foo | 200 + 101 | bar | 201 + 1 | foo2 | 202 + 2 | bar2 | 203 +(4 rows) + +-- DROP IDENTITY tests +ALTER TABLE itest4 ALTER COLUMN a DROP IDENTITY; +ALTER TABLE itest4 ALTER COLUMN a DROP IDENTITY; -- error +ERROR: column "a" of relation "itest4" is not an identity column +ALTER TABLE itest4 ALTER COLUMN a DROP IDENTITY IF EXISTS; -- noop +NOTICE: column "a" of relation "itest4" is not an identity column, skipping +INSERT INTO itest4 DEFAULT VALUES; -- fails because NOT NULL is not dropped +ERROR: null value in column "a" of relation "itest4" violates not-null constraint +DETAIL: Failing row contains (null, ). +ALTER TABLE itest4 ALTER COLUMN a DROP NOT NULL; +INSERT INTO itest4 DEFAULT VALUES; +SELECT * FROM itest4; + a | b +---+--- + 1 | + 2 | + | +(3 rows) + +-- check that sequence is removed +SELECT sequence_name FROM itest4_a_seq; +ERROR: relation "itest4_a_seq" does not exist +LINE 1: SELECT sequence_name FROM itest4_a_seq; + ^ +-- test views +CREATE TABLE itest10 (a int generated by default as identity, b text); +CREATE TABLE itest11 (a int generated always as identity, b text); +CREATE VIEW itestv10 AS SELECT * FROM itest10; +CREATE VIEW itestv11 AS SELECT * FROM itest11; +INSERT INTO itestv10 DEFAULT VALUES; +INSERT INTO itestv10 DEFAULT VALUES; +INSERT INTO itestv11 DEFAULT VALUES; +INSERT INTO itestv11 DEFAULT VALUES; +SELECT * FROM itestv10; + a | b +---+--- + 1 | + 2 | +(2 rows) + +SELECT * FROM itestv11; + a | b +---+--- + 1 | + 2 | +(2 rows) + +INSERT INTO itestv10 VALUES (10, 'xyz'); +INSERT INTO itestv10 OVERRIDING USER VALUE VALUES (11, 'xyz'); +SELECT * FROM itestv10; + a | b +----+----- + 1 | + 2 | + 10 | xyz + 3 | xyz +(4 rows) + +INSERT INTO itestv11 VALUES (10, 'xyz'); +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. +HINT: Use OVERRIDING SYSTEM VALUE to override. 
+INSERT INTO itestv11 OVERRIDING SYSTEM VALUE VALUES (11, 'xyz'); +SELECT * FROM itestv11; + a | b +----+----- + 1 | + 2 | + 11 | xyz +(3 rows) + +DROP VIEW itestv10, itestv11; +-- ADD COLUMN +CREATE TABLE itest13 (a int); +-- add column to empty table +ALTER TABLE itest13 ADD COLUMN b int GENERATED BY DEFAULT AS IDENTITY; +INSERT INTO itest13 VALUES (1), (2), (3); +-- add column to populated table +ALTER TABLE itest13 ADD COLUMN c int GENERATED BY DEFAULT AS IDENTITY; +SELECT * FROM itest13; + a | b | c +---+---+--- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 +(3 rows) + +-- various ALTER COLUMN tests +-- fail, not allowed for identity columns +ALTER TABLE itest1 ALTER COLUMN a SET DEFAULT 1; +ERROR: column "a" of relation "itest1" is an identity column +-- fail, not allowed, already has a default +CREATE TABLE itest5 (a serial, b text); +ALTER TABLE itest5 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; +ERROR: column "a" of relation "itest5" already has a default value +ALTER TABLE itest3 ALTER COLUMN a TYPE int; +SELECT seqtypid::regtype FROM pg_sequence WHERE seqrelid = 'itest3_a_seq'::regclass; + seqtypid +---------- + integer +(1 row) + +\d itest3 + Table "public.itest3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+---------------------------------- + a | integer | | not null | generated by default as identity + b | text | | | + +ALTER TABLE itest3 ALTER COLUMN a TYPE text; -- error +ERROR: identity column type must be smallint, integer, or bigint +-- kinda silly to change property in the same command, but it should work +ALTER TABLE itest3 + ADD COLUMN c int GENERATED BY DEFAULT AS IDENTITY, + ALTER COLUMN c SET GENERATED ALWAYS; +\d itest3 + Table "public.itest3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+---------------------------------- + a | integer | | not null | generated by default as identity + b | text | | | + c | integer | | not null | generated always as identity + +-- ALTER COLUMN ... SET +CREATE TABLE itest6 (a int GENERATED ALWAYS AS IDENTITY, b text); +INSERT INTO itest6 DEFAULT VALUES; +ALTER TABLE itest6 ALTER COLUMN a SET GENERATED BY DEFAULT SET INCREMENT BY 2 SET START WITH 100 RESTART; +INSERT INTO itest6 DEFAULT VALUES; +INSERT INTO itest6 DEFAULT VALUES; +SELECT * FROM itest6; + a | b +-----+--- + 1 | + 100 | + 102 | +(3 rows) + +SELECT table_name, column_name, is_identity, identity_generation FROM information_schema.columns WHERE table_name = 'itest6' ORDER BY 1, 2; + table_name | column_name | is_identity | identity_generation +------------+-------------+-------------+--------------------- + itest6 | a | YES | BY DEFAULT + itest6 | b | NO | +(2 rows) + +ALTER TABLE itest6 ALTER COLUMN b SET INCREMENT BY 2; -- fail, not identity +ERROR: column "b" of relation "itest6" is not an identity column +-- prohibited direct modification of sequence +ALTER SEQUENCE itest6_a_seq OWNED BY NONE; +ERROR: cannot change ownership of identity sequence +DETAIL: Sequence "itest6_a_seq" is linked to table "itest6". 
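Illustrative sketch (not part of the expected output; the table, column, and values are hypothetical) of the identity-column options these tests exercise:

    -- GENERATED ALWAYS rejects user-supplied values unless explicitly overridden
    CREATE TABLE widgets (
        id   bigint GENERATED ALWAYS AS IDENTITY (START WITH 1000 INCREMENT BY 10),
        name text
    );
    INSERT INTO widgets (name) VALUES ('first');                        -- id = 1000
    INSERT INTO widgets OVERRIDING SYSTEM VALUE VALUES (42, 'manual');  -- explicit id accepted
    -- the backing sequence is managed through the column, not ALTER SEQUENCE
    ALTER TABLE widgets ALTER COLUMN id SET GENERATED BY DEFAULT SET INCREMENT BY 1;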
+-- inheritance +CREATE TABLE itest7 (a int GENERATED ALWAYS AS IDENTITY); +INSERT INTO itest7 DEFAULT VALUES; +SELECT * FROM itest7; + a +--- + 1 +(1 row) + +-- identity property is not inherited +CREATE TABLE itest7a (b text) INHERITS (itest7); +-- make column identity in child table +CREATE TABLE itest7b (a int); +CREATE TABLE itest7c (a int GENERATED ALWAYS AS IDENTITY) INHERITS (itest7b); +NOTICE: merging column "a" with inherited definition +INSERT INTO itest7c DEFAULT VALUES; +SELECT * FROM itest7c; + a +--- + 1 +(1 row) + +CREATE TABLE itest7d (a int not null); +CREATE TABLE itest7e () INHERITS (itest7d); +ALTER TABLE itest7d ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; +ALTER TABLE itest7d ADD COLUMN b int GENERATED ALWAYS AS IDENTITY; -- error +ERROR: cannot recursively add identity column to table that has child tables +SELECT table_name, column_name, is_nullable, is_identity, identity_generation FROM information_schema.columns WHERE table_name LIKE 'itest7%' ORDER BY 1, 2; + table_name | column_name | is_nullable | is_identity | identity_generation +------------+-------------+-------------+-------------+--------------------- + itest7 | a | NO | YES | ALWAYS + itest7a | a | NO | NO | + itest7a | b | YES | NO | + itest7b | a | YES | NO | + itest7c | a | NO | YES | ALWAYS + itest7d | a | NO | YES | ALWAYS + itest7e | a | NO | NO | +(7 rows) + +-- These ALTER TABLE variants will not recurse. +ALTER TABLE itest7 ALTER COLUMN a SET GENERATED BY DEFAULT; +ALTER TABLE itest7 ALTER COLUMN a RESTART; +ALTER TABLE itest7 ALTER COLUMN a DROP IDENTITY; +-- privileges +CREATE USER regress_identity_user1; +CREATE TABLE itest8 (a int GENERATED ALWAYS AS IDENTITY, b text); +GRANT SELECT, INSERT ON itest8 TO regress_identity_user1; +SET ROLE regress_identity_user1; +INSERT INTO itest8 DEFAULT VALUES; +SELECT * FROM itest8; + a | b +---+--- + 1 | +(1 row) + +RESET ROLE; +DROP TABLE itest8; +DROP USER regress_identity_user1; +-- multiple steps in ALTER TABLE +CREATE TABLE itest8 (f1 int); +ALTER TABLE itest8 + ADD COLUMN f2 int NOT NULL, + ALTER COLUMN f2 ADD GENERATED ALWAYS AS IDENTITY; +ALTER TABLE itest8 + ADD COLUMN f3 int NOT NULL, + ALTER COLUMN f3 ADD GENERATED ALWAYS AS IDENTITY, + ALTER COLUMN f3 SET GENERATED BY DEFAULT SET INCREMENT 10; +ALTER TABLE itest8 + ADD COLUMN f4 int; +ALTER TABLE itest8 + ALTER COLUMN f4 SET NOT NULL, + ALTER COLUMN f4 ADD GENERATED ALWAYS AS IDENTITY, + ALTER COLUMN f4 SET DATA TYPE bigint; +ALTER TABLE itest8 + ADD COLUMN f5 int GENERATED ALWAYS AS IDENTITY; +ALTER TABLE itest8 + ALTER COLUMN f5 DROP IDENTITY, + ALTER COLUMN f5 DROP NOT NULL, + ALTER COLUMN f5 SET DATA TYPE bigint; +INSERT INTO itest8 VALUES(0), (1); +-- This does not work when the table isn't empty. 
That's intentional, +-- since ADD GENERATED should only affect later insertions: +ALTER TABLE itest8 + ADD COLUMN f22 int NOT NULL, + ALTER COLUMN f22 ADD GENERATED ALWAYS AS IDENTITY; +ERROR: column "f22" of relation "itest8" contains null values +TABLE itest8; + f1 | f2 | f3 | f4 | f5 +----+----+----+----+---- + 0 | 1 | 1 | 1 | + 1 | 2 | 11 | 2 | +(2 rows) + +\d+ itest8 + Table "public.itest8" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+----------------------------------+---------+--------------+------------- + f1 | integer | | | | plain | | + f2 | integer | | not null | generated always as identity | plain | | + f3 | integer | | not null | generated by default as identity | plain | | + f4 | bigint | | not null | generated always as identity | plain | | + f5 | bigint | | | | plain | | + +\d itest8_f2_seq + Sequence "public.itest8_f2_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Sequence for identity column: public.itest8.f2 + +\d itest8_f3_seq + Sequence "public.itest8_f3_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 10 | no | 1 +Sequence for identity column: public.itest8.f3 + +\d itest8_f4_seq + Sequence "public.itest8_f4_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +--------+-------+---------+---------------------+-----------+---------+------- + bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1 +Sequence for identity column: public.itest8.f4 + +\d itest8_f5_seq +DROP TABLE itest8; +-- typed tables (currently not supported) +CREATE TYPE itest_type AS (f1 integer, f2 text, f3 bigint); +CREATE TABLE itest12 OF itest_type (f1 WITH OPTIONS GENERATED ALWAYS AS IDENTITY); -- error +ERROR: identity columns are not supported on typed tables +DROP TYPE itest_type CASCADE; +-- table partitions (currently not supported) +CREATE TABLE itest_parent (f1 date NOT NULL, f2 text, f3 bigint) PARTITION BY RANGE (f1); +CREATE TABLE itest_child PARTITION OF itest_parent ( + f3 WITH OPTIONS GENERATED ALWAYS AS IDENTITY +) FOR VALUES FROM ('2016-07-01') TO ('2016-08-01'); -- error +ERROR: identity columns are not supported on partitions +DROP TABLE itest_parent; +-- test that sequence of half-dropped serial column is properly ignored +CREATE TABLE itest14 (id serial); +ALTER TABLE itest14 ALTER id DROP DEFAULT; +ALTER TABLE itest14 ALTER id ADD GENERATED BY DEFAULT AS IDENTITY; +INSERT INTO itest14 (id) VALUES (DEFAULT); +-- Identity columns must be NOT NULL (cf bug #16913) +CREATE TABLE itest15 (id integer GENERATED ALWAYS AS IDENTITY NULL); -- fail +ERROR: conflicting NULL/NOT NULL declarations for column "id" of table "itest15" +LINE 1: ...ABLE itest15 (id integer GENERATED ALWAYS AS IDENTITY NULL); + ^ +CREATE TABLE itest15 (id integer NULL GENERATED ALWAYS AS IDENTITY); -- fail +ERROR: conflicting NULL/NOT NULL declarations for column "id" of table "itest15" +LINE 1: CREATE TABLE itest15 (id integer NULL GENERATED ALWAYS AS ID... 
+ ^ +CREATE TABLE itest15 (id integer GENERATED ALWAYS AS IDENTITY NOT NULL); +DROP TABLE itest15; +CREATE TABLE itest15 (id integer NOT NULL GENERATED ALWAYS AS IDENTITY); +DROP TABLE itest15; +-- MERGE tests +CREATE TABLE itest15 (a int GENERATED ALWAYS AS IDENTITY, b text); +CREATE TABLE itest16 (a int GENERATED BY DEFAULT AS IDENTITY, b text); +MERGE INTO itest15 t +USING (SELECT 10 AS s_a, 'inserted by merge' AS s_b) s +ON t.a = s.s_a +WHEN NOT MATCHED THEN + INSERT (a, b) VALUES (s.s_a, s.s_b); +ERROR: cannot insert a non-DEFAULT value into column "a" +DETAIL: Column "a" is an identity column defined as GENERATED ALWAYS. +HINT: Use OVERRIDING SYSTEM VALUE to override. +-- Used to fail, but now it works and ignores the user supplied value +MERGE INTO itest15 t +USING (SELECT 20 AS s_a, 'inserted by merge' AS s_b) s +ON t.a = s.s_a +WHEN NOT MATCHED THEN + INSERT (a, b) OVERRIDING USER VALUE VALUES (s.s_a, s.s_b); +MERGE INTO itest15 t +USING (SELECT 30 AS s_a, 'inserted by merge' AS s_b) s +ON t.a = s.s_a +WHEN NOT MATCHED THEN + INSERT (a, b) OVERRIDING SYSTEM VALUE VALUES (s.s_a, s.s_b); +MERGE INTO itest16 t +USING (SELECT 10 AS s_a, 'inserted by merge' AS s_b) s +ON t.a = s.s_a +WHEN NOT MATCHED THEN + INSERT (a, b) VALUES (s.s_a, s.s_b); +MERGE INTO itest16 t +USING (SELECT 20 AS s_a, 'inserted by merge' AS s_b) s +ON t.a = s.s_a +WHEN NOT MATCHED THEN + INSERT (a, b) OVERRIDING USER VALUE VALUES (s.s_a, s.s_b); +MERGE INTO itest16 t +USING (SELECT 30 AS s_a, 'inserted by merge' AS s_b) s +ON t.a = s.s_a +WHEN NOT MATCHED THEN + INSERT (a, b) OVERRIDING SYSTEM VALUE VALUES (s.s_a, s.s_b); +SELECT * FROM itest15; + a | b +----+------------------- + 1 | inserted by merge + 30 | inserted by merge +(2 rows) + +SELECT * FROM itest16; + a | b +----+------------------- + 10 | inserted by merge + 1 | inserted by merge + 30 | inserted by merge +(3 rows) + +DROP TABLE itest15; +DROP TABLE itest16; diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out new file mode 100644 index 0000000..0c3433f --- /dev/null +++ b/src/test/regress/expected/incremental_sort.out @@ -0,0 +1,1662 @@ +-- When there is a LIMIT clause, incremental sort is beneficial because +-- it only has to sort some of the groups, and not the entire table. +explain (costs off) +select * from (select * from tenk1 order by four) t order by four, ten +limit 1; + QUERY PLAN +----------------------------------------- + Limit + -> Incremental Sort + Sort Key: tenk1.four, tenk1.ten + Presorted Key: tenk1.four + -> Sort + Sort Key: tenk1.four + -> Seq Scan on tenk1 +(7 rows) + +-- When work_mem is not enough to sort the entire table, incremental sort +-- may be faster if individual groups still fit into work_mem. 
+set work_mem to '2MB'; +explain (costs off) +select * from (select * from tenk1 order by four) t order by four, ten; + QUERY PLAN +----------------------------------- + Incremental Sort + Sort Key: tenk1.four, tenk1.ten + Presorted Key: tenk1.four + -> Sort + Sort Key: tenk1.four + -> Seq Scan on tenk1 +(6 rows) + +reset work_mem; +create table t(a integer, b integer); +create or replace function explain_analyze_without_memory(query text) +returns table (out_line text) language plpgsql +as +$$ +declare + line text; +begin + for line in + execute 'explain (analyze, costs off, summary off, timing off) ' || query + loop + out_line := regexp_replace(line, '\d+kB', 'NNkB', 'g'); + return next; + end loop; +end; +$$; +create or replace function explain_analyze_inc_sort_nodes(query text) +returns jsonb language plpgsql +as +$$ +declare + elements jsonb; + element jsonb; + matching_nodes jsonb := '[]'::jsonb; +begin + execute 'explain (analyze, costs off, summary off, timing off, format ''json'') ' || query into strict elements; + while jsonb_array_length(elements) > 0 loop + element := elements->0; + elements := elements - 0; + case jsonb_typeof(element) + when 'array' then + if jsonb_array_length(element) > 0 then + elements := elements || element; + end if; + when 'object' then + if element ? 'Plan' then + elements := elements || jsonb_build_array(element->'Plan'); + element := element - 'Plan'; + else + if element ? 'Plans' then + elements := elements || jsonb_build_array(element->'Plans'); + element := element - 'Plans'; + end if; + if (element->>'Node Type')::text = 'Incremental Sort' then + matching_nodes := matching_nodes || element; + end if; + end if; + end case; + end loop; + return matching_nodes; +end; +$$; +create or replace function explain_analyze_inc_sort_nodes_without_memory(query text) +returns jsonb language plpgsql +as +$$ +declare + nodes jsonb := '[]'::jsonb; + node jsonb; + group_key text; + space_key text; +begin + for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop + for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop + for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop + node := jsonb_set(node, array[group_key, space_key, 'Average Sort Space Used'], '"NN"', false); + node := jsonb_set(node, array[group_key, space_key, 'Peak Sort Space Used'], '"NN"', false); + end loop; + end loop; + nodes := nodes || node; + end loop; + return nodes; +end; +$$; +create or replace function explain_analyze_inc_sort_nodes_verify_invariants(query text) +returns bool language plpgsql +as +$$ +declare + node jsonb; + group_stats jsonb; + group_key text; + space_key text; +begin + for node in select * from jsonb_array_elements(explain_analyze_inc_sort_nodes(query)) t loop + for group_key in select unnest(array['Full-sort Groups', 'Pre-sorted Groups']::text[]) t loop + group_stats := node->group_key; + for space_key in select unnest(array['Sort Space Memory', 'Sort Space Disk']::text[]) t loop + if (group_stats->space_key->'Peak Sort Space Used')::bigint < (group_stats->space_key->'Peak Sort Space Used')::bigint then + raise exception '% has invalid max space < average space', group_key; + end if; + end loop; + end loop; + end loop; + return true; +end; +$$; +-- A single large group tested around each mode transition point. 
+insert into t(a, b) select i/100 + 1, i + 1 from generate_series(0, 999) n(i); +analyze t; +explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 31; + a | b +---+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 1 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 1 | 20 + 1 | 21 + 1 | 22 + 1 | 23 + 1 | 24 + 1 | 25 + 1 | 26 + 1 | 27 + 1 | 28 + 1 | 29 + 1 | 30 + 1 | 31 +(31 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 32; + a | b +---+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 1 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 1 | 20 + 1 | 21 + 1 | 22 + 1 | 23 + 1 | 24 + 1 | 25 + 1 | 26 + 1 | 27 + 1 | 28 + 1 | 29 + 1 | 30 + 1 | 31 + 1 | 32 +(32 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 33; + a | b +---+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 1 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 1 | 20 + 1 | 21 + 1 | 22 + 1 | 23 + 1 | 24 + 1 | 25 + 1 | 26 + 1 | 27 + 1 | 28 + 1 | 29 + 1 | 30 + 1 | 31 + 1 | 32 + 1 | 33 +(33 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 65; + a | b +---+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 1 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 1 | 20 + 1 | 21 + 1 | 22 + 1 | 23 + 1 | 24 + 1 | 25 + 1 | 26 + 1 | 27 + 1 | 28 + 1 | 29 + 1 | 30 + 1 | 31 + 1 | 32 + 1 | 33 + 1 | 34 + 1 | 35 + 1 | 36 + 1 | 37 + 1 | 38 + 1 | 39 + 1 | 40 + 1 | 41 + 1 | 42 + 1 | 43 + 1 | 44 + 1 | 45 + 1 | 46 + 1 | 47 + 1 | 48 + 1 | 49 + 1 | 50 + 1 | 51 + 1 | 52 + 1 | 53 + 1 | 54 + 1 | 55 + 1 | 56 + 1 | 57 + 1 | 58 + 1 | 59 + 1 | 60 + 1 | 61 + 1 | 62 + 1 | 63 + 1 | 64 + 1 | 65 +(65 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 66; + a | b +---+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 1 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 1 | 20 + 1 | 21 + 1 | 22 + 1 | 23 + 1 | 24 + 1 | 25 + 1 | 
26 + 1 | 27 + 1 | 28 + 1 | 29 + 1 | 30 + 1 | 31 + 1 | 32 + 1 | 33 + 1 | 34 + 1 | 35 + 1 | 36 + 1 | 37 + 1 | 38 + 1 | 39 + 1 | 40 + 1 | 41 + 1 | 42 + 1 | 43 + 1 | 44 + 1 | 45 + 1 | 46 + 1 | 47 + 1 | 48 + 1 | 49 + 1 | 50 + 1 | 51 + 1 | 52 + 1 | 53 + 1 | 54 + 1 | 55 + 1 | 56 + 1 | 57 + 1 | 58 + 1 | 59 + 1 | 60 + 1 | 61 + 1 | 62 + 1 | 63 + 1 | 64 + 1 | 65 + 1 | 66 +(66 rows) + +delete from t; +-- An initial large group followed by a small group. +insert into t(a, b) select i/50 + 1, i + 1 from generate_series(0, 999) n(i); +analyze t; +explain (costs off) select * from (select * from t order by a) s order by a, b limit 55; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 55; + a | b +---+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 1 | 6 + 1 | 7 + 1 | 8 + 1 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 1 | 20 + 1 | 21 + 1 | 22 + 1 | 23 + 1 | 24 + 1 | 25 + 1 | 26 + 1 | 27 + 1 | 28 + 1 | 29 + 1 | 30 + 1 | 31 + 1 | 32 + 1 | 33 + 1 | 34 + 1 | 35 + 1 | 36 + 1 | 37 + 1 | 38 + 1 | 39 + 1 | 40 + 1 | 41 + 1 | 42 + 1 | 43 + 1 | 44 + 1 | 45 + 1 | 46 + 1 | 47 + 1 | 48 + 1 | 49 + 1 | 50 + 2 | 51 + 2 | 52 + 2 | 53 + 2 | 54 + 2 | 55 +(55 rows) + +-- Test EXPLAIN ANALYZE with only a fullsort group. +select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 55'); + explain_analyze_without_memory +--------------------------------------------------------------------------------------------------------------- + Limit (actual rows=55 loops=1) + -> Incremental Sort (actual rows=55 loops=1) + Sort Key: t.a, t.b + Presorted Key: t.a + Full-sort Groups: 2 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB + -> Sort (actual rows=101 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: NNkB + -> Seq Scan on t (actual rows=1000 loops=1) +(9 rows) + +select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 55')); + jsonb_pretty +------------------------------------------------- + [ + + { + + "Sort Key": [ + + "t.a", + + "t.b" + + ], + + "Node Type": "Incremental Sort", + + "Actual Rows": 55, + + "Actual Loops": 1, + + "Async Capable": false, + + "Presorted Key": [ + + "t.a" + + ], + + "Parallel Aware": false, + + "Full-sort Groups": { + + "Group Count": 2, + + "Sort Methods Used": [ + + "top-N heapsort", + + "quicksort" + + ], + + "Sort Space Memory": { + + "Peak Sort Space Used": "NN", + + "Average Sort Space Used": "NN"+ + } + + }, + + "Parent Relationship": "Outer" + + } + + ] +(1 row) + +select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 55'); + explain_analyze_inc_sort_nodes_verify_invariants +-------------------------------------------------- + t +(1 row) + +delete from t; +-- An initial small group followed by a large group. 
+insert into t(a, b) select (case when i < 5 then i else 9 end), i from generate_series(1, 1000) n(i); +analyze t; +explain (costs off) select * from (select * from t order by a) s order by a, b limit 70; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 70; + a | b +---+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 9 | 5 + 9 | 6 + 9 | 7 + 9 | 8 + 9 | 9 + 9 | 10 + 9 | 11 + 9 | 12 + 9 | 13 + 9 | 14 + 9 | 15 + 9 | 16 + 9 | 17 + 9 | 18 + 9 | 19 + 9 | 20 + 9 | 21 + 9 | 22 + 9 | 23 + 9 | 24 + 9 | 25 + 9 | 26 + 9 | 27 + 9 | 28 + 9 | 29 + 9 | 30 + 9 | 31 + 9 | 32 + 9 | 33 + 9 | 34 + 9 | 35 + 9 | 36 + 9 | 37 + 9 | 38 + 9 | 39 + 9 | 40 + 9 | 41 + 9 | 42 + 9 | 43 + 9 | 44 + 9 | 45 + 9 | 46 + 9 | 47 + 9 | 48 + 9 | 49 + 9 | 50 + 9 | 51 + 9 | 52 + 9 | 53 + 9 | 54 + 9 | 55 + 9 | 56 + 9 | 57 + 9 | 58 + 9 | 59 + 9 | 60 + 9 | 61 + 9 | 62 + 9 | 63 + 9 | 64 + 9 | 65 + 9 | 66 + 9 | 67 + 9 | 68 + 9 | 69 + 9 | 70 +(70 rows) + +-- Checks case where we hit a group boundary at the last tuple of a batch. +-- Because the full sort state is bounded, we scan 64 tuples (the mode +-- transition point) but only retain 5. Thus when we transition modes, all +-- tuples in the full sort state have different prefix keys. +explain (costs off) select * from (select * from t order by a) s order by a, b limit 5; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 5; + a | b +---+--- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 9 | 5 +(5 rows) + +-- Test rescan. +begin; +-- We force the planner to choose a plan with incremental sort on the right side +-- of a nested loop join node. That way we trigger the rescan code path. +set local enable_hashjoin = off; +set local enable_mergejoin = off; +set local enable_material = off; +set local enable_sort = off; +explain (costs off) select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); + QUERY PLAN +------------------------------------------------ + Nested Loop Left Join + Join Filter: (t_1.a = t.a) + -> Seq Scan on t + Filter: (a = ANY ('{1,2}'::integer[])) + -> Incremental Sort + Sort Key: t_1.a, t_1.b + Presorted Key: t_1.a + -> Sort + Sort Key: t_1.a + -> Seq Scan on t t_1 +(10 rows) + +select * from t left join (select * from (select * from t order by a) v order by a, b) s on s.a = t.a where t.a in (1, 2); + a | b | a | b +---+---+---+--- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 +(2 rows) + +rollback; +-- Test EXPLAIN ANALYZE with both fullsort and presorted groups. 
+select explain_analyze_without_memory('select * from (select * from t order by a) s order by a, b limit 70'); + explain_analyze_without_memory +---------------------------------------------------------------------------------------------------------------- + Limit (actual rows=70 loops=1) + -> Incremental Sort (actual rows=70 loops=1) + Sort Key: t.a, t.b + Presorted Key: t.a + Full-sort Groups: 1 Sort Method: quicksort Average Memory: NNkB Peak Memory: NNkB + Pre-sorted Groups: 5 Sort Methods: top-N heapsort, quicksort Average Memory: NNkB Peak Memory: NNkB + -> Sort (actual rows=1000 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: NNkB + -> Seq Scan on t (actual rows=1000 loops=1) +(10 rows) + +select jsonb_pretty(explain_analyze_inc_sort_nodes_without_memory('select * from (select * from t order by a) s order by a, b limit 70')); + jsonb_pretty +------------------------------------------------- + [ + + { + + "Sort Key": [ + + "t.a", + + "t.b" + + ], + + "Node Type": "Incremental Sort", + + "Actual Rows": 70, + + "Actual Loops": 1, + + "Async Capable": false, + + "Presorted Key": [ + + "t.a" + + ], + + "Parallel Aware": false, + + "Full-sort Groups": { + + "Group Count": 1, + + "Sort Methods Used": [ + + "quicksort" + + ], + + "Sort Space Memory": { + + "Peak Sort Space Used": "NN", + + "Average Sort Space Used": "NN"+ + } + + }, + + "Pre-sorted Groups": { + + "Group Count": 5, + + "Sort Methods Used": [ + + "top-N heapsort", + + "quicksort" + + ], + + "Sort Space Memory": { + + "Peak Sort Space Used": "NN", + + "Average Sort Space Used": "NN"+ + } + + }, + + "Parent Relationship": "Outer" + + } + + ] +(1 row) + +select explain_analyze_inc_sort_nodes_verify_invariants('select * from (select * from t order by a) s order by a, b limit 70'); + explain_analyze_inc_sort_nodes_verify_invariants +-------------------------------------------------- + t +(1 row) + +delete from t; +-- Small groups of 10 tuples each tested around each mode transition point. 
+insert into t(a, b) select i / 10, i from generate_series(1, 1000) n(i); +analyze t; +explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 31; + a | b +---+---- + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 0 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 2 | 20 + 2 | 21 + 2 | 22 + 2 | 23 + 2 | 24 + 2 | 25 + 2 | 26 + 2 | 27 + 2 | 28 + 2 | 29 + 3 | 30 + 3 | 31 +(31 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 32; + a | b +---+---- + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 0 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 2 | 20 + 2 | 21 + 2 | 22 + 2 | 23 + 2 | 24 + 2 | 25 + 2 | 26 + 2 | 27 + 2 | 28 + 2 | 29 + 3 | 30 + 3 | 31 + 3 | 32 +(32 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 33; + a | b +---+---- + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 0 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 2 | 20 + 2 | 21 + 2 | 22 + 2 | 23 + 2 | 24 + 2 | 25 + 2 | 26 + 2 | 27 + 2 | 28 + 2 | 29 + 3 | 30 + 3 | 31 + 3 | 32 + 3 | 33 +(33 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 65; + a | b +---+---- + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 0 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 2 | 20 + 2 | 21 + 2 | 22 + 2 | 23 + 2 | 24 + 2 | 25 + 2 | 26 + 2 | 27 + 2 | 28 + 2 | 29 + 3 | 30 + 3 | 31 + 3 | 32 + 3 | 33 + 3 | 34 + 3 | 35 + 3 | 36 + 3 | 37 + 3 | 38 + 3 | 39 + 4 | 40 + 4 | 41 + 4 | 42 + 4 | 43 + 4 | 44 + 4 | 45 + 4 | 46 + 4 | 47 + 4 | 48 + 4 | 49 + 5 | 50 + 5 | 51 + 5 | 52 + 5 | 53 + 5 | 54 + 5 | 55 + 5 | 56 + 5 | 57 + 5 | 58 + 5 | 59 + 6 | 60 + 6 | 61 + 6 | 62 + 6 | 63 + 6 | 64 + 6 | 65 +(65 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 66; + a | b +---+---- + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 5 + 0 | 6 + 0 | 7 + 0 | 8 + 0 | 9 + 1 | 10 + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 2 | 20 + 2 | 21 + 2 | 22 + 2 | 23 + 2 | 24 + 2 | 25 + 2 | 26 + 2 
| 27 + 2 | 28 + 2 | 29 + 3 | 30 + 3 | 31 + 3 | 32 + 3 | 33 + 3 | 34 + 3 | 35 + 3 | 36 + 3 | 37 + 3 | 38 + 3 | 39 + 4 | 40 + 4 | 41 + 4 | 42 + 4 | 43 + 4 | 44 + 4 | 45 + 4 | 46 + 4 | 47 + 4 | 48 + 4 | 49 + 5 | 50 + 5 | 51 + 5 | 52 + 5 | 53 + 5 | 54 + 5 | 55 + 5 | 56 + 5 | 57 + 5 | 58 + 5 | 59 + 6 | 60 + 6 | 61 + 6 | 62 + 6 | 63 + 6 | 64 + 6 | 65 + 6 | 66 +(66 rows) + +delete from t; +-- Small groups of only 1 tuple each tested around each mode transition point. +insert into t(a, b) select i, i from generate_series(1, 1000) n(i); +analyze t; +explain (costs off) select * from (select * from t order by a) s order by a, b limit 31; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 31; + a | b +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 + 20 | 20 + 21 | 21 + 22 | 22 + 23 | 23 + 24 | 24 + 25 | 25 + 26 | 26 + 27 | 27 + 28 | 28 + 29 | 29 + 30 | 30 + 31 | 31 +(31 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 32; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 32; + a | b +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 + 20 | 20 + 21 | 21 + 22 | 22 + 23 | 23 + 24 | 24 + 25 | 25 + 26 | 26 + 27 | 27 + 28 | 28 + 29 | 29 + 30 | 30 + 31 | 31 + 32 | 32 +(32 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 33; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 33; + a | b +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 + 20 | 20 + 21 | 21 + 22 | 22 + 23 | 23 + 24 | 24 + 25 | 25 + 26 | 26 + 27 | 27 + 28 | 28 + 29 | 29 + 30 | 30 + 31 | 31 + 32 | 32 + 33 | 33 +(33 rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 65; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 65; + a | b +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 + 20 | 20 + 21 | 21 + 22 | 22 + 23 | 23 + 24 | 24 + 25 | 25 + 26 | 26 + 27 | 27 + 28 | 28 + 29 | 29 + 30 | 30 + 31 | 31 + 32 | 32 + 33 | 33 + 34 | 34 + 35 | 35 + 36 | 36 + 37 | 37 + 38 | 38 + 39 | 39 + 40 | 40 + 41 | 41 + 42 | 42 + 43 | 43 + 44 | 44 + 45 | 45 + 46 | 46 + 47 | 47 + 48 | 48 + 49 | 49 + 50 | 50 + 51 | 51 + 52 | 52 + 53 | 53 + 54 | 54 + 55 | 55 + 56 | 56 + 57 | 57 + 58 | 58 + 59 | 59 + 60 | 60 + 61 | 61 + 62 | 62 + 63 | 63 + 64 | 64 + 65 | 65 +(65 
rows) + +explain (costs off) select * from (select * from t order by a) s order by a, b limit 66; + QUERY PLAN +--------------------------------- + Limit + -> Incremental Sort + Sort Key: t.a, t.b + Presorted Key: t.a + -> Sort + Sort Key: t.a + -> Seq Scan on t +(7 rows) + +select * from (select * from t order by a) s order by a, b limit 66; + a | b +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 + 10 | 10 + 11 | 11 + 12 | 12 + 13 | 13 + 14 | 14 + 15 | 15 + 16 | 16 + 17 | 17 + 18 | 18 + 19 | 19 + 20 | 20 + 21 | 21 + 22 | 22 + 23 | 23 + 24 | 24 + 25 | 25 + 26 | 26 + 27 | 27 + 28 | 28 + 29 | 29 + 30 | 30 + 31 | 31 + 32 | 32 + 33 | 33 + 34 | 34 + 35 | 35 + 36 | 36 + 37 | 37 + 38 | 38 + 39 | 39 + 40 | 40 + 41 | 41 + 42 | 42 + 43 | 43 + 44 | 44 + 45 | 45 + 46 | 46 + 47 | 47 + 48 | 48 + 49 | 49 + 50 | 50 + 51 | 51 + 52 | 52 + 53 | 53 + 54 | 54 + 55 | 55 + 56 | 56 + 57 | 57 + 58 | 58 + 59 | 59 + 60 | 60 + 61 | 61 + 62 | 62 + 63 | 63 + 64 | 64 + 65 | 65 + 66 | 66 +(66 rows) + +delete from t; +drop table t; +-- Incremental sort vs. parallel queries +set min_parallel_table_scan_size = '1kB'; +set min_parallel_index_scan_size = '1kB'; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +create table t (a int, b int, c int); +insert into t select mod(i,10),mod(i,10),i from generate_series(1,10000) s(i); +create index on t (a); +analyze t; +set enable_incremental_sort = off; +explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; + QUERY PLAN +------------------------------------------------------ + Limit + -> Sort + Sort Key: a, b, (sum(c)) + -> Finalize HashAggregate + Group Key: a, b + -> Gather + Workers Planned: 2 + -> Partial HashAggregate + Group Key: a, b + -> Parallel Seq Scan on t +(10 rows) + +set enable_incremental_sort = on; +explain (costs off) select a,b,sum(c) from t group by 1,2 order by 1,2,3 limit 1; + QUERY PLAN +---------------------------------------------------------------------- + Limit + -> Incremental Sort + Sort Key: a, b, (sum(c)) + Presorted Key: a, b + -> GroupAggregate + Group Key: a, b + -> Gather Merge + Workers Planned: 2 + -> Incremental Sort + Sort Key: a, b + Presorted Key: a + -> Parallel Index Scan using t_a_idx on t +(12 rows) + +-- Incremental sort vs. set operations with varno 0 +set enable_hashagg to off; +explain (costs off) select * from t union select * from t order by 1,3; + QUERY PLAN +---------------------------------------------------------- + Incremental Sort + Sort Key: t.a, t.c + Presorted Key: t.a + -> Unique + -> Sort + Sort Key: t.a, t.b, t.c + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Parallel Seq Scan on t + -> Parallel Seq Scan on t t_1 +(11 rows) + +-- Full sort, not just incremental sort can be pushed below a gather merge path +-- by generate_useful_gather_paths. +explain (costs off) select distinct a,b from t; + QUERY PLAN +------------------------------------------------ + Unique + -> Gather Merge + Workers Planned: 2 + -> Unique + -> Sort + Sort Key: a, b + -> Parallel Seq Scan on t +(7 rows) + +drop table t; +-- Sort pushdown can't go below where expressions are part of the rel target. +-- In particular this is interesting for volatile expressions which have to +-- go above joins since otherwise we'll incorrectly use expression evaluations +-- across multiple rows. 
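The restriction described in the comment above comes down to function volatility: random() is volatile, so its result has to be computed above the join (once per join output row), whereas md5() is immutable and can safely be evaluated at the base relation before the lateral join multiplies the rows. A quick way to see that classification, shown only as an illustrative aside (the query is not part of the regression script; the two functions are the ones exercised in the plans below):

SELECT proname, provolatile
  FROM pg_proc
 WHERE proname IN ('md5', 'random');
-- random            -> 'v' (volatile): must stay above the join, so the sort keyed on it
--                      cannot be pushed below the Gather
-- md5 (both rows)   -> 'i' (immutable): safe to compute at the base relation and sort
--                      inside the parallel workers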
+set enable_hashagg=off; +set enable_seqscan=off; +set enable_incremental_sort = off; +set parallel_tuple_cost=0; +set parallel_setup_cost=0; +set min_parallel_table_scan_size = 0; +set min_parallel_index_scan_size = 0; +-- Parallel sort below join. +explain (costs off) select distinct sub.unique1, stringu1 +from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; + QUERY PLAN +-------------------------------------------------------------------------- + Unique + -> Nested Loop + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: tenk1.unique1, tenk1.stringu1 + -> Parallel Index Scan using tenk1_unique1 on tenk1 + -> Function Scan on generate_series +(8 rows) + +explain (costs off) select sub.unique1, stringu1 +from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub +order by 1, 2; + QUERY PLAN +-------------------------------------------------------------------- + Nested Loop + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: tenk1.unique1, tenk1.stringu1 + -> Parallel Index Scan using tenk1_unique1 on tenk1 + -> Function Scan on generate_series +(7 rows) + +-- Parallel sort but with expression that can be safely generated at the base rel. +explain (costs off) select distinct sub.unique1, md5(stringu1) +from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; + QUERY PLAN +---------------------------------------------------------------------------------------- + Unique + -> Nested Loop + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" + -> Parallel Index Scan using tenk1_unique1 on tenk1 + -> Function Scan on generate_series +(8 rows) + +explain (costs off) select sub.unique1, md5(stringu1) +from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub +order by 1, 2; + QUERY PLAN +---------------------------------------------------------------------------------- + Nested Loop + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: tenk1.unique1, (md5((tenk1.stringu1)::text)) COLLATE "C" + -> Parallel Index Scan using tenk1_unique1 on tenk1 + -> Function Scan on generate_series +(7 rows) + +-- Parallel sort with an aggregate that can be safely generated in parallel, +-- but we can't sort by partial aggregate values. +explain (costs off) select count(*) +from tenk1 t1 +join tenk1 t2 on t1.unique1 = t2.unique2 +join tenk1 t3 on t2.unique1 = t3.unique1 +order by count(*); + QUERY PLAN +----------------------------------------------------------------------------------------------- + Sort + Sort Key: (count(*)) + -> Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (t2.unique1 = t3.unique1) + -> Parallel Hash Join + Hash Cond: (t1.unique1 = t2.unique2) + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t1 + -> Parallel Hash + -> Parallel Index Scan using tenk1_unique2 on tenk1 t2 + -> Parallel Hash + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t3 +(15 rows) + +-- Parallel sort but with expression (correlated subquery) that +-- is prohibited in parallel plans. 
+explain (costs off) select distinct + unique1, + (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) +from tenk1 t, generate_series(1, 1000); + QUERY PLAN +--------------------------------------------------------------------------------- + Unique + -> Sort + Sort Key: t.unique1, ((SubPlan 1)) + -> Gather + Workers Planned: 2 + -> Nested Loop + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t + -> Function Scan on generate_series + SubPlan 1 + -> Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = t.unique1) +(11 rows) + +explain (costs off) select + unique1, + (select t.unique1 from tenk1 where tenk1.unique1 = t.unique1) +from tenk1 t, generate_series(1, 1000) +order by 1, 2; + QUERY PLAN +--------------------------------------------------------------------------- + Sort + Sort Key: t.unique1, ((SubPlan 1)) + -> Gather + Workers Planned: 2 + -> Nested Loop + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 t + -> Function Scan on generate_series + SubPlan 1 + -> Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = t.unique1) +(10 rows) + +-- Parallel sort but with expression not available until the upper rel. +explain (costs off) select distinct sub.unique1, stringu1 || random()::text +from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Unique + -> Sort + Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" + -> Gather + Workers Planned: 2 + -> Nested Loop + -> Parallel Index Scan using tenk1_unique1 on tenk1 + -> Function Scan on generate_series +(8 rows) + +explain (costs off) select sub.unique1, stringu1 || random()::text +from tenk1, lateral (select tenk1.unique1 from generate_series(1, 1000)) as sub +order by 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------- + Sort + Sort Key: tenk1.unique1, (((tenk1.stringu1)::text || (random())::text)) COLLATE "C" + -> Gather + Workers Planned: 2 + -> Nested Loop + -> Parallel Index Scan using tenk1_unique1 on tenk1 + -> Function Scan on generate_series +(7 rows) + diff --git a/src/test/regress/expected/index_including.out b/src/test/regress/expected/index_including.out new file mode 100644 index 0000000..8651068 --- /dev/null +++ b/src/test/regress/expected/index_including.out @@ -0,0 +1,400 @@ +/* + * 1.test CREATE INDEX + * + * Deliberately avoid dropping objects in this section, to get some pg_dump + * coverage. 
+ */ +-- Regular index with included columns +CREATE TABLE tbl_include_reg (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_reg SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE INDEX tbl_include_reg_idx ON tbl_include_reg (c1, c2) INCLUDE (c3, c4); +-- duplicate column is pretty pointless, but we allow it anyway +CREATE INDEX ON tbl_include_reg (c1, c2) INCLUDE (c1, c3); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_reg'::regclass ORDER BY c.relname; + pg_get_indexdef +--------------------------------------------------------------------------------------------------------------- + CREATE INDEX tbl_include_reg_c1_c2_c11_c3_idx ON public.tbl_include_reg USING btree (c1, c2) INCLUDE (c1, c3) + CREATE INDEX tbl_include_reg_idx ON public.tbl_include_reg USING btree (c1, c2) INCLUDE (c3, c4) +(2 rows) + +\d tbl_include_reg_idx + Index "public.tbl_include_reg_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + c1 | integer | yes | c1 + c2 | integer | yes | c2 + c3 | integer | no | c3 + c4 | box | no | c4 +btree, for table "public.tbl_include_reg" + +-- Unique index and unique constraint +CREATE TABLE tbl_include_unique1 (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_unique1 SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_unique1_idx_unique ON tbl_include_unique1 using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_unique1 add UNIQUE USING INDEX tbl_include_unique1_idx_unique; +ALTER TABLE tbl_include_unique1 add UNIQUE (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_unique1'::regclass ORDER BY c.relname; + pg_get_indexdef +----------------------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_unique1_c1_c2_c3_c4_key ON public.tbl_include_unique1 USING btree (c1, c2) INCLUDE (c3, c4) + CREATE UNIQUE INDEX tbl_include_unique1_idx_unique ON public.tbl_include_unique1 USING btree (c1, c2) INCLUDE (c3, c4) +(2 rows) + +-- Unique index and unique constraint. Both must fail. +CREATE TABLE tbl_include_unique2 (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_unique2 SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_unique2_idx_unique ON tbl_include_unique2 using btree (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_include_unique2_idx_unique" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +ALTER TABLE tbl_include_unique2 add UNIQUE (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_include_unique2_c1_c2_c3_c4_key" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. 
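Both failures above come from the fact that INCLUDE columns are stored in the index but play no part in the uniqueness check: the rows differ in c3, yet they still collide on the key columns (c1, c2). A minimal sketch of the same behavior on a throwaway table (the table and index names here are made up for illustration and are not part of this regression file):

CREATE TABLE covering_demo (id int, payload text);
CREATE UNIQUE INDEX covering_demo_idx ON covering_demo (id) INCLUDE (payload);
INSERT INTO covering_demo VALUES (1, 'a');   -- accepted
INSERT INTO covering_demo VALUES (1, 'b');   -- rejected: only "id" is compared for uniqueness,
                                             -- the differing "payload" value does not help
DROP TABLE covering_demo;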
+-- PK constraint +CREATE TABLE tbl_include_pk (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_pk SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl_include_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_pk'::regclass ORDER BY c.relname; + pg_get_indexdef +-------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_pk_pkey ON public.tbl_include_pk USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +CREATE TABLE tbl_include_box (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_box SELECT 1, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_include_box_idx_unique ON tbl_include_box using btree (c1, c2) INCLUDE (c3, c4); +ALTER TABLE tbl_include_box add PRIMARY KEY USING INDEX tbl_include_box_idx_unique; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_include_box'::regclass ORDER BY c.relname; + pg_get_indexdef +---------------------------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_include_box_idx_unique ON public.tbl_include_box USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +-- PK constraint. Must fail. +CREATE TABLE tbl_include_box_pk (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_include_box_pk SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl_include_box_pk add PRIMARY KEY (c1, c2) INCLUDE (c3, c4); +ERROR: could not create unique index "tbl_include_box_pk_pkey" +DETAIL: Key (c1, c2)=(1, 2) is duplicated. +/* + * 2. Test CREATE TABLE with constraint + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering UNIQUE(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+----------- + covering | 4 | 2 | t | f | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +----------------------------------+----------+-------- + UNIQUE (c1, c2) INCLUDE (c3, c4) | covering | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "covering" +DETAIL: Key (c1, c2)=(1, 2) already exists. 
+DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + CONSTRAINT covering PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+----------- + covering | 4 | 2 | t | t | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +---------------------------------------+----------+-------- + PRIMARY KEY (c1, c2) INCLUDE (c3, c4) | covering | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "covering" +DETAIL: Key (c1, c2)=(1, 2) already exists. +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: null value in column "c2" of relation "tbl" violates not-null constraint +DETAIL: Failing row contains (1, null, 3, (4,4),(4,4)). +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,300) AS x; +explain (costs off) +select * from tbl where (c1,c2,c3) < (2,5,1); + QUERY PLAN +------------------------------------------------ + Bitmap Heap Scan on tbl + Filter: (ROW(c1, c2, c3) < ROW(2, 5, 1)) + -> Bitmap Index Scan on covering + Index Cond: (ROW(c1, c2) <= ROW(2, 5)) +(4 rows) + +select * from tbl where (c1,c2,c3) < (2,5,1); + c1 | c2 | c3 | c4 +----+----+----+---- + 1 | 2 | | + 2 | 4 | | +(2 rows) + +-- row comparison that compares high key at page boundary +SET enable_seqscan = off; +explain (costs off) +select * from tbl where (c1,c2,c3) < (262,1,1) limit 1; + QUERY PLAN +---------------------------------------------------- + Limit + -> Index Only Scan using covering on tbl + Index Cond: (ROW(c1, c2) <= ROW(262, 1)) + Filter: (ROW(c1, c2, c3) < ROW(262, 1, 1)) +(4 rows) + +select * from tbl where (c1,c2,c3) < (262,1,1) limit 1; + c1 | c2 | c3 | c4 +----+----+----+---- + 1 | 2 | | +(1 row) + +DROP TABLE tbl; +RESET enable_seqscan; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + UNIQUE(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +---------------------+----------+-------------+-------------+--------------+---------+----------- + tbl_c1_c2_c3_c4_key | 4 | 2 | t | f | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +----------------------------------+---------------------+-------- + UNIQUE (c1, c2) INCLUDE (c3, c4) | tbl_c1_c2_c3_c4_key | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "tbl_c1_c2_c3_c4_key" +DETAIL: Key (c1, c2)=(1, 2) already exists. 
+DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + PRIMARY KEY(c1,c2) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +------------+----------+-------------+-------------+--------------+---------+----------- + tbl_pkey | 4 | 2 | t | t | 1 2 3 4 | 1978 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +---------------------------------------+----------+-------- + PRIMARY KEY (c1, c2) INCLUDE (c3, c4) | tbl_pkey | {1,2} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: duplicate key value violates unique constraint "tbl_pkey" +DETAIL: Key (c1, c2)=(1, 2) already exists. +INSERT INTO tbl SELECT 1, NULL, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: null value in column "c2" of relation "tbl" violates not-null constraint +DETAIL: Failing row contains (1, null, 3, (4,4),(4,4)). +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, + EXCLUDE USING btree (c1 WITH =) INCLUDE(c3,c4)); +SELECT indexrelid::regclass, indnatts, indnkeyatts, indisunique, indisprimary, indkey, indclass FROM pg_index WHERE indrelid = 'tbl'::regclass::oid; + indexrelid | indnatts | indnkeyatts | indisunique | indisprimary | indkey | indclass +-------------------+----------+-------------+-------------+--------------+--------+---------- + tbl_c1_c3_c4_excl | 3 | 1 | f | f | 1 3 4 | 1978 +(1 row) + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'tbl'::regclass::oid; + pg_get_constraintdef | conname | conkey +--------------------------------------------------+-------------------+-------- + EXCLUDE USING btree (c1 WITH =) INCLUDE (c3, c4) | tbl_c1_c3_c4_excl | {1} +(1 row) + +-- ensure that constraint works +INSERT INTO tbl SELECT 1, 2, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ERROR: conflicting key value violates exclusion constraint "tbl_c1_c3_c4_excl" +DETAIL: Key (c1)=(1) conflicts with existing key (c1)=(1). +INSERT INTO tbl SELECT x, 2*x, NULL, NULL FROM generate_series(1,10) AS x; +DROP TABLE tbl; +/* + * 3.0 Test ALTER TABLE DROP COLUMN. + * Any column deletion leads to index deletion. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 int); +CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2, c3, c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +------------------------------------------------------------------------ + CREATE UNIQUE INDEX tbl_idx ON public.tbl USING btree (c1, c2, c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.1 Test ALTER TABLE DROP COLUMN. + * Included column deletion leads to the index deletion, + * AS well AS key columns deletion. It's explained in documentation. 
+ */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box); +CREATE UNIQUE INDEX tbl_idx ON tbl using btree(c1, c2) INCLUDE(c3,c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_idx ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.2 Test ALTER TABLE DROP COLUMN. + * Included column deletion leads to the index deletion. + * AS well AS key columns deletion. It's explained in documentation. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +ALTER TABLE tbl DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 3.3 Test ALTER TABLE SET STATISTICS + */ +CREATE TABLE tbl (c1 int, c2 int); +CREATE INDEX tbl_idx ON tbl (c1, (c1+0)) INCLUDE (c2); +ALTER INDEX tbl_idx ALTER COLUMN 1 SET STATISTICS 1000; +ERROR: cannot alter statistics on non-expression column "c1" of index "tbl_idx" +HINT: Alter statistics on table column instead. +ALTER INDEX tbl_idx ALTER COLUMN 2 SET STATISTICS 1000; +ALTER INDEX tbl_idx ALTER COLUMN 3 SET STATISTICS 1000; +ERROR: cannot alter statistics on included column "c2" of index "tbl_idx" +ALTER INDEX tbl_idx ALTER COLUMN 4 SET STATISTICS 1000; +ERROR: column number 4 of relation "tbl_idx" does not exist +DROP TABLE tbl; +/* + * 4. CREATE INDEX CONCURRENTLY + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,1000) AS x; +CREATE UNIQUE INDEX CONCURRENTLY on tbl (c1, c2) INCLUDE (c3, c4); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_idx ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(2 rows) + +DROP TABLE tbl; +/* + * 5. 
REINDEX + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +--------------------------------------------------------------------------------------------- + CREATE UNIQUE INDEX tbl_c1_c2_c3_c4_key ON public.tbl USING btree (c1, c2) INCLUDE (c3, c4) +(1 row) + +ALTER TABLE tbl DROP COLUMN c3; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +REINDEX INDEX tbl_c1_c2_c3_c4_key; +ERROR: relation "tbl_c1_c2_c3_c4_key" does not exist +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +ALTER TABLE tbl DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl; +/* + * 7. Check various AMs. All but btree, gist and spgist must fail. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 box, c4 box); +CREATE INDEX on tbl USING brin(c1, c2) INCLUDE (c3, c4); +ERROR: access method "brin" does not support included columns +CREATE INDEX on tbl USING gist(c3) INCLUDE (c1, c4); +CREATE INDEX on tbl USING spgist(c3) INCLUDE (c4); +CREATE INDEX on tbl USING gin(c1, c2) INCLUDE (c3, c4); +ERROR: access method "gin" does not support included columns +CREATE INDEX on tbl USING hash(c1, c2) INCLUDE (c3, c4); +ERROR: access method "hash" does not support included columns +CREATE INDEX on tbl USING rtree(c3) INCLUDE (c1, c4); +NOTICE: substituting access method "gist" for obsolete method "rtree" +CREATE INDEX on tbl USING btree(c1, c2) INCLUDE (c3, c4); +DROP TABLE tbl; +/* + * 8. Update, delete values in indexed table. + */ +CREATE TABLE tbl (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +CREATE UNIQUE INDEX tbl_idx_unique ON tbl using btree(c1, c2) INCLUDE (c3,c4); +UPDATE tbl SET c1 = 100 WHERE c1 = 2; +UPDATE tbl SET c1 = 1 WHERE c1 = 3; +-- should fail +UPDATE tbl SET c2 = 2 WHERE c1 = 1; +ERROR: duplicate key value violates unique constraint "tbl_idx_unique" +DETAIL: Key (c1, c2)=(1, 2) already exists. +UPDATE tbl SET c3 = 1; +DELETE FROM tbl WHERE c1 = 5 OR c3 = 12; +DROP TABLE tbl; +/* + * 9. Alter column type. + */ +CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) INCLUDE(c3,c4)); +INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; +ALTER TABLE tbl ALTER c1 TYPE bigint; +ALTER TABLE tbl ALTER c3 TYPE bigint; +\d tbl + Table "public.tbl" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | bigint | | | + c2 | integer | | | + c3 | bigint | | | + c4 | box | | | +Indexes: + "tbl_c1_c2_c3_c4_key" UNIQUE CONSTRAINT, btree (c1, c2) INCLUDE (c3, c4) + +DROP TABLE tbl; diff --git a/src/test/regress/expected/index_including_gist.out b/src/test/regress/expected/index_including_gist.out new file mode 100644 index 0000000..ed9906d --- /dev/null +++ b/src/test/regress/expected/index_including_gist.out @@ -0,0 +1,166 @@ +/* + * 1.1. 
test CREATE INDEX with buffered build + */ +-- Regular index with included columns +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box); +-- size is chosen to exceed page size and trigger actual truncation +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,8000) AS x; +CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c2,c3); +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_gist'::regclass ORDER BY c.relname; + pg_get_indexdef +----------------------------------------------------------------------------------- + CREATE INDEX tbl_gist_idx ON public.tbl_gist USING gist (c4) INCLUDE (c1, c2, c3) +(1 row) + +SELECT * FROM tbl_gist where c4 <@ box(point(1,1),point(10,10)); + c1 | c2 | c3 | c4 +----+----+----+------------- + 1 | 2 | 3 | (2,3),(1,2) + 2 | 4 | 6 | (4,5),(2,3) + 3 | 6 | 9 | (6,7),(3,4) + 4 | 8 | 12 | (8,9),(4,5) +(4 rows) + +SET enable_bitmapscan TO off; +EXPLAIN (costs off) SELECT * FROM tbl_gist where c4 <@ box(point(1,1),point(10,10)); + QUERY PLAN +------------------------------------------------ + Index Only Scan using tbl_gist_idx on tbl_gist + Index Cond: (c4 <@ '(10,10),(1,1)'::box) +(2 rows) + +SET enable_bitmapscan TO default; +DROP TABLE tbl_gist; +/* + * 1.2. test CREATE INDEX with inserts + */ +-- Regular index with included columns +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box); +-- size is chosen to exceed page size and trigger actual truncation +CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c2,c3); +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,8000) AS x; +SELECT pg_get_indexdef(i.indexrelid) +FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid +WHERE i.indrelid = 'tbl_gist'::regclass ORDER BY c.relname; + pg_get_indexdef +----------------------------------------------------------------------------------- + CREATE INDEX tbl_gist_idx ON public.tbl_gist USING gist (c4) INCLUDE (c1, c2, c3) +(1 row) + +SELECT * FROM tbl_gist where c4 <@ box(point(1,1),point(10,10)); + c1 | c2 | c3 | c4 +----+----+----+------------- + 1 | 2 | 3 | (2,3),(1,2) + 2 | 4 | 6 | (4,5),(2,3) + 3 | 6 | 9 | (6,7),(3,4) + 4 | 8 | 12 | (8,9),(4,5) +(4 rows) + +SET enable_bitmapscan TO off; +EXPLAIN (costs off) SELECT * FROM tbl_gist where c4 <@ box(point(1,1),point(10,10)); + QUERY PLAN +------------------------------------------------ + Index Only Scan using tbl_gist_idx on tbl_gist + Index Cond: (c4 <@ '(10,10),(1,1)'::box) +(2 rows) + +SET enable_bitmapscan TO default; +DROP TABLE tbl_gist; +/* + * 2. CREATE INDEX CONCURRENTLY + */ +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +CREATE INDEX CONCURRENTLY tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c2,c3); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl_gist' ORDER BY indexname; + indexdef +----------------------------------------------------------------------------------- + CREATE INDEX tbl_gist_idx ON public.tbl_gist USING gist (c4) INCLUDE (c1, c2, c3) +(1 row) + +DROP TABLE tbl_gist; +/* + * 3. 
REINDEX + */ +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c3); +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl_gist' ORDER BY indexname; + indexdef +------------------------------------------------------------------------------- + CREATE INDEX tbl_gist_idx ON public.tbl_gist USING gist (c4) INCLUDE (c1, c3) +(1 row) + +REINDEX INDEX tbl_gist_idx; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl_gist' ORDER BY indexname; + indexdef +------------------------------------------------------------------------------- + CREATE INDEX tbl_gist_idx ON public.tbl_gist USING gist (c4) INCLUDE (c1, c3) +(1 row) + +ALTER TABLE tbl_gist DROP COLUMN c1; +SELECT indexdef FROM pg_indexes WHERE tablename = 'tbl_gist' ORDER BY indexname; + indexdef +---------- +(0 rows) + +DROP TABLE tbl_gist; +/* + * 4. Update, delete values in indexed table. + */ +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c3); +UPDATE tbl_gist SET c1 = 100 WHERE c1 = 2; +UPDATE tbl_gist SET c1 = 1 WHERE c1 = 3; +DELETE FROM tbl_gist WHERE c1 = 5 OR c3 = 12; +DROP TABLE tbl_gist; +/* + * 5. Alter column type. + */ +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box); +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c3); +ALTER TABLE tbl_gist ALTER c1 TYPE bigint; +ALTER TABLE tbl_gist ALTER c3 TYPE bigint; +\d tbl_gist + Table "public.tbl_gist" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | bigint | | | + c2 | integer | | | + c3 | bigint | | | + c4 | box | | | +Indexes: + "tbl_gist_idx" gist (c4) INCLUDE (c1, c3) + +DROP TABLE tbl_gist; +/* + * 6. EXCLUDE constraint. + */ +CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box, EXCLUDE USING gist (c4 WITH &&) INCLUDE (c1, c2, c3)); +INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; +ERROR: conflicting key value violates exclusion constraint "tbl_gist_c4_c1_c2_c3_excl" +DETAIL: Key (c4)=((4,5),(2,3)) conflicts with existing key (c4)=((2,3),(1,2)). 
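The failed INSERT above is worth unpacking: the generated boxes for consecutive values of x touch at a corner (for x = 1 the box is (2,3),(1,2); for x = 2 it is (4,5),(2,3); both contain the point (2,3)), and the && operator treats any shared point as an overlap, so the exclusion constraint rejects the second row and the whole multi-row statement is rolled back. A one-line check of that overlap, purely for illustration:

SELECT box '(2,3),(1,2)' && box '(4,5),(2,3)' AS boxes_overlap;   -- returns t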
+INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(3*x,2*x),point(3*x+1,2*x+1)) FROM generate_series(1,10) AS x; +EXPLAIN (costs off) SELECT * FROM tbl_gist where c4 <@ box(point(1,1),point(10,10)); + QUERY PLAN +------------------------------------------------------------- + Index Only Scan using tbl_gist_c4_c1_c2_c3_excl on tbl_gist + Index Cond: (c4 <@ '(10,10),(1,1)'::box) +(2 rows) + +\d tbl_gist + Table "public.tbl_gist" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + c1 | integer | | | + c2 | integer | | | + c3 | integer | | | + c4 | box | | | +Indexes: + "tbl_gist_c4_c1_c2_c3_excl" EXCLUDE USING gist (c4 WITH &&) INCLUDE (c1, c2, c3) + +DROP TABLE tbl_gist; diff --git a/src/test/regress/expected/indexing.out b/src/test/regress/expected/indexing.out new file mode 100644 index 0000000..e17879b --- /dev/null +++ b/src/test/regress/expected/indexing.out @@ -0,0 +1,1590 @@ +-- Creating an index on a partitioned table makes the partitions +-- automatically get the index +create table idxpart (a int, b int, c text) partition by range (a); +-- relhassubclass of a partitioned index is false before creating any partition. +-- It will be set after the first partition is created. +create index idxpart_idx on idxpart (a); +select relhassubclass from pg_class where relname = 'idxpart_idx'; + relhassubclass +---------------- + f +(1 row) + +-- Check that partitioned indexes are present in pg_indexes. +select indexdef from pg_indexes where indexname like 'idxpart_idx%'; + indexdef +----------------------------------------------------------------- + CREATE INDEX idxpart_idx ON ONLY public.idxpart USING btree (a) +(1 row) + +drop index idxpart_idx; +create table idxpart1 partition of idxpart for values from (0) to (10); +create table idxpart2 partition of idxpart for values from (10) to (100) + partition by range (b); +create table idxpart21 partition of idxpart2 for values from (0) to (100); +-- Even with partitions, relhassubclass should not be set if a partitioned +-- index is created only on the parent. 
+create index idxpart_idx on only idxpart(a); +select relhassubclass from pg_class where relname = 'idxpart_idx'; + relhassubclass +---------------- + f +(1 row) + +drop index idxpart_idx; +create index on idxpart (a); +select relname, relkind, relhassubclass, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | relhassubclass | inhparent +-----------------+---------+----------------+---------------- + idxpart | p | t | + idxpart1 | r | f | + idxpart1_a_idx | i | f | idxpart_a_idx + idxpart2 | p | t | + idxpart21 | r | f | + idxpart21_a_idx | i | f | idxpart2_a_idx + idxpart2_a_idx | I | t | idxpart_a_idx + idxpart_a_idx | I | t | +(8 rows) + +drop table idxpart; +-- Some unsupported features +create table idxpart (a int, b int, c text) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (10); +create index concurrently on idxpart (a); +ERROR: cannot create index on partitioned table "idxpart" concurrently +drop table idxpart; +-- Verify bugfix with query on indexed partitioned table with no partitions +-- https://postgr.es/m/20180124162006.pmapfiznhgngwtjf@alvherre.pgsql +CREATE TABLE idxpart (col1 INT) PARTITION BY RANGE (col1); +CREATE INDEX ON idxpart (col1); +CREATE TABLE idxpart_two (col2 INT); +SELECT col2 FROM idxpart_two fk LEFT OUTER JOIN idxpart pk ON (col1 = col2); + col2 +------ +(0 rows) + +DROP table idxpart, idxpart_two; +-- Verify bugfix with index rewrite on ALTER TABLE / SET DATA TYPE +-- https://postgr.es/m/CAKcux6mxNCGsgATwf5CGMF8g4WSupCXicCVMeKUTuWbyxHOMsQ@mail.gmail.com +CREATE TABLE idxpart (a INT, b TEXT, c INT) PARTITION BY RANGE(a); +CREATE TABLE idxpart1 PARTITION OF idxpart FOR VALUES FROM (MINVALUE) TO (MAXVALUE); +CREATE INDEX partidx_abc_idx ON idxpart (a, b, c); +INSERT INTO idxpart (a, b, c) SELECT i, i, i FROM generate_series(1, 50) i; +ALTER TABLE idxpart ALTER COLUMN c TYPE numeric; +DROP TABLE idxpart; +-- If a table without index is attached as partition to a table with +-- an index, the index is automatically created +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (b, c); +create table idxpart1 (like idxpart); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + +alter table idxpart attach partition idxpart1 for values from (0) to (10); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | +Partition of: idxpart FOR VALUES FROM (0) TO (10) +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + +\d+ idxpart1_a_idx + Index "public.idxpart1_a_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+---------+-------------- + a | integer | yes | a | plain | +Partition of: idxparti +No partition constraint +btree, for table "public.idxpart1" + +\d+ idxpart1_b_c_idx + Index "public.idxpart1_b_c_idx" + Column | Type | Key? 
| Definition | Storage | Stats target +--------+---------+------+------------+----------+-------------- + b | integer | yes | b | plain | + c | text | yes | c | extended | +Partition of: idxparti2 +No partition constraint +btree, for table "public.idxpart1" + +-- Forbid ALTER TABLE when attaching or detaching an index to a partition. +create index idxpart_c on only idxpart (c); +create index idxpart1_c on idxpart1 (c); +alter table idxpart_c attach partition idxpart1_c for values from (10) to (20); +ERROR: "idxpart_c" is not a partitioned table +alter index idxpart_c attach partition idxpart1_c; +select relname, relpartbound from pg_class + where relname in ('idxpart_c', 'idxpart1_c') + order by relname; + relname | relpartbound +------------+-------------- + idxpart1_c | + idxpart_c | +(2 rows) + +alter table idxpart_c detach partition idxpart1_c; +ERROR: ALTER action DETACH PARTITION cannot be performed on relation "idxpart_c" +DETAIL: This operation is not supported for partitioned indexes. +drop table idxpart; +-- If a partition already has an index, don't create a duplicative one +create table idxpart (a int, b int) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); +create index on idxpart1 (a, b); +create index on idxpart (a, b); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: idxpart FOR VALUES FROM (0, 0) TO (10, 10) +Indexes: + "idxpart1_a_b_idx" btree (a, b) + +select relname, relkind, relhassubclass, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | relhassubclass | inhparent +------------------+---------+----------------+----------------- + idxpart | p | t | + idxpart1 | r | f | + idxpart1_a_b_idx | i | f | idxpart_a_b_idx + idxpart_a_b_idx | I | t | +(4 rows) + +drop table idxpart; +-- DROP behavior for partitioned indexes +create table idxpart (a int) partition by range (a); +create index on idxpart (a); +create table idxpart1 partition of idxpart for values from (0) to (10); +drop index idxpart1_a_idx; -- no way +ERROR: cannot drop index idxpart1_a_idx because index idxpart_a_idx requires it +HINT: You can drop index idxpart_a_idx instead. +drop index concurrently idxpart_a_idx; -- unsupported +ERROR: cannot drop partitioned index "idxpart_a_idx" concurrently +drop index idxpart_a_idx; -- both indexes go away +select relname, relkind from pg_class + where relname like 'idxpart%' order by relname; + relname | relkind +----------+--------- + idxpart | p + idxpart1 | r +(2 rows) + +create index on idxpart (a); +drop table idxpart1; -- the index on partition goes away too +select relname, relkind from pg_class + where relname like 'idxpart%' order by relname; + relname | relkind +---------------+--------- + idxpart | p + idxpart_a_idx | I +(2 rows) + +drop table idxpart; +-- DROP behavior with temporary partitioned indexes +create temp table idxpart_temp (a int) partition by range (a); +create index on idxpart_temp(a); +create temp table idxpart1_temp partition of idxpart_temp + for values from (0) to (10); +drop index idxpart1_temp_a_idx; -- error +ERROR: cannot drop index idxpart1_temp_a_idx because index idxpart_temp_a_idx requires it +HINT: You can drop index idxpart_temp_a_idx instead. 
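The drop-ordering errors above follow from the parent/child links between the partitioned index and its index partitions; those links live in pg_inherits (queried elsewhere in this file), and pg_partition_tree() shows the same tree in one call. A possible way to look at it before the drop that follows, offered only as an aside and not part of the test:

SELECT relid, parentrelid, isleaf, level
  FROM pg_partition_tree('idxpart_temp_a_idx');
-- expected shape: idxpart_temp_a_idx at level 0, with idxpart1_temp_a_idx as its leaf partition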
+-- non-concurrent drop is enforced here, so it is a valid case. +drop index concurrently idxpart_temp_a_idx; +select relname, relkind from pg_class + where relname like 'idxpart_temp%' order by relname; + relname | relkind +--------------+--------- + idxpart_temp | p +(1 row) + +drop table idxpart_temp; +-- ALTER INDEX .. ATTACH, error cases +create table idxpart (a int, b int) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); +create index idxpart_a_b_idx on only idxpart (a, b); +create index idxpart1_a_b_idx on idxpart1 (a, b); +create index idxpart1_tst1 on idxpart1 (b, a); +create index idxpart1_tst2 on idxpart1 using hash (a); +create index idxpart1_tst3 on idxpart1 (a, b) where a > 10; +alter index idxpart attach partition idxpart1; +ERROR: "idxpart" is not an index +alter index idxpart_a_b_idx attach partition idxpart1; +ERROR: "idxpart1" is not an index +alter index idxpart_a_b_idx attach partition idxpart_a_b_idx; +ERROR: cannot attach index "idxpart_a_b_idx" as a partition of index "idxpart_a_b_idx" +DETAIL: Index "idxpart_a_b_idx" is not an index on any partition of table "idxpart". +alter index idxpart_a_b_idx attach partition idxpart1_b_idx; +ERROR: relation "idxpart1_b_idx" does not exist +alter index idxpart_a_b_idx attach partition idxpart1_tst1; +ERROR: cannot attach index "idxpart1_tst1" as a partition of index "idxpart_a_b_idx" +DETAIL: The index definitions do not match. +alter index idxpart_a_b_idx attach partition idxpart1_tst2; +ERROR: cannot attach index "idxpart1_tst2" as a partition of index "idxpart_a_b_idx" +DETAIL: The index definitions do not match. +alter index idxpart_a_b_idx attach partition idxpart1_tst3; +ERROR: cannot attach index "idxpart1_tst3" as a partition of index "idxpart_a_b_idx" +DETAIL: The index definitions do not match. +-- OK +alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; +alter index idxpart_a_b_idx attach partition idxpart1_a_b_idx; -- quiet +-- reject dupe +create index idxpart1_2_a_b on idxpart1 (a, b); +alter index idxpart_a_b_idx attach partition idxpart1_2_a_b; +ERROR: cannot attach index "idxpart1_2_a_b" as a partition of index "idxpart_a_b_idx" +DETAIL: Another index is already attached for partition "idxpart1". +drop table idxpart; +-- make sure everything's gone +select indexrelid::regclass, indrelid::regclass + from pg_index where indexrelid::regclass::text like 'idxpart%'; + indexrelid | indrelid +------------+---------- +(0 rows) + +-- Don't auto-attach incompatible indexes +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int, b int); +create index on idxpart1 using hash (a); +create index on idxpart1 (a) where b > 1; +create index on idxpart1 ((a + 0)); +create index on idxpart1 (a, a); +create index on idxpart (a); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (1000) +Indexes: + "idxpart1_a_a1_idx" btree (a, a) + "idxpart1_a_idx" hash (a) + "idxpart1_a_idx1" btree (a) WHERE b > 1 + "idxpart1_a_idx2" btree (a) + "idxpart1_expr_idx" btree ((a + 0)) + +drop table idxpart; +-- If CREATE INDEX ONLY, don't create indexes on partitions; and existing +-- indexes on partitions don't change parent. ALTER INDEX ATTACH can change +-- the parent after the fact. 
+create table idxpart (a int) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100); +create table idxpart2 partition of idxpart for values from (100) to (1000) + partition by range (a); +create table idxpart21 partition of idxpart2 for values from (100) to (200); +create table idxpart22 partition of idxpart2 for values from (200) to (300); +create index on idxpart22 (a); +create index on only idxpart2 (a); +create index on idxpart (a); +-- Here we expect that idxpart1 and idxpart2 have a new index, but idxpart21 +-- does not; also, idxpart22 is not attached. +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (100) +Indexes: + "idxpart1_a_idx" btree (a) + +\d idxpart2 + Partitioned table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (100) TO (1000) +Partition key: RANGE (a) +Indexes: + "idxpart2_a_idx" btree (a) INVALID +Number of partitions: 2 (Use \d+ to list them.) + +\d idxpart21 + Table "public.idxpart21" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart2 FOR VALUES FROM (100) TO (200) + +select indexrelid::regclass, indrelid::regclass, inhparent::regclass + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) +where indexrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indrelid | inhparent +-----------------+-----------+--------------- + idxpart1_a_idx | idxpart1 | idxpart_a_idx + idxpart22_a_idx | idxpart22 | + idxpart2_a_idx | idxpart2 | idxpart_a_idx + idxpart_a_idx | idxpart | +(4 rows) + +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +select indexrelid::regclass, indrelid::regclass, inhparent::regclass + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) +where indexrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indrelid | inhparent +-----------------+-----------+---------------- + idxpart1_a_idx | idxpart1 | idxpart_a_idx + idxpart22_a_idx | idxpart22 | idxpart2_a_idx + idxpart2_a_idx | idxpart2 | idxpart_a_idx + idxpart_a_idx | idxpart | +(4 rows) + +-- attaching idxpart22 is not enough to set idxpart22_a_idx valid ... +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +\d idxpart2 + Partitioned table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (100) TO (1000) +Partition key: RANGE (a) +Indexes: + "idxpart2_a_idx" btree (a) INVALID +Number of partitions: 2 (Use \d+ to list them.) + +-- ... but this one is. +create index on idxpart21 (a); +alter index idxpart2_a_idx attach partition idxpart21_a_idx; +\d idxpart2 + Partitioned table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: idxpart FOR VALUES FROM (100) TO (1000) +Partition key: RANGE (a) +Indexes: + "idxpart2_a_idx" btree (a) +Number of partitions: 2 (Use \d+ to list them.) 
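The INVALID marker and its later disappearance in the \d output above reflect pg_index.indisvalid: a partitioned index is reported valid only once an index has been attached for every partition. A direct catalog check, shown only as an illustration and not part of the expected output:

  -- illustrative sketch; uses only the pg_index/pg_class columns queried elsewhere in this file
  select c.relname, i.indisvalid
    from pg_index i
    join pg_class c on c.oid = i.indexrelid
   where c.relname in ('idxpart2_a_idx', 'idxpart_a_idx');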
+ +drop table idxpart; +-- When a table is attached a partition and it already has an index, a +-- duplicate index should not get created, but rather the index becomes +-- attached to the parent's index. +create table idxpart (a int, b int, c text, d bool) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (b, c); +create table idxpart1 (like idxpart including indexes); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + d | boolean | | | +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | inhparent +------------------+---------+----------- + idxpart | p | + idxpart1 | r | + idxpart1_a_idx | i | + idxpart1_b_c_idx | i | + idxparti | I | + idxparti2 | I | +(6 rows) + +alter table idxpart attach partition idxpart1 for values from (0) to (10); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + d | boolean | | | +Partition of: idxpart FOR VALUES FROM (0) TO (10) +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | inhparent +------------------+---------+----------- + idxpart | p | + idxpart1 | r | + idxpart1_a_idx | i | idxparti + idxpart1_b_c_idx | i | idxparti2 + idxparti | I | + idxparti2 | I | +(6 rows) + +-- While here, also check matching when creating an index after the fact. 
+create index on idxpart1 ((a+b)) where d = true; +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + d | boolean | | | +Partition of: idxpart FOR VALUES FROM (0) TO (10) +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + "idxpart1_expr_idx" btree ((a + b)) WHERE d = true + +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | inhparent +-------------------+---------+----------- + idxpart | p | + idxpart1 | r | + idxpart1_a_idx | i | idxparti + idxpart1_b_c_idx | i | idxparti2 + idxpart1_expr_idx | i | + idxparti | I | + idxparti2 | I | +(7 rows) + +create index idxparti3 on idxpart ((a+b)) where d = true; +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | text | | | + d | boolean | | | +Partition of: idxpart FOR VALUES FROM (0) TO (10) +Indexes: + "idxpart1_a_idx" btree (a) + "idxpart1_b_c_idx" btree (b, c) + "idxpart1_expr_idx" btree ((a + b)) WHERE d = true + +select relname, relkind, inhparent::regclass + from pg_class left join pg_index ix on (indexrelid = oid) + left join pg_inherits on (ix.indexrelid = inhrelid) + where relname like 'idxpart%' order by relname; + relname | relkind | inhparent +-------------------+---------+----------- + idxpart | p | + idxpart1 | r | + idxpart1_a_idx | i | idxparti + idxpart1_b_c_idx | i | idxparti2 + idxpart1_expr_idx | i | idxparti3 + idxparti | I | + idxparti2 | I | + idxparti3 | I | +(8 rows) + +drop table idxpart; +-- Verify that attaching an invalid index does not mark the parent index valid. 
+-- On the other hand, attaching a valid index marks not only its direct +-- ancestor valid, but also any indirect ancestor that was only missing the one +-- that was just made valid +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 partition of idxpart for values from (1) to (1000) partition by range (a); +create table idxpart11 partition of idxpart1 for values from (1) to (100); +create index on only idxpart1 (a); +create index on only idxpart (a); +-- this results in two invalid indexes: +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; + relname | indisvalid +----------------+------------ + idxpart1_a_idx | f + idxpart_a_idx | f +(2 rows) + +-- idxpart1_a_idx is not valid, so idxpart_a_idx should not become valid: +alter index idxpart_a_idx attach partition idxpart1_a_idx; +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; + relname | indisvalid +----------------+------------ + idxpart1_a_idx | f + idxpart_a_idx | f +(2 rows) + +-- after creating and attaching this, both idxpart1_a_idx and idxpart_a_idx +-- should become valid +create index on idxpart11 (a); +alter index idxpart1_a_idx attach partition idxpart11_a_idx; +select relname, indisvalid from pg_class join pg_index on indexrelid = oid + where relname like 'idxpart%' order by relname; + relname | indisvalid +-----------------+------------ + idxpart11_a_idx | t + idxpart1_a_idx | t + idxpart_a_idx | t +(3 rows) + +drop table idxpart; +-- verify dependency handling during ALTER TABLE DETACH PARTITION +create table idxpart (a int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 (a); +create index on idxpart (a); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +----------------+--------- + idxpart | p + idxpart1 | r + idxpart1_a_idx | i + idxpart2 | r + idxpart2_a_idx | i + idxpart3 | r + idxpart3_a_idx | i + idxpart_a_idx | I +(8 rows) + +-- a) after detaching partitions, the indexes can be dropped independently +alter table idxpart detach partition idxpart1; +alter table idxpart detach partition idxpart2; +alter table idxpart detach partition idxpart3; +drop index idxpart1_a_idx; +drop index idxpart2_a_idx; +drop index idxpart3_a_idx; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +---------------+--------- + idxpart | p + idxpart1 | r + idxpart2 | r + idxpart3 | r + idxpart_a_idx | I +(5 rows) + +drop table idxpart, idxpart1, idxpart2, idxpart3; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +---------+--------- +(0 rows) + +create table idxpart (a int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 (a); +create index on idxpart (a); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +-- b) 
after detaching, dropping the index on parent does not remove the others +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +----------------+--------- + idxpart | p + idxpart1 | r + idxpart1_a_idx | i + idxpart2 | r + idxpart2_a_idx | i + idxpart3 | r + idxpart3_a_idx | i + idxpart_a_idx | I +(8 rows) + +alter table idxpart detach partition idxpart1; +alter table idxpart detach partition idxpart2; +alter table idxpart detach partition idxpart3; +drop index idxpart_a_idx; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +----------------+--------- + idxpart | p + idxpart1 | r + idxpart1_a_idx | i + idxpart2 | r + idxpart2_a_idx | i + idxpart3 | r + idxpart3_a_idx | i +(7 rows) + +drop table idxpart, idxpart1, idxpart2, idxpart3; +select relname, relkind from pg_class where relname like 'idxpart%' order by relname; + relname | relkind +---------+--------- +(0 rows) + +create table idxpart (a int, b int, c int) partition by range(a); +create index on idxpart(c); +create table idxpart1 partition of idxpart for values from (0) to (250); +create table idxpart2 partition of idxpart for values from (250) to (500); +alter table idxpart detach partition idxpart2; +\d idxpart2 + Table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + c | integer | | | +Indexes: + "idxpart2_c_idx" btree (c) + +alter table idxpart2 drop column c; +\d idxpart2 + Table "public.idxpart2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | + +drop table idxpart, idxpart2; +-- Verify that expression indexes inherit correctly +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (like idxpart); +create index on idxpart1 ((a + b)); +create index on idxpart ((a + b)); +create table idxpart2 (like idxpart); +alter table idxpart attach partition idxpart1 for values from (0000) to (1000); +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create table idxpart3 partition of idxpart for values from (2000) to (3000); +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-------------------+------------------+--------------------------------------------------------------------------- + idxpart1_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((a + b))) + idxpart2_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((a + b))) + idxpart3_expr_idx | idxpart_expr_idx | CREATE INDEX idxpart3_expr_idx ON public.idxpart3 USING btree (((a + b))) +(3 rows) + +drop table idxpart; +-- Verify behavior for collation (mis)matches +create table idxpart (a text) partition by range (a); +create table idxpart1 (like idxpart); +create table idxpart2 (like idxpart); +create index on idxpart2 (a collate "POSIX"); +create index on idxpart2 (a); +create index on idxpart2 (a collate "C"); +alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); +alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); +create table idxpart3 
partition of idxpart for values from ('ccc') to ('ddd'); +create index on idxpart (a collate "C"); +create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-----------------+---------------+-------------------------------------------------------------------------------- + idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a COLLATE "C") + idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a COLLATE "POSIX") + idxpart2_a_idx1 | | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a) + idxpart2_a_idx2 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx2 ON public.idxpart2 USING btree (a COLLATE "C") + idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a COLLATE "C") + idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a COLLATE "C") + idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a COLLATE "C") +(7 rows) + +drop table idxpart; +-- Verify behavior for opclass (mis)matches +create table idxpart (a text) partition by range (a); +create table idxpart1 (like idxpart); +create table idxpart2 (like idxpart); +create index on idxpart2 (a); +alter table idxpart attach partition idxpart1 for values from ('aaa') to ('bbb'); +alter table idxpart attach partition idxpart2 for values from ('bbb') to ('ccc'); +create table idxpart3 partition of idxpart for values from ('ccc') to ('ddd'); +create index on idxpart (a text_pattern_ops); +create table idxpart4 partition of idxpart for values from ('ddd') to ('eee'); +-- must *not* have attached the index we created on idxpart2 +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-----------------+---------------+------------------------------------------------------------------------------------ + idxpart1_a_idx | idxpart_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a text_pattern_ops) + idxpart2_a_idx | | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) + idxpart2_a_idx1 | idxpart_a_idx | CREATE INDEX idxpart2_a_idx1 ON public.idxpart2 USING btree (a text_pattern_ops) + idxpart3_a_idx | idxpart_a_idx | CREATE INDEX idxpart3_a_idx ON public.idxpart3 USING btree (a text_pattern_ops) + idxpart4_a_idx | idxpart_a_idx | CREATE INDEX idxpart4_a_idx ON public.idxpart4 USING btree (a text_pattern_ops) + idxpart_a_idx | | CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a text_pattern_ops) +(6 rows) + +drop index idxpart_a_idx; +create index on only idxpart (a text_pattern_ops); +-- must reject +alter index idxpart_a_idx attach partition idxpart2_a_idx; +ERROR: cannot attach index "idxpart2_a_idx" as a partition of index "idxpart_a_idx" +DETAIL: The index definitions do not match. 
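For the attachment to be accepted, the partition's index would have to use the same operator class as the parent index. A hypothetical pair of statements that would succeed at this point (the index name is invented for illustration; this is not part of the expected output):

  -- illustrative sketch; idxpart2_a_tpo_idx is an invented name
  create index idxpart2_a_tpo_idx on idxpart2 (a text_pattern_ops);
  alter index idxpart_a_idx attach partition idxpart2_a_tpo_idx;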
+drop table idxpart; +-- Verify that attaching indexes maps attribute numbers correctly +create table idxpart (col1 int, a int, col2 int, b int) partition by range (a); +create table idxpart1 (b int, col1 int, col2 int, col3 int, a int); +alter table idxpart drop column col1, drop column col2; +alter table idxpart1 drop column col1, drop column col2, drop column col3; +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +create index idxpart_1_idx on only idxpart (b, a); +create index idxpart1_1_idx on idxpart1 (b, a); +create index idxpart1_1b_idx on idxpart1 (b); +-- test expressions and partial-index predicate, too +create index idxpart_2_idx on only idxpart ((b + a)) where a > 1; +create index idxpart1_2_idx on idxpart1 ((b + a)) where a > 1; +create index idxpart1_2b_idx on idxpart1 ((a + b)) where a > 1; +create index idxpart1_2c_idx on idxpart1 ((b + a)) where b > 1; +alter index idxpart_1_idx attach partition idxpart1_1b_idx; -- fail +ERROR: cannot attach index "idxpart1_1b_idx" as a partition of index "idxpart_1_idx" +DETAIL: The index definitions do not match. +alter index idxpart_1_idx attach partition idxpart1_1_idx; +alter index idxpart_2_idx attach partition idxpart1_2b_idx; -- fail +ERROR: cannot attach index "idxpart1_2b_idx" as a partition of index "idxpart_2_idx" +DETAIL: The index definitions do not match. +alter index idxpart_2_idx attach partition idxpart1_2c_idx; -- fail +ERROR: cannot attach index "idxpart1_2c_idx" as a partition of index "idxpart_2_idx" +DETAIL: The index definitions do not match. +alter index idxpart_2_idx attach partition idxpart1_2_idx; -- ok +select relname as child, inhparent::regclass as parent, pg_get_indexdef as childdef + from pg_class left join pg_inherits on inhrelid = oid, + lateral pg_get_indexdef(pg_class.oid) + where relkind in ('i', 'I') and relname like 'idxpart%' order by relname; + child | parent | childdef +-----------------+---------------+----------------------------------------------------------------------------------------- + idxpart1_1_idx | idxpart_1_idx | CREATE INDEX idxpart1_1_idx ON public.idxpart1 USING btree (b, a) + idxpart1_1b_idx | | CREATE INDEX idxpart1_1b_idx ON public.idxpart1 USING btree (b) + idxpart1_2_idx | idxpart_2_idx | CREATE INDEX idxpart1_2_idx ON public.idxpart1 USING btree (((b + a))) WHERE (a > 1) + idxpart1_2b_idx | | CREATE INDEX idxpart1_2b_idx ON public.idxpart1 USING btree (((a + b))) WHERE (a > 1) + idxpart1_2c_idx | | CREATE INDEX idxpart1_2c_idx ON public.idxpart1 USING btree (((b + a))) WHERE (b > 1) + idxpart_1_idx | | CREATE INDEX idxpart_1_idx ON ONLY public.idxpart USING btree (b, a) + idxpart_2_idx | | CREATE INDEX idxpart_2_idx ON ONLY public.idxpart USING btree (((b + a))) WHERE (a > 1) +(7 rows) + +drop table idxpart; +-- Make sure the partition columns are mapped correctly +create table idxpart (a int, b int, c text) partition by range (a); +create index idxparti on idxpart (a); +create index idxparti2 on idxpart (c, b); +create table idxpart1 (c text, a int, b int); +alter table idxpart attach partition idxpart1 for values from (0) to (10); +create table idxpart2 (c text, a int, b int); +create index on idxpart2 (a); +create index on idxpart2 (c, b); +alter table idxpart attach partition idxpart2 for values from (10) to (20); +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + relname | pg_get_indexdef 
+------------------+--------------------------------------------------------------------- + idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) + idxpart1_c_b_idx | CREATE INDEX idxpart1_c_b_idx ON public.idxpart1 USING btree (c, b) + idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) + idxpart2_c_b_idx | CREATE INDEX idxpart2_c_b_idx ON public.idxpart2 USING btree (c, b) + idxparti | CREATE INDEX idxparti ON ONLY public.idxpart USING btree (a) + idxparti2 | CREATE INDEX idxparti2 ON ONLY public.idxpart USING btree (c, b) +(6 rows) + +drop table idxpart; +-- Verify that columns are mapped correctly in expression indexes +create table idxpart (col1 int, col2 int, a int, b int) partition by range (a); +create table idxpart1 (col2 int, b int, col1 int, a int); +create table idxpart2 (col1 int, col2 int, b int, a int); +alter table idxpart drop column col1, drop column col2; +alter table idxpart1 drop column col1, drop column col2; +alter table idxpart2 drop column col1, drop column col2; +create index on idxpart2 (abs(b)); +alter table idxpart attach partition idxpart2 for values from (0) to (1); +create index on idxpart (abs(b)); +create index on idxpart ((b + 1)); +alter table idxpart attach partition idxpart1 for values from (1) to (2); +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + relname | pg_get_indexdef +-------------------+------------------------------------------------------------------------------ + idxpart1_abs_idx | CREATE INDEX idxpart1_abs_idx ON public.idxpart1 USING btree (abs(b)) + idxpart1_expr_idx | CREATE INDEX idxpart1_expr_idx ON public.idxpart1 USING btree (((b + 1))) + idxpart2_abs_idx | CREATE INDEX idxpart2_abs_idx ON public.idxpart2 USING btree (abs(b)) + idxpart2_expr_idx | CREATE INDEX idxpart2_expr_idx ON public.idxpart2 USING btree (((b + 1))) + idxpart_abs_idx | CREATE INDEX idxpart_abs_idx ON ONLY public.idxpart USING btree (abs(b)) + idxpart_expr_idx | CREATE INDEX idxpart_expr_idx ON ONLY public.idxpart USING btree (((b + 1))) +(6 rows) + +drop table idxpart; +-- Verify that columns are mapped correctly for WHERE in a partial index +create table idxpart (col1 int, a int, col3 int, b int) partition by range (a); +alter table idxpart drop column col1, drop column col3; +create table idxpart1 (col1 int, col2 int, col3 int, col4 int, b int, a int); +alter table idxpart1 drop column col1, drop column col2, drop column col3, drop column col4; +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +create table idxpart2 (col1 int, col2 int, b int, a int); +create index on idxpart2 (a) where b > 1000; +alter table idxpart2 drop column col1, drop column col2; +alter table idxpart attach partition idxpart2 for values from (1000) to (2000); +create index on idxpart (a) where b > 1000; +select c.relname, pg_get_indexdef(indexrelid) + from pg_class c join pg_index i on c.oid = i.indexrelid + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + relname | pg_get_indexdef +----------------+------------------------------------------------------------------------------------ + idxpart1_a_idx | CREATE INDEX idxpart1_a_idx ON public.idxpart1 USING btree (a) WHERE (b > 1000) + idxpart2_a_idx | CREATE INDEX idxpart2_a_idx ON public.idxpart2 USING btree (a) WHERE (b > 1000) + idxpart_a_idx | 
CREATE INDEX idxpart_a_idx ON ONLY public.idxpart USING btree (a) WHERE (b > 1000) +(3 rows) + +drop table idxpart; +-- Column number mapping: dropped columns in the partition +create table idxpart1 (drop_1 int, drop_2 int, col_keep int, drop_3 int); +alter table idxpart1 drop column drop_1; +alter table idxpart1 drop column drop_2; +alter table idxpart1 drop column drop_3; +create index on idxpart1 (col_keep); +create table idxpart (col_keep int) partition by range (col_keep); +create index on idxpart (col_keep); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart + Partitioned table "public.idxpart" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition key: RANGE (col_keep) +Indexes: + "idxpart_col_keep_idx" btree (col_keep) +Number of partitions: 1 (Use \d+ to list them.) + +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (1000) +Indexes: + "idxpart1_col_keep_idx" btree (col_keep) + +select attrelid::regclass, attname, attnum from pg_attribute + where attrelid::regclass::text like 'idxpart%' and attnum > 0 + order by attrelid::regclass, attnum; + attrelid | attname | attnum +-----------------------+------------------------------+-------- + idxpart1 | ........pg.dropped.1........ | 1 + idxpart1 | ........pg.dropped.2........ | 2 + idxpart1 | col_keep | 3 + idxpart1 | ........pg.dropped.4........ | 4 + idxpart1_col_keep_idx | col_keep | 1 + idxpart | col_keep | 1 + idxpart_col_keep_idx | col_keep | 1 +(7 rows) + +drop table idxpart; +-- Column number mapping: dropped columns in the parent table +create table idxpart(drop_1 int, drop_2 int, col_keep int, drop_3 int) partition by range (col_keep); +alter table idxpart drop column drop_1; +alter table idxpart drop column drop_2; +alter table idxpart drop column drop_3; +create table idxpart1 (col_keep int); +create index on idxpart1 (col_keep); +create index on idxpart (col_keep); +alter table idxpart attach partition idxpart1 for values from (0) to (1000); +\d idxpart + Partitioned table "public.idxpart" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition key: RANGE (col_keep) +Indexes: + "idxpart_col_keep_idx" btree (col_keep) +Number of partitions: 1 (Use \d+ to list them.) + +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +----------+---------+-----------+----------+--------- + col_keep | integer | | | +Partition of: idxpart FOR VALUES FROM (0) TO (1000) +Indexes: + "idxpart1_col_keep_idx" btree (col_keep) + +select attrelid::regclass, attname, attnum from pg_attribute + where attrelid::regclass::text like 'idxpart%' and attnum > 0 + order by attrelid::regclass, attnum; + attrelid | attname | attnum +-----------------------+------------------------------+-------- + idxpart | ........pg.dropped.1........ | 1 + idxpart | ........pg.dropped.2........ | 2 + idxpart | col_keep | 3 + idxpart | ........pg.dropped.4........ 
| 4 + idxpart1 | col_keep | 1 + idxpart1_col_keep_idx | col_keep | 1 + idxpart_col_keep_idx | col_keep | 1 +(7 rows) + +drop table idxpart; +-- +-- Constraint-related indexes +-- +-- Verify that it works to add primary key / unique to partitioned tables +create table idxpart (a int primary key, b int) partition by range (a); +\d idxpart + Partitioned table "public.idxpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | | +Partition key: RANGE (a) +Indexes: + "idxpart_pkey" PRIMARY KEY, btree (a) +Number of partitions: 0 + +-- multiple primary key on child should fail +create table failpart partition of idxpart (b primary key) for values from (0) to (100); +ERROR: multiple primary keys for table "failpart" are not allowed +drop table idxpart; +-- primary key on child is okay if there's no PK in the parent, though +create table idxpart (a int) partition by range (a); +create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100); +\d idxpart1pk + Table "public.idxpart1pk" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | +Partition of: idxpart FOR VALUES FROM (0) TO (100) +Indexes: + "idxpart1pk_pkey" PRIMARY KEY, btree (a) + +drop table idxpart; +-- Failing to use the full partition key is not allowed +create table idxpart (a int unique, b int) partition by range (a, b); +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. +create table idxpart (a int, b int unique) partition by range (a, b); +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: UNIQUE constraint on table "idxpart" lacks column "a" which is part of the partition key. +create table idxpart (a int primary key, b int) partition by range (b, a); +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. +create table idxpart (a int, b int primary key) partition by range (b, a); +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "a" which is part of the partition key. +-- OK if you use them in some other order +create table idxpart (a int, b int, c text, primary key (a, b, c)) partition by range (b, c, a); +drop table idxpart; +-- not other types of index-based constraints +create table idxpart (a int, exclude (a with = )) partition by range (a); +ERROR: exclusion constraints are not supported on partitioned tables +LINE 1: create table idxpart (a int, exclude (a with = )) partition ... + ^ +-- no expressions in partition key for PK/UNIQUE +create table idxpart (a int primary key, b int) partition by range ((b + a)); +ERROR: unsupported PRIMARY KEY constraint with partition key definition +DETAIL: PRIMARY KEY constraints cannot be used when partition keys include expressions. +create table idxpart (a int unique, b int) partition by range ((b + a)); +ERROR: unsupported UNIQUE constraint with partition key definition +DETAIL: UNIQUE constraints cannot be used when partition keys include expressions. 
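The errors above apply only to constraint-backed (PRIMARY KEY/UNIQUE) indexes; a plain index on the same expression remains legal on a table whose partition key contains an expression. A minimal sketch with an invented table name, not part of the expected output:

  -- illustrative sketch; idxpart_exprkey is an invented name
  create table idxpart_exprkey (a int, b int) partition by range ((b + a));
  create index on idxpart_exprkey ((b + a));
  drop table idxpart_exprkey;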
+-- use ALTER TABLE to add a primary key +create table idxpart (a int, b int, c text) partition by range (a, b); +alter table idxpart add primary key (a); -- not an incomplete one though +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: PRIMARY KEY constraint on table "idxpart" lacks column "b" which is part of the partition key. +alter table idxpart add primary key (a, b); -- this works +\d idxpart + Partitioned table "public.idxpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | not null | + c | text | | | +Partition key: RANGE (a, b) +Indexes: + "idxpart_pkey" PRIMARY KEY, btree (a, b) +Number of partitions: 0 + +create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000); +\d idxpart1 + Table "public.idxpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | not null | + b | integer | | not null | + c | text | | | +Partition of: idxpart FOR VALUES FROM (0, 0) TO (1000, 1000) +Indexes: + "idxpart1_pkey" PRIMARY KEY, btree (a, b) + +drop table idxpart; +-- use ALTER TABLE to add a unique constraint +create table idxpart (a int, b int) partition by range (a, b); +alter table idxpart add unique (a); -- not an incomplete one though +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: UNIQUE constraint on table "idxpart" lacks column "b" which is part of the partition key. +alter table idxpart add unique (b, a); -- this works +\d idxpart + Partitioned table "public.idxpart" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition key: RANGE (a, b) +Indexes: + "idxpart_b_a_key" UNIQUE CONSTRAINT, btree (b, a) +Number of partitions: 0 + +drop table idxpart; +-- Exclusion constraints cannot be added +create table idxpart (a int, b int) partition by range (a); +alter table idxpart add exclude (a with =); +ERROR: exclusion constraints are not supported on partitioned tables +LINE 1: alter table idxpart add exclude (a with =); + ^ +drop table idxpart; +-- When (sub)partitions are created, they also contain the constraint +create table idxpart (a int, b int, primary key (a, b)) partition by range (a, b); +create table idxpart1 partition of idxpart for values from (1, 1) to (10, 10); +create table idxpart2 partition of idxpart for values from (10, 10) to (20, 20) + partition by range (b); +create table idxpart21 partition of idxpart2 for values from (10) to (15); +create table idxpart22 partition of idxpart2 for values from (15) to (20); +create table idxpart3 (b int not null, a int not null); +alter table idxpart attach partition idxpart3 for values from (20, 20) to (30, 30); +select conname, contype, conrelid::regclass, conindid::regclass, conkey + from pg_constraint where conrelid::regclass::text like 'idxpart%' + order by conname; + conname | contype | conrelid | conindid | conkey +----------------+---------+-----------+----------------+-------- + idxpart1_pkey | p | idxpart1 | idxpart1_pkey | {1,2} + idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} + idxpart22_pkey | p | idxpart22 | idxpart22_pkey | {1,2} + idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} + idxpart3_pkey | p | idxpart3 | idxpart3_pkey | {2,1} + idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} +(6 rows) + +drop table idxpart; +-- Verify that 
multi-layer partitioning honors the requirement that all +-- columns in the partition key must appear in primary/unique key +create table idxpart (a int, b int, primary key (a)) partition by range (a); +create table idxpart2 partition of idxpart +for values from (0) to (1000) partition by range (b); -- fail +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: PRIMARY KEY constraint on table "idxpart2" lacks column "b" which is part of the partition key. +drop table idxpart; +-- Ditto for the ATTACH PARTITION case +create table idxpart (a int unique, b int) partition by range (a); +create table idxpart1 (a int not null, b int, unique (a, b)) + partition by range (a, b); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: UNIQUE constraint on table "idxpart1" lacks column "b" which is part of the partition key. +DROP TABLE idxpart, idxpart1; +-- Multi-layer partitioning works correctly in this case: +create table idxpart (a int, b int, primary key (a, b)) partition by range (a); +create table idxpart2 partition of idxpart for values from (0) to (1000) partition by range (b); +create table idxpart21 partition of idxpart2 for values from (0) to (1000); +select conname, contype, conrelid::regclass, conindid::regclass, conkey + from pg_constraint where conrelid::regclass::text like 'idxpart%' + order by conname; + conname | contype | conrelid | conindid | conkey +----------------+---------+-----------+----------------+-------- + idxpart21_pkey | p | idxpart21 | idxpart21_pkey | {1,2} + idxpart2_pkey | p | idxpart2 | idxpart2_pkey | {1,2} + idxpart_pkey | p | idxpart | idxpart_pkey | {1,2} +(3 rows) + +drop table idxpart; +-- If a partitioned table has a unique/PK constraint, then it's not possible +-- to drop the corresponding constraint in the children; nor it's possible +-- to drop the indexes individually. Dropping the constraint in the parent +-- gets rid of the lot. +create table idxpart (i int) partition by hash (i); +create table idxpart0 partition of idxpart (i) for values with (modulus 2, remainder 0); +create table idxpart1 partition of idxpart (i) for values with (modulus 2, remainder 1); +alter table idxpart0 add primary key(i); +alter table idxpart add primary key(i); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- + idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t + idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t + idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t +(3 rows) + +drop index idxpart0_pkey; -- fail +ERROR: cannot drop index idxpart0_pkey because index idxpart_pkey requires it +HINT: You can drop index idxpart_pkey instead. 
+drop index idxpart1_pkey; -- fail +ERROR: cannot drop index idxpart1_pkey because index idxpart_pkey requires it +HINT: You can drop index idxpart_pkey instead. +alter table idxpart0 drop constraint idxpart0_pkey; -- fail +ERROR: cannot drop inherited constraint "idxpart0_pkey" of relation "idxpart0" +alter table idxpart1 drop constraint idxpart1_pkey; -- fail +ERROR: cannot drop inherited constraint "idxpart1_pkey" of relation "idxpart1" +alter table idxpart drop constraint idxpart_pkey; -- ok +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+------------+-----------+------------+---------+------------+-------------+--------------+-------------- +(0 rows) + +drop table idxpart; +-- If the partition to be attached already has a primary key, fail if +-- it doesn't match the parent's PK. +CREATE TABLE idxpart (c1 INT PRIMARY KEY, c2 INT, c3 VARCHAR(10)) PARTITION BY RANGE(c1); +CREATE TABLE idxpart1 (LIKE idxpart); +ALTER TABLE idxpart1 ADD PRIMARY KEY (c1, c2); +ALTER TABLE idxpart ATTACH PARTITION idxpart1 FOR VALUES FROM (100) TO (200); +ERROR: multiple primary keys for table "idxpart1" are not allowed +DROP TABLE idxpart, idxpart1; +-- Ditto if there is some distance between the PKs (subpartitioning) +create table idxpart (a int, b int, primary key (a)) partition by range (a); +create table idxpart1 (a int not null, b int) partition by range (a); +create table idxpart11 (a int not null, b int primary key); +alter table idxpart1 attach partition idxpart11 for values from (0) to (1000); +alter table idxpart attach partition idxpart1 for values from (0) to (10000); +ERROR: multiple primary keys for table "idxpart11" are not allowed +drop table idxpart, idxpart1, idxpart11; +-- If a partitioned table has a constraint whose index is not valid, +-- attaching a missing partition makes it valid. 
+create table idxpart (a int) partition by range (a); +create table idxpart0 (like idxpart); +alter table idxpart0 add primary key (a); +alter table idxpart attach partition idxpart0 for values from (0) to (1000); +alter table only idxpart add primary key (a); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+---------------+-----------+------------+---------------+------------+-------------+--------------+-------------- + idxpart0 | idxpart0_pkey | | t | idxpart0_pkey | t | 0 | t | t + idxpart | idxpart_pkey | | f | idxpart_pkey | t | 0 | t | t +(2 rows) + +alter index idxpart_pkey attach partition idxpart0_pkey; +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+---------------+--------------+------------+---------------+------------+-------------+--------------+-------------- + idxpart0 | idxpart0_pkey | idxpart_pkey | t | idxpart0_pkey | f | 1 | t | t + idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t +(2 rows) + +drop table idxpart; +-- Related to the above scenario: ADD PRIMARY KEY on the parent mustn't +-- automatically propagate NOT NULL to child columns. +create table idxpart (a int) partition by range (a); +create table idxpart0 (like idxpart); +alter table idxpart0 add unique (a); +alter table idxpart attach partition idxpart0 default; +alter table only idxpart add primary key (a); -- fail, no NOT NULL constraint +ERROR: constraint must be added to child tables too +DETAIL: Column "a" of relation "idxpart0" is not already NOT NULL. +HINT: Do not specify the ONLY keyword. +alter table idxpart0 alter column a set not null; +alter table only idxpart add primary key (a); -- now it works +alter table idxpart0 alter column a drop not null; -- fail, pkey needs it +ERROR: column "a" is marked NOT NULL in parent table +drop table idxpart; +-- if a partition has a unique index without a constraint, does not attach +-- automatically; creates a new index instead. 
+create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int not null, b int); +create unique index on idxpart1 (a); +alter table idxpart add primary key (a); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +select indrelid::regclass, indexrelid::regclass, inhparent::regclass, indisvalid, + conname, conislocal, coninhcount, connoinherit, convalidated + from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) + left join pg_constraint con on (idx.indexrelid = con.conindid) + where indrelid::regclass::text like 'idxpart%' + order by indexrelid::regclass::text collate "C"; + indrelid | indexrelid | inhparent | indisvalid | conname | conislocal | coninhcount | connoinherit | convalidated +----------+----------------+--------------+------------+---------------+------------+-------------+--------------+-------------- + idxpart1 | idxpart1_a_idx | | t | | | | | + idxpart1 | idxpart1_pkey | idxpart_pkey | t | idxpart1_pkey | f | 1 | f | t + idxpart | idxpart_pkey | | t | idxpart_pkey | t | 0 | t | t +(3 rows) + +drop table idxpart; +-- Can't attach an index without a corresponding constraint +create table idxpart (a int, b int) partition by range (a); +create table idxpart1 (a int not null, b int); +create unique index on idxpart1 (a); +alter table idxpart attach partition idxpart1 for values from (1) to (1000); +alter table only idxpart add primary key (a); +alter index idxpart_pkey attach partition idxpart1_a_idx; -- fail +ERROR: cannot attach index "idxpart1_a_idx" as a partition of index "idxpart_pkey" +DETAIL: The index "idxpart_pkey" belongs to a constraint in table "idxpart" but no constraint exists for index "idxpart1_a_idx". +drop table idxpart; +-- Test that unique constraints are working +create table idxpart (a int, b text, primary key (a, b)) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100000); +create table idxpart2 (c int, like idxpart); +insert into idxpart2 (c, a, b) values (42, 572814, 'inserted first'); +alter table idxpart2 drop column c; +create unique index on idxpart (a); +alter table idxpart attach partition idxpart2 for values from (100000) to (1000000); +insert into idxpart values (0, 'zero'), (42, 'life'), (2^16, 'sixteen'); +insert into idxpart select 2^g, format('two to power of %s', g) from generate_series(15, 17) g; +ERROR: duplicate key value violates unique constraint "idxpart1_a_idx" +DETAIL: Key (a)=(65536) already exists. +insert into idxpart values (16, 'sixteen'); +insert into idxpart (b, a) values ('one', 142857), ('two', 285714); +insert into idxpart select a * 2, b || b from idxpart where a between 2^16 and 2^19; +ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" +DETAIL: Key (a)=(285714) already exists. +insert into idxpart values (572814, 'five'); +ERROR: duplicate key value violates unique constraint "idxpart2_a_idx" +DETAIL: Key (a)=(572814) already exists. 
+insert into idxpart values (857142, 'six'); +select tableoid::regclass, * from idxpart order by a; + tableoid | a | b +----------+--------+---------------- + idxpart1 | 0 | zero + idxpart1 | 16 | sixteen + idxpart1 | 42 | life + idxpart1 | 65536 | sixteen + idxpart2 | 142857 | one + idxpart2 | 285714 | two + idxpart2 | 572814 | inserted first + idxpart2 | 857142 | six +(8 rows) + +drop table idxpart; +-- Test some other non-btree index types +create table idxpart (a int, b text, c int[]) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100000); +set enable_seqscan to off; +create index idxpart_brin on idxpart using brin(b); +explain (costs off) select * from idxpart where b = 'abcd'; + QUERY PLAN +------------------------------------------- + Bitmap Heap Scan on idxpart1 idxpart + Recheck Cond: (b = 'abcd'::text) + -> Bitmap Index Scan on idxpart1_b_idx + Index Cond: (b = 'abcd'::text) +(4 rows) + +drop index idxpart_brin; +create index idxpart_spgist on idxpart using spgist(b); +explain (costs off) select * from idxpart where b = 'abcd'; + QUERY PLAN +------------------------------------------- + Bitmap Heap Scan on idxpart1 idxpart + Recheck Cond: (b = 'abcd'::text) + -> Bitmap Index Scan on idxpart1_b_idx + Index Cond: (b = 'abcd'::text) +(4 rows) + +drop index idxpart_spgist; +create index idxpart_gin on idxpart using gin(c); +explain (costs off) select * from idxpart where c @> array[42]; + QUERY PLAN +---------------------------------------------- + Bitmap Heap Scan on idxpart1 idxpart + Recheck Cond: (c @> '{42}'::integer[]) + -> Bitmap Index Scan on idxpart1_c_idx + Index Cond: (c @> '{42}'::integer[]) +(4 rows) + +drop index idxpart_gin; +reset enable_seqscan; +drop table idxpart; +-- intentionally leave some objects around +create table idxpart (a int) partition by range (a); +create table idxpart1 partition of idxpart for values from (0) to (100); +create table idxpart2 partition of idxpart for values from (100) to (1000) + partition by range (a); +create table idxpart21 partition of idxpart2 for values from (100) to (200); +create table idxpart22 partition of idxpart2 for values from (200) to (300); +create index on idxpart22 (a); +create index on only idxpart2 (a); +alter index idxpart2_a_idx attach partition idxpart22_a_idx; +create index on idxpart (a); +create table idxpart_another (a int, b int, primary key (a, b)) partition by range (a); +create table idxpart_another_1 partition of idxpart_another for values from (0) to (100); +create table idxpart3 (c int, b int, a int) partition by range (a); +alter table idxpart3 drop column b, drop column c; +create table idxpart31 partition of idxpart3 for values from (1000) to (1200); +create table idxpart32 partition of idxpart3 for values from (1200) to (1400); +alter table idxpart attach partition idxpart3 for values from (1000) to (2000); +-- More objects intentionally left behind, to verify some pg_dump/pg_upgrade +-- behavior; see https://postgr.es/m/20190321204928.GA17535@alvherre.pgsql +create schema regress_indexing; +set search_path to regress_indexing; +create table pk (a int primary key) partition by range (a); +create table pk1 partition of pk for values from (0) to (1000); +create table pk2 (b int, a int); +alter table pk2 drop column b; +alter table pk2 alter a set not null; +alter table pk attach partition pk2 for values from (1000) to (2000); +create table pk3 partition of pk for values from (2000) to (3000); +create table pk4 (like pk); +alter table pk attach partition 
pk4 for values from (3000) to (4000); +create table pk5 (like pk) partition by range (a); +create table pk51 partition of pk5 for values from (4000) to (4500); +create table pk52 partition of pk5 for values from (4500) to (5000); +alter table pk attach partition pk5 for values from (4000) to (5000); +reset search_path; +-- Test that covering partitioned indexes work in various cases +create table covidxpart (a int, b int) partition by list (a); +create unique index on covidxpart (a) include (b); +create table covidxpart1 partition of covidxpart for values in (1); +create table covidxpart2 partition of covidxpart for values in (2); +insert into covidxpart values (1, 1); +insert into covidxpart values (1, 1); +ERROR: duplicate key value violates unique constraint "covidxpart1_a_b_idx" +DETAIL: Key (a)=(1) already exists. +create table covidxpart3 (b int, c int, a int); +alter table covidxpart3 drop c; +alter table covidxpart attach partition covidxpart3 for values in (3); +insert into covidxpart values (3, 1); +insert into covidxpart values (3, 1); +ERROR: duplicate key value violates unique constraint "covidxpart3_a_b_idx" +DETAIL: Key (a)=(3) already exists. +create table covidxpart4 (b int, a int); +create unique index on covidxpart4 (a) include (b); +create unique index on covidxpart4 (a); +alter table covidxpart attach partition covidxpart4 for values in (4); +insert into covidxpart values (4, 1); +insert into covidxpart values (4, 1); +ERROR: duplicate key value violates unique constraint "covidxpart4_a_b_idx" +DETAIL: Key (a)=(4) already exists. +create unique index on covidxpart (b) include (a); -- should fail +ERROR: unique constraint on partitioned table must include all partitioning columns +DETAIL: UNIQUE constraint on table "covidxpart" lacks column "a" which is part of the partition key. +-- check that detaching a partition also detaches the primary key constraint +create table parted_pk_detach_test (a int primary key) partition by list (a); +create table parted_pk_detach_test1 partition of parted_pk_detach_test for values in (1); +alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; -- should fail +ERROR: cannot drop inherited constraint "parted_pk_detach_test1_pkey" of relation "parted_pk_detach_test1" +alter table parted_pk_detach_test detach partition parted_pk_detach_test1; +alter table parted_pk_detach_test1 drop constraint parted_pk_detach_test1_pkey; +drop table parted_pk_detach_test, parted_pk_detach_test1; +create table parted_uniq_detach_test (a int unique) partition by list (a); +create table parted_uniq_detach_test1 partition of parted_uniq_detach_test for values in (1); +alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; -- should fail +ERROR: cannot drop inherited constraint "parted_uniq_detach_test1_a_key" of relation "parted_uniq_detach_test1" +alter table parted_uniq_detach_test detach partition parted_uniq_detach_test1; +alter table parted_uniq_detach_test1 drop constraint parted_uniq_detach_test1_a_key; +drop table parted_uniq_detach_test, parted_uniq_detach_test1; +-- check that dropping a column takes with it any partitioned indexes +-- depending on it. +create table parted_index_col_drop(a int, b int, c int) + partition by list (a); +create table parted_index_col_drop1 partition of parted_index_col_drop + for values in (1) partition by list (a); +-- leave this partition without children. 
+create table parted_index_col_drop2 partition of parted_index_col_drop + for values in (2) partition by list (a); +create table parted_index_col_drop11 partition of parted_index_col_drop1 + for values in (1); +create index on parted_index_col_drop (b); +create index on parted_index_col_drop (c); +create index on parted_index_col_drop (b, c); +alter table parted_index_col_drop drop column c; +\d parted_index_col_drop + Partitioned table "public.parted_index_col_drop" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition key: LIST (a) +Indexes: + "parted_index_col_drop_b_idx" btree (b) +Number of partitions: 2 (Use \d+ to list them.) + +\d parted_index_col_drop1 + Partitioned table "public.parted_index_col_drop1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: parted_index_col_drop FOR VALUES IN (1) +Partition key: LIST (a) +Indexes: + "parted_index_col_drop1_b_idx" btree (b) +Number of partitions: 1 (Use \d+ to list them.) + +\d parted_index_col_drop2 + Partitioned table "public.parted_index_col_drop2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: parted_index_col_drop FOR VALUES IN (2) +Partition key: LIST (a) +Indexes: + "parted_index_col_drop2_b_idx" btree (b) +Number of partitions: 0 + +\d parted_index_col_drop11 + Table "public.parted_index_col_drop11" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: parted_index_col_drop1 FOR VALUES IN (1) +Indexes: + "parted_index_col_drop11_b_idx" btree (b) + +drop table parted_index_col_drop; +-- Check that invalid indexes are not selected when attaching a partition. +create table parted_inval_tab (a int) partition by range (a); +create index parted_inval_idx on parted_inval_tab (a); +create table parted_inval_tab_1 (a int) partition by range (a); +create table parted_inval_tab_1_1 partition of parted_inval_tab_1 + for values from (0) to (10); +create table parted_inval_tab_1_2 partition of parted_inval_tab_1 + for values from (10) to (20); +-- this creates an invalid index. +create index parted_inval_ixd_1 on only parted_inval_tab_1 (a); +-- this creates new indexes for all the partitions of parted_inval_tab_1, +-- discarding the invalid index created previously as what is chosen. 
+alter table parted_inval_tab attach partition parted_inval_tab_1 + for values from (1) to (100); +select indexrelid::regclass, indisvalid, + indrelid::regclass, inhparent::regclass + from pg_index idx left join + pg_inherits inh on (idx.indexrelid = inh.inhrelid) + where indexrelid::regclass::text like 'parted_inval%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indisvalid | indrelid | inhparent +----------------------------+------------+----------------------+-------------------------- + parted_inval_idx | t | parted_inval_tab | + parted_inval_ixd_1 | f | parted_inval_tab_1 | + parted_inval_tab_1_1_a_idx | t | parted_inval_tab_1_1 | parted_inval_tab_1_a_idx + parted_inval_tab_1_2_a_idx | t | parted_inval_tab_1_2 | parted_inval_tab_1_a_idx + parted_inval_tab_1_a_idx | t | parted_inval_tab_1 | parted_inval_idx +(5 rows) + +drop table parted_inval_tab; +-- Check setup of indisvalid across a complex partition tree on index +-- creation. If one index in a partition index is invalid, so should its +-- partitioned index. +create table parted_isvalid_tab (a int, b int) partition by range (a); +create table parted_isvalid_tab_1 partition of parted_isvalid_tab + for values from (1) to (10) partition by range (a); +create table parted_isvalid_tab_2 partition of parted_isvalid_tab + for values from (10) to (20) partition by range (a); +create table parted_isvalid_tab_11 partition of parted_isvalid_tab_1 + for values from (1) to (5); +create table parted_isvalid_tab_12 partition of parted_isvalid_tab_1 + for values from (5) to (10); +-- create an invalid index on one of the partitions. +insert into parted_isvalid_tab_11 values (1, 0); +create index concurrently parted_isvalid_idx_11 on parted_isvalid_tab_11 ((a/b)); +ERROR: division by zero +-- The previous invalid index is selected, invalidating all the indexes up to +-- the top-most parent. +create index parted_isvalid_idx on parted_isvalid_tab ((a/b)); +select indexrelid::regclass, indisvalid, + indrelid::regclass, inhparent::regclass + from pg_index idx left join + pg_inherits inh on (idx.indexrelid = inh.inhrelid) + where indexrelid::regclass::text like 'parted_isvalid%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indisvalid | indrelid | inhparent +--------------------------------+------------+-----------------------+------------------------------- + parted_isvalid_idx | f | parted_isvalid_tab | + parted_isvalid_idx_11 | f | parted_isvalid_tab_11 | parted_isvalid_tab_1_expr_idx + parted_isvalid_tab_12_expr_idx | t | parted_isvalid_tab_12 | parted_isvalid_tab_1_expr_idx + parted_isvalid_tab_1_expr_idx | f | parted_isvalid_tab_1 | parted_isvalid_idx + parted_isvalid_tab_2_expr_idx | t | parted_isvalid_tab_2 | parted_isvalid_idx +(5 rows) + +drop table parted_isvalid_tab; +-- Check state of replica indexes when attaching a partition. +begin; +create table parted_replica_tab (id int not null) partition by range (id); +create table parted_replica_tab_1 partition of parted_replica_tab + for values from (1) to (10) partition by range (id); +create table parted_replica_tab_11 partition of parted_replica_tab_1 + for values from (1) to (5); +create unique index parted_replica_idx + on only parted_replica_tab using btree (id); +create unique index parted_replica_idx_1 + on only parted_replica_tab_1 using btree (id); +-- This triggers an update of pg_index.indisreplident for parted_replica_idx. 
+alter table only parted_replica_tab_1 replica identity + using index parted_replica_idx_1; +create unique index parted_replica_idx_11 on parted_replica_tab_11 USING btree (id); +select indexrelid::regclass, indisvalid, indisreplident, + indrelid::regclass, inhparent::regclass + from pg_index idx left join + pg_inherits inh on (idx.indexrelid = inh.inhrelid) + where indexrelid::regclass::text like 'parted_replica%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indisvalid | indisreplident | indrelid | inhparent +-----------------------+------------+----------------+-----------------------+----------- + parted_replica_idx | f | f | parted_replica_tab | + parted_replica_idx_1 | f | t | parted_replica_tab_1 | + parted_replica_idx_11 | t | f | parted_replica_tab_11 | +(3 rows) + +-- parted_replica_idx is not valid yet here, because parted_replica_idx_1 +-- is not valid. +alter index parted_replica_idx ATTACH PARTITION parted_replica_idx_1; +select indexrelid::regclass, indisvalid, indisreplident, + indrelid::regclass, inhparent::regclass + from pg_index idx left join + pg_inherits inh on (idx.indexrelid = inh.inhrelid) + where indexrelid::regclass::text like 'parted_replica%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indisvalid | indisreplident | indrelid | inhparent +-----------------------+------------+----------------+-----------------------+-------------------- + parted_replica_idx | f | f | parted_replica_tab | + parted_replica_idx_1 | f | t | parted_replica_tab_1 | parted_replica_idx + parted_replica_idx_11 | t | f | parted_replica_tab_11 | +(3 rows) + +-- parted_replica_idx becomes valid here. +alter index parted_replica_idx_1 ATTACH PARTITION parted_replica_idx_11; +alter table only parted_replica_tab_1 replica identity + using index parted_replica_idx_1; +commit; +select indexrelid::regclass, indisvalid, indisreplident, + indrelid::regclass, inhparent::regclass + from pg_index idx left join + pg_inherits inh on (idx.indexrelid = inh.inhrelid) + where indexrelid::regclass::text like 'parted_replica%' + order by indexrelid::regclass::text collate "C"; + indexrelid | indisvalid | indisreplident | indrelid | inhparent +-----------------------+------------+----------------+-----------------------+---------------------- + parted_replica_idx | t | f | parted_replica_tab | + parted_replica_idx_1 | t | t | parted_replica_tab_1 | parted_replica_idx + parted_replica_idx_11 | t | f | parted_replica_tab_11 | parted_replica_idx_1 +(3 rows) + +drop table parted_replica_tab; diff --git a/src/test/regress/expected/indirect_toast.out b/src/test/regress/expected/indirect_toast.out new file mode 100644 index 0000000..44b54dc --- /dev/null +++ b/src/test/regress/expected/indirect_toast.out @@ -0,0 +1,166 @@ +-- +-- Tests for external toast datums +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION make_tuple_indirect (record) + RETURNS record + AS :'regresslib' + LANGUAGE C STRICT; +-- Other compression algorithms may cause the compressed data to be stored +-- inline. pglz guarantees that the data is externalized, so stick to it. 
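-- Illustrative aside (not part of the expected output): the SET that follows
-- pins pglz as the default compression method for the columns created below.
-- Which method a stored value actually ended up with can be inspected with
-- pg_column_compression(); the table and column names here are hypothetical.
--   SELECT pg_column_compression(v) FROM some_toasted_tab;
--   -- returns 'pglz', 'lz4', or NULL when the value is not compressed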
+SET default_toast_compression = 'pglz'; +CREATE TABLE indtoasttest(descr text, cnt int DEFAULT 0, f1 text, f2 text); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-compressed', repeat('1234567890',1000), repeat('1234567890',1000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('two-toasted', repeat('1234567890',30000), repeat('1234567890',50000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-compressed,one-null', NULL, repeat('1234567890',1000)); +INSERT INTO indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null', NULL, repeat('1234567890',50000)); +-- check whether indirect tuples works on the most basic level +SELECT descr, substring(make_tuple_indirect(indtoasttest)::text, 1, 200) FROM indtoasttest; + descr | substring +-------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + two-compressed | (two-compressed,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 + two-toasted | (two-toasted,0,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 + one-compressed,one-null | ("one-compressed,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + one-toasted,one-null | ("one-toasted,one-null",0,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- modification without changing varlenas +UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 + (two-toasted,1,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 + ("one-compressed,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- modification without modifying assigned value +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
(two-compressed,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 + (two-toasted,2,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 + ("one-compressed,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",2,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- modification modifying, but effectively not changing +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 + (two-toasted,3,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 + ("one-compressed,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",3,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 + (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 + ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
(two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 + (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 + ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- check we didn't screw with main/toast tuple visibility +VACUUM FREEZE indtoasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 + (two-toasted,4,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 + ("one-compressed,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- now create a trigger that forces all Datums to be indirect ones +CREATE FUNCTION update_using_indirect() + RETURNS trigger + LANGUAGE plpgsql AS $$ +BEGIN + NEW := make_tuple_indirect(NEW); + RETURN NEW; +END$$; +CREATE TRIGGER indtoasttest_update_indirect + BEFORE INSERT OR UPDATE + ON indtoasttest + FOR EACH ROW + EXECUTE PROCEDURE update_using_indirect(); +-- modification without changing varlenas +UPDATE indtoasttest SET cnt = cnt +1 RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 + (two-toasted,5,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 + ("one-compressed,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- 
modification without modifying assigned value +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 + (two-toasted,6,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 + ("one-compressed,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",6,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +-- modification modifying, but effectively not changing +UPDATE indtoasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901 + (two-toasted,7,-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234 + ("one-compressed,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",7,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +UPDATE indtoasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(indtoasttest::text, 1, 200); + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 + ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 +(4 rows) + +INSERT INTO 
indtoasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 + ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 + ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +(5 rows) + +-- check we didn't screw with main/toast tuple visibility +VACUUM FREEZE indtoasttest; +SELECT substring(indtoasttest::text, 1, 200) FROM indtoasttest; + substring +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + (two-compressed,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + (two-toasted,8,--123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 + ("one-compressed,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ("one-toasted,one-null",8,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 + ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +(5 rows) + +DROP TABLE indtoasttest; +DROP FUNCTION update_using_indirect(); +RESET default_toast_compression; diff --git a/src/test/regress/expected/inet.out b/src/test/regress/expected/inet.out new file mode 100644 index 0000000..b6895d9 --- /dev/null +++ b/src/test/regress/expected/inet.out @@ -0,0 +1,1095 @@ +-- +-- INET +-- +-- prepare the table... 
+DROP TABLE INET_TBL; +ERROR: table "inet_tbl" does not exist +CREATE TABLE INET_TBL (c cidr, i inet); +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1', '192.168.1.226/24'); +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1.0/26', '192.168.1.226'); +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1', '192.168.1.0/24'); +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1', '192.168.1.0/25'); +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1', '192.168.1.255/24'); +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1', '192.168.1.255/25'); +INSERT INTO INET_TBL (c, i) VALUES ('10', '10.1.2.3/8'); +INSERT INTO INET_TBL (c, i) VALUES ('10.0.0.0', '10.1.2.3/8'); +INSERT INTO INET_TBL (c, i) VALUES ('10.1.2.3', '10.1.2.3/32'); +INSERT INTO INET_TBL (c, i) VALUES ('10.1.2', '10.1.2.3/24'); +INSERT INTO INET_TBL (c, i) VALUES ('10.1', '10.1.2.3/16'); +INSERT INTO INET_TBL (c, i) VALUES ('10', '10.1.2.3/8'); +INSERT INTO INET_TBL (c, i) VALUES ('10', '11.1.2.3/8'); +INSERT INTO INET_TBL (c, i) VALUES ('10', '9.1.2.3/8'); +INSERT INTO INET_TBL (c, i) VALUES ('10:23::f1', '10:23::f1/64'); +INSERT INTO INET_TBL (c, i) VALUES ('10:23::8000/113', '10:23::ffff'); +INSERT INTO INET_TBL (c, i) VALUES ('::ffff:1.2.3.4', '::4.3.2.1/24'); +-- check that CIDR rejects invalid input: +INSERT INTO INET_TBL (c, i) VALUES ('192.168.1.2/30', '192.168.1.226'); +ERROR: invalid cidr value: "192.168.1.2/30" +LINE 1: INSERT INTO INET_TBL (c, i) VALUES ('192.168.1.2/30', '192.1... + ^ +DETAIL: Value has bits set to right of mask. +INSERT INTO INET_TBL (c, i) VALUES ('1234::1234::1234', '::1.2.3.4'); +ERROR: invalid input syntax for type cidr: "1234::1234::1234" +LINE 1: INSERT INTO INET_TBL (c, i) VALUES ('1234::1234::1234', '::1... + ^ +-- check that CIDR rejects invalid input when converting from text: +INSERT INTO INET_TBL (c, i) VALUES (cidr('192.168.1.2/30'), '192.168.1.226'); +ERROR: invalid cidr value: "192.168.1.2/30" +LINE 1: INSERT INTO INET_TBL (c, i) VALUES (cidr('192.168.1.2/30'), ... + ^ +DETAIL: Value has bits set to right of mask. +INSERT INTO INET_TBL (c, i) VALUES (cidr('ffff:ffff:ffff:ffff::/24'), '::192.168.1.226'); +ERROR: invalid cidr value: "ffff:ffff:ffff:ffff::/24" +LINE 1: INSERT INTO INET_TBL (c, i) VALUES (cidr('ffff:ffff:ffff:fff... + ^ +DETAIL: Value has bits set to right of mask. 
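-- Illustrative aside (not part of the expected output): the errors above come
-- from cidr requiring every bit to the right of the netmask to be zero,
-- whereas inet accepts host bits under the mask, e.g.:
--   SELECT '192.168.1.2/30'::inet;   -- accepted
--   SELECT '192.168.1.2/30'::cidr;   -- rejected: value has bits set to right of mask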
+SELECT c AS cidr, i AS inet FROM INET_TBL; + cidr | inet +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/26 | 192.168.1.226 + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.255/25 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.1.2.3/32 | 10.1.2.3 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.0.0/16 | 10.1.2.3/16 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/8 | 11.1.2.3/8 + 10.0.0.0/8 | 9.1.2.3/8 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 +(17 rows) + +-- now test some support functions +SELECT i AS inet, host(i), text(i), family(i) FROM INET_TBL; + inet | host | text | family +------------------+---------------+------------------+-------- + 192.168.1.226/24 | 192.168.1.226 | 192.168.1.226/24 | 4 + 192.168.1.226 | 192.168.1.226 | 192.168.1.226/32 | 4 + 192.168.1.0/24 | 192.168.1.0 | 192.168.1.0/24 | 4 + 192.168.1.0/25 | 192.168.1.0 | 192.168.1.0/25 | 4 + 192.168.1.255/24 | 192.168.1.255 | 192.168.1.255/24 | 4 + 192.168.1.255/25 | 192.168.1.255 | 192.168.1.255/25 | 4 + 10.1.2.3/8 | 10.1.2.3 | 10.1.2.3/8 | 4 + 10.1.2.3/8 | 10.1.2.3 | 10.1.2.3/8 | 4 + 10.1.2.3 | 10.1.2.3 | 10.1.2.3/32 | 4 + 10.1.2.3/24 | 10.1.2.3 | 10.1.2.3/24 | 4 + 10.1.2.3/16 | 10.1.2.3 | 10.1.2.3/16 | 4 + 10.1.2.3/8 | 10.1.2.3 | 10.1.2.3/8 | 4 + 11.1.2.3/8 | 11.1.2.3 | 11.1.2.3/8 | 4 + 9.1.2.3/8 | 9.1.2.3 | 9.1.2.3/8 | 4 + 10:23::f1/64 | 10:23::f1 | 10:23::f1/64 | 6 + 10:23::ffff | 10:23::ffff | 10:23::ffff/128 | 6 + ::4.3.2.1/24 | ::4.3.2.1 | ::4.3.2.1/24 | 6 +(17 rows) + +SELECT c AS cidr, abbrev(c) FROM INET_TBL; + cidr | abbrev +--------------------+-------------------- + 192.168.1.0/24 | 192.168.1/24 + 192.168.1.0/26 | 192.168.1.0/26 + 192.168.1.0/24 | 192.168.1/24 + 192.168.1.0/24 | 192.168.1/24 + 192.168.1.0/24 | 192.168.1/24 + 192.168.1.0/24 | 192.168.1/24 + 10.0.0.0/8 | 10/8 + 10.0.0.0/32 | 10.0.0.0/32 + 10.1.2.3/32 | 10.1.2.3/32 + 10.1.2.0/24 | 10.1.2/24 + 10.1.0.0/16 | 10.1/16 + 10.0.0.0/8 | 10/8 + 10.0.0.0/8 | 10/8 + 10.0.0.0/8 | 10/8 + 10:23::f1/128 | 10:23::f1/128 + 10:23::8000/113 | 10:23::8000/113 + ::ffff:1.2.3.4/128 | ::ffff:1.2.3.4/128 +(17 rows) + +SELECT c AS cidr, broadcast(c), + i AS inet, broadcast(i) FROM INET_TBL; + cidr | broadcast | inet | broadcast +--------------------+------------------+------------------+--------------------------------------- + 192.168.1.0/24 | 192.168.1.255/24 | 192.168.1.226/24 | 192.168.1.255/24 + 192.168.1.0/26 | 192.168.1.63/26 | 192.168.1.226 | 192.168.1.226 + 192.168.1.0/24 | 192.168.1.255/24 | 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.255/24 | 192.168.1.0/25 | 192.168.1.127/25 + 192.168.1.0/24 | 192.168.1.255/24 | 192.168.1.255/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.255/24 | 192.168.1.255/25 | 192.168.1.255/25 + 10.0.0.0/8 | 10.255.255.255/8 | 10.1.2.3/8 | 10.255.255.255/8 + 10.0.0.0/32 | 10.0.0.0 | 10.1.2.3/8 | 10.255.255.255/8 + 10.1.2.3/32 | 10.1.2.3 | 10.1.2.3 | 10.1.2.3 + 10.1.2.0/24 | 10.1.2.255/24 | 10.1.2.3/24 | 10.1.2.255/24 + 10.1.0.0/16 | 10.1.255.255/16 | 10.1.2.3/16 | 10.1.255.255/16 + 10.0.0.0/8 | 10.255.255.255/8 | 10.1.2.3/8 | 10.255.255.255/8 + 10.0.0.0/8 | 10.255.255.255/8 | 11.1.2.3/8 | 11.255.255.255/8 + 10.0.0.0/8 | 10.255.255.255/8 | 9.1.2.3/8 | 9.255.255.255/8 + 10:23::f1/128 | 10:23::f1 | 10:23::f1/64 | 10:23::ffff:ffff:ffff:ffff/64 + 10:23::8000/113 | 10:23::ffff/113 | 10:23::ffff | 10:23::ffff + ::ffff:1.2.3.4/128 | ::ffff:1.2.3.4 | 
::4.3.2.1/24 | 0:ff:ffff:ffff:ffff:ffff:ffff:ffff/24 +(17 rows) + +SELECT c AS cidr, network(c) AS "network(cidr)", + i AS inet, network(i) AS "network(inet)" FROM INET_TBL; + cidr | network(cidr) | inet | network(inet) +--------------------+--------------------+------------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.226/24 | 192.168.1.0/24 + 192.168.1.0/26 | 192.168.1.0/26 | 192.168.1.226 | 192.168.1.226/32 + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.0/25 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.255/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.255/25 | 192.168.1.128/25 + 10.0.0.0/8 | 10.0.0.0/8 | 10.1.2.3/8 | 10.0.0.0/8 + 10.0.0.0/32 | 10.0.0.0/32 | 10.1.2.3/8 | 10.0.0.0/8 + 10.1.2.3/32 | 10.1.2.3/32 | 10.1.2.3 | 10.1.2.3/32 + 10.1.2.0/24 | 10.1.2.0/24 | 10.1.2.3/24 | 10.1.2.0/24 + 10.1.0.0/16 | 10.1.0.0/16 | 10.1.2.3/16 | 10.1.0.0/16 + 10.0.0.0/8 | 10.0.0.0/8 | 10.1.2.3/8 | 10.0.0.0/8 + 10.0.0.0/8 | 10.0.0.0/8 | 11.1.2.3/8 | 11.0.0.0/8 + 10.0.0.0/8 | 10.0.0.0/8 | 9.1.2.3/8 | 9.0.0.0/8 + 10:23::f1/128 | 10:23::f1/128 | 10:23::f1/64 | 10:23::/64 + 10:23::8000/113 | 10:23::8000/113 | 10:23::ffff | 10:23::ffff/128 + ::ffff:1.2.3.4/128 | ::ffff:1.2.3.4/128 | ::4.3.2.1/24 | ::/24 +(17 rows) + +SELECT c AS cidr, masklen(c) AS "masklen(cidr)", + i AS inet, masklen(i) AS "masklen(inet)" FROM INET_TBL; + cidr | masklen(cidr) | inet | masklen(inet) +--------------------+---------------+------------------+--------------- + 192.168.1.0/24 | 24 | 192.168.1.226/24 | 24 + 192.168.1.0/26 | 26 | 192.168.1.226 | 32 + 192.168.1.0/24 | 24 | 192.168.1.0/24 | 24 + 192.168.1.0/24 | 24 | 192.168.1.0/25 | 25 + 192.168.1.0/24 | 24 | 192.168.1.255/24 | 24 + 192.168.1.0/24 | 24 | 192.168.1.255/25 | 25 + 10.0.0.0/8 | 8 | 10.1.2.3/8 | 8 + 10.0.0.0/32 | 32 | 10.1.2.3/8 | 8 + 10.1.2.3/32 | 32 | 10.1.2.3 | 32 + 10.1.2.0/24 | 24 | 10.1.2.3/24 | 24 + 10.1.0.0/16 | 16 | 10.1.2.3/16 | 16 + 10.0.0.0/8 | 8 | 10.1.2.3/8 | 8 + 10.0.0.0/8 | 8 | 11.1.2.3/8 | 8 + 10.0.0.0/8 | 8 | 9.1.2.3/8 | 8 + 10:23::f1/128 | 128 | 10:23::f1/64 | 64 + 10:23::8000/113 | 113 | 10:23::ffff | 128 + ::ffff:1.2.3.4/128 | 128 | ::4.3.2.1/24 | 24 +(17 rows) + +SELECT c AS cidr, masklen(c) AS "masklen(cidr)", + i AS inet, masklen(i) AS "masklen(inet)" FROM INET_TBL + WHERE masklen(c) <= 8; + cidr | masklen(cidr) | inet | masklen(inet) +------------+---------------+------------+--------------- + 10.0.0.0/8 | 8 | 10.1.2.3/8 | 8 + 10.0.0.0/8 | 8 | 10.1.2.3/8 | 8 + 10.0.0.0/8 | 8 | 11.1.2.3/8 | 8 + 10.0.0.0/8 | 8 | 9.1.2.3/8 | 8 +(4 rows) + +SELECT c AS cidr, i AS inet FROM INET_TBL + WHERE c = i; + cidr | inet +----------------+---------------- + 192.168.1.0/24 | 192.168.1.0/24 + 10.1.2.3/32 | 10.1.2.3 +(2 rows) + +SELECT i, c, + i < c AS lt, i <= c AS le, i = c AS eq, + i >= c AS ge, i > c AS gt, i <> c AS ne, + i << c AS sb, i <<= c AS sbe, + i >> c AS sup, i >>= c AS spe, + i && c AS ovr + FROM INET_TBL; + i | c | lt | le | eq | ge | gt | ne | sb | sbe | sup | spe | ovr +------------------+--------------------+----+----+----+----+----+----+----+-----+-----+-----+----- + 192.168.1.226/24 | 192.168.1.0/24 | f | f | f | t | t | t | f | t | f | t | t + 192.168.1.226 | 192.168.1.0/26 | f | f | f | t | t | t | f | f | f | f | f + 192.168.1.0/24 | 192.168.1.0/24 | f | t | t | t | f | f | f | t | f | t | t + 192.168.1.0/25 | 192.168.1.0/24 | f | f | f | t | t | t | t | t | f | f | t + 192.168.1.255/24 | 192.168.1.0/24 | f | 
f | f | t | t | t | f | t | f | t | t + 192.168.1.255/25 | 192.168.1.0/24 | f | f | f | t | t | t | t | t | f | f | t + 10.1.2.3/8 | 10.0.0.0/8 | f | f | f | t | t | t | f | t | f | t | t + 10.1.2.3/8 | 10.0.0.0/32 | t | t | f | f | f | t | f | f | t | t | t + 10.1.2.3 | 10.1.2.3/32 | f | t | t | t | f | f | f | t | f | t | t + 10.1.2.3/24 | 10.1.2.0/24 | f | f | f | t | t | t | f | t | f | t | t + 10.1.2.3/16 | 10.1.0.0/16 | f | f | f | t | t | t | f | t | f | t | t + 10.1.2.3/8 | 10.0.0.0/8 | f | f | f | t | t | t | f | t | f | t | t + 11.1.2.3/8 | 10.0.0.0/8 | f | f | f | t | t | t | f | f | f | f | f + 9.1.2.3/8 | 10.0.0.0/8 | t | t | f | f | f | t | f | f | f | f | f + 10:23::f1/64 | 10:23::f1/128 | t | t | f | f | f | t | f | f | t | t | t + 10:23::ffff | 10:23::8000/113 | f | f | f | t | t | t | t | t | f | f | t + ::4.3.2.1/24 | ::ffff:1.2.3.4/128 | t | t | f | f | f | t | f | f | t | t | t +(17 rows) + +SELECT max(i) AS max, min(i) AS min FROM INET_TBL; + max | min +-------------+----------- + 10:23::ffff | 9.1.2.3/8 +(1 row) + +SELECT max(c) AS max, min(c) AS min FROM INET_TBL; + max | min +-----------------+------------ + 10:23::8000/113 | 10.0.0.0/8 +(1 row) + +-- check the conversion to/from text and set_netmask +SELECT set_masklen(inet(text(i)), 24) FROM INET_TBL; + set_masklen +------------------ + 192.168.1.226/24 + 192.168.1.226/24 + 192.168.1.0/24 + 192.168.1.0/24 + 192.168.1.255/24 + 192.168.1.255/24 + 10.1.2.3/24 + 10.1.2.3/24 + 10.1.2.3/24 + 10.1.2.3/24 + 10.1.2.3/24 + 10.1.2.3/24 + 11.1.2.3/24 + 9.1.2.3/24 + 10:23::f1/24 + 10:23::ffff/24 + ::4.3.2.1/24 +(17 rows) + +-- check that btree index works correctly +CREATE INDEX inet_idx1 ON inet_tbl(i); +SET enable_seqscan TO off; +EXPLAIN (COSTS OFF) +SELECT * FROM inet_tbl WHERE i<<'192.168.1.0/24'::cidr; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using inet_idx1 on inet_tbl + Index Cond: ((i > '192.168.1.0/24'::inet) AND (i <= '192.168.1.255'::inet)) + Filter: (i << '192.168.1.0/24'::inet) +(3 rows) + +SELECT * FROM inet_tbl WHERE i<<'192.168.1.0/24'::cidr; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM inet_tbl WHERE i<<='192.168.1.0/24'::cidr; + QUERY PLAN +-------------------------------------------------------------------------------- + Index Scan using inet_idx1 on inet_tbl + Index Cond: ((i >= '192.168.1.0/24'::inet) AND (i <= '192.168.1.255'::inet)) + Filter: (i <<= '192.168.1.0/24'::inet) +(3 rows) + +SELECT * FROM inet_tbl WHERE i<<='192.168.1.0/24'::cidr; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >>= i; + QUERY PLAN +-------------------------------------------------------------------------------- + Index Scan using inet_idx1 on inet_tbl + Index Cond: ((i >= '192.168.1.0/24'::inet) AND (i <= '192.168.1.255'::inet)) + Filter: ('192.168.1.0/24'::inet >>= i) +(3 rows) + +SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >>= i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 
192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >> i; + QUERY PLAN +------------------------------------------------------------------------------- + Index Scan using inet_idx1 on inet_tbl + Index Cond: ((i > '192.168.1.0/24'::inet) AND (i <= '192.168.1.255'::inet)) + Filter: ('192.168.1.0/24'::inet >> i) +(3 rows) + +SELECT * FROM inet_tbl WHERE '192.168.1.0/24'::cidr >> i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(3 rows) + +SET enable_seqscan TO on; +DROP INDEX inet_idx1; +-- check that gist index works correctly +CREATE INDEX inet_idx2 ON inet_tbl using gist (i inet_ops); +SET enable_seqscan TO off; +SELECT * FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(3 rows) + +SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 +(3 rows) + +SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i; + c | i +---+--- +(0 rows) + +SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i; + c | i +-------------+------------- + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 +(8 rows) + +SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+---------------- + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 + 192.168.1.0/24 | 192.168.1.0/24 +(9 rows) + +SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+---------------- + 192.168.1.0/24 | 192.168.1.0/24 +(1 row) + +SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(9 rows) + +SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 
192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(8 rows) + +SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(16 rows) + +-- test index-only scans +EXPLAIN (COSTS OFF) +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + QUERY PLAN +--------------------------------------------------- + Sort + Sort Key: i + -> Index Only Scan using inet_idx2 on inet_tbl + Index Cond: (i << '192.168.1.0/24'::inet) +(4 rows) + +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + i +------------------ + 192.168.1.0/25 + 192.168.1.255/25 + 192.168.1.226 +(3 rows) + +SET enable_seqscan TO on; +DROP INDEX inet_idx2; +-- check that spgist index works correctly +CREATE INDEX inet_idx3 ON inet_tbl using spgist (i); +SET enable_seqscan TO off; +SELECT * FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(3 rows) + +SELECT * FROM inet_tbl WHERE i <<= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +SELECT * FROM inet_tbl WHERE i && '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 +(6 rows) + +SELECT * FROM inet_tbl WHERE i >>= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 +(3 rows) + +SELECT * FROM inet_tbl WHERE i >> '192.168.1.0/24'::cidr ORDER BY i; + c | i +---+--- +(0 rows) + +SELECT * FROM inet_tbl WHERE i < '192.168.1.0/24'::cidr ORDER BY i; + c | i +-------------+------------- + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 +(8 rows) + +SELECT * FROM inet_tbl WHERE i <= '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+---------------- + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 + 192.168.1.0/24 | 192.168.1.0/24 +(9 rows) + +SELECT * FROM inet_tbl WHERE i = '192.168.1.0/24'::cidr ORDER BY i; + c | i +----------------+---------------- + 192.168.1.0/24 | 192.168.1.0/24 
+(1 row) + +SELECT * FROM inet_tbl WHERE i >= '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(9 rows) + +SELECT * FROM inet_tbl WHERE i > '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(8 rows) + +SELECT * FROM inet_tbl WHERE i <> '192.168.1.0/24'::cidr ORDER BY i; + c | i +--------------------+------------------ + 10.0.0.0/8 | 9.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.0.0.0/32 | 10.1.2.3/8 + 10.0.0.0/8 | 10.1.2.3/8 + 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/32 | 10.1.2.3 + 10.0.0.0/8 | 11.1.2.3/8 + 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.0/24 | 192.168.1.255/25 + 192.168.1.0/26 | 192.168.1.226 + ::ffff:1.2.3.4/128 | ::4.3.2.1/24 + 10:23::f1/128 | 10:23::f1/64 + 10:23::8000/113 | 10:23::ffff +(16 rows) + +-- test index-only scans +EXPLAIN (COSTS OFF) +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + QUERY PLAN +--------------------------------------------------- + Sort + Sort Key: i + -> Index Only Scan using inet_idx3 on inet_tbl + Index Cond: (i << '192.168.1.0/24'::inet) +(4 rows) + +SELECT i FROM inet_tbl WHERE i << '192.168.1.0/24'::cidr ORDER BY i; + i +------------------ + 192.168.1.0/25 + 192.168.1.255/25 + 192.168.1.226 +(3 rows) + +SET enable_seqscan TO on; +DROP INDEX inet_idx3; +-- simple tests of inet boolean and arithmetic operators +SELECT i, ~i AS "~i" FROM inet_tbl; + i | ~i +------------------+-------------------------------------------- + 192.168.1.226/24 | 63.87.254.29/24 + 192.168.1.226 | 63.87.254.29 + 192.168.1.0/24 | 63.87.254.255/24 + 192.168.1.0/25 | 63.87.254.255/25 + 192.168.1.255/24 | 63.87.254.0/24 + 192.168.1.255/25 | 63.87.254.0/25 + 10.1.2.3/8 | 245.254.253.252/8 + 10.1.2.3/8 | 245.254.253.252/8 + 10.1.2.3 | 245.254.253.252 + 10.1.2.3/24 | 245.254.253.252/24 + 10.1.2.3/16 | 245.254.253.252/16 + 10.1.2.3/8 | 245.254.253.252/8 + 11.1.2.3/8 | 244.254.253.252/8 + 9.1.2.3/8 | 246.254.253.252/8 + 10:23::f1/64 | ffef:ffdc:ffff:ffff:ffff:ffff:ffff:ff0e/64 + 10:23::ffff | ffef:ffdc:ffff:ffff:ffff:ffff:ffff:0 + ::4.3.2.1/24 | ffff:ffff:ffff:ffff:ffff:ffff:fbfc:fdfe/24 +(17 rows) + +SELECT i, c, i & c AS "and" FROM inet_tbl; + i | c | and +------------------+--------------------+---------------- + 192.168.1.226/24 | 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.226 | 192.168.1.0/26 | 192.168.1.0 + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/25 | 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.255/24 | 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.255/25 | 192.168.1.0/24 | 192.168.1.0/25 + 10.1.2.3/8 | 10.0.0.0/8 | 10.0.0.0/8 + 10.1.2.3/8 | 10.0.0.0/32 | 10.0.0.0 + 10.1.2.3 | 10.1.2.3/32 | 10.1.2.3 + 10.1.2.3/24 | 10.1.2.0/24 | 10.1.2.0/24 + 10.1.2.3/16 | 10.1.0.0/16 | 10.1.0.0/16 + 10.1.2.3/8 | 10.0.0.0/8 | 10.0.0.0/8 + 11.1.2.3/8 | 10.0.0.0/8 | 10.0.0.0/8 + 9.1.2.3/8 | 10.0.0.0/8 | 8.0.0.0/8 
+ 10:23::f1/64 | 10:23::f1/128 | 10:23::f1 + 10:23::ffff | 10:23::8000/113 | 10:23::8000 + ::4.3.2.1/24 | ::ffff:1.2.3.4/128 | ::0.2.2.0 +(17 rows) + +SELECT i, c, i | c AS "or" FROM inet_tbl; + i | c | or +------------------+--------------------+------------------ + 192.168.1.226/24 | 192.168.1.0/24 | 192.168.1.226/24 + 192.168.1.226 | 192.168.1.0/26 | 192.168.1.226 + 192.168.1.0/24 | 192.168.1.0/24 | 192.168.1.0/24 + 192.168.1.0/25 | 192.168.1.0/24 | 192.168.1.0/25 + 192.168.1.255/24 | 192.168.1.0/24 | 192.168.1.255/24 + 192.168.1.255/25 | 192.168.1.0/24 | 192.168.1.255/25 + 10.1.2.3/8 | 10.0.0.0/8 | 10.1.2.3/8 + 10.1.2.3/8 | 10.0.0.0/32 | 10.1.2.3 + 10.1.2.3 | 10.1.2.3/32 | 10.1.2.3 + 10.1.2.3/24 | 10.1.2.0/24 | 10.1.2.3/24 + 10.1.2.3/16 | 10.1.0.0/16 | 10.1.2.3/16 + 10.1.2.3/8 | 10.0.0.0/8 | 10.1.2.3/8 + 11.1.2.3/8 | 10.0.0.0/8 | 11.1.2.3/8 + 9.1.2.3/8 | 10.0.0.0/8 | 11.1.2.3/8 + 10:23::f1/64 | 10:23::f1/128 | 10:23::f1 + 10:23::ffff | 10:23::8000/113 | 10:23::ffff + ::4.3.2.1/24 | ::ffff:1.2.3.4/128 | ::ffff:5.3.3.5 +(17 rows) + +SELECT i, i + 500 AS "i+500" FROM inet_tbl; + i | i+500 +------------------+------------------ + 192.168.1.226/24 | 192.168.3.214/24 + 192.168.1.226 | 192.168.3.214 + 192.168.1.0/24 | 192.168.2.244/24 + 192.168.1.0/25 | 192.168.2.244/25 + 192.168.1.255/24 | 192.168.3.243/24 + 192.168.1.255/25 | 192.168.3.243/25 + 10.1.2.3/8 | 10.1.3.247/8 + 10.1.2.3/8 | 10.1.3.247/8 + 10.1.2.3 | 10.1.3.247 + 10.1.2.3/24 | 10.1.3.247/24 + 10.1.2.3/16 | 10.1.3.247/16 + 10.1.2.3/8 | 10.1.3.247/8 + 11.1.2.3/8 | 11.1.3.247/8 + 9.1.2.3/8 | 9.1.3.247/8 + 10:23::f1/64 | 10:23::2e5/64 + 10:23::ffff | 10:23::1:1f3 + ::4.3.2.1/24 | ::4.3.3.245/24 +(17 rows) + +SELECT i, i - 500 AS "i-500" FROM inet_tbl; + i | i-500 +------------------+---------------------------------------- + 192.168.1.226/24 | 192.167.255.238/24 + 192.168.1.226 | 192.167.255.238 + 192.168.1.0/24 | 192.167.255.12/24 + 192.168.1.0/25 | 192.167.255.12/25 + 192.168.1.255/24 | 192.168.0.11/24 + 192.168.1.255/25 | 192.168.0.11/25 + 10.1.2.3/8 | 10.1.0.15/8 + 10.1.2.3/8 | 10.1.0.15/8 + 10.1.2.3 | 10.1.0.15 + 10.1.2.3/24 | 10.1.0.15/24 + 10.1.2.3/16 | 10.1.0.15/16 + 10.1.2.3/8 | 10.1.0.15/8 + 11.1.2.3/8 | 11.1.0.15/8 + 9.1.2.3/8 | 9.1.0.15/8 + 10:23::f1/64 | 10:22:ffff:ffff:ffff:ffff:ffff:fefd/64 + 10:23::ffff | 10:23::fe0b + ::4.3.2.1/24 | ::4.3.0.13/24 +(17 rows) + +SELECT i, c, i - c AS "minus" FROM inet_tbl; + i | c | minus +------------------+--------------------+------------------ + 192.168.1.226/24 | 192.168.1.0/24 | 226 + 192.168.1.226 | 192.168.1.0/26 | 226 + 192.168.1.0/24 | 192.168.1.0/24 | 0 + 192.168.1.0/25 | 192.168.1.0/24 | 0 + 192.168.1.255/24 | 192.168.1.0/24 | 255 + 192.168.1.255/25 | 192.168.1.0/24 | 255 + 10.1.2.3/8 | 10.0.0.0/8 | 66051 + 10.1.2.3/8 | 10.0.0.0/32 | 66051 + 10.1.2.3 | 10.1.2.3/32 | 0 + 10.1.2.3/24 | 10.1.2.0/24 | 3 + 10.1.2.3/16 | 10.1.0.0/16 | 515 + 10.1.2.3/8 | 10.0.0.0/8 | 66051 + 11.1.2.3/8 | 10.0.0.0/8 | 16843267 + 9.1.2.3/8 | 10.0.0.0/8 | -16711165 + 10:23::f1/64 | 10:23::f1/128 | 0 + 10:23::ffff | 10:23::8000/113 | 32767 + ::4.3.2.1/24 | ::ffff:1.2.3.4/128 | -281470631346435 +(17 rows) + +SELECT '127.0.0.1'::inet + 257; + ?column? +----------- + 127.0.1.2 +(1 row) + +SELECT ('127.0.0.1'::inet + 257) - 257; + ?column? +----------- + 127.0.0.1 +(1 row) + +SELECT '127::1'::inet + 257; + ?column? +---------- + 127::102 +(1 row) + +SELECT ('127::1'::inet + 257) - 257; + ?column? +---------- + 127::1 +(1 row) + +SELECT '127.0.0.2'::inet - ('127.0.0.2'::inet + 500); + ?column? 
+---------- + -500 +(1 row) + +SELECT '127.0.0.2'::inet - ('127.0.0.2'::inet - 500); + ?column? +---------- + 500 +(1 row) + +SELECT '127::2'::inet - ('127::2'::inet + 500); + ?column? +---------- + -500 +(1 row) + +SELECT '127::2'::inet - ('127::2'::inet - 500); + ?column? +---------- + 500 +(1 row) + +-- these should give overflow errors: +SELECT '127.0.0.1'::inet + 10000000000; +ERROR: result is out of range +SELECT '127.0.0.1'::inet - 10000000000; +ERROR: result is out of range +SELECT '126::1'::inet - '127::2'::inet; +ERROR: result is out of range +SELECT '127::1'::inet - '126::2'::inet; +ERROR: result is out of range +-- but not these +SELECT '127::1'::inet + 10000000000; + ?column? +------------------ + 127::2:540b:e401 +(1 row) + +SELECT '127::1'::inet - '127::2'::inet; + ?column? +---------- + -1 +(1 row) + +-- insert one more row with addressed from different families +INSERT INTO INET_TBL (c, i) VALUES ('10', '10::/8'); +-- now, this one should fail +SELECT inet_merge(c, i) FROM INET_TBL; +ERROR: cannot merge addresses from different families +-- fix it by inet_same_family() condition +SELECT inet_merge(c, i) FROM INET_TBL WHERE inet_same_family(c, i); + inet_merge +----------------- + 192.168.1.0/24 + 192.168.1.0/24 + 192.168.1.0/24 + 192.168.1.0/24 + 192.168.1.0/24 + 192.168.1.0/24 + 10.0.0.0/8 + 10.0.0.0/8 + 10.1.2.3/32 + 10.1.2.0/24 + 10.1.0.0/16 + 10.0.0.0/8 + 10.0.0.0/7 + 8.0.0.0/6 + 10:23::/64 + 10:23::8000/113 + ::/24 +(17 rows) + +-- Test inet sortsupport with a variety of boundary inputs: +SELECT a FROM (VALUES + ('0.0.0.0/0'::inet), + ('0.0.0.0/1'::inet), + ('0.0.0.0/32'::inet), + ('0.0.0.1/0'::inet), + ('0.0.0.1/1'::inet), + ('127.126.127.127/0'::inet), + ('127.127.127.127/0'::inet), + ('127.128.127.127/0'::inet), + ('192.168.1.0/24'::inet), + ('192.168.1.0/25'::inet), + ('192.168.1.1/23'::inet), + ('192.168.1.1/5'::inet), + ('192.168.1.1/6'::inet), + ('192.168.1.1/25'::inet), + ('192.168.1.2/25'::inet), + ('192.168.1.1/26'::inet), + ('192.168.1.2/26'::inet), + ('192.168.1.2/23'::inet), + ('192.168.1.255/5'::inet), + ('192.168.1.255/6'::inet), + ('192.168.1.3/1'::inet), + ('192.168.1.3/23'::inet), + ('192.168.1.4/0'::inet), + ('192.168.1.5/0'::inet), + ('255.0.0.0/0'::inet), + ('255.1.0.0/0'::inet), + ('255.2.0.0/0'::inet), + ('255.255.000.000/0'::inet), + ('255.255.000.000/0'::inet), + ('255.255.000.000/15'::inet), + ('255.255.000.000/16'::inet), + ('255.255.255.254/32'::inet), + ('255.255.255.000/32'::inet), + ('255.255.255.001/31'::inet), + ('255.255.255.002/31'::inet), + ('255.255.255.003/31'::inet), + ('255.255.255.003/32'::inet), + ('255.255.255.001/32'::inet), + ('255.255.255.255/0'::inet), + ('255.255.255.255/0'::inet), + ('255.255.255.255/0'::inet), + ('255.255.255.255/1'::inet), + ('255.255.255.255/16'::inet), + ('255.255.255.255/16'::inet), + ('255.255.255.255/31'::inet), + ('255.255.255.255/32'::inet), + ('255.255.255.253/32'::inet), + ('255.255.255.252/32'::inet), + ('255.3.0.0/0'::inet), + ('0000:0000:0000:0000:0000:0000:0000:0000/0'::inet), + ('0000:0000:0000:0000:0000:0000:0000:0000/128'::inet), + ('0000:0000:0000:0000:0000:0000:0000:0001/128'::inet), + ('10:23::f1/64'::inet), + ('10:23::f1/65'::inet), + ('10:23::ffff'::inet), + ('127::1'::inet), + ('127::2'::inet), + ('8000:0000:0000:0000:0000:0000:0000:0000/1'::inet), + ('::1:ffff:ffff:ffff:ffff/128'::inet), + ('::2:ffff:ffff:ffff:ffff/128'::inet), + ('::4:3:2:0/24'::inet), + ('::4:3:2:1/24'::inet), + ('::4:3:2:2/24'::inet), + ('ffff:83e7:f118:57dc:6093:6d92:689d:58cf/70'::inet), + 
('ffff:84b0:4775:536e:c3ed:7116:a6d6:34f0/44'::inet), + ('ffff:8566:f84:5867:47f1:7867:d2ba:8a1a/69'::inet), + ('ffff:8883:f028:7d2:4d68:d510:7d6b:ac43/73'::inet), + ('ffff:8ae8:7c14:65b3:196:8e4a:89ae:fb30/89'::inet), + ('ffff:8dd0:646:694c:7c16:7e35:6a26:171/104'::inet), + ('ffff:8eef:cbf:700:eda3:ae32:f4b4:318b/121'::inet), + ('ffff:90e7:e744:664:a93:8efe:1f25:7663/122'::inet), + ('ffff:9597:c69c:8b24:57a:8639:ec78:6026/111'::inet), + ('ffff:9e86:79ea:f16e:df31:8e4d:7783:532e/88'::inet), + ('ffff:a0c7:82d3:24de:f762:6e1f:316d:3fb2/23'::inet), + ('ffff:fffa:ffff:ffff:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:fffb:ffff:ffff:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:fffc:ffff:ffff:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:fffd:ffff:ffff:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:fffe:ffff:ffff:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:fffa:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:fffb:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:fffc:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:fffd::/128'::inet), + ('ffff:ffff:ffff:fffd:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:fffe::/128'::inet), + ('ffff:ffff:ffff:fffe:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:ffff:4:3:2:0/24'::inet), + ('ffff:ffff:ffff:ffff:4:3:2:1/24'::inet), + ('ffff:ffff:ffff:ffff:4:3:2:2/24'::inet), + ('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/0'::inet), + ('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128'::inet) +) AS i(a) ORDER BY a; + a +-------------------------------------------- + 0.0.0.0/0 + 0.0.0.1/0 + 127.126.127.127/0 + 127.127.127.127/0 + 127.128.127.127/0 + 192.168.1.4/0 + 192.168.1.5/0 + 255.0.0.0/0 + 255.1.0.0/0 + 255.2.0.0/0 + 255.3.0.0/0 + 255.255.0.0/0 + 255.255.0.0/0 + 255.255.255.255/0 + 255.255.255.255/0 + 255.255.255.255/0 + 0.0.0.0/1 + 0.0.0.1/1 + 0.0.0.0 + 192.168.1.3/1 + 255.255.255.255/1 + 192.168.1.1/5 + 192.168.1.255/5 + 192.168.1.1/6 + 192.168.1.255/6 + 192.168.1.1/23 + 192.168.1.2/23 + 192.168.1.3/23 + 192.168.1.0/24 + 192.168.1.0/25 + 192.168.1.1/25 + 192.168.1.2/25 + 192.168.1.1/26 + 192.168.1.2/26 + 255.255.0.0/15 + 255.255.0.0/16 + 255.255.255.255/16 + 255.255.255.255/16 + 255.255.255.1/31 + 255.255.255.0 + 255.255.255.1 + 255.255.255.2/31 + 255.255.255.3/31 + 255.255.255.3 + 255.255.255.252 + 255.255.255.253 + 255.255.255.255/31 + 255.255.255.254 + 255.255.255.255 + ::/0 + ffff:fffa:ffff:ffff:ffff:ffff:ffff:ffff/0 + ffff:fffb:ffff:ffff:ffff:ffff:ffff:ffff/0 + ffff:fffc:ffff:ffff:ffff:ffff:ffff:ffff/0 + ffff:fffd:ffff:ffff:ffff:ffff:ffff:ffff/0 + ffff:fffe:ffff:ffff:ffff:ffff:ffff:ffff/0 + ffff:ffff:ffff:fffa:ffff:ffff:ffff:ffff/0 + ffff:ffff:ffff:fffb:ffff:ffff:ffff:ffff/0 + ffff:ffff:ffff:fffc:ffff:ffff:ffff:ffff/0 + ffff:ffff:ffff:fffd:ffff:ffff:ffff:ffff/0 + ffff:ffff:ffff:fffe:ffff:ffff:ffff:ffff/0 + ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/0 + ::4:3:2:0/24 + ::4:3:2:1/24 + ::4:3:2:2/24 + :: + ::1 + ::1:ffff:ffff:ffff:ffff + ::2:ffff:ffff:ffff:ffff + 10:23::f1/64 + 10:23::f1/65 + 10:23::ffff + 127::1 + 127::2 + 8000::/1 + ffff:83e7:f118:57dc:6093:6d92:689d:58cf/70 + ffff:84b0:4775:536e:c3ed:7116:a6d6:34f0/44 + ffff:8566:f84:5867:47f1:7867:d2ba:8a1a/69 + ffff:8883:f028:7d2:4d68:d510:7d6b:ac43/73 + ffff:8ae8:7c14:65b3:196:8e4a:89ae:fb30/89 + ffff:8dd0:646:694c:7c16:7e35:6a26:171/104 + ffff:8eef:cbf:700:eda3:ae32:f4b4:318b/121 + ffff:90e7:e744:664:a93:8efe:1f25:7663/122 + ffff:9597:c69c:8b24:57a:8639:ec78:6026/111 + ffff:9e86:79ea:f16e:df31:8e4d:7783:532e/88 + ffff:a0c7:82d3:24de:f762:6e1f:316d:3fb2/23 + ffff:ffff:ffff:ffff:4:3:2:0/24 + ffff:ffff:ffff:ffff:4:3:2:1/24 + 
ffff:ffff:ffff:ffff:4:3:2:2/24 + ffff:ffff:ffff:fffd:: + ffff:ffff:ffff:fffe:: + ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff +(91 rows) + +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('1234', 'cidr'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1234', 'cidr'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + invalid input syntax for type cidr: "1234" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('192.168.198.200/24', 'cidr'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('192.168.198.200/24', 'cidr'); + message | detail | hint | sql_error_code +------------------------------------------+--------------------------------------+------+---------------- + invalid cidr value: "192.168.198.200/24" | Value has bits set to right of mask. | | 22P02 +(1 row) + +SELECT pg_input_is_valid('1234', 'inet'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1234', 'inet'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + invalid input syntax for type inet: "1234" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/infinite_recurse.out b/src/test/regress/expected/infinite_recurse.out new file mode 100644 index 0000000..aa102fa --- /dev/null +++ b/src/test/regress/expected/infinite_recurse.out @@ -0,0 +1,24 @@ +-- Check that stack depth detection mechanism works and +-- max_stack_depth is not set too high. +create function infinite_recurse() returns int as +'select infinite_recurse()' language sql; +-- Unfortunately, up till mid 2020 the Linux kernel had a bug in PPC64 +-- signal handling that would cause this test to crash if it happened +-- to receive an sinval catchup interrupt while the stack is deep: +-- https://bugzilla.kernel.org/show_bug.cgi?id=205183 +-- It is likely to be many years before that bug disappears from all +-- production kernels, so disable this test on such platforms. +-- (We still create the function, so as not to have a cross-platform +-- difference in the end state of the regression database.) +SELECT version() ~ 'powerpc64[^,]*-linux-gnu' + AS skip_test \gset +\if :skip_test +\quit +\endif +-- The full error report is not very stable, so we show only SQLSTATE +-- and primary error message. +\set VERBOSITY sqlstate +select infinite_recurse(); +ERROR: 54001 +\echo :LAST_ERROR_MESSAGE +stack depth limit exceeded diff --git a/src/test/regress/expected/infinite_recurse_1.out b/src/test/regress/expected/infinite_recurse_1.out new file mode 100644 index 0000000..b2c99a0 --- /dev/null +++ b/src/test/regress/expected/infinite_recurse_1.out @@ -0,0 +1,16 @@ +-- Check that stack depth detection mechanism works and +-- max_stack_depth is not set too high. +create function infinite_recurse() returns int as +'select infinite_recurse()' language sql; +-- Unfortunately, up till mid 2020 the Linux kernel had a bug in PPC64 +-- signal handling that would cause this test to crash if it happened +-- to receive an sinval catchup interrupt while the stack is deep: +-- https://bugzilla.kernel.org/show_bug.cgi?id=205183 +-- It is likely to be many years before that bug disappears from all +-- production kernels, so disable this test on such platforms. +-- (We still create the function, so as not to have a cross-platform +-- difference in the end state of the regression database.) 
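-- Illustrative aside (not part of the expected output): the stack-depth check
-- exercised by infinite_recurse() is governed by the max_stack_depth setting,
-- which a session can inspect or lower, e.g.:
--   SHOW max_stack_depth;           -- typically '2MB' by default
--   SET max_stack_depth = '1MB';    -- a lower value makes the limit trip sooner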
+SELECT version() ~ 'powerpc64[^,]*-linux-gnu' + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out new file mode 100644 index 0000000..4943429 --- /dev/null +++ b/src/test/regress/expected/inherit.out @@ -0,0 +1,2794 @@ +-- +-- Test inheritance features +-- +CREATE TABLE a (aa TEXT); +CREATE TABLE b (bb TEXT) INHERITS (a); +CREATE TABLE c (cc TEXT) INHERITS (a); +CREATE TABLE d (dd TEXT) INHERITS (b,c,a); +NOTICE: merging multiple inherited definitions of column "aa" +NOTICE: merging multiple inherited definitions of column "aa" +INSERT INTO a(aa) VALUES('aaa'); +INSERT INTO a(aa) VALUES('aaaa'); +INSERT INTO a(aa) VALUES('aaaaa'); +INSERT INTO a(aa) VALUES('aaaaaa'); +INSERT INTO a(aa) VALUES('aaaaaaa'); +INSERT INTO a(aa) VALUES('aaaaaaaa'); +INSERT INTO b(aa) VALUES('bbb'); +INSERT INTO b(aa) VALUES('bbbb'); +INSERT INTO b(aa) VALUES('bbbbb'); +INSERT INTO b(aa) VALUES('bbbbbb'); +INSERT INTO b(aa) VALUES('bbbbbbb'); +INSERT INTO b(aa) VALUES('bbbbbbbb'); +INSERT INTO c(aa) VALUES('ccc'); +INSERT INTO c(aa) VALUES('cccc'); +INSERT INTO c(aa) VALUES('ccccc'); +INSERT INTO c(aa) VALUES('cccccc'); +INSERT INTO c(aa) VALUES('ccccccc'); +INSERT INTO c(aa) VALUES('cccccccc'); +INSERT INTO d(aa) VALUES('ddd'); +INSERT INTO d(aa) VALUES('dddd'); +INSERT INTO d(aa) VALUES('ddddd'); +INSERT INTO d(aa) VALUES('dddddd'); +INSERT INTO d(aa) VALUES('ddddddd'); +INSERT INTO d(aa) VALUES('dddddddd'); +SELECT relname, a.* FROM a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+---------- + a | aaa + a | aaaa + a | aaaaa + a | aaaaaa + a | aaaaaaa + a | aaaaaaaa + b | bbb + b | bbbb + b | bbbbb + b | bbbbbb + b | bbbbbbb + b | bbbbbbbb + c | ccc + c | cccc + c | ccccc + c | cccccc + c | ccccccc + c | cccccccc + d | ddd + d | dddd + d | ddddd + d | dddddd + d | ddddddd + d | dddddddd +(24 rows) + +SELECT relname, b.* FROM b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+----------+---- + b | bbb | + b | bbbb | + b | bbbbb | + b | bbbbbb | + b | bbbbbbb | + b | bbbbbbbb | + d | ddd | + d | dddd | + d | ddddd | + d | dddddd | + d | ddddddd | + d | dddddddd | +(12 rows) + +SELECT relname, c.* FROM c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----------+---- + c | ccc | + c | cccc | + c | ccccc | + c | cccccc | + c | ccccccc | + c | cccccccc | + d | ddd | + d | dddd | + d | ddddd | + d | dddddd | + d | ddddddd | + d | dddddddd | +(12 rows) + +SELECT relname, d.* FROM d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+----------+----+----+---- + d | ddd | | | + d | dddd | | | + d | ddddd | | | + d | dddddd | | | + d | ddddddd | | | + d | dddddddd | | | +(6 rows) + +SELECT relname, a.* FROM ONLY a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+---------- + a | aaa + a | aaaa + a | aaaaa + a | aaaaaa + a | aaaaaaa + a | aaaaaaaa +(6 rows) + +SELECT relname, b.* FROM ONLY b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+----------+---- + b | bbb | + b | bbbb | + b | bbbbb | + b | bbbbbb | + b | bbbbbbb | + b | bbbbbbbb | +(6 rows) + +SELECT relname, c.* FROM ONLY c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----------+---- + c | ccc | + c | cccc | + c | ccccc | + c | cccccc | + c | ccccccc | + c | cccccccc | +(6 rows) + +SELECT relname, d.* FROM ONLY d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd 
+---------+----------+----+----+---- + d | ddd | | | + d | dddd | | | + d | ddddd | | | + d | dddddd | | | + d | ddddddd | | | + d | dddddddd | | | +(6 rows) + +UPDATE a SET aa='zzzz' WHERE aa='aaaa'; +UPDATE ONLY a SET aa='zzzzz' WHERE aa='aaaaa'; +UPDATE b SET aa='zzz' WHERE aa='aaa'; +UPDATE ONLY b SET aa='zzz' WHERE aa='aaa'; +UPDATE a SET aa='zzzzzz' WHERE aa LIKE 'aaa%'; +SELECT relname, a.* FROM a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+---------- + a | zzzz + a | zzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz + b | bbb + b | bbbb + b | bbbbb + b | bbbbbb + b | bbbbbbb + b | bbbbbbbb + c | ccc + c | cccc + c | ccccc + c | cccccc + c | ccccccc + c | cccccccc + d | ddd + d | dddd + d | ddddd + d | dddddd + d | ddddddd + d | dddddddd +(24 rows) + +SELECT relname, b.* FROM b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+----------+---- + b | bbb | + b | bbbb | + b | bbbbb | + b | bbbbbb | + b | bbbbbbb | + b | bbbbbbbb | + d | ddd | + d | dddd | + d | ddddd | + d | dddddd | + d | ddddddd | + d | dddddddd | +(12 rows) + +SELECT relname, c.* FROM c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----------+---- + c | ccc | + c | cccc | + c | ccccc | + c | cccccc | + c | ccccccc | + c | cccccccc | + d | ddd | + d | dddd | + d | ddddd | + d | dddddd | + d | ddddddd | + d | dddddddd | +(12 rows) + +SELECT relname, d.* FROM d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+----------+----+----+---- + d | ddd | | | + d | dddd | | | + d | ddddd | | | + d | dddddd | | | + d | ddddddd | | | + d | dddddddd | | | +(6 rows) + +SELECT relname, a.* FROM ONLY a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+-------- + a | zzzz + a | zzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz +(6 rows) + +SELECT relname, b.* FROM ONLY b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+----------+---- + b | bbb | + b | bbbb | + b | bbbbb | + b | bbbbbb | + b | bbbbbbb | + b | bbbbbbbb | +(6 rows) + +SELECT relname, c.* FROM ONLY c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----------+---- + c | ccc | + c | cccc | + c | ccccc | + c | cccccc | + c | ccccccc | + c | cccccccc | +(6 rows) + +SELECT relname, d.* FROM ONLY d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+----------+----+----+---- + d | ddd | | | + d | dddd | | | + d | ddddd | | | + d | dddddd | | | + d | ddddddd | | | + d | dddddddd | | | +(6 rows) + +UPDATE b SET aa='new'; +SELECT relname, a.* FROM a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+---------- + a | zzzz + a | zzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz + b | new + b | new + b | new + b | new + b | new + b | new + c | ccc + c | cccc + c | ccccc + c | cccccc + c | ccccccc + c | cccccccc + d | new + d | new + d | new + d | new + d | new + d | new +(24 rows) + +SELECT relname, b.* FROM b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+-----+---- + b | new | + b | new | + b | new | + b | new | + b | new | + b | new | + d | new | + d | new | + d | new | + d | new | + d | new | + d | new | +(12 rows) + +SELECT relname, c.* FROM c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----------+---- + c | ccc | + c | cccc | + c | ccccc | + c | cccccc | + c | ccccccc | + c | cccccccc | + d | new | + d | new | + d | new | + d | new | + d | new | + d | new | +(12 rows) + 
+SELECT relname, d.* FROM d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+-----+----+----+---- + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | +(6 rows) + +SELECT relname, a.* FROM ONLY a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+-------- + a | zzzz + a | zzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz + a | zzzzzz +(6 rows) + +SELECT relname, b.* FROM ONLY b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+-----+---- + b | new | + b | new | + b | new | + b | new | + b | new | + b | new | +(6 rows) + +SELECT relname, c.* FROM ONLY c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----------+---- + c | ccc | + c | cccc | + c | ccccc | + c | cccccc | + c | ccccccc | + c | cccccccc | +(6 rows) + +SELECT relname, d.* FROM ONLY d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+-----+----+----+---- + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | +(6 rows) + +UPDATE a SET aa='new'; +DELETE FROM ONLY c WHERE aa='new'; +SELECT relname, a.* FROM a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+----- + a | new + a | new + a | new + a | new + a | new + a | new + b | new + b | new + b | new + b | new + b | new + b | new + d | new + d | new + d | new + d | new + d | new + d | new +(18 rows) + +SELECT relname, b.* FROM b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+-----+---- + b | new | + b | new | + b | new | + b | new | + b | new | + b | new | + d | new | + d | new | + d | new | + d | new | + d | new | + d | new | +(12 rows) + +SELECT relname, c.* FROM c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+-----+---- + d | new | + d | new | + d | new | + d | new | + d | new | + d | new | +(6 rows) + +SELECT relname, d.* FROM d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+-----+----+----+---- + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | +(6 rows) + +SELECT relname, a.* FROM ONLY a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+----- + a | new + a | new + a | new + a | new + a | new + a | new +(6 rows) + +SELECT relname, b.* FROM ONLY b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+-----+---- + b | new | + b | new | + b | new | + b | new | + b | new | + b | new | +(6 rows) + +SELECT relname, c.* FROM ONLY c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----+---- +(0 rows) + +SELECT relname, d.* FROM ONLY d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+-----+----+----+---- + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | + d | new | | | +(6 rows) + +DELETE FROM a; +SELECT relname, a.* FROM a, pg_class where a.tableoid = pg_class.oid; + relname | aa +---------+---- +(0 rows) + +SELECT relname, b.* FROM b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+----+---- +(0 rows) + +SELECT relname, c.* FROM c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----+---- +(0 rows) + +SELECT relname, d.* FROM d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+----+----+----+---- +(0 rows) + +SELECT relname, a.* FROM ONLY a, pg_class where a.tableoid = pg_class.oid; + relname | aa 
+---------+---- +(0 rows) + +SELECT relname, b.* FROM ONLY b, pg_class where b.tableoid = pg_class.oid; + relname | aa | bb +---------+----+---- +(0 rows) + +SELECT relname, c.* FROM ONLY c, pg_class where c.tableoid = pg_class.oid; + relname | aa | cc +---------+----+---- +(0 rows) + +SELECT relname, d.* FROM ONLY d, pg_class where d.tableoid = pg_class.oid; + relname | aa | bb | cc | dd +---------+----+----+----+---- +(0 rows) + +-- Confirm PRIMARY KEY adds NOT NULL constraint to child table +CREATE TEMP TABLE z (b TEXT, PRIMARY KEY(aa, b)) inherits (a); +INSERT INTO z VALUES (NULL, 'text'); -- should fail +ERROR: null value in column "aa" of relation "z" violates not-null constraint +DETAIL: Failing row contains (null, text). +-- Check inherited UPDATE with first child excluded +create table some_tab (f1 int, f2 int, f3 int, check (f1 < 10) no inherit); +create table some_tab_child () inherits(some_tab); +insert into some_tab_child select i, i+1, 0 from generate_series(1,1000) i; +create index on some_tab_child(f1, f2); +-- while at it, also check that statement-level triggers fire +create function some_tab_stmt_trig_func() returns trigger as +$$begin raise notice 'updating some_tab'; return NULL; end;$$ +language plpgsql; +create trigger some_tab_stmt_trig + before update on some_tab execute function some_tab_stmt_trig_func(); +explain (costs off) +update some_tab set f3 = 11 where f1 = 12 and f2 = 13; + QUERY PLAN +------------------------------------------------------------------------------------ + Update on some_tab + Update on some_tab_child some_tab_1 + -> Result + -> Index Scan using some_tab_child_f1_f2_idx on some_tab_child some_tab_1 + Index Cond: ((f1 = 12) AND (f2 = 13)) +(5 rows) + +update some_tab set f3 = 11 where f1 = 12 and f2 = 13; +NOTICE: updating some_tab +drop table some_tab cascade; +NOTICE: drop cascades to table some_tab_child +drop function some_tab_stmt_trig_func(); +-- Check inherited UPDATE with all children excluded +create table some_tab (a int, b int); +create table some_tab_child () inherits (some_tab); +insert into some_tab_child values(1,2); +explain (verbose, costs off) +update some_tab set a = a + 1 where false; + QUERY PLAN +-------------------------------------------------------- + Update on public.some_tab + -> Result + Output: (some_tab.a + 1), NULL::oid, NULL::tid + One-Time Filter: false +(4 rows) + +update some_tab set a = a + 1 where false; +explain (verbose, costs off) +update some_tab set a = a + 1 where false returning b, a; + QUERY PLAN +-------------------------------------------------------- + Update on public.some_tab + Output: some_tab.b, some_tab.a + -> Result + Output: (some_tab.a + 1), NULL::oid, NULL::tid + One-Time Filter: false +(5 rows) + +update some_tab set a = a + 1 where false returning b, a; + b | a +---+--- +(0 rows) + +table some_tab; + a | b +---+--- + 1 | 2 +(1 row) + +drop table some_tab cascade; +NOTICE: drop cascades to table some_tab_child +-- Check UPDATE with inherited target and an inherited source table +create temp table foo(f1 int, f2 int); +create temp table foo2(f3 int) inherits (foo); +create temp table bar(f1 int, f2 int); +create temp table bar2(f3 int) inherits (bar); +insert into foo values(1,1); +insert into foo values(3,3); +insert into foo2 values(2,2,2); +insert into foo2 values(3,3,3); +insert into bar values(1,1); +insert into bar values(2,2); +insert into bar values(3,3); +insert into bar values(4,4); +insert into bar2 values(1,1,1); +insert into bar2 values(2,2,2); +insert into bar2 
values(3,3,3); +insert into bar2 values(4,4,4); +update bar set f2 = f2 + 100 where f1 in (select f1 from foo); +select tableoid::regclass::text as relname, bar.* from bar order by 1,2; + relname | f1 | f2 +---------+----+----- + bar | 1 | 101 + bar | 2 | 102 + bar | 3 | 103 + bar | 4 | 4 + bar2 | 1 | 101 + bar2 | 2 | 102 + bar2 | 3 | 103 + bar2 | 4 | 4 +(8 rows) + +-- Check UPDATE with inherited target and an appendrel subquery +update bar set f2 = f2 + 100 +from + ( select f1 from foo union all select f1+3 from foo ) ss +where bar.f1 = ss.f1; +select tableoid::regclass::text as relname, bar.* from bar order by 1,2; + relname | f1 | f2 +---------+----+----- + bar | 1 | 201 + bar | 2 | 202 + bar | 3 | 203 + bar | 4 | 104 + bar2 | 1 | 201 + bar2 | 2 | 202 + bar2 | 3 | 203 + bar2 | 4 | 104 +(8 rows) + +-- Check UPDATE with *partitioned* inherited target and an appendrel subquery +create table some_tab (a int); +insert into some_tab values (0); +create table some_tab_child () inherits (some_tab); +insert into some_tab_child values (1); +create table parted_tab (a int, b char) partition by list (a); +create table parted_tab_part1 partition of parted_tab for values in (1); +create table parted_tab_part2 partition of parted_tab for values in (2); +create table parted_tab_part3 partition of parted_tab for values in (3); +insert into parted_tab values (1, 'a'), (2, 'a'), (3, 'a'); +update parted_tab set b = 'b' +from + (select a from some_tab union all select a+1 from some_tab) ss (a) +where parted_tab.a = ss.a; +select tableoid::regclass::text as relname, parted_tab.* from parted_tab order by 1,2; + relname | a | b +------------------+---+--- + parted_tab_part1 | 1 | b + parted_tab_part2 | 2 | b + parted_tab_part3 | 3 | a +(3 rows) + +truncate parted_tab; +insert into parted_tab values (1, 'a'), (2, 'a'), (3, 'a'); +update parted_tab set b = 'b' +from + (select 0 from parted_tab union all select 1 from parted_tab) ss (a) +where parted_tab.a = ss.a; +select tableoid::regclass::text as relname, parted_tab.* from parted_tab order by 1,2; + relname | a | b +------------------+---+--- + parted_tab_part1 | 1 | b + parted_tab_part2 | 2 | a + parted_tab_part3 | 3 | a +(3 rows) + +-- modifies partition key, but no rows will actually be updated +explain update parted_tab set a = 2 where false; + QUERY PLAN +-------------------------------------------------------- + Update on parted_tab (cost=0.00..0.00 rows=0 width=0) + -> Result (cost=0.00..0.00 rows=0 width=10) + One-Time Filter: false +(3 rows) + +drop table parted_tab; +-- Check UPDATE with multi-level partitioned inherited target +create table mlparted_tab (a int, b char, c text) partition by list (a); +create table mlparted_tab_part1 partition of mlparted_tab for values in (1); +create table mlparted_tab_part2 partition of mlparted_tab for values in (2) partition by list (b); +create table mlparted_tab_part3 partition of mlparted_tab for values in (3); +create table mlparted_tab_part2a partition of mlparted_tab_part2 for values in ('a'); +create table mlparted_tab_part2b partition of mlparted_tab_part2 for values in ('b'); +insert into mlparted_tab values (1, 'a'), (2, 'a'), (2, 'b'), (3, 'a'); +update mlparted_tab mlp set c = 'xxx' +from + (select a from some_tab union all select a+1 from some_tab) ss (a) +where (mlp.a = ss.a and mlp.b = 'b') or mlp.a = 3; +select tableoid::regclass::text as relname, mlparted_tab.* from mlparted_tab order by 1,2; + relname | a | b | c +---------------------+---+---+----- + mlparted_tab_part1 | 1 | a | + 
mlparted_tab_part2a | 2 | a | + mlparted_tab_part2b | 2 | b | xxx + mlparted_tab_part3 | 3 | a | xxx +(4 rows) + +drop table mlparted_tab; +drop table some_tab cascade; +NOTICE: drop cascades to table some_tab_child +/* Test multiple inheritance of column defaults */ +CREATE TABLE firstparent (tomorrow date default now()::date + 1); +CREATE TABLE secondparent (tomorrow date default now() :: date + 1); +CREATE TABLE jointchild () INHERITS (firstparent, secondparent); -- ok +NOTICE: merging multiple inherited definitions of column "tomorrow" +CREATE TABLE thirdparent (tomorrow date default now()::date - 1); +CREATE TABLE otherchild () INHERITS (firstparent, thirdparent); -- not ok +NOTICE: merging multiple inherited definitions of column "tomorrow" +ERROR: column "tomorrow" inherits conflicting default values +HINT: To resolve the conflict, specify a default explicitly. +CREATE TABLE otherchild (tomorrow date default now()) + INHERITS (firstparent, thirdparent); -- ok, child resolves ambiguous default +NOTICE: merging multiple inherited definitions of column "tomorrow" +NOTICE: merging column "tomorrow" with inherited definition +DROP TABLE firstparent, secondparent, jointchild, thirdparent, otherchild; +-- Test changing the type of inherited columns +insert into d values('test','one','two','three'); +alter table a alter column aa type integer using bit_length(aa); +select * from d; + aa | bb | cc | dd +----+-----+-----+------- + 32 | one | two | three +(1 row) + +-- The above verified that we can change the type of a multiply-inherited +-- column; but we should reject that if any definition was inherited from +-- an unrelated parent. +create temp table parent1(f1 int, f2 int); +create temp table parent2(f1 int, f3 bigint); +create temp table childtab(f4 int) inherits(parent1, parent2); +NOTICE: merging multiple inherited definitions of column "f1" +alter table parent1 alter column f1 type bigint; -- fail, conflict w/parent2 +ERROR: cannot alter inherited column "f1" of relation "childtab" +alter table parent1 alter column f2 type bigint; -- ok +-- Test non-inheritable parent constraints +create table p1(ff1 int); +alter table p1 add constraint p1chk check (ff1 > 0) no inherit; +alter table p1 add constraint p2chk check (ff1 > 10); +-- connoinherit should be true for NO INHERIT constraint +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pgc.connoinherit from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname = 'p1' order by 1,2; + relname | conname | contype | conislocal | coninhcount | connoinherit +---------+---------+---------+------------+-------------+-------------- + p1 | p1chk | c | t | 0 | t + p1 | p2chk | c | t | 0 | f +(2 rows) + +-- Test that child does not inherit NO INHERIT constraints +create table c1 () inherits (p1); +\d p1 + Table "public.p1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + ff1 | integer | | | +Check constraints: + "p1chk" CHECK (ff1 > 0) NO INHERIT + "p2chk" CHECK (ff1 > 10) +Number of child tables: 1 (Use \d+ to list them.) 
+ +\d c1 + Table "public.c1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + ff1 | integer | | | +Check constraints: + "p2chk" CHECK (ff1 > 10) +Inherits: p1 + +-- Test that child does not override inheritable constraints of the parent +create table c2 (constraint p2chk check (ff1 > 10) no inherit) inherits (p1); --fails +ERROR: constraint "p2chk" conflicts with inherited constraint on relation "c2" +drop table p1 cascade; +NOTICE: drop cascades to table c1 +-- Tests for casting between the rowtypes of parent and child +-- tables. See the pgsql-hackers thread beginning Dec. 4/04 +create table base (i integer); +create table derived () inherits (base); +create table more_derived (like derived, b int) inherits (derived); +NOTICE: merging column "i" with inherited definition +insert into derived (i) values (0); +select derived::base from derived; + derived +--------- + (0) +(1 row) + +select NULL::derived::base; + base +------ + +(1 row) + +-- remove redundant conversions. +explain (verbose on, costs off) select row(i, b)::more_derived::derived::base from more_derived; + QUERY PLAN +------------------------------------------- + Seq Scan on public.more_derived + Output: (ROW(i, b)::more_derived)::base +(2 rows) + +explain (verbose on, costs off) select (1, 2)::more_derived::derived::base; + QUERY PLAN +----------------------- + Result + Output: '(1)'::base +(2 rows) + +drop table more_derived; +drop table derived; +drop table base; +create table p1(ff1 int); +create table p2(f1 text); +create function p2text(p2) returns text as 'select $1.f1' language sql; +create table c1(f3 int) inherits(p1,p2); +insert into c1 values(123456789, 'hi', 42); +select p2text(c1.*) from c1; + p2text +-------- + hi +(1 row) + +drop function p2text(p2); +drop table c1; +drop table p2; +drop table p1; +CREATE TABLE ac (aa TEXT); +alter table ac add constraint ac_check check (aa is not null); +CREATE TABLE bc (bb TEXT) INHERITS (ac); +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+----------+---------+------------+-------------+------------------ + ac | ac_check | c | t | 0 | (aa IS NOT NULL) + bc | ac_check | c | f | 1 | (aa IS NOT NULL) +(2 rows) + +insert into ac (aa) values (NULL); +ERROR: new row for relation "ac" violates check constraint "ac_check" +DETAIL: Failing row contains (null). +insert into bc (aa) values (NULL); +ERROR: new row for relation "bc" violates check constraint "ac_check" +DETAIL: Failing row contains (null, null). 
+alter table bc drop constraint ac_check; -- fail, disallowed +ERROR: cannot drop inherited constraint "ac_check" of relation "bc" +alter table ac drop constraint ac_check; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+---------+---------+------------+-------------+-------- +(0 rows) + +-- try the unnamed-constraint case +alter table ac add check (aa is not null); +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+-------------+---------+------------+-------------+------------------ + ac | ac_aa_check | c | t | 0 | (aa IS NOT NULL) + bc | ac_aa_check | c | f | 1 | (aa IS NOT NULL) +(2 rows) + +insert into ac (aa) values (NULL); +ERROR: new row for relation "ac" violates check constraint "ac_aa_check" +DETAIL: Failing row contains (null). +insert into bc (aa) values (NULL); +ERROR: new row for relation "bc" violates check constraint "ac_aa_check" +DETAIL: Failing row contains (null, null). +alter table bc drop constraint ac_aa_check; -- fail, disallowed +ERROR: cannot drop inherited constraint "ac_aa_check" of relation "bc" +alter table ac drop constraint ac_aa_check; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+---------+---------+------------+-------------+-------- +(0 rows) + +alter table ac add constraint ac_check check (aa is not null); +alter table bc no inherit ac; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+----------+---------+------------+-------------+------------------ + ac | ac_check | c | t | 0 | (aa IS NOT NULL) + bc | ac_check | c | t | 0 | (aa IS NOT NULL) +(2 rows) + +alter table bc drop constraint ac_check; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+----------+---------+------------+-------------+------------------ + ac | ac_check | c | t | 0 | (aa IS NOT NULL) +(1 row) + +alter table ac drop constraint ac_check; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+---------+---------+------------+-------------+-------- +(0 rows) + +drop 
table bc; +drop table ac; +create table ac (a int constraint check_a check (a <> 0)); +create table bc (a int constraint check_a check (a <> 0), b int constraint check_b check (b <> 0)) inherits (ac); +NOTICE: merging column "a" with inherited definition +NOTICE: merging constraint "check_a" with inherited definition +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+---------+---------+------------+-------------+---------- + ac | check_a | c | t | 0 | (a <> 0) + bc | check_a | c | t | 1 | (a <> 0) + bc | check_b | c | t | 0 | (b <> 0) +(3 rows) + +drop table bc; +drop table ac; +create table ac (a int constraint check_a check (a <> 0)); +create table bc (b int constraint check_b check (b <> 0)); +create table cc (c int constraint check_c check (c <> 0)) inherits (ac, bc); +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+---------+---------+------------+-------------+---------- + ac | check_a | c | t | 0 | (a <> 0) + bc | check_b | c | t | 0 | (b <> 0) + cc | check_a | c | f | 1 | (a <> 0) + cc | check_b | c | f | 1 | (b <> 0) + cc | check_c | c | t | 0 | (c <> 0) +(5 rows) + +alter table cc no inherit bc; +select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg_get_expr(pgc.conbin, pc.oid) as consrc from pg_class as pc inner join pg_constraint as pgc on (pgc.conrelid = pc.oid) where pc.relname in ('ac', 'bc', 'cc') order by 1,2; + relname | conname | contype | conislocal | coninhcount | consrc +---------+---------+---------+------------+-------------+---------- + ac | check_a | c | t | 0 | (a <> 0) + bc | check_b | c | t | 0 | (b <> 0) + cc | check_a | c | f | 1 | (a <> 0) + cc | check_b | c | t | 0 | (b <> 0) + cc | check_c | c | t | 0 | (c <> 0) +(5 rows) + +drop table cc; +drop table bc; +drop table ac; +create table p1(f1 int); +create table p2(f2 int); +create table c1(f3 int) inherits(p1,p2); +insert into c1 values(1,-1,2); +alter table p2 add constraint cc check (f2>0); -- fail +ERROR: check constraint "cc" of relation "c1" is violated by some row +alter table p2 add check (f2>0); -- check it without a name, too +ERROR: check constraint "p2_f2_check" of relation "c1" is violated by some row +delete from c1; +insert into c1 values(1,1,2); +alter table p2 add check (f2>0); +insert into c1 values(1,-1,2); -- fail +ERROR: new row for relation "c1" violates check constraint "p2_f2_check" +DETAIL: Failing row contains (1, -1, 2). 
+create table c2(f3 int) inherits(p1,p2); +\d c2 + Table "public.c2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + f1 | integer | | | + f2 | integer | | | + f3 | integer | | | +Check constraints: + "p2_f2_check" CHECK (f2 > 0) +Inherits: p1, + p2 + +create table c3 (f4 int) inherits(c1,c2); +NOTICE: merging multiple inherited definitions of column "f1" +NOTICE: merging multiple inherited definitions of column "f2" +NOTICE: merging multiple inherited definitions of column "f3" +\d c3 + Table "public.c3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + f1 | integer | | | + f2 | integer | | | + f3 | integer | | | + f4 | integer | | | +Check constraints: + "p2_f2_check" CHECK (f2 > 0) +Inherits: c1, + c2 + +drop table p1 cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table c1 +drop cascades to table c2 +drop cascades to table c3 +drop table p2 cascade; +create table pp1 (f1 int); +create table cc1 (f2 text, f3 int) inherits (pp1); +alter table pp1 add column a1 int check (a1 > 0); +\d cc1 + Table "public.cc1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + f1 | integer | | | + f2 | text | | | + f3 | integer | | | + a1 | integer | | | +Check constraints: + "pp1_a1_check" CHECK (a1 > 0) +Inherits: pp1 + +create table cc2(f4 float) inherits(pp1,cc1); +NOTICE: merging multiple inherited definitions of column "f1" +NOTICE: merging multiple inherited definitions of column "a1" +\d cc2 + Table "public.cc2" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + f1 | integer | | | + a1 | integer | | | + f2 | text | | | + f3 | integer | | | + f4 | double precision | | | +Check constraints: + "pp1_a1_check" CHECK (a1 > 0) +Inherits: pp1, + cc1 + +alter table pp1 add column a2 int check (a2 > 0); +NOTICE: merging definition of column "a2" for child "cc2" +NOTICE: merging constraint "pp1_a2_check" with inherited definition +\d cc2 + Table "public.cc2" + Column | Type | Collation | Nullable | Default +--------+------------------+-----------+----------+--------- + f1 | integer | | | + a1 | integer | | | + f2 | text | | | + f3 | integer | | | + f4 | double precision | | | + a2 | integer | | | +Check constraints: + "pp1_a1_check" CHECK (a1 > 0) + "pp1_a2_check" CHECK (a2 > 0) +Inherits: pp1, + cc1 + +drop table pp1 cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table cc1 +drop cascades to table cc2 +-- Test for renaming in simple multiple inheritance +CREATE TABLE inht1 (a int, b int); +CREATE TABLE inhs1 (b int, c int); +CREATE TABLE inhts (d int) INHERITS (inht1, inhs1); +NOTICE: merging multiple inherited definitions of column "b" +ALTER TABLE inht1 RENAME a TO aa; +ALTER TABLE inht1 RENAME b TO bb; -- to be failed +ERROR: cannot rename inherited column "b" +ALTER TABLE inhts RENAME aa TO aaa; -- to be failed +ERROR: cannot rename inherited column "aa" +ALTER TABLE inhts RENAME d TO dd; +\d+ inhts + Table "public.inhts" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + aa | integer | | | | plain | | + b | integer | | | | plain | | + c | integer | | | | plain | | + dd | integer | | | | plain | | +Inherits: inht1, + inhs1 + +DROP TABLE inhts; +-- Test for adding a column to a parent table with 
complex inheritance +CREATE TABLE inhta (); +CREATE TABLE inhtb () INHERITS (inhta); +CREATE TABLE inhtc () INHERITS (inhtb); +CREATE TABLE inhtd () INHERITS (inhta, inhtb, inhtc); +ALTER TABLE inhta ADD COLUMN i int; +NOTICE: merging definition of column "i" for child "inhtd" +NOTICE: merging definition of column "i" for child "inhtd" +\d+ inhta + Table "public.inhta" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + i | integer | | | | plain | | +Child tables: inhtb, + inhtd + +DROP TABLE inhta, inhtb, inhtc, inhtd; +-- Test for renaming in diamond inheritance +CREATE TABLE inht2 (x int) INHERITS (inht1); +CREATE TABLE inht3 (y int) INHERITS (inht1); +CREATE TABLE inht4 (z int) INHERITS (inht2, inht3); +NOTICE: merging multiple inherited definitions of column "aa" +NOTICE: merging multiple inherited definitions of column "b" +ALTER TABLE inht1 RENAME aa TO aaa; +\d+ inht4 + Table "public.inht4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + aaa | integer | | | | plain | | + b | integer | | | | plain | | + x | integer | | | | plain | | + y | integer | | | | plain | | + z | integer | | | | plain | | +Inherits: inht2, + inht3 + +CREATE TABLE inhts (d int) INHERITS (inht2, inhs1); +NOTICE: merging multiple inherited definitions of column "b" +ALTER TABLE inht1 RENAME aaa TO aaaa; +ALTER TABLE inht1 RENAME b TO bb; -- to be failed +ERROR: cannot rename inherited column "b" +\d+ inhts + Table "public.inhts" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + aaaa | integer | | | | plain | | + b | integer | | | | plain | | + x | integer | | | | plain | | + c | integer | | | | plain | | + d | integer | | | | plain | | +Inherits: inht2, + inhs1 + +WITH RECURSIVE r AS ( + SELECT 'inht1'::regclass AS inhrelid +UNION ALL + SELECT c.inhrelid FROM pg_inherits c, r WHERE r.inhrelid = c.inhparent +) +SELECT a.attrelid::regclass, a.attname, a.attinhcount, e.expected + FROM (SELECT inhrelid, count(*) AS expected FROM pg_inherits + WHERE inhparent IN (SELECT inhrelid FROM r) GROUP BY inhrelid) e + JOIN pg_attribute a ON e.inhrelid = a.attrelid WHERE NOT attislocal + ORDER BY a.attrelid::regclass::name, a.attnum; + attrelid | attname | attinhcount | expected +----------+---------+-------------+---------- + inht2 | aaaa | 1 | 1 + inht2 | b | 1 | 1 + inht3 | aaaa | 1 | 1 + inht3 | b | 1 | 1 + inht4 | aaaa | 2 | 2 + inht4 | b | 2 | 2 + inht4 | x | 1 | 2 + inht4 | y | 1 | 2 + inhts | aaaa | 1 | 1 + inhts | b | 2 | 1 + inhts | x | 1 | 1 + inhts | c | 1 | 1 +(12 rows) + +DROP TABLE inht1, inhs1 CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table inht2 +drop cascades to table inhts +drop cascades to table inht3 +drop cascades to table inht4 +-- Test non-inheritable indices [UNIQUE, EXCLUDE] constraints +CREATE TABLE test_constraints (id int, val1 varchar, val2 int, UNIQUE(val1, val2)); +CREATE TABLE test_constraints_inh () INHERITS (test_constraints); +\d+ test_constraints + Table "public.test_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+--------------+------------- 
+ id | integer | | | | plain | | + val1 | character varying | | | | extended | | + val2 | integer | | | | plain | | +Indexes: + "test_constraints_val1_val2_key" UNIQUE CONSTRAINT, btree (val1, val2) +Child tables: test_constraints_inh + +ALTER TABLE ONLY test_constraints DROP CONSTRAINT test_constraints_val1_val2_key; +\d+ test_constraints + Table "public.test_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+--------------+------------- + id | integer | | | | plain | | + val1 | character varying | | | | extended | | + val2 | integer | | | | plain | | +Child tables: test_constraints_inh + +\d+ test_constraints_inh + Table "public.test_constraints_inh" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+--------------+------------- + id | integer | | | | plain | | + val1 | character varying | | | | extended | | + val2 | integer | | | | plain | | +Inherits: test_constraints + +DROP TABLE test_constraints_inh; +DROP TABLE test_constraints; +CREATE TABLE test_ex_constraints ( + c circle, + EXCLUDE USING gist (c WITH &&) +); +CREATE TABLE test_ex_constraints_inh () INHERITS (test_ex_constraints); +\d+ test_ex_constraints + Table "public.test_ex_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+--------+-----------+----------+---------+---------+--------------+------------- + c | circle | | | | plain | | +Indexes: + "test_ex_constraints_c_excl" EXCLUDE USING gist (c WITH &&) +Child tables: test_ex_constraints_inh + +ALTER TABLE test_ex_constraints DROP CONSTRAINT test_ex_constraints_c_excl; +\d+ test_ex_constraints + Table "public.test_ex_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+--------+-----------+----------+---------+---------+--------------+------------- + c | circle | | | | plain | | +Child tables: test_ex_constraints_inh + +\d+ test_ex_constraints_inh + Table "public.test_ex_constraints_inh" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+--------+-----------+----------+---------+---------+--------------+------------- + c | circle | | | | plain | | +Inherits: test_ex_constraints + +DROP TABLE test_ex_constraints_inh; +DROP TABLE test_ex_constraints; +-- Test non-inheritable foreign key constraints +CREATE TABLE test_primary_constraints(id int PRIMARY KEY); +CREATE TABLE test_foreign_constraints(id1 int REFERENCES test_primary_constraints(id)); +CREATE TABLE test_foreign_constraints_inh () INHERITS (test_foreign_constraints); +\d+ test_primary_constraints + Table "public.test_primary_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | +Indexes: + "test_primary_constraints_pkey" PRIMARY KEY, btree (id) +Referenced by: + TABLE "test_foreign_constraints" CONSTRAINT "test_foreign_constraints_id1_fkey" FOREIGN KEY (id1) REFERENCES test_primary_constraints(id) + +\d+ test_foreign_constraints + Table "public.test_foreign_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- 
+ id1 | integer | | | | plain | | +Foreign-key constraints: + "test_foreign_constraints_id1_fkey" FOREIGN KEY (id1) REFERENCES test_primary_constraints(id) +Child tables: test_foreign_constraints_inh + +ALTER TABLE test_foreign_constraints DROP CONSTRAINT test_foreign_constraints_id1_fkey; +\d+ test_foreign_constraints + Table "public.test_foreign_constraints" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id1 | integer | | | | plain | | +Child tables: test_foreign_constraints_inh + +\d+ test_foreign_constraints_inh + Table "public.test_foreign_constraints_inh" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id1 | integer | | | | plain | | +Inherits: test_foreign_constraints + +DROP TABLE test_foreign_constraints_inh; +DROP TABLE test_foreign_constraints; +DROP TABLE test_primary_constraints; +-- Test foreign key behavior +create table inh_fk_1 (a int primary key); +insert into inh_fk_1 values (1), (2), (3); +create table inh_fk_2 (x int primary key, y int references inh_fk_1 on delete cascade); +insert into inh_fk_2 values (11, 1), (22, 2), (33, 3); +create table inh_fk_2_child () inherits (inh_fk_2); +insert into inh_fk_2_child values (111, 1), (222, 2); +delete from inh_fk_1 where a = 1; +select * from inh_fk_1 order by 1; + a +--- + 2 + 3 +(2 rows) + +select * from inh_fk_2 order by 1, 2; + x | y +-----+--- + 22 | 2 + 33 | 3 + 111 | 1 + 222 | 2 +(4 rows) + +drop table inh_fk_1, inh_fk_2, inh_fk_2_child; +-- Test that parent and child CHECK constraints can be created in either order +create table p1(f1 int); +create table p1_c1() inherits(p1); +alter table p1 add constraint inh_check_constraint1 check (f1 > 0); +alter table p1_c1 add constraint inh_check_constraint1 check (f1 > 0); +NOTICE: merging constraint "inh_check_constraint1" with inherited definition +alter table p1_c1 add constraint inh_check_constraint2 check (f1 < 10); +alter table p1 add constraint inh_check_constraint2 check (f1 < 10); +NOTICE: merging constraint "inh_check_constraint2" with inherited definition +select conrelid::regclass::text as relname, conname, conislocal, coninhcount +from pg_constraint where conname like 'inh\_check\_constraint%' +order by 1, 2; + relname | conname | conislocal | coninhcount +---------+-----------------------+------------+------------- + p1 | inh_check_constraint1 | t | 0 + p1 | inh_check_constraint2 | t | 0 + p1_c1 | inh_check_constraint1 | t | 1 + p1_c1 | inh_check_constraint2 | t | 1 +(4 rows) + +drop table p1 cascade; +NOTICE: drop cascades to table p1_c1 +-- Test that a valid child can have not-valid parent, but not vice versa +create table invalid_check_con(f1 int); +create table invalid_check_con_child() inherits(invalid_check_con); +alter table invalid_check_con_child add constraint inh_check_constraint check(f1 > 0) not valid; +alter table invalid_check_con add constraint inh_check_constraint check(f1 > 0); -- fail +ERROR: constraint "inh_check_constraint" conflicts with NOT VALID constraint on relation "invalid_check_con_child" +alter table invalid_check_con_child drop constraint inh_check_constraint; +insert into invalid_check_con values(0); +alter table invalid_check_con_child add constraint inh_check_constraint check(f1 > 0); +alter table invalid_check_con add constraint inh_check_constraint check(f1 > 0) 
not valid; +NOTICE: merging constraint "inh_check_constraint" with inherited definition +insert into invalid_check_con values(0); -- fail +ERROR: new row for relation "invalid_check_con" violates check constraint "inh_check_constraint" +DETAIL: Failing row contains (0). +insert into invalid_check_con_child values(0); -- fail +ERROR: new row for relation "invalid_check_con_child" violates check constraint "inh_check_constraint" +DETAIL: Failing row contains (0). +select conrelid::regclass::text as relname, conname, + convalidated, conislocal, coninhcount, connoinherit +from pg_constraint where conname like 'inh\_check\_constraint%' +order by 1, 2; + relname | conname | convalidated | conislocal | coninhcount | connoinherit +-------------------------+----------------------+--------------+------------+-------------+-------------- + invalid_check_con | inh_check_constraint | f | t | 0 | f + invalid_check_con_child | inh_check_constraint | t | t | 1 | f +(2 rows) + +-- We don't drop the invalid_check_con* tables, to test dump/reload with +-- +-- Test parameterized append plans for inheritance trees +-- +create temp table patest0 (id, x) as + select x, x from generate_series(0,1000) x; +create temp table patest1() inherits (patest0); +insert into patest1 + select x, x from generate_series(0,1000) x; +create temp table patest2() inherits (patest0); +insert into patest2 + select x, x from generate_series(0,1000) x; +create index patest0i on patest0(id); +create index patest1i on patest1(id); +create index patest2i on patest2(id); +analyze patest0; +analyze patest1; +analyze patest2; +explain (costs off) +select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1; + QUERY PLAN +------------------------------------------------------------ + Nested Loop + -> Limit + -> Seq Scan on int4_tbl + -> Append + -> Index Scan using patest0i on patest0 patest0_1 + Index Cond: (id = int4_tbl.f1) + -> Index Scan using patest1i on patest1 patest0_2 + Index Cond: (id = int4_tbl.f1) + -> Index Scan using patest2i on patest2 patest0_3 + Index Cond: (id = int4_tbl.f1) +(10 rows) + +select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1; + id | x | f1 +----+---+---- + 0 | 0 | 0 + 0 | 0 | 0 + 0 | 0 | 0 +(3 rows) + +drop index patest2i; +explain (costs off) +select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1; + QUERY PLAN +------------------------------------------------------------ + Nested Loop + -> Limit + -> Seq Scan on int4_tbl + -> Append + -> Index Scan using patest0i on patest0 patest0_1 + Index Cond: (id = int4_tbl.f1) + -> Index Scan using patest1i on patest1 patest0_2 + Index Cond: (id = int4_tbl.f1) + -> Seq Scan on patest2 patest0_3 + Filter: (int4_tbl.f1 = id) +(10 rows) + +select * from patest0 join (select f1 from int4_tbl limit 1) ss on id = f1; + id | x | f1 +----+---+---- + 0 | 0 | 0 + 0 | 0 | 0 + 0 | 0 | 0 +(3 rows) + +drop table patest0 cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table patest1 +drop cascades to table patest2 +-- +-- Test merge-append plans for inheritance trees +-- +create table matest0 (id serial primary key, name text); +create table matest1 (id integer primary key) inherits (matest0); +NOTICE: merging column "id" with inherited definition +create table matest2 (id integer primary key) inherits (matest0); +NOTICE: merging column "id" with inherited definition +create table matest3 (id integer primary key) inherits (matest0); +NOTICE: merging column "id" with inherited definition +create 
index matest0i on matest0 ((1-id)); +create index matest1i on matest1 ((1-id)); +-- create index matest2i on matest2 ((1-id)); -- intentionally missing +create index matest3i on matest3 ((1-id)); +insert into matest1 (name) values ('Test 1'); +insert into matest1 (name) values ('Test 2'); +insert into matest2 (name) values ('Test 3'); +insert into matest2 (name) values ('Test 4'); +insert into matest3 (name) values ('Test 5'); +insert into matest3 (name) values ('Test 6'); +set enable_indexscan = off; -- force use of seqscan/sort, so no merge +explain (verbose, costs off) select * from matest0 order by 1-id; + QUERY PLAN +------------------------------------------------------------ + Sort + Output: matest0.id, matest0.name, ((1 - matest0.id)) + Sort Key: ((1 - matest0.id)) + -> Result + Output: matest0.id, matest0.name, (1 - matest0.id) + -> Append + -> Seq Scan on public.matest0 matest0_1 + Output: matest0_1.id, matest0_1.name + -> Seq Scan on public.matest1 matest0_2 + Output: matest0_2.id, matest0_2.name + -> Seq Scan on public.matest2 matest0_3 + Output: matest0_3.id, matest0_3.name + -> Seq Scan on public.matest3 matest0_4 + Output: matest0_4.id, matest0_4.name +(14 rows) + +select * from matest0 order by 1-id; + id | name +----+-------- + 6 | Test 6 + 5 | Test 5 + 4 | Test 4 + 3 | Test 3 + 2 | Test 2 + 1 | Test 1 +(6 rows) + +explain (verbose, costs off) select min(1-id) from matest0; + QUERY PLAN +-------------------------------------------------- + Aggregate + Output: min((1 - matest0.id)) + -> Append + -> Seq Scan on public.matest0 matest0_1 + Output: matest0_1.id + -> Seq Scan on public.matest1 matest0_2 + Output: matest0_2.id + -> Seq Scan on public.matest2 matest0_3 + Output: matest0_3.id + -> Seq Scan on public.matest3 matest0_4 + Output: matest0_4.id +(11 rows) + +select min(1-id) from matest0; + min +----- + -5 +(1 row) + +reset enable_indexscan; +set enable_seqscan = off; -- plan with fewest seqscans should be merge +set enable_parallel_append = off; -- Don't let parallel-append interfere +explain (verbose, costs off) select * from matest0 order by 1-id; + QUERY PLAN +------------------------------------------------------------------------ + Merge Append + Sort Key: ((1 - matest0.id)) + -> Index Scan using matest0i on public.matest0 matest0_1 + Output: matest0_1.id, matest0_1.name, (1 - matest0_1.id) + -> Index Scan using matest1i on public.matest1 matest0_2 + Output: matest0_2.id, matest0_2.name, (1 - matest0_2.id) + -> Sort + Output: matest0_3.id, matest0_3.name, ((1 - matest0_3.id)) + Sort Key: ((1 - matest0_3.id)) + -> Seq Scan on public.matest2 matest0_3 + Output: matest0_3.id, matest0_3.name, (1 - matest0_3.id) + -> Index Scan using matest3i on public.matest3 matest0_4 + Output: matest0_4.id, matest0_4.name, (1 - matest0_4.id) +(13 rows) + +select * from matest0 order by 1-id; + id | name +----+-------- + 6 | Test 6 + 5 | Test 5 + 4 | Test 4 + 3 | Test 3 + 2 | Test 2 + 1 | Test 1 +(6 rows) + +explain (verbose, costs off) select min(1-id) from matest0; + QUERY PLAN +--------------------------------------------------------------------------------- + Result + Output: $0 + InitPlan 1 (returns $0) + -> Limit + Output: ((1 - matest0.id)) + -> Result + Output: ((1 - matest0.id)) + -> Merge Append + Sort Key: ((1 - matest0.id)) + -> Index Scan using matest0i on public.matest0 matest0_1 + Output: matest0_1.id, (1 - matest0_1.id) + Index Cond: ((1 - matest0_1.id) IS NOT NULL) + -> Index Scan using matest1i on public.matest1 matest0_2 + Output: matest0_2.id, (1 - matest0_2.id) + 
Index Cond: ((1 - matest0_2.id) IS NOT NULL) + -> Sort + Output: matest0_3.id, ((1 - matest0_3.id)) + Sort Key: ((1 - matest0_3.id)) + -> Bitmap Heap Scan on public.matest2 matest0_3 + Output: matest0_3.id, (1 - matest0_3.id) + Filter: ((1 - matest0_3.id) IS NOT NULL) + -> Bitmap Index Scan on matest2_pkey + -> Index Scan using matest3i on public.matest3 matest0_4 + Output: matest0_4.id, (1 - matest0_4.id) + Index Cond: ((1 - matest0_4.id) IS NOT NULL) +(25 rows) + +select min(1-id) from matest0; + min +----- + -5 +(1 row) + +reset enable_seqscan; +reset enable_parallel_append; +drop table matest0 cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table matest1 +drop cascades to table matest2 +drop cascades to table matest3 +-- +-- Check that use of an index with an extraneous column doesn't produce +-- a plan with extraneous sorting +-- +create table matest0 (a int, b int, c int, d int); +create table matest1 () inherits(matest0); +create index matest0i on matest0 (b, c); +create index matest1i on matest1 (b, c); +set enable_nestloop = off; -- we want a plan with two MergeAppends +explain (costs off) +select t1.* from matest0 t1, matest0 t2 +where t1.b = t2.b and t2.c = t2.d +order by t1.b limit 10; + QUERY PLAN +------------------------------------------------------------------- + Limit + -> Merge Join + Merge Cond: (t1.b = t2.b) + -> Merge Append + Sort Key: t1.b + -> Index Scan using matest0i on matest0 t1_1 + -> Index Scan using matest1i on matest1 t1_2 + -> Materialize + -> Merge Append + Sort Key: t2.b + -> Index Scan using matest0i on matest0 t2_1 + Filter: (c = d) + -> Index Scan using matest1i on matest1 t2_2 + Filter: (c = d) +(14 rows) + +reset enable_nestloop; +drop table matest0 cascade; +NOTICE: drop cascades to table matest1 +-- +-- Test merge-append for UNION ALL append relations +-- +set enable_seqscan = off; +set enable_indexscan = on; +set enable_bitmapscan = off; +-- Check handling of duplicated, constant, or volatile targetlist items +explain (costs off) +SELECT thousand, tenthous FROM tenk1 +UNION ALL +SELECT thousand, thousand FROM tenk1 +ORDER BY thousand, tenthous; + QUERY PLAN +------------------------------------------------------------------------- + Merge Append + Sort Key: tenk1.thousand, tenk1.tenthous + -> Index Only Scan using tenk1_thous_tenthous on tenk1 + -> Sort + Sort Key: tenk1_1.thousand, tenk1_1.thousand + -> Index Only Scan using tenk1_thous_tenthous on tenk1 tenk1_1 +(6 rows) + +explain (costs off) +SELECT thousand, tenthous, thousand+tenthous AS x FROM tenk1 +UNION ALL +SELECT 42, 42, hundred FROM tenk1 +ORDER BY thousand, tenthous; + QUERY PLAN +------------------------------------------------------------------ + Merge Append + Sort Key: tenk1.thousand, tenk1.tenthous + -> Index Only Scan using tenk1_thous_tenthous on tenk1 + -> Sort + Sort Key: 42, 42 + -> Index Only Scan using tenk1_hundred on tenk1 tenk1_1 +(6 rows) + +explain (costs off) +SELECT thousand, tenthous FROM tenk1 +UNION ALL +SELECT thousand, random()::integer FROM tenk1 +ORDER BY thousand, tenthous; + QUERY PLAN +------------------------------------------------------------------------- + Merge Append + Sort Key: tenk1.thousand, tenk1.tenthous + -> Index Only Scan using tenk1_thous_tenthous on tenk1 + -> Sort + Sort Key: tenk1_1.thousand, ((random())::integer) + -> Index Only Scan using tenk1_thous_tenthous on tenk1 tenk1_1 +(6 rows) + +-- Check min/max aggregate optimization +explain (costs off) +SELECT min(x) FROM + (SELECT unique1 AS x FROM tenk1 a + 
UNION ALL + SELECT unique2 AS x FROM tenk1 b) s; + QUERY PLAN +-------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Merge Append + Sort Key: a.unique1 + -> Index Only Scan using tenk1_unique1 on tenk1 a + Index Cond: (unique1 IS NOT NULL) + -> Index Only Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 IS NOT NULL) +(9 rows) + +explain (costs off) +SELECT min(y) FROM + (SELECT unique1 AS x, unique1 AS y FROM tenk1 a + UNION ALL + SELECT unique2 AS x, unique2 AS y FROM tenk1 b) s; + QUERY PLAN +-------------------------------------------------------------------- + Result + InitPlan 1 (returns $0) + -> Limit + -> Merge Append + Sort Key: a.unique1 + -> Index Only Scan using tenk1_unique1 on tenk1 a + Index Cond: (unique1 IS NOT NULL) + -> Index Only Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 IS NOT NULL) +(9 rows) + +-- XXX planner doesn't recognize that index on unique2 is sufficiently sorted +explain (costs off) +SELECT x, y FROM + (SELECT thousand AS x, tenthous AS y FROM tenk1 a + UNION ALL + SELECT unique2 AS x, unique2 AS y FROM tenk1 b) s +ORDER BY x, y; + QUERY PLAN +------------------------------------------------------------- + Merge Append + Sort Key: a.thousand, a.tenthous + -> Index Only Scan using tenk1_thous_tenthous on tenk1 a + -> Sort + Sort Key: b.unique2, b.unique2 + -> Index Only Scan using tenk1_unique2 on tenk1 b +(6 rows) + +-- exercise rescan code path via a repeatedly-evaluated subquery +explain (costs off) +SELECT + ARRAY(SELECT f.i FROM ( + (SELECT d + g.i FROM generate_series(4, 30, 3) d ORDER BY 1) + UNION ALL + (SELECT d + g.i FROM generate_series(0, 30, 5) d ORDER BY 1) + ) f(i) + ORDER BY f.i LIMIT 10) +FROM generate_series(1, 3) g(i); + QUERY PLAN +---------------------------------------------------------------- + Function Scan on generate_series g + SubPlan 1 + -> Limit + -> Merge Append + Sort Key: ((d.d + g.i)) + -> Sort + Sort Key: ((d.d + g.i)) + -> Function Scan on generate_series d + -> Sort + Sort Key: ((d_1.d + g.i)) + -> Function Scan on generate_series d_1 +(11 rows) + +SELECT + ARRAY(SELECT f.i FROM ( + (SELECT d + g.i FROM generate_series(4, 30, 3) d ORDER BY 1) + UNION ALL + (SELECT d + g.i FROM generate_series(0, 30, 5) d ORDER BY 1) + ) f(i) + ORDER BY f.i LIMIT 10) +FROM generate_series(1, 3) g(i); + array +------------------------------ + {1,5,6,8,11,11,14,16,17,20} + {2,6,7,9,12,12,15,17,18,21} + {3,7,8,10,13,13,16,18,19,22} +(3 rows) + +reset enable_seqscan; +reset enable_indexscan; +reset enable_bitmapscan; +-- +-- Check handling of MULTIEXPR SubPlans in inherited updates +-- +create table inhpar(f1 int, f2 name); +create table inhcld(f2 name, f1 int); +alter table inhcld inherit inhpar; +insert into inhpar select x, x::text from generate_series(1,5) x; +insert into inhcld select x::text, x from generate_series(6,10) x; +explain (verbose, costs off) +update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1); + QUERY PLAN +------------------------------------------------------------------------- + Update on public.inhpar i + Update on public.inhpar i_1 + Update on public.inhcld i_2 + -> Result + Output: $2, $3, (SubPlan 1 (returns $2,$3)), i.tableoid, i.ctid + -> Append + -> Seq Scan on public.inhpar i_1 + Output: i_1.f1, i_1.f2, i_1.tableoid, i_1.ctid + -> Seq Scan on public.inhcld i_2 + Output: i_2.f1, i_2.f2, i_2.tableoid, i_2.ctid + SubPlan 1 (returns $2,$3) + -> Limit + Output: (i.f1), (((i.f2)::text || '-'::text)) + -> 
Seq Scan on public.int4_tbl + Output: i.f1, ((i.f2)::text || '-'::text) +(15 rows) + +update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1); +select * from inhpar; + f1 | f2 +----+----- + 1 | 1- + 2 | 2- + 3 | 3- + 4 | 4- + 5 | 5- + 6 | 6- + 7 | 7- + 8 | 8- + 9 | 9- + 10 | 10- +(10 rows) + +drop table inhpar cascade; +NOTICE: drop cascades to table inhcld +-- +-- And the same for partitioned cases +-- +create table inhpar(f1 int primary key, f2 name) partition by range (f1); +create table inhcld1(f2 name, f1 int primary key); +create table inhcld2(f1 int primary key, f2 name); +alter table inhpar attach partition inhcld1 for values from (1) to (5); +alter table inhpar attach partition inhcld2 for values from (5) to (100); +insert into inhpar select x, x::text from generate_series(1,10) x; +explain (verbose, costs off) +update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1); + QUERY PLAN +----------------------------------------------------------------------------------- + Update on public.inhpar i + Update on public.inhcld1 i_1 + Update on public.inhcld2 i_2 + -> Append + -> Seq Scan on public.inhcld1 i_1 + Output: $2, $3, (SubPlan 1 (returns $2,$3)), i_1.tableoid, i_1.ctid + SubPlan 1 (returns $2,$3) + -> Limit + Output: (i_1.f1), (((i_1.f2)::text || '-'::text)) + -> Seq Scan on public.int4_tbl + Output: i_1.f1, ((i_1.f2)::text || '-'::text) + -> Seq Scan on public.inhcld2 i_2 + Output: $2, $3, (SubPlan 1 (returns $2,$3)), i_2.tableoid, i_2.ctid +(13 rows) + +update inhpar i set (f1, f2) = (select i.f1, i.f2 || '-' from int4_tbl limit 1); +select * from inhpar; + f1 | f2 +----+----- + 1 | 1- + 2 | 2- + 3 | 3- + 4 | 4- + 5 | 5- + 6 | 6- + 7 | 7- + 8 | 8- + 9 | 9- + 10 | 10- +(10 rows) + +-- Also check ON CONFLICT +insert into inhpar as i values (3), (7) on conflict (f1) + do update set (f1, f2) = (select i.f1, i.f2 || '+'); +select * from inhpar order by f1; -- tuple order might be unstable here + f1 | f2 +----+----- + 1 | 1- + 2 | 2- + 3 | 3-+ + 4 | 4- + 5 | 5- + 6 | 6- + 7 | 7-+ + 8 | 8- + 9 | 9- + 10 | 10- +(10 rows) + +drop table inhpar cascade; +-- +-- Check handling of a constant-null CHECK constraint +-- +create table cnullparent (f1 int); +create table cnullchild (check (f1 = 1 or f1 = null)) inherits(cnullparent); +insert into cnullchild values(1); +insert into cnullchild values(2); +insert into cnullchild values(null); +select * from cnullparent; + f1 +---- + 1 + 2 + +(3 rows) + +select * from cnullparent where f1 = 2; + f1 +---- + 2 +(1 row) + +drop table cnullparent cascade; +NOTICE: drop cascades to table cnullchild +-- +-- Check use of temporary tables with inheritance trees +-- +create table inh_perm_parent (a1 int); +create temp table inh_temp_parent (a1 int); +create temp table inh_temp_child () inherits (inh_perm_parent); -- ok +create table inh_perm_child () inherits (inh_temp_parent); -- error +ERROR: cannot inherit from temporary relation "inh_temp_parent" +create temp table inh_temp_child_2 () inherits (inh_temp_parent); -- ok +insert into inh_perm_parent values (1); +insert into inh_temp_parent values (2); +insert into inh_temp_child values (3); +insert into inh_temp_child_2 values (4); +select tableoid::regclass, a1 from inh_perm_parent; + tableoid | a1 +-----------------+---- + inh_perm_parent | 1 + inh_temp_child | 3 +(2 rows) + +select tableoid::regclass, a1 from inh_temp_parent; + tableoid | a1 +------------------+---- + inh_temp_parent | 2 + inh_temp_child_2 | 4 +(2 rows) + +drop table inh_perm_parent cascade; 
+NOTICE: drop cascades to table inh_temp_child +drop table inh_temp_parent cascade; +NOTICE: drop cascades to table inh_temp_child_2 +-- +-- Check that constraint exclusion works correctly with partitions using +-- implicit constraints generated from the partition bound information. +-- +create table list_parted ( + a varchar +) partition by list (a); +create table part_ab_cd partition of list_parted for values in ('ab', 'cd'); +create table part_ef_gh partition of list_parted for values in ('ef', 'gh'); +create table part_null_xy partition of list_parted for values in (null, 'xy'); +explain (costs off) select * from list_parted; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on part_ab_cd list_parted_1 + -> Seq Scan on part_ef_gh list_parted_2 + -> Seq Scan on part_null_xy list_parted_3 +(4 rows) + +explain (costs off) select * from list_parted where a is null; + QUERY PLAN +-------------------------------------- + Seq Scan on part_null_xy list_parted + Filter: (a IS NULL) +(2 rows) + +explain (costs off) select * from list_parted where a is not null; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on part_ab_cd list_parted_1 + Filter: (a IS NOT NULL) + -> Seq Scan on part_ef_gh list_parted_2 + Filter: (a IS NOT NULL) + -> Seq Scan on part_null_xy list_parted_3 + Filter: (a IS NOT NULL) +(7 rows) + +explain (costs off) select * from list_parted where a in ('ab', 'cd', 'ef'); + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on part_ab_cd list_parted_1 + Filter: ((a)::text = ANY ('{ab,cd,ef}'::text[])) + -> Seq Scan on part_ef_gh list_parted_2 + Filter: ((a)::text = ANY ('{ab,cd,ef}'::text[])) +(5 rows) + +explain (costs off) select * from list_parted where a = 'ab' or a in (null, 'cd'); + QUERY PLAN +--------------------------------------------------------------------------------- + Seq Scan on part_ab_cd list_parted + Filter: (((a)::text = 'ab'::text) OR ((a)::text = ANY ('{NULL,cd}'::text[]))) +(2 rows) + +explain (costs off) select * from list_parted where a = 'ab'; + QUERY PLAN +------------------------------------ + Seq Scan on part_ab_cd list_parted + Filter: ((a)::text = 'ab'::text) +(2 rows) + +create table range_list_parted ( + a int, + b char(2) +) partition by range (a); +create table part_1_10 partition of range_list_parted for values from (1) to (10) partition by list (b); +create table part_1_10_ab partition of part_1_10 for values in ('ab'); +create table part_1_10_cd partition of part_1_10 for values in ('cd'); +create table part_10_20 partition of range_list_parted for values from (10) to (20) partition by list (b); +create table part_10_20_ab partition of part_10_20 for values in ('ab'); +create table part_10_20_cd partition of part_10_20 for values in ('cd'); +create table part_21_30 partition of range_list_parted for values from (21) to (30) partition by list (b); +create table part_21_30_ab partition of part_21_30 for values in ('ab'); +create table part_21_30_cd partition of part_21_30 for values in ('cd'); +create table part_40_inf partition of range_list_parted for values from (40) to (maxvalue) partition by list (b); +create table part_40_inf_ab partition of part_40_inf for values in ('ab'); +create table part_40_inf_cd partition of part_40_inf for values in ('cd'); +create table part_40_inf_null partition of part_40_inf for values in (null); +explain (costs off) select * from range_list_parted; + QUERY PLAN 
+-------------------------------------------------------- + Append + -> Seq Scan on part_1_10_ab range_list_parted_1 + -> Seq Scan on part_1_10_cd range_list_parted_2 + -> Seq Scan on part_10_20_ab range_list_parted_3 + -> Seq Scan on part_10_20_cd range_list_parted_4 + -> Seq Scan on part_21_30_ab range_list_parted_5 + -> Seq Scan on part_21_30_cd range_list_parted_6 + -> Seq Scan on part_40_inf_ab range_list_parted_7 + -> Seq Scan on part_40_inf_cd range_list_parted_8 + -> Seq Scan on part_40_inf_null range_list_parted_9 +(10 rows) + +explain (costs off) select * from range_list_parted where a = 5; + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on part_1_10_ab range_list_parted_1 + Filter: (a = 5) + -> Seq Scan on part_1_10_cd range_list_parted_2 + Filter: (a = 5) +(5 rows) + +explain (costs off) select * from range_list_parted where b = 'ab'; + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on part_1_10_ab range_list_parted_1 + Filter: (b = 'ab'::bpchar) + -> Seq Scan on part_10_20_ab range_list_parted_2 + Filter: (b = 'ab'::bpchar) + -> Seq Scan on part_21_30_ab range_list_parted_3 + Filter: (b = 'ab'::bpchar) + -> Seq Scan on part_40_inf_ab range_list_parted_4 + Filter: (b = 'ab'::bpchar) +(9 rows) + +explain (costs off) select * from range_list_parted where a between 3 and 23 and b in ('ab'); + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on part_1_10_ab range_list_parted_1 + Filter: ((a >= 3) AND (a <= 23) AND (b = 'ab'::bpchar)) + -> Seq Scan on part_10_20_ab range_list_parted_2 + Filter: ((a >= 3) AND (a <= 23) AND (b = 'ab'::bpchar)) + -> Seq Scan on part_21_30_ab range_list_parted_3 + Filter: ((a >= 3) AND (a <= 23) AND (b = 'ab'::bpchar)) +(7 rows) + +/* Should select no rows because range partition key cannot be null */ +explain (costs off) select * from range_list_parted where a is null; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +/* Should only select rows from the null-accepting partition */ +explain (costs off) select * from range_list_parted where b is null; + QUERY PLAN +------------------------------------------------ + Seq Scan on part_40_inf_null range_list_parted + Filter: (b IS NULL) +(2 rows) + +explain (costs off) select * from range_list_parted where a is not null and a < 67; + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on part_1_10_ab range_list_parted_1 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_1_10_cd range_list_parted_2 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_10_20_ab range_list_parted_3 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_10_20_cd range_list_parted_4 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_21_30_ab range_list_parted_5 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_21_30_cd range_list_parted_6 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_40_inf_ab range_list_parted_7 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_40_inf_cd range_list_parted_8 + Filter: ((a IS NOT NULL) AND (a < 67)) + -> Seq Scan on part_40_inf_null range_list_parted_9 + Filter: ((a IS NOT NULL) AND (a < 67)) +(19 rows) + +explain (costs off) select * from range_list_parted where a >= 30; + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on part_40_inf_ab range_list_parted_1 + 
Filter: (a >= 30) + -> Seq Scan on part_40_inf_cd range_list_parted_2 + Filter: (a >= 30) + -> Seq Scan on part_40_inf_null range_list_parted_3 + Filter: (a >= 30) +(7 rows) + +drop table list_parted; +drop table range_list_parted; +-- check that constraint exclusion is able to cope with the partition +-- constraint emitted for multi-column range partitioned tables +create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); +create table mcrparted_def partition of mcrparted default; +create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, 1, 1); +create table mcrparted1 partition of mcrparted for values from (1, 1, 1) to (10, 5, 10); +create table mcrparted2 partition of mcrparted for values from (10, 5, 10) to (10, 10, 10); +create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10); +create table mcrparted4 partition of mcrparted for values from (20, 10, 10) to (20, 20, 20); +create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); +explain (costs off) select * from mcrparted where a = 0; -- scans mcrparted0, mcrparted_def + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mcrparted0 mcrparted_1 + Filter: (a = 0) + -> Seq Scan on mcrparted_def mcrparted_2 + Filter: (a = 0) +(5 rows) + +explain (costs off) select * from mcrparted where a = 10 and abs(b) < 5; -- scans mcrparted1, mcrparted_def + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mcrparted1 mcrparted_1 + Filter: ((a = 10) AND (abs(b) < 5)) + -> Seq Scan on mcrparted_def mcrparted_2 + Filter: ((a = 10) AND (abs(b) < 5)) +(5 rows) + +explain (costs off) select * from mcrparted where a = 10 and abs(b) = 5; -- scans mcrparted1, mcrparted2, mcrparted_def + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mcrparted1 mcrparted_1 + Filter: ((a = 10) AND (abs(b) = 5)) + -> Seq Scan on mcrparted2 mcrparted_2 + Filter: ((a = 10) AND (abs(b) = 5)) + -> Seq Scan on mcrparted_def mcrparted_3 + Filter: ((a = 10) AND (abs(b) = 5)) +(7 rows) + +explain (costs off) select * from mcrparted where abs(b) = 5; -- scans all partitions + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mcrparted0 mcrparted_1 + Filter: (abs(b) = 5) + -> Seq Scan on mcrparted1 mcrparted_2 + Filter: (abs(b) = 5) + -> Seq Scan on mcrparted2 mcrparted_3 + Filter: (abs(b) = 5) + -> Seq Scan on mcrparted3 mcrparted_4 + Filter: (abs(b) = 5) + -> Seq Scan on mcrparted4 mcrparted_5 + Filter: (abs(b) = 5) + -> Seq Scan on mcrparted5 mcrparted_6 + Filter: (abs(b) = 5) + -> Seq Scan on mcrparted_def mcrparted_7 + Filter: (abs(b) = 5) +(15 rows) + +explain (costs off) select * from mcrparted where a > -1; -- scans all partitions + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mcrparted0 mcrparted_1 + Filter: (a > '-1'::integer) + -> Seq Scan on mcrparted1 mcrparted_2 + Filter: (a > '-1'::integer) + -> Seq Scan on mcrparted2 mcrparted_3 + Filter: (a > '-1'::integer) + -> Seq Scan on mcrparted3 mcrparted_4 + Filter: (a > '-1'::integer) + -> Seq Scan on mcrparted4 mcrparted_5 + Filter: (a > '-1'::integer) + -> Seq Scan on mcrparted5 mcrparted_6 + Filter: (a > '-1'::integer) + -> Seq Scan on mcrparted_def mcrparted_7 + Filter: (a > '-1'::integer) +(15 rows) + +explain (costs off) select * from mcrparted where a = 20 and abs(b) = 10 and c > 10; -- scans 
mcrparted4 + QUERY PLAN +----------------------------------------------------- + Seq Scan on mcrparted4 mcrparted + Filter: ((c > 10) AND (a = 20) AND (abs(b) = 10)) +(2 rows) + +explain (costs off) select * from mcrparted where a = 20 and c > 20; -- scans mcrparted3, mcrparte4, mcrparte5, mcrparted_def + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on mcrparted3 mcrparted_1 + Filter: ((c > 20) AND (a = 20)) + -> Seq Scan on mcrparted4 mcrparted_2 + Filter: ((c > 20) AND (a = 20)) + -> Seq Scan on mcrparted5 mcrparted_3 + Filter: ((c > 20) AND (a = 20)) + -> Seq Scan on mcrparted_def mcrparted_4 + Filter: ((c > 20) AND (a = 20)) +(9 rows) + +-- check that partitioned table Appends cope with being referenced in +-- subplans +create table parted_minmax (a int, b varchar(16)) partition by range (a); +create table parted_minmax1 partition of parted_minmax for values from (1) to (10); +create index parted_minmax1i on parted_minmax1 (a, b); +insert into parted_minmax values (1,'12345'); +explain (costs off) select min(a), max(a) from parted_minmax where b = '12345'; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Result + InitPlan 1 (returns $0) + -> Limit + -> Index Only Scan using parted_minmax1i on parted_minmax1 parted_minmax + Index Cond: ((a IS NOT NULL) AND (b = '12345'::text)) + InitPlan 2 (returns $1) + -> Limit + -> Index Only Scan Backward using parted_minmax1i on parted_minmax1 parted_minmax_1 + Index Cond: ((a IS NOT NULL) AND (b = '12345'::text)) +(9 rows) + +select min(a), max(a) from parted_minmax where b = '12345'; + min | max +-----+----- + 1 | 1 +(1 row) + +drop table parted_minmax; +-- Test code that uses Append nodes in place of MergeAppend when the +-- partition ordering matches the desired ordering. 
+create index mcrparted_a_abs_c_idx on mcrparted (a, abs(b), c); +-- MergeAppend must be used when a default partition exists +explain (costs off) select * from mcrparted order by a, abs(b), c; + QUERY PLAN +------------------------------------------------------------------------------- + Merge Append + Sort Key: mcrparted.a, (abs(mcrparted.b)), mcrparted.c + -> Index Scan using mcrparted0_a_abs_c_idx on mcrparted0 mcrparted_1 + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + -> Index Scan using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + -> Index Scan using mcrparted4_a_abs_c_idx on mcrparted4 mcrparted_5 + -> Index Scan using mcrparted5_a_abs_c_idx on mcrparted5 mcrparted_6 + -> Index Scan using mcrparted_def_a_abs_c_idx on mcrparted_def mcrparted_7 +(9 rows) + +drop table mcrparted_def; +-- Append is used for a RANGE partitioned table with no default +-- and no subpartitions +explain (costs off) select * from mcrparted order by a, abs(b), c; + QUERY PLAN +------------------------------------------------------------------------- + Append + -> Index Scan using mcrparted0_a_abs_c_idx on mcrparted0 mcrparted_1 + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + -> Index Scan using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + -> Index Scan using mcrparted4_a_abs_c_idx on mcrparted4 mcrparted_5 + -> Index Scan using mcrparted5_a_abs_c_idx on mcrparted5 mcrparted_6 +(7 rows) + +-- Append is used with subpaths in reverse order with backwards index scans +explain (costs off) select * from mcrparted order by a desc, abs(b) desc, c desc; + QUERY PLAN +---------------------------------------------------------------------------------- + Append + -> Index Scan Backward using mcrparted5_a_abs_c_idx on mcrparted5 mcrparted_6 + -> Index Scan Backward using mcrparted4_a_abs_c_idx on mcrparted4 mcrparted_5 + -> Index Scan Backward using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + -> Index Scan Backward using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + -> Index Scan Backward using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + -> Index Scan Backward using mcrparted0_a_abs_c_idx on mcrparted0 mcrparted_1 +(7 rows) + +-- check that Append plan is used containing a MergeAppend for sub-partitions +-- that are unordered. 
+drop table mcrparted5; +create table mcrparted5 partition of mcrparted for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue) partition by list (a); +create table mcrparted5a partition of mcrparted5 for values in(20); +create table mcrparted5_def partition of mcrparted5 default; +explain (costs off) select * from mcrparted order by a, abs(b), c; + QUERY PLAN +--------------------------------------------------------------------------------------- + Append + -> Index Scan using mcrparted0_a_abs_c_idx on mcrparted0 mcrparted_1 + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + -> Index Scan using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + -> Index Scan using mcrparted4_a_abs_c_idx on mcrparted4 mcrparted_5 + -> Merge Append + Sort Key: mcrparted_7.a, (abs(mcrparted_7.b)), mcrparted_7.c + -> Index Scan using mcrparted5a_a_abs_c_idx on mcrparted5a mcrparted_7 + -> Index Scan using mcrparted5_def_a_abs_c_idx on mcrparted5_def mcrparted_8 +(10 rows) + +drop table mcrparted5_def; +-- check that an Append plan is used and the sub-partitions are flattened +-- into the main Append when the sub-partition is unordered but contains +-- just a single sub-partition. +explain (costs off) select a, abs(b) from mcrparted order by a, abs(b), c; + QUERY PLAN +--------------------------------------------------------------------------- + Append + -> Index Scan using mcrparted0_a_abs_c_idx on mcrparted0 mcrparted_1 + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + -> Index Scan using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + -> Index Scan using mcrparted4_a_abs_c_idx on mcrparted4 mcrparted_5 + -> Index Scan using mcrparted5a_a_abs_c_idx on mcrparted5a mcrparted_6 +(7 rows) + +-- check that Append is used when the sub-partitioned tables are pruned +-- during planning. +explain (costs off) select * from mcrparted where a < 20 order by a, abs(b), c; + QUERY PLAN +------------------------------------------------------------------------- + Append + -> Index Scan using mcrparted0_a_abs_c_idx on mcrparted0 mcrparted_1 + Index Cond: (a < 20) + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + Index Cond: (a < 20) + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + Index Cond: (a < 20) + -> Index Scan using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + Index Cond: (a < 20) +(9 rows) + +set enable_bitmapscan to off; +set enable_sort to off; +create table mclparted (a int) partition by list(a); +create table mclparted1 partition of mclparted for values in(1); +create table mclparted2 partition of mclparted for values in(2); +create index on mclparted (a); +-- Ensure an Append is used for a list partition with an order by. +explain (costs off) select * from mclparted order by a; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1 + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2 +(3 rows) + +-- Ensure a MergeAppend is used when a partition exists with interleaved +-- datums in the partition bound. 
+create table mclparted3_5 partition of mclparted for values in(3,5); +create table mclparted4 partition of mclparted for values in(4); +explain (costs off) select * from mclparted order by a; + QUERY PLAN +---------------------------------------------------------------------------- + Merge Append + Sort Key: mclparted.a + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1 + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2 + -> Index Only Scan using mclparted3_5_a_idx on mclparted3_5 mclparted_3 + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_4 +(6 rows) + +explain (costs off) select * from mclparted where a in(3,4,5) order by a; + QUERY PLAN +---------------------------------------------------------------------------- + Merge Append + Sort Key: mclparted.a + -> Index Only Scan using mclparted3_5_a_idx on mclparted3_5 mclparted_1 + Index Cond: (a = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_2 + Index Cond: (a = ANY ('{3,4,5}'::integer[])) +(6 rows) + +-- Introduce a NULL and DEFAULT partition so we can test more complex cases +create table mclparted_null partition of mclparted for values in(null); +create table mclparted_def partition of mclparted default; +-- Append can be used providing we don't scan the interleaved partition +explain (costs off) select * from mclparted where a in(1,2,4) order by a; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1 + Index Cond: (a = ANY ('{1,2,4}'::integer[])) + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2 + Index Cond: (a = ANY ('{1,2,4}'::integer[])) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_3 + Index Cond: (a = ANY ('{1,2,4}'::integer[])) +(7 rows) + +explain (costs off) select * from mclparted where a in(1,2,4) or a is null order by a; + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_3 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) + -> Index Only Scan using mclparted_null_a_idx on mclparted_null mclparted_4 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) +(9 rows) + +-- Test a more complex case where the NULL partition allows some other value +drop table mclparted_null; +create table mclparted_0_null partition of mclparted for values in(0,null); +-- Ensure MergeAppend is used since 0 and NULLs are in the same partition. 
+explain (costs off) select * from mclparted where a in(1,2,4) or a is null order by a; + QUERY PLAN +------------------------------------------------------------------------------------ + Merge Append + Sort Key: mclparted.a + -> Index Only Scan using mclparted_0_null_a_idx on mclparted_0_null mclparted_1 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_2 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_3 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_4 + Filter: ((a = ANY ('{1,2,4}'::integer[])) OR (a IS NULL)) +(10 rows) + +explain (costs off) select * from mclparted where a in(0,1,2,4) order by a; + QUERY PLAN +------------------------------------------------------------------------------------ + Merge Append + Sort Key: mclparted.a + -> Index Only Scan using mclparted_0_null_a_idx on mclparted_0_null mclparted_1 + Index Cond: (a = ANY ('{0,1,2,4}'::integer[])) + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_2 + Index Cond: (a = ANY ('{0,1,2,4}'::integer[])) + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_3 + Index Cond: (a = ANY ('{0,1,2,4}'::integer[])) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_4 + Index Cond: (a = ANY ('{0,1,2,4}'::integer[])) +(10 rows) + +-- Ensure Append is used when the null partition is pruned +explain (costs off) select * from mclparted where a in(1,2,4) order by a; + QUERY PLAN +------------------------------------------------------------------------ + Append + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1 + Index Cond: (a = ANY ('{1,2,4}'::integer[])) + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2 + Index Cond: (a = ANY ('{1,2,4}'::integer[])) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_3 + Index Cond: (a = ANY ('{1,2,4}'::integer[])) +(7 rows) + +-- Ensure MergeAppend is used when the default partition is not pruned +explain (costs off) select * from mclparted where a in(1,2,4,100) order by a; + QUERY PLAN +------------------------------------------------------------------------------ + Merge Append + Sort Key: mclparted.a + -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1 + Index Cond: (a = ANY ('{1,2,4,100}'::integer[])) + -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2 + Index Cond: (a = ANY ('{1,2,4,100}'::integer[])) + -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_3 + Index Cond: (a = ANY ('{1,2,4,100}'::integer[])) + -> Index Only Scan using mclparted_def_a_idx on mclparted_def mclparted_4 + Index Cond: (a = ANY ('{1,2,4,100}'::integer[])) +(10 rows) + +drop table mclparted; +reset enable_sort; +reset enable_bitmapscan; +-- Ensure subplans which don't have a path with the correct pathkeys get +-- sorted correctly. 
+drop index mcrparted_a_abs_c_idx; +create index on mcrparted1 (a, abs(b), c); +create index on mcrparted2 (a, abs(b), c); +create index on mcrparted3 (a, abs(b), c); +create index on mcrparted4 (a, abs(b), c); +explain (costs off) select * from mcrparted where a < 20 order by a, abs(b), c limit 1; + QUERY PLAN +------------------------------------------------------------------------------- + Limit + -> Append + -> Sort + Sort Key: mcrparted_1.a, (abs(mcrparted_1.b)), mcrparted_1.c + -> Seq Scan on mcrparted0 mcrparted_1 + Filter: (a < 20) + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_2 + Index Cond: (a < 20) + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_3 + Index Cond: (a < 20) + -> Index Scan using mcrparted3_a_abs_c_idx on mcrparted3 mcrparted_4 + Index Cond: (a < 20) +(12 rows) + +set enable_bitmapscan = 0; +-- Ensure Append node can be used when the partition is ordered by some +-- pathkeys which were deemed redundant. +explain (costs off) select * from mcrparted where a = 10 order by a, abs(b), c; + QUERY PLAN +------------------------------------------------------------------------- + Append + -> Index Scan using mcrparted1_a_abs_c_idx on mcrparted1 mcrparted_1 + Index Cond: (a = 10) + -> Index Scan using mcrparted2_a_abs_c_idx on mcrparted2 mcrparted_2 + Index Cond: (a = 10) +(5 rows) + +reset enable_bitmapscan; +drop table mcrparted; +-- Ensure LIST partitions allow an Append to be used instead of a MergeAppend +create table bool_lp (b bool) partition by list(b); +create table bool_lp_true partition of bool_lp for values in(true); +create table bool_lp_false partition of bool_lp for values in(false); +create index on bool_lp (b); +explain (costs off) select * from bool_lp order by b; + QUERY PLAN +---------------------------------------------------------------------------- + Append + -> Index Only Scan using bool_lp_false_b_idx on bool_lp_false bool_lp_1 + -> Index Only Scan using bool_lp_true_b_idx on bool_lp_true bool_lp_2 +(3 rows) + +drop table bool_lp; +-- Ensure const bool quals can be properly detected as redundant +create table bool_rp (b bool, a int) partition by range(b,a); +create table bool_rp_false_1k partition of bool_rp for values from (false,0) to (false,1000); +create table bool_rp_true_1k partition of bool_rp for values from (true,0) to (true,1000); +create table bool_rp_false_2k partition of bool_rp for values from (false,1000) to (false,2000); +create table bool_rp_true_2k partition of bool_rp for values from (true,1000) to (true,2000); +create index on bool_rp (b,a); +explain (costs off) select * from bool_rp where b = true order by b,a; + QUERY PLAN +---------------------------------------------------------------------------------- + Append + -> Index Only Scan using bool_rp_true_1k_b_a_idx on bool_rp_true_1k bool_rp_1 + Index Cond: (b = true) + -> Index Only Scan using bool_rp_true_2k_b_a_idx on bool_rp_true_2k bool_rp_2 + Index Cond: (b = true) +(5 rows) + +explain (costs off) select * from bool_rp where b = false order by b,a; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Only Scan using bool_rp_false_1k_b_a_idx on bool_rp_false_1k bool_rp_1 + Index Cond: (b = false) + -> Index Only Scan using bool_rp_false_2k_b_a_idx on bool_rp_false_2k bool_rp_2 + Index Cond: (b = false) +(5 rows) + +explain (costs off) select * from bool_rp where b = true order by a; + QUERY PLAN 
+---------------------------------------------------------------------------------- + Append + -> Index Only Scan using bool_rp_true_1k_b_a_idx on bool_rp_true_1k bool_rp_1 + Index Cond: (b = true) + -> Index Only Scan using bool_rp_true_2k_b_a_idx on bool_rp_true_2k bool_rp_2 + Index Cond: (b = true) +(5 rows) + +explain (costs off) select * from bool_rp where b = false order by a; + QUERY PLAN +------------------------------------------------------------------------------------ + Append + -> Index Only Scan using bool_rp_false_1k_b_a_idx on bool_rp_false_1k bool_rp_1 + Index Cond: (b = false) + -> Index Only Scan using bool_rp_false_2k_b_a_idx on bool_rp_false_2k bool_rp_2 + Index Cond: (b = false) +(5 rows) + +drop table bool_rp; +-- Ensure an Append scan is chosen when the partition order is a subset of +-- the required order. +create table range_parted (a int, b int, c int) partition by range(a, b); +create table range_parted1 partition of range_parted for values from (0,0) to (10,10); +create table range_parted2 partition of range_parted for values from (10,10) to (20,20); +create index on range_parted (a,b,c); +explain (costs off) select * from range_parted order by a,b,c; + QUERY PLAN +------------------------------------------------------------------------------------- + Append + -> Index Only Scan using range_parted1_a_b_c_idx on range_parted1 range_parted_1 + -> Index Only Scan using range_parted2_a_b_c_idx on range_parted2 range_parted_2 +(3 rows) + +explain (costs off) select * from range_parted order by a desc,b desc,c desc; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Append + -> Index Only Scan Backward using range_parted2_a_b_c_idx on range_parted2 range_parted_2 + -> Index Only Scan Backward using range_parted1_a_b_c_idx on range_parted1 range_parted_1 +(3 rows) + +drop table range_parted; +-- Check that we allow access to a child table's statistics when the user +-- has permissions only for the parent table. 
+create table permtest_parent (a int, b text, c text) partition by list (a); +create table permtest_child (b text, c text, a int) partition by list (b); +create table permtest_grandchild (c text, b text, a int); +alter table permtest_child attach partition permtest_grandchild for values in ('a'); +alter table permtest_parent attach partition permtest_child for values in (1); +create index on permtest_parent (left(c, 3)); +insert into permtest_parent + select 1, 'a', left(fipshash(i::text), 5) from generate_series(0, 100) i; +analyze permtest_parent; +create role regress_no_child_access; +revoke all on permtest_grandchild from regress_no_child_access; +grant select on permtest_parent to regress_no_child_access; +set session authorization regress_no_child_access; +-- without stats access, these queries would produce hash join plans: +explain (costs off) + select * from permtest_parent p1 inner join permtest_parent p2 + on p1.a = p2.a and p1.c ~ 'a1$'; + QUERY PLAN +------------------------------------------ + Nested Loop + Join Filter: (p1.a = p2.a) + -> Seq Scan on permtest_grandchild p1 + Filter: (c ~ 'a1$'::text) + -> Seq Scan on permtest_grandchild p2 +(5 rows) + +explain (costs off) + select * from permtest_parent p1 inner join permtest_parent p2 + on p1.a = p2.a and left(p1.c, 3) ~ 'a1$'; + QUERY PLAN +---------------------------------------------- + Nested Loop + Join Filter: (p1.a = p2.a) + -> Seq Scan on permtest_grandchild p1 + Filter: ("left"(c, 3) ~ 'a1$'::text) + -> Seq Scan on permtest_grandchild p2 +(5 rows) + +reset session authorization; +revoke all on permtest_parent from regress_no_child_access; +grant select(a,c) on permtest_parent to regress_no_child_access; +set session authorization regress_no_child_access; +explain (costs off) + select p2.a, p1.c from permtest_parent p1 inner join permtest_parent p2 + on p1.a = p2.a and p1.c ~ 'a1$'; + QUERY PLAN +------------------------------------------ + Nested Loop + Join Filter: (p1.a = p2.a) + -> Seq Scan on permtest_grandchild p1 + Filter: (c ~ 'a1$'::text) + -> Seq Scan on permtest_grandchild p2 +(5 rows) + +-- we will not have access to the expression index's stats here: +explain (costs off) + select p2.a, p1.c from permtest_parent p1 inner join permtest_parent p2 + on p1.a = p2.a and left(p1.c, 3) ~ 'a1$'; + QUERY PLAN +---------------------------------------------------- + Hash Join + Hash Cond: (p2.a = p1.a) + -> Seq Scan on permtest_grandchild p2 + -> Hash + -> Seq Scan on permtest_grandchild p1 + Filter: ("left"(c, 3) ~ 'a1$'::text) +(6 rows) + +reset session authorization; +revoke all on permtest_parent from regress_no_child_access; +drop role regress_no_child_access; +drop table permtest_parent; +-- Verify that constraint errors across partition root / child are +-- handled correctly (Bug #16293) +CREATE TABLE errtst_parent ( + partid int not null, + shdata int not null, + data int NOT NULL DEFAULT 0, + CONSTRAINT shdata_small CHECK(shdata < 3) +) PARTITION BY RANGE (partid); +-- fast defaults lead to attribute mapping being used in one +-- direction, but not the other +CREATE TABLE errtst_child_fastdef ( + partid int not null, + shdata int not null, + CONSTRAINT shdata_small CHECK(shdata < 3) +); +-- no remapping in either direction necessary +CREATE TABLE errtst_child_plaindef ( + partid int not null, + shdata int not null, + data int NOT NULL DEFAULT 0, + CONSTRAINT shdata_small CHECK(shdata < 3), + CHECK(data < 10) +); +-- remapping in both direction +CREATE TABLE errtst_child_reorder ( + data int NOT NULL DEFAULT 
0, + shdata int not null, + partid int not null, + CONSTRAINT shdata_small CHECK(shdata < 3), + CHECK(data < 10) +); +ALTER TABLE errtst_child_fastdef ADD COLUMN data int NOT NULL DEFAULT 0; +ALTER TABLE errtst_child_fastdef ADD CONSTRAINT errtest_child_fastdef_data_check CHECK (data < 10); +ALTER TABLE errtst_parent ATTACH PARTITION errtst_child_fastdef FOR VALUES FROM (0) TO (10); +ALTER TABLE errtst_parent ATTACH PARTITION errtst_child_plaindef FOR VALUES FROM (10) TO (20); +ALTER TABLE errtst_parent ATTACH PARTITION errtst_child_reorder FOR VALUES FROM (20) TO (30); +-- insert without child check constraint error +INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '1', '5'); +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '1', '5'); +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '1', '5'); +-- insert with child check constraint error +INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '1', '10'); +ERROR: new row for relation "errtst_child_fastdef" violates check constraint "errtest_child_fastdef_data_check" +DETAIL: Failing row contains (0, 1, 10). +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '1', '10'); +ERROR: new row for relation "errtst_child_plaindef" violates check constraint "errtst_child_plaindef_data_check" +DETAIL: Failing row contains (10, 1, 10). +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '1', '10'); +ERROR: new row for relation "errtst_child_reorder" violates check constraint "errtst_child_reorder_data_check" +DETAIL: Failing row contains (20, 1, 10). +-- insert with child not null constraint error +INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '1', NULL); +ERROR: null value in column "data" of relation "errtst_child_fastdef" violates not-null constraint +DETAIL: Failing row contains (0, 1, null). +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '1', NULL); +ERROR: null value in column "data" of relation "errtst_child_plaindef" violates not-null constraint +DETAIL: Failing row contains (10, 1, null). +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '1', NULL); +ERROR: null value in column "data" of relation "errtst_child_reorder" violates not-null constraint +DETAIL: Failing row contains (20, 1, null). +-- insert with shared check constraint error +INSERT INTO errtst_parent(partid, shdata, data) VALUES ( '0', '5', '5'); +ERROR: new row for relation "errtst_child_fastdef" violates check constraint "shdata_small" +DETAIL: Failing row contains (0, 5, 5). +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('10', '5', '5'); +ERROR: new row for relation "errtst_child_plaindef" violates check constraint "shdata_small" +DETAIL: Failing row contains (10, 5, 5). +INSERT INTO errtst_parent(partid, shdata, data) VALUES ('20', '5', '5'); +ERROR: new row for relation "errtst_child_reorder" violates check constraint "shdata_small" +DETAIL: Failing row contains (20, 5, 5). +-- within partition update without child check constraint violation +BEGIN; +UPDATE errtst_parent SET data = data + 1 WHERE partid = 0; +UPDATE errtst_parent SET data = data + 1 WHERE partid = 10; +UPDATE errtst_parent SET data = data + 1 WHERE partid = 20; +ROLLBACK; +-- within partition update with child check constraint violation +UPDATE errtst_parent SET data = data + 10 WHERE partid = 0; +ERROR: new row for relation "errtst_child_fastdef" violates check constraint "errtest_child_fastdef_data_check" +DETAIL: Failing row contains (0, 1, 15). 
+UPDATE errtst_parent SET data = data + 10 WHERE partid = 10; +ERROR: new row for relation "errtst_child_plaindef" violates check constraint "errtst_child_plaindef_data_check" +DETAIL: Failing row contains (10, 1, 15). +UPDATE errtst_parent SET data = data + 10 WHERE partid = 20; +ERROR: new row for relation "errtst_child_reorder" violates check constraint "errtst_child_reorder_data_check" +DETAIL: Failing row contains (20, 1, 15). +-- direct leaf partition update, without partition id violation +BEGIN; +UPDATE errtst_child_fastdef SET partid = 1 WHERE partid = 0; +UPDATE errtst_child_plaindef SET partid = 11 WHERE partid = 10; +UPDATE errtst_child_reorder SET partid = 21 WHERE partid = 20; +ROLLBACK; +-- direct leaf partition update, with partition id violation +UPDATE errtst_child_fastdef SET partid = partid + 10 WHERE partid = 0; +ERROR: new row for relation "errtst_child_fastdef" violates partition constraint +DETAIL: Failing row contains (10, 1, 5). +UPDATE errtst_child_plaindef SET partid = partid + 10 WHERE partid = 10; +ERROR: new row for relation "errtst_child_plaindef" violates partition constraint +DETAIL: Failing row contains (20, 1, 5). +UPDATE errtst_child_reorder SET partid = partid + 10 WHERE partid = 20; +ERROR: new row for relation "errtst_child_reorder" violates partition constraint +DETAIL: Failing row contains (5, 1, 30). +-- partition move, without child check constraint violation +BEGIN; +UPDATE errtst_parent SET partid = 10, data = data + 1 WHERE partid = 0; +UPDATE errtst_parent SET partid = 20, data = data + 1 WHERE partid = 10; +UPDATE errtst_parent SET partid = 0, data = data + 1 WHERE partid = 20; +ROLLBACK; +-- partition move, with child check constraint violation +UPDATE errtst_parent SET partid = 10, data = data + 10 WHERE partid = 0; +ERROR: new row for relation "errtst_child_plaindef" violates check constraint "errtst_child_plaindef_data_check" +DETAIL: Failing row contains (10, 1, 15). +UPDATE errtst_parent SET partid = 20, data = data + 10 WHERE partid = 10; +ERROR: new row for relation "errtst_child_reorder" violates check constraint "errtst_child_reorder_data_check" +DETAIL: Failing row contains (20, 1, 15). +UPDATE errtst_parent SET partid = 0, data = data + 10 WHERE partid = 20; +ERROR: new row for relation "errtst_child_fastdef" violates check constraint "errtest_child_fastdef_data_check" +DETAIL: Failing row contains (0, 1, 15). +-- partition move, without target partition +UPDATE errtst_parent SET partid = 30, data = data + 10 WHERE partid = 20; +ERROR: no partition of relation "errtst_parent" found for row +DETAIL: Partition key of the failing row contains (partid) = (30). +DROP TABLE errtst_parent; diff --git a/src/test/regress/expected/init_privs.out b/src/test/regress/expected/init_privs.out new file mode 100644 index 0000000..292b1a1 --- /dev/null +++ b/src/test/regress/expected/init_privs.out @@ -0,0 +1,12 @@ +-- Test initial privileges +-- There should always be some initial privileges, set up by initdb +SELECT count(*) > 0 FROM pg_init_privs; + ?column? 
+---------- + t +(1 row) + +-- Intentionally include some non-initial privs for pg_dump to dump out +GRANT SELECT ON pg_proc TO CURRENT_USER; +GRANT SELECT (prosrc) ON pg_proc TO CURRENT_USER; +GRANT SELECT (rolname, rolsuper) ON pg_authid TO CURRENT_USER; diff --git a/src/test/regress/expected/insert.out b/src/test/regress/expected/insert.out new file mode 100644 index 0000000..dd4354f --- /dev/null +++ b/src/test/regress/expected/insert.out @@ -0,0 +1,982 @@ +-- +-- insert with DEFAULT in the target_list +-- +create table inserttest (col1 int4, col2 int4 NOT NULL, col3 text default 'testing'); +insert into inserttest (col1, col2, col3) values (DEFAULT, DEFAULT, DEFAULT); +ERROR: null value in column "col2" of relation "inserttest" violates not-null constraint +DETAIL: Failing row contains (null, null, testing). +insert into inserttest (col2, col3) values (3, DEFAULT); +insert into inserttest (col1, col2, col3) values (DEFAULT, 5, DEFAULT); +insert into inserttest values (DEFAULT, 5, 'test'); +insert into inserttest values (DEFAULT, 7); +select * from inserttest; + col1 | col2 | col3 +------+------+--------- + | 3 | testing + | 5 | testing + | 5 | test + | 7 | testing +(4 rows) + +-- +-- insert with similar expression / target_list values (all fail) +-- +insert into inserttest (col1, col2, col3) values (DEFAULT, DEFAULT); +ERROR: INSERT has more target columns than expressions +LINE 1: insert into inserttest (col1, col2, col3) values (DEFAULT, D... + ^ +insert into inserttest (col1, col2, col3) values (1, 2); +ERROR: INSERT has more target columns than expressions +LINE 1: insert into inserttest (col1, col2, col3) values (1, 2); + ^ +insert into inserttest (col1) values (1, 2); +ERROR: INSERT has more expressions than target columns +LINE 1: insert into inserttest (col1) values (1, 2); + ^ +insert into inserttest (col1) values (DEFAULT, DEFAULT); +ERROR: INSERT has more expressions than target columns +LINE 1: insert into inserttest (col1) values (DEFAULT, DEFAULT); + ^ +select * from inserttest; + col1 | col2 | col3 +------+------+--------- + | 3 | testing + | 5 | testing + | 5 | test + | 7 | testing +(4 rows) + +-- +-- VALUES test +-- +insert into inserttest values(10, 20, '40'), (-1, 2, DEFAULT), + ((select 2), (select i from (values(3)) as foo (i)), 'values are fun!'); +select * from inserttest; + col1 | col2 | col3 +------+------+----------------- + | 3 | testing + | 5 | testing + | 5 | test + | 7 | testing + 10 | 20 | 40 + -1 | 2 | testing + 2 | 3 | values are fun! 
+(7 rows) + +-- +-- TOASTed value test +-- +insert into inserttest values(30, 50, repeat('x', 10000)); +select col1, col2, char_length(col3) from inserttest; + col1 | col2 | char_length +------+------+------------- + | 3 | 7 + | 5 | 7 + | 5 | 4 + | 7 | 7 + 10 | 20 | 2 + -1 | 2 | 7 + 2 | 3 | 15 + 30 | 50 | 10000 +(8 rows) + +drop table inserttest; +-- +-- tuple larger than fillfactor +-- +CREATE TABLE large_tuple_test (a int, b text) WITH (fillfactor = 10); +ALTER TABLE large_tuple_test ALTER COLUMN b SET STORAGE plain; +-- create page w/ free space in range [nearlyEmptyFreeSpace, MaxHeapTupleSize) +INSERT INTO large_tuple_test (select 1, NULL); +-- should still fit on the page +INSERT INTO large_tuple_test (select 2, repeat('a', 1000)); +SELECT pg_size_pretty(pg_relation_size('large_tuple_test'::regclass, 'main')); + pg_size_pretty +---------------- + 8192 bytes +(1 row) + +-- add small record to the second page +INSERT INTO large_tuple_test (select 3, NULL); +-- now this tuple won't fit on the second page, but the insert should +-- still succeed by extending the relation +INSERT INTO large_tuple_test (select 4, repeat('a', 8126)); +DROP TABLE large_tuple_test; +-- +-- check indirection (field/array assignment), cf bug #14265 +-- +-- these tests are aware that transformInsertStmt has 3 separate code paths +-- +create type insert_test_type as (if1 int, if2 text[]); +create table inserttest (f1 int, f2 int[], + f3 insert_test_type, f4 insert_test_type[]); +insert into inserttest (f2[1], f2[2]) values (1,2); +insert into inserttest (f2[1], f2[2]) values (3,4), (5,6); +insert into inserttest (f2[1], f2[2]) select 7,8; +insert into inserttest (f2[1], f2[2]) values (1,default); -- not supported +ERROR: cannot set an array element to DEFAULT +LINE 1: insert into inserttest (f2[1], f2[2]) values (1,default); + ^ +insert into inserttest (f3.if1, f3.if2) values (1,array['foo']); +insert into inserttest (f3.if1, f3.if2) values (1,'{foo}'), (2,'{bar}'); +insert into inserttest (f3.if1, f3.if2) select 3, '{baz,quux}'; +insert into inserttest (f3.if1, f3.if2) values (1,default); -- not supported +ERROR: cannot set a subfield to DEFAULT +LINE 1: insert into inserttest (f3.if1, f3.if2) values (1,default); + ^ +insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'); +insert into inserttest (f3.if2[1], f3.if2[2]) values ('foo', 'bar'), ('baz', 'quux'); +insert into inserttest (f3.if2[1], f3.if2[2]) select 'bear', 'beer'; +insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'); +insert into inserttest (f4[1].if2[1], f4[1].if2[2]) values ('foo', 'bar'), ('baz', 'quux'); +insert into inserttest (f4[1].if2[1], f4[1].if2[2]) select 'bear', 'beer'; +select * from inserttest; + f1 | f2 | f3 | f4 +----+-------+------------------+------------------------ + | {1,2} | | + | {3,4} | | + | {5,6} | | + | {7,8} | | + | | (1,{foo}) | + | | (1,{foo}) | + | | (2,{bar}) | + | | (3,"{baz,quux}") | + | | (,"{foo,bar}") | + | | (,"{foo,bar}") | + | | (,"{baz,quux}") | + | | (,"{bear,beer}") | + | | | {"(,\"{foo,bar}\")"} + | | | {"(,\"{foo,bar}\")"} + | | | {"(,\"{baz,quux}\")"} + | | | {"(,\"{bear,beer}\")"} +(16 rows) + +-- also check reverse-listing +create table inserttest2 (f1 bigint, f2 text); +create rule irule1 as on insert to inserttest2 do also + insert into inserttest (f3.if2[1], f3.if2[2]) + values (new.f1,new.f2); +create rule irule2 as on insert to inserttest2 do also + insert into inserttest (f4[1].if1, f4[1].if2[2]) + values (1,'fool'),(new.f1,new.f2); +create rule irule3 as on 
insert to inserttest2 do also + insert into inserttest (f4[1].if1, f4[1].if2[2]) + select new.f1, new.f2; +\d+ inserttest2 + Table "public.inserttest2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+--------+-----------+----------+---------+----------+--------------+------------- + f1 | bigint | | | | plain | | + f2 | text | | | | extended | | +Rules: + irule1 AS + ON INSERT TO inserttest2 DO INSERT INTO inserttest (f3.if2[1], f3.if2[2]) + VALUES (new.f1, new.f2) + irule2 AS + ON INSERT TO inserttest2 DO INSERT INTO inserttest (f4[1].if1, f4[1].if2[2]) VALUES (1,'fool'::text), (new.f1,new.f2) + irule3 AS + ON INSERT TO inserttest2 DO INSERT INTO inserttest (f4[1].if1, f4[1].if2[2]) SELECT new.f1, + new.f2 + +drop table inserttest2; +drop table inserttest; +drop type insert_test_type; +-- direct partition inserts should check partition bound constraint +create table range_parted ( + a text, + b int +) partition by range (a, (b+0)); +-- no partitions, so fail +insert into range_parted values ('a', 11); +ERROR: no partition of relation "range_parted" found for row +DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 11). +create table part1 partition of range_parted for values from ('a', 1) to ('a', 10); +create table part2 partition of range_parted for values from ('a', 10) to ('a', 20); +create table part3 partition of range_parted for values from ('b', 1) to ('b', 10); +create table part4 partition of range_parted for values from ('b', 10) to ('b', 20); +-- fail +insert into part1 values ('a', 11); +ERROR: new row for relation "part1" violates partition constraint +DETAIL: Failing row contains (a, 11). +insert into part1 values ('b', 1); +ERROR: new row for relation "part1" violates partition constraint +DETAIL: Failing row contains (b, 1). +-- ok +insert into part1 values ('a', 1); +-- fail +insert into part4 values ('b', 21); +ERROR: new row for relation "part4" violates partition constraint +DETAIL: Failing row contains (b, 21). +insert into part4 values ('a', 10); +ERROR: new row for relation "part4" violates partition constraint +DETAIL: Failing row contains (a, 10). +-- ok +insert into part4 values ('b', 10); +-- fail (partition key a has a NOT NULL constraint) +insert into part1 values (null); +ERROR: new row for relation "part1" violates partition constraint +DETAIL: Failing row contains (null, null). +-- fail (expression key (b+0) cannot be null either) +insert into part1 values (1); +ERROR: new row for relation "part1" violates partition constraint +DETAIL: Failing row contains (1, null). +create table list_parted ( + a text, + b int +) partition by list (lower(a)); +create table part_aa_bb partition of list_parted FOR VALUES IN ('aa', 'bb'); +create table part_cc_dd partition of list_parted FOR VALUES IN ('cc', 'dd'); +create table part_null partition of list_parted FOR VALUES IN (null); +-- fail +insert into part_aa_bb values ('cc', 1); +ERROR: new row for relation "part_aa_bb" violates partition constraint +DETAIL: Failing row contains (cc, 1). +insert into part_aa_bb values ('AAa', 1); +ERROR: new row for relation "part_aa_bb" violates partition constraint +DETAIL: Failing row contains (AAa, 1). +insert into part_aa_bb values (null); +ERROR: new row for relation "part_aa_bb" violates partition constraint +DETAIL: Failing row contains (null, null). 
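+-- The three rejections above all come from the implicit partition constraint
+-- of part_aa_bb, which is derived from its list bound over the expression
+-- lower(a). As an illustrative sketch (not part of the captured regression
+-- output), that constraint can be inspected with the catalog function
+--   select pg_get_partition_constraintdef('part_aa_bb'::regclass);
+-- which reports a check of roughly the form
+--   lower(a) IS NOT NULL AND lower(a) = ANY ('{aa,bb}')
+-- so 'cc', 'AAa' (whose lower() is 'aaa'), and NULL all fail it.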
+-- ok +insert into part_cc_dd values ('cC', 1); +insert into part_null values (null, 0); +-- check in case of multi-level partitioned table +create table part_ee_ff partition of list_parted for values in ('ee', 'ff') partition by range (b); +create table part_ee_ff1 partition of part_ee_ff for values from (1) to (10); +create table part_ee_ff2 partition of part_ee_ff for values from (10) to (20); +-- test default partition +create table part_default partition of list_parted default; +-- Negative test: a row, which would fit in other partition, does not fit +-- default partition, even when inserted directly +insert into part_default values ('aa', 2); +ERROR: new row for relation "part_default" violates partition constraint +DETAIL: Failing row contains (aa, 2). +insert into part_default values (null, 2); +ERROR: new row for relation "part_default" violates partition constraint +DETAIL: Failing row contains (null, 2). +-- ok +insert into part_default values ('Zz', 2); +-- test if default partition works as expected for multi-level partitioned +-- table as well as when default partition itself is further partitioned +drop table part_default; +create table part_xx_yy partition of list_parted for values in ('xx', 'yy') partition by list (a); +create table part_xx_yy_p1 partition of part_xx_yy for values in ('xx'); +create table part_xx_yy_defpart partition of part_xx_yy default; +create table part_default partition of list_parted default partition by range(b); +create table part_default_p1 partition of part_default for values from (20) to (30); +create table part_default_p2 partition of part_default for values from (30) to (40); +-- fail +insert into part_ee_ff1 values ('EE', 11); +ERROR: new row for relation "part_ee_ff1" violates partition constraint +DETAIL: Failing row contains (EE, 11). +insert into part_default_p2 values ('gg', 43); +ERROR: new row for relation "part_default_p2" violates partition constraint +DETAIL: Failing row contains (gg, 43). +-- fail (even the parent's, ie, part_ee_ff's partition constraint applies) +insert into part_ee_ff1 values ('cc', 1); +ERROR: new row for relation "part_ee_ff1" violates partition constraint +DETAIL: Failing row contains (cc, 1). +insert into part_default values ('gg', 43); +ERROR: no partition of relation "part_default" found for row +DETAIL: Partition key of the failing row contains (b) = (43). +-- ok +insert into part_ee_ff1 values ('ff', 1); +insert into part_ee_ff2 values ('ff', 11); +insert into part_default_p1 values ('cd', 25); +insert into part_default_p2 values ('de', 35); +insert into list_parted values ('ab', 21); +insert into list_parted values ('xx', 1); +insert into list_parted values ('yy', 2); +select tableoid::regclass, * from list_parted; + tableoid | a | b +--------------------+----+---- + part_cc_dd | cC | 1 + part_ee_ff1 | ff | 1 + part_ee_ff2 | ff | 11 + part_xx_yy_p1 | xx | 1 + part_xx_yy_defpart | yy | 2 + part_null | | 0 + part_default_p1 | cd | 25 + part_default_p1 | ab | 21 + part_default_p2 | de | 35 +(9 rows) + +-- Check tuple routing for partitioned tables +-- fail +insert into range_parted values ('a', 0); +ERROR: no partition of relation "range_parted" found for row +DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 0). 
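+-- Routing fails for ('a', 0) because every partition of range_parted begins
+-- at (x, 1) and, at this point, no default partition exists, so the row falls
+-- below the lowest declared bound. A sketch (illustrative only, not part of
+-- the captured output) for listing the bounds from the catalogs:
+--   select c.relname, pg_get_expr(c.relpartbound, c.oid) as bound
+--     from pg_class c
+--     join pg_inherits i on i.inhrelid = c.oid
+--    where i.inhparent = 'range_parted'::regclass
+--    order by 1;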
+-- ok +insert into range_parted values ('a', 1); +insert into range_parted values ('a', 10); +-- fail +insert into range_parted values ('a', 20); +ERROR: no partition of relation "range_parted" found for row +DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, 20). +-- ok +insert into range_parted values ('b', 1); +insert into range_parted values ('b', 10); +-- fail (partition key (b+0) is null) +insert into range_parted values ('a'); +ERROR: no partition of relation "range_parted" found for row +DETAIL: Partition key of the failing row contains (a, (b + 0)) = (a, null). +-- Check default partition +create table part_def partition of range_parted default; +-- fail +insert into part_def values ('b', 10); +ERROR: new row for relation "part_def" violates partition constraint +DETAIL: Failing row contains (b, 10). +-- ok +insert into part_def values ('c', 10); +insert into range_parted values (null, null); +insert into range_parted values ('a', null); +insert into range_parted values (null, 19); +insert into range_parted values ('b', 20); +select tableoid::regclass, * from range_parted; + tableoid | a | b +----------+---+---- + part1 | a | 1 + part1 | a | 1 + part2 | a | 10 + part3 | b | 1 + part4 | b | 10 + part4 | b | 10 + part_def | c | 10 + part_def | | + part_def | a | + part_def | | 19 + part_def | b | 20 +(11 rows) + +-- ok +insert into list_parted values (null, 1); +insert into list_parted (a) values ('aA'); +-- fail (partition of part_ee_ff not found in both cases) +insert into list_parted values ('EE', 0); +ERROR: no partition of relation "part_ee_ff" found for row +DETAIL: Partition key of the failing row contains (b) = (0). +insert into part_ee_ff values ('EE', 0); +ERROR: no partition of relation "part_ee_ff" found for row +DETAIL: Partition key of the failing row contains (b) = (0). 
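+-- Both errors above name part_ee_ff rather than list_parted: tuple routing
+-- descends the partition tree one level at a time, and ('EE', 0) reaches the
+-- sub-partitioned part_ee_ff before failing, since none of its range
+-- partitions covers b = 0. As an illustrative sketch (not part of the
+-- captured output), the hierarchy can be viewed with
+--   select relid, parentrelid, isleaf, level
+--     from pg_partition_tree('list_parted');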
+-- ok +insert into list_parted values ('EE', 1); +insert into part_ee_ff values ('EE', 10); +select tableoid::regclass, * from list_parted; + tableoid | a | b +--------------------+----+---- + part_aa_bb | aA | + part_cc_dd | cC | 1 + part_ee_ff1 | ff | 1 + part_ee_ff1 | EE | 1 + part_ee_ff2 | ff | 11 + part_ee_ff2 | EE | 10 + part_xx_yy_p1 | xx | 1 + part_xx_yy_defpart | yy | 2 + part_null | | 0 + part_null | | 1 + part_default_p1 | cd | 25 + part_default_p1 | ab | 21 + part_default_p2 | de | 35 +(13 rows) + +-- some more tests to exercise tuple-routing with multi-level partitioning +create table part_gg partition of list_parted for values in ('gg') partition by range (b); +create table part_gg1 partition of part_gg for values from (minvalue) to (1); +create table part_gg2 partition of part_gg for values from (1) to (10) partition by range (b); +create table part_gg2_1 partition of part_gg2 for values from (1) to (5); +create table part_gg2_2 partition of part_gg2 for values from (5) to (10); +create table part_ee_ff3 partition of part_ee_ff for values from (20) to (30) partition by range (b); +create table part_ee_ff3_1 partition of part_ee_ff3 for values from (20) to (25); +create table part_ee_ff3_2 partition of part_ee_ff3 for values from (25) to (30); +truncate list_parted; +insert into list_parted values ('aa'), ('cc'); +insert into list_parted select 'Ff', s.a from generate_series(1, 29) s(a); +insert into list_parted select 'gg', s.a from generate_series(1, 9) s(a); +insert into list_parted (b) values (1); +select tableoid::regclass::text, a, min(b) as min_b, max(b) as max_b from list_parted group by 1, 2 order by 1; + tableoid | a | min_b | max_b +---------------+----+-------+------- + part_aa_bb | aa | | + part_cc_dd | cc | | + part_ee_ff1 | Ff | 1 | 9 + part_ee_ff2 | Ff | 10 | 19 + part_ee_ff3_1 | Ff | 20 | 24 + part_ee_ff3_2 | Ff | 25 | 29 + part_gg2_1 | gg | 1 | 4 + part_gg2_2 | gg | 5 | 9 + part_null | | 1 | 1 +(9 rows) + +-- direct partition inserts should check hash partition bound constraint +create table hash_parted ( + a int +) partition by hash (a part_test_int4_ops); +create table hpart0 partition of hash_parted for values with (modulus 4, remainder 0); +create table hpart1 partition of hash_parted for values with (modulus 4, remainder 1); +create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2); +create table hpart3 partition of hash_parted for values with (modulus 4, remainder 3); +insert into hash_parted values(generate_series(1,10)); +-- direct insert of values divisible by 4 - ok; +insert into hpart0 values(12),(16); +-- fail; +insert into hpart0 values(11); +ERROR: new row for relation "hpart0" violates partition constraint +DETAIL: Failing row contains (11). +-- 11 % 4 -> 3 remainder i.e. 
valid data for hpart3 partition +insert into hpart3 values(11); +-- view data +select tableoid::regclass as part, a, a%4 as "remainder = a % 4" +from hash_parted order by part; + part | a | remainder = a % 4 +--------+----+------------------- + hpart0 | 4 | 0 + hpart0 | 8 | 0 + hpart0 | 12 | 0 + hpart0 | 16 | 0 + hpart1 | 1 | 1 + hpart1 | 5 | 1 + hpart1 | 9 | 1 + hpart2 | 2 | 2 + hpart2 | 6 | 2 + hpart2 | 10 | 2 + hpart3 | 3 | 3 + hpart3 | 7 | 3 + hpart3 | 11 | 3 +(13 rows) + +-- test \d+ output on a table which has both partitioned and unpartitioned +-- partitions +\d+ list_parted + Partitioned table "public.list_parted" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition key: LIST (lower(a)) +Partitions: part_aa_bb FOR VALUES IN ('aa', 'bb'), + part_cc_dd FOR VALUES IN ('cc', 'dd'), + part_ee_ff FOR VALUES IN ('ee', 'ff'), PARTITIONED, + part_gg FOR VALUES IN ('gg'), PARTITIONED, + part_null FOR VALUES IN (NULL), + part_xx_yy FOR VALUES IN ('xx', 'yy'), PARTITIONED, + part_default DEFAULT, PARTITIONED + +-- cleanup +drop table range_parted, list_parted; +drop table hash_parted; +-- test that a default partition added as the first partition accepts any value +-- including null +create table list_parted (a int) partition by list (a); +create table part_default partition of list_parted default; +\d+ part_default + Table "public.part_default" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition of: list_parted DEFAULT +No partition constraint + +insert into part_default values (null); +insert into part_default values (1); +insert into part_default values (-1); +select tableoid::regclass, a from list_parted; + tableoid | a +--------------+---- + part_default | + part_default | 1 + part_default | -1 +(3 rows) + +-- cleanup +drop table list_parted; +-- more tests for certain multi-level partitioning scenarios +create table mlparted (a int, b int) partition by range (a, b); +create table mlparted1 (b int not null, a int not null) partition by range ((b+0)); +create table mlparted11 (like mlparted1); +alter table mlparted11 drop a; +alter table mlparted11 add a int; +alter table mlparted11 drop a; +alter table mlparted11 add a int not null; +-- attnum for key attribute 'a' is different in mlparted, mlparted1, and mlparted11 +select attrelid::regclass, attname, attnum +from pg_attribute +where attname = 'a' + and (attrelid = 'mlparted'::regclass + or attrelid = 'mlparted1'::regclass + or attrelid = 'mlparted11'::regclass) +order by attrelid::regclass::text; + attrelid | attname | attnum +------------+---------+-------- + mlparted | a | 1 + mlparted1 | a | 2 + mlparted11 | a | 4 +(3 rows) + +alter table mlparted1 attach partition mlparted11 for values from (2) to (5); +alter table mlparted attach partition mlparted1 for values from (1, 2) to (1, 10); +-- check that "(1, 2)" is correctly routed to mlparted11. 
+insert into mlparted values (1, 2); +select tableoid::regclass, * from mlparted; + tableoid | a | b +------------+---+--- + mlparted11 | 1 | 2 +(1 row) + +-- check that proper message is shown after failure to route through mlparted1 +insert into mlparted (a, b) values (1, 5); +ERROR: no partition of relation "mlparted1" found for row +DETAIL: Partition key of the failing row contains ((b + 0)) = (5). +truncate mlparted; +alter table mlparted add constraint check_b check (b = 3); +-- have a BR trigger modify the row such that the check_b is violated +create function mlparted11_trig_fn() +returns trigger AS +$$ +begin + NEW.b := 4; + return NEW; +end; +$$ +language plpgsql; +create trigger mlparted11_trig before insert ON mlparted11 + for each row execute procedure mlparted11_trig_fn(); +-- check that the correct row is shown when constraint check_b fails after +-- "(1, 2)" is routed to mlparted11 (actually "(1, 4)" would be shown due +-- to the BR trigger mlparted11_trig_fn) +insert into mlparted values (1, 2); +ERROR: new row for relation "mlparted11" violates check constraint "check_b" +DETAIL: Failing row contains (1, 4). +drop trigger mlparted11_trig on mlparted11; +drop function mlparted11_trig_fn(); +-- check that inserting into an internal partition successfully results in +-- checking its partition constraint before inserting into the leaf partition +-- selected by tuple-routing +insert into mlparted1 (a, b) values (2, 3); +ERROR: new row for relation "mlparted1" violates partition constraint +DETAIL: Failing row contains (3, 2). +-- check routing error through a list partitioned table when the key is null +create table lparted_nonullpart (a int, b char) partition by list (b); +create table lparted_nonullpart_a partition of lparted_nonullpart for values in ('a'); +insert into lparted_nonullpart values (1); +ERROR: no partition of relation "lparted_nonullpart" found for row +DETAIL: Partition key of the failing row contains (b) = (null). 
+drop table lparted_nonullpart; +-- check that RETURNING works correctly with tuple-routing +alter table mlparted drop constraint check_b; +create table mlparted12 partition of mlparted1 for values from (5) to (10); +create table mlparted2 (b int not null, a int not null); +alter table mlparted attach partition mlparted2 for values from (1, 10) to (1, 20); +create table mlparted3 partition of mlparted for values from (1, 20) to (1, 30); +create table mlparted4 (like mlparted); +alter table mlparted4 drop a; +alter table mlparted4 add a int not null; +alter table mlparted attach partition mlparted4 for values from (1, 30) to (1, 40); +with ins (a, b, c) as + (insert into mlparted (b, a) select s.a, 1 from generate_series(2, 39) s(a) returning tableoid::regclass, *) + select a, b, min(c), max(c) from ins group by a, b order by 1; + a | b | min | max +------------+---+-----+----- + mlparted11 | 1 | 2 | 4 + mlparted12 | 1 | 5 | 9 + mlparted2 | 1 | 10 | 19 + mlparted3 | 1 | 20 | 29 + mlparted4 | 1 | 30 | 39 +(5 rows) + +alter table mlparted add c text; +create table mlparted5 (c text, a int not null, b int not null) partition by list (c); +create table mlparted5a (a int not null, c text, b int not null); +alter table mlparted5 attach partition mlparted5a for values in ('a'); +alter table mlparted attach partition mlparted5 for values from (1, 40) to (1, 50); +alter table mlparted add constraint check_b check (a = 1 and b < 45); +insert into mlparted values (1, 45, 'a'); +ERROR: new row for relation "mlparted5a" violates check constraint "check_b" +DETAIL: Failing row contains (1, 45, a). +create function mlparted5abrtrig_func() returns trigger as $$ begin new.c = 'b'; return new; end; $$ language plpgsql; +create trigger mlparted5abrtrig before insert on mlparted5a for each row execute procedure mlparted5abrtrig_func(); +insert into mlparted5 (a, b, c) values (1, 40, 'a'); +ERROR: new row for relation "mlparted5a" violates partition constraint +DETAIL: Failing row contains (b, 1, 40). +drop table mlparted5; +alter table mlparted drop constraint check_b; +-- Check multi-level default partition +create table mlparted_def partition of mlparted default partition by range(a); +create table mlparted_def1 partition of mlparted_def for values from (40) to (50); +create table mlparted_def2 partition of mlparted_def for values from (50) to (60); +insert into mlparted values (40, 100); +insert into mlparted_def1 values (42, 100); +insert into mlparted_def2 values (54, 50); +-- fail +insert into mlparted values (70, 100); +ERROR: no partition of relation "mlparted_def" found for row +DETAIL: Partition key of the failing row contains (a) = (70). +insert into mlparted_def1 values (52, 50); +ERROR: new row for relation "mlparted_def1" violates partition constraint +DETAIL: Failing row contains (52, 50, null). +insert into mlparted_def2 values (34, 50); +ERROR: new row for relation "mlparted_def2" violates partition constraint +DETAIL: Failing row contains (34, 50, null). +-- ok +create table mlparted_defd partition of mlparted_def default; +insert into mlparted values (70, 100); +select tableoid::regclass, * from mlparted_def; + tableoid | a | b | c +---------------+----+-----+--- + mlparted_def1 | 40 | 100 | + mlparted_def1 | 42 | 100 | + mlparted_def2 | 54 | 50 | + mlparted_defd | 70 | 100 | +(4 rows) + +-- Check multi-level tuple routing with attributes dropped from the +-- top-most parent. First remove the last attribute. 
+alter table mlparted add d int, add e int; +alter table mlparted drop e; +create table mlparted5 partition of mlparted + for values from (1, 40) to (1, 50) partition by range (c); +create table mlparted5_ab partition of mlparted5 + for values from ('a') to ('c') partition by list (c); +-- This partitioned table should remain with no partitions. +create table mlparted5_cd partition of mlparted5 + for values from ('c') to ('e') partition by list (c); +create table mlparted5_a partition of mlparted5_ab for values in ('a'); +create table mlparted5_b (d int, b int, c text, a int); +alter table mlparted5_ab attach partition mlparted5_b for values in ('b'); +truncate mlparted; +insert into mlparted values (1, 2, 'a', 1); +insert into mlparted values (1, 40, 'a', 1); -- goes to mlparted5_a +insert into mlparted values (1, 45, 'b', 1); -- goes to mlparted5_b +insert into mlparted values (1, 45, 'c', 1); -- goes to mlparted5_cd, fails +ERROR: no partition of relation "mlparted5_cd" found for row +DETAIL: Partition key of the failing row contains (c) = (c). +insert into mlparted values (1, 45, 'f', 1); -- goes to mlparted5, fails +ERROR: no partition of relation "mlparted5" found for row +DETAIL: Partition key of the failing row contains (c) = (f). +select tableoid::regclass, * from mlparted order by a, b, c, d; + tableoid | a | b | c | d +-------------+---+----+---+--- + mlparted11 | 1 | 2 | a | 1 + mlparted5_a | 1 | 40 | a | 1 + mlparted5_b | 1 | 45 | b | 1 +(3 rows) + +alter table mlparted drop d; +truncate mlparted; +-- Remove the before last attribute. +alter table mlparted add e int, add d int; +alter table mlparted drop e; +insert into mlparted values (1, 2, 'a', 1); +insert into mlparted values (1, 40, 'a', 1); -- goes to mlparted5_a +insert into mlparted values (1, 45, 'b', 1); -- goes to mlparted5_b +insert into mlparted values (1, 45, 'c', 1); -- goes to mlparted5_cd, fails +ERROR: no partition of relation "mlparted5_cd" found for row +DETAIL: Partition key of the failing row contains (c) = (c). +insert into mlparted values (1, 45, 'f', 1); -- goes to mlparted5, fails +ERROR: no partition of relation "mlparted5" found for row +DETAIL: Partition key of the failing row contains (c) = (f). +select tableoid::regclass, * from mlparted order by a, b, c, d; + tableoid | a | b | c | d +-------------+---+----+---+--- + mlparted11 | 1 | 2 | a | 1 + mlparted5_a | 1 | 40 | a | 1 + mlparted5_b | 1 | 45 | b | 1 +(3 rows) + +alter table mlparted drop d; +drop table mlparted5; +-- check that message shown after failure to find a partition shows the +-- appropriate key description (or none) in various situations +create table key_desc (a int, b int) partition by list ((a+0)); +create table key_desc_1 partition of key_desc for values in (1) partition by range (b); +create user regress_insert_other_user; +grant select (a) on key_desc_1 to regress_insert_other_user; +grant insert on key_desc to regress_insert_other_user; +set role regress_insert_other_user; +-- no key description is shown +insert into key_desc values (1, 1); +ERROR: no partition of relation "key_desc_1" found for row +reset role; +grant select (b) on key_desc_1 to regress_insert_other_user; +set role regress_insert_other_user; +-- key description (b)=(1) is now shown +insert into key_desc values (1, 1); +ERROR: no partition of relation "key_desc_1" found for row +DETAIL: Partition key of the failing row contains (b) = (1). 
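Whether the DETAIL line appears depends on whether the inserting role can read every column used in the key description; granting SELECT on column b is what made it show up above. As an illustrative check (boolean result omitted here), the controlling column privilege can be verified with:

select has_column_privilege('regress_insert_other_user', 'key_desc_1', 'b', 'SELECT');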
+-- key description is not shown if key contains expression +insert into key_desc values (2, 1); +ERROR: no partition of relation "key_desc" found for row +reset role; +revoke all on key_desc from regress_insert_other_user; +revoke all on key_desc_1 from regress_insert_other_user; +drop role regress_insert_other_user; +drop table key_desc, key_desc_1; +-- test minvalue/maxvalue restrictions +create table mcrparted (a int, b int, c int) partition by range (a, abs(b), c); +create table mcrparted0 partition of mcrparted for values from (minvalue, 0, 0) to (1, maxvalue, maxvalue); +ERROR: every bound following MINVALUE must also be MINVALUE +LINE 1: ...partition of mcrparted for values from (minvalue, 0, 0) to (... + ^ +create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, minvalue); +ERROR: every bound following MAXVALUE must also be MAXVALUE +LINE 1: ...r values from (10, 6, minvalue) to (10, maxvalue, minvalue); + ^ +create table mcrparted4 partition of mcrparted for values from (21, minvalue, 0) to (30, 20, minvalue); +ERROR: every bound following MINVALUE must also be MINVALUE +LINE 1: ...ition of mcrparted for values from (21, minvalue, 0) to (30,... + ^ +-- check multi-column range partitioning expression enforces the same +-- constraint as what tuple-routing would determine it to be +create table mcrparted0 partition of mcrparted for values from (minvalue, minvalue, minvalue) to (1, maxvalue, maxvalue); +create table mcrparted1 partition of mcrparted for values from (2, 1, minvalue) to (10, 5, 10); +create table mcrparted2 partition of mcrparted for values from (10, 6, minvalue) to (10, maxvalue, maxvalue); +create table mcrparted3 partition of mcrparted for values from (11, 1, 1) to (20, 10, 10); +create table mcrparted4 partition of mcrparted for values from (21, minvalue, minvalue) to (30, 20, maxvalue); +create table mcrparted5 partition of mcrparted for values from (30, 21, 20) to (maxvalue, maxvalue, maxvalue); +-- null not allowed in range partition +insert into mcrparted values (null, null, null); +ERROR: no partition of relation "mcrparted" found for row +DETAIL: Partition key of the failing row contains (a, abs(b), c) = (null, null, null). +-- routed to mcrparted0 +insert into mcrparted values (0, 1, 1); +insert into mcrparted0 values (0, 1, 1); +-- routed to mcparted1 +insert into mcrparted values (9, 1000, 1); +insert into mcrparted1 values (9, 1000, 1); +insert into mcrparted values (10, 5, -1); +insert into mcrparted1 values (10, 5, -1); +insert into mcrparted values (2, 1, 0); +insert into mcrparted1 values (2, 1, 0); +-- routed to mcparted2 +insert into mcrparted values (10, 6, 1000); +insert into mcrparted2 values (10, 6, 1000); +insert into mcrparted values (10, 1000, 1000); +insert into mcrparted2 values (10, 1000, 1000); +-- no partition exists, nor does mcrparted3 accept it +insert into mcrparted values (11, 1, -1); +ERROR: no partition of relation "mcrparted" found for row +DETAIL: Partition key of the failing row contains (a, abs(b), c) = (11, 1, -1). +insert into mcrparted3 values (11, 1, -1); +ERROR: new row for relation "mcrparted3" violates partition constraint +DETAIL: Failing row contains (11, 1, -1). +-- routed to mcrparted5 +insert into mcrparted values (30, 21, 20); +insert into mcrparted5 values (30, 21, 20); +insert into mcrparted4 values (30, 21, 20); -- error +ERROR: new row for relation "mcrparted4" violates partition constraint +DETAIL: Failing row contains (30, 21, 20). 
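For finite values, a multi-column range bound is compared against the partition key tuple lexicographically, just like an SQL row comparison, which is why the key (11, 1, -1) above falls below mcrparted3's inclusive lower bound (11, 1, 1) and no partition accepts it. A one-line sketch of that comparison (it returns false):

select row(11, 1, -1) >= row(11, 1, 1) as meets_lower_bound;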
+-- check rows +select tableoid::regclass::text, * from mcrparted order by 1; + tableoid | a | b | c +------------+----+------+------ + mcrparted0 | 0 | 1 | 1 + mcrparted0 | 0 | 1 | 1 + mcrparted1 | 9 | 1000 | 1 + mcrparted1 | 9 | 1000 | 1 + mcrparted1 | 10 | 5 | -1 + mcrparted1 | 10 | 5 | -1 + mcrparted1 | 2 | 1 | 0 + mcrparted1 | 2 | 1 | 0 + mcrparted2 | 10 | 6 | 1000 + mcrparted2 | 10 | 6 | 1000 + mcrparted2 | 10 | 1000 | 1000 + mcrparted2 | 10 | 1000 | 1000 + mcrparted5 | 30 | 21 | 20 + mcrparted5 | 30 | 21 | 20 +(14 rows) + +-- cleanup +drop table mcrparted; +-- check that a BR constraint can't make partition contain violating rows +create table brtrigpartcon (a int, b text) partition by list (a); +create table brtrigpartcon1 partition of brtrigpartcon for values in (1); +create or replace function brtrigpartcon1trigf() returns trigger as $$begin new.a := 2; return new; end$$ language plpgsql; +create trigger brtrigpartcon1trig before insert on brtrigpartcon1 for each row execute procedure brtrigpartcon1trigf(); +insert into brtrigpartcon values (1, 'hi there'); +ERROR: new row for relation "brtrigpartcon1" violates partition constraint +DETAIL: Failing row contains (2, hi there). +insert into brtrigpartcon1 values (1, 'hi there'); +ERROR: new row for relation "brtrigpartcon1" violates partition constraint +DETAIL: Failing row contains (2, hi there). +-- check that the message shows the appropriate column description in a +-- situation where the partitioned table is not the primary ModifyTable node +create table inserttest3 (f1 text default 'foo', f2 text default 'bar', f3 int); +create role regress_coldesc_role; +grant insert on inserttest3 to regress_coldesc_role; +grant insert on brtrigpartcon to regress_coldesc_role; +revoke select on brtrigpartcon from regress_coldesc_role; +set role regress_coldesc_role; +with result as (insert into brtrigpartcon values (1, 'hi there') returning 1) + insert into inserttest3 (f3) select * from result; +ERROR: new row for relation "brtrigpartcon1" violates partition constraint +DETAIL: Failing row contains (a, b) = (2, hi there). 
+reset role; +-- cleanup +revoke all on inserttest3 from regress_coldesc_role; +revoke all on brtrigpartcon from regress_coldesc_role; +drop role regress_coldesc_role; +drop table inserttest3; +drop table brtrigpartcon; +drop function brtrigpartcon1trigf(); +-- check that "do nothing" BR triggers work with tuple-routing +create table donothingbrtrig_test (a int, b text) partition by list (a); +create table donothingbrtrig_test1 (b text, a int); +create table donothingbrtrig_test2 (c text, b text, a int); +alter table donothingbrtrig_test2 drop column c; +create or replace function donothingbrtrig_func() returns trigger as $$begin raise notice 'b: %', new.b; return NULL; end$$ language plpgsql; +create trigger donothingbrtrig1 before insert on donothingbrtrig_test1 for each row execute procedure donothingbrtrig_func(); +create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each row execute procedure donothingbrtrig_func(); +alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1); +alter table donothingbrtrig_test attach partition donothingbrtrig_test2 for values in (2); +insert into donothingbrtrig_test values (1, 'foo'), (2, 'bar'); +NOTICE: b: foo +NOTICE: b: bar +copy donothingbrtrig_test from stdout; +NOTICE: b: baz +NOTICE: b: qux +select tableoid::regclass, * from donothingbrtrig_test; + tableoid | a | b +----------+---+--- +(0 rows) + +-- cleanup +drop table donothingbrtrig_test; +drop function donothingbrtrig_func(); +-- check multi-column range partitioning with minvalue/maxvalue constraints +create table mcrparted (a text, b int) partition by range(a, b); +create table mcrparted1_lt_b partition of mcrparted for values from (minvalue, minvalue) to ('b', minvalue); +create table mcrparted2_b partition of mcrparted for values from ('b', minvalue) to ('c', minvalue); +create table mcrparted3_c_to_common partition of mcrparted for values from ('c', minvalue) to ('common', minvalue); +create table mcrparted4_common_lt_0 partition of mcrparted for values from ('common', minvalue) to ('common', 0); +create table mcrparted5_common_0_to_10 partition of mcrparted for values from ('common', 0) to ('common', 10); +create table mcrparted6_common_ge_10 partition of mcrparted for values from ('common', 10) to ('common', maxvalue); +create table mcrparted7_gt_common_lt_d partition of mcrparted for values from ('common', maxvalue) to ('d', minvalue); +create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, maxvalue); +\d+ mcrparted + Partitioned table "public.mcrparted" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition key: RANGE (a, b) +Partitions: mcrparted1_lt_b FOR VALUES FROM (MINVALUE, MINVALUE) TO ('b', MINVALUE), + mcrparted2_b FOR VALUES FROM ('b', MINVALUE) TO ('c', MINVALUE), + mcrparted3_c_to_common FOR VALUES FROM ('c', MINVALUE) TO ('common', MINVALUE), + mcrparted4_common_lt_0 FOR VALUES FROM ('common', MINVALUE) TO ('common', 0), + mcrparted5_common_0_to_10 FOR VALUES FROM ('common', 0) TO ('common', 10), + mcrparted6_common_ge_10 FOR VALUES FROM ('common', 10) TO ('common', MAXVALUE), + mcrparted7_gt_common_lt_d FOR VALUES FROM ('common', MAXVALUE) TO ('d', MINVALUE), + mcrparted8_ge_d FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, MAXVALUE) + +\d+ mcrparted1_lt_b + Table 
"public.mcrparted1_lt_b" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM (MINVALUE, MINVALUE) TO ('b', MINVALUE) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a < 'b'::text)) + +\d+ mcrparted2_b + Table "public.mcrparted2_b" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('b', MINVALUE) TO ('c', MINVALUE) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'b'::text) AND (a < 'c'::text)) + +\d+ mcrparted3_c_to_common + Table "public.mcrparted3_c_to_common" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('c', MINVALUE) TO ('common', MINVALUE) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'c'::text) AND (a < 'common'::text)) + +\d+ mcrparted4_common_lt_0 + Table "public.mcrparted4_common_lt_0" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('common', MINVALUE) TO ('common', 0) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a = 'common'::text) AND (b < 0)) + +\d+ mcrparted5_common_0_to_10 + Table "public.mcrparted5_common_0_to_10" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('common', 0) TO ('common', 10) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a = 'common'::text) AND (b >= 0) AND (b < 10)) + +\d+ mcrparted6_common_ge_10 + Table "public.mcrparted6_common_ge_10" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('common', 10) TO ('common', MAXVALUE) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a = 'common'::text) AND (b >= 10)) + +\d+ mcrparted7_gt_common_lt_d + Table "public.mcrparted7_gt_common_lt_d" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('common', MAXVALUE) TO ('d', MINVALUE) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a > 'common'::text) AND (a < 'd'::text)) + +\d+ mcrparted8_ge_d + Table "public.mcrparted8_ge_d" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+--------+---------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | integer | | | | plain | | +Partition of: mcrparted FOR VALUES FROM ('d', MINVALUE) TO (MAXVALUE, MAXVALUE) +Partition constraint: ((a IS NOT NULL) AND (b IS NOT NULL) AND (a >= 'd'::text)) + +insert into mcrparted values ('aaa', 0), ('b', 0), ('bz', 10), ('c', -10), + ('comm', -10), ('common', -10), ('common', 0), ('common', 10), + ('commons', 0), ('d', -10), ('e', 0); +select tableoid::regclass, * from mcrparted order by a, b; + tableoid | a | b +---------------------------+---------+----- + mcrparted1_lt_b | aaa | 0 + mcrparted2_b | b | 0 + mcrparted2_b | bz | 10 + mcrparted3_c_to_common | c | -10 + mcrparted3_c_to_common | comm | -10 + mcrparted4_common_lt_0 | common | -10 + mcrparted5_common_0_to_10 | common | 0 + mcrparted6_common_ge_10 | common | 10 + mcrparted7_gt_common_lt_d | commons | 0 + mcrparted8_ge_d | d | -10 + mcrparted8_ge_d | e | 0 +(11 rows) + +drop table mcrparted; +-- check that wholerow vars in the RETURNING list work with partitioned tables +create table returningwrtest (a int) partition by list (a); +create table returningwrtest1 partition of returningwrtest for values in (1); +insert into returningwrtest values (1) returning returningwrtest; + returningwrtest +----------------- + (1) +(1 row) + +-- check also that the wholerow vars in RETURNING list are converted as needed +alter table returningwrtest add b text; +create table returningwrtest2 (b text, c int, a int); +alter table returningwrtest2 drop c; +alter table returningwrtest attach partition returningwrtest2 for values in (2); +insert into returningwrtest values (2, 'foo') returning returningwrtest; + returningwrtest +----------------- + (2,foo) +(1 row) + +drop table returningwrtest; diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out new file mode 100644 index 0000000..9e9e3bd --- /dev/null +++ b/src/test/regress/expected/insert_conflict.out @@ -0,0 +1,866 @@ +-- +-- insert...on conflict do unique index inference +-- +create table insertconflicttest(key int4, fruit text); +-- +-- Test unique index inference with operator class specifications and +-- named collations +-- +create unique index op_index_key on insertconflicttest(key, fruit text_pattern_ops); +create unique index collation_index_key on insertconflicttest(key, fruit collate "C"); +create unique index both_index_key on insertconflicttest(key, fruit collate "C" text_pattern_ops); +create unique index both_index_expr_key on insertconflicttest(key, lower(fruit) collate "C" text_pattern_ops); +-- fails +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key) do nothing; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit) do nothing; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +-- succeeds +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do nothing; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key + -> Result +(4 rows) + +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit, 
key) do nothing; + QUERY PLAN +------------------------------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key + -> Result +(4 rows) + +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit), key, lower(fruit), key) do nothing; + QUERY PLAN +------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: both_index_expr_key + -> Result +(4 rows) + +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit) do update set fruit = excluded.fruit + where exists (select 1 from insertconflicttest ii where ii.key = excluded.key); + QUERY PLAN +------------------------------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: op_index_key, collation_index_key, both_index_key + Conflict Filter: (SubPlan 1) + -> Result + SubPlan 1 + -> Index Only Scan using both_index_expr_key on insertconflicttest ii + Index Cond: (key = excluded.key) +(8 rows) + +-- Neither collation nor operator class specifications are required -- +-- supplying them merely *limits* matches to indexes with matching opclasses +-- used for relevant indexes +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit text_pattern_ops) do nothing; + QUERY PLAN +---------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: op_index_key, both_index_key + -> Result +(4 rows) + +-- Okay, arbitrates using both index where text_pattern_ops opclass does and +-- does not appear. +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key, fruit collate "C") do nothing; + QUERY PLAN +----------------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: collation_index_key, both_index_key + -> Result +(4 rows) + +-- Okay, but only accepts the single index where both opclass and collation are +-- specified +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit collate "C" text_pattern_ops, key) do nothing; + QUERY PLAN +-------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: both_index_key + -> Result +(4 rows) + +-- Okay, but only accepts the single index where both opclass and collation are +-- specified (plus expression variant) +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C", key, key) do nothing; + QUERY PLAN +------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: both_index_expr_key + -> Result +(4 rows) + +-- Attribute appears twice, while not all attributes/expressions on attributes +-- appearing within index definition match in terms of both opclass and +-- collation. 
+-- +-- Works because every attribute in inference specification needs to be +-- satisfied once or more by cataloged index attribute, and as always when an +-- attribute in the cataloged definition has a non-default opclass/collation, +-- it still satisfied some inference attribute lacking any particular +-- opclass/collation specification. +-- +-- The implementation is liberal in accepting inference specifications on the +-- assumption that multiple inferred unique indexes will prevent problematic +-- cases. It rolls with unique indexes where attributes redundantly appear +-- multiple times, too (which is not tested here). +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (fruit, key, fruit text_pattern_ops, key) do nothing; + QUERY PLAN +---------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: op_index_key, both_index_key + -> Result +(4 rows) + +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C" text_pattern_ops, key, key) do nothing; + QUERY PLAN +------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: both_index_expr_key + -> Result +(4 rows) + +drop index op_index_key; +drop index collation_index_key; +drop index both_index_key; +drop index both_index_expr_key; +-- +-- Make sure that cross matching of attribute opclass/collation does not occur +-- +create unique index cross_match on insertconflicttest(lower(fruit) collate "C", upper(fruit) text_pattern_ops); +-- fails: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) text_pattern_ops, upper(fruit) collate "C") do nothing; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +-- works: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C", upper(fruit) text_pattern_ops) do nothing; + QUERY PLAN +----------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: cross_match + -> Result +(4 rows) + +drop index cross_match; +-- +-- Single key tests +-- +create unique index key_index on insertconflicttest(key); +-- +-- Explain tests +-- +explain (costs off) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit; + QUERY PLAN +--------------------------------------- + Insert on insertconflicttest + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: key_index + -> Result +(4 rows) + +-- Should display qual actually attributable to internal sequential scan: +explain (costs off) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit where insertconflicttest.fruit != 'Cawesh'; + QUERY PLAN +----------------------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: key_index + Conflict Filter: (insertconflicttest.fruit <> 'Cawesh'::text) + -> Result +(5 rows) + +-- With EXCLUDED.* expression in scan node: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (key) do update set fruit = excluded.fruit where excluded.fruit != 'Elderberry'; + QUERY PLAN +----------------------------------------------------------- + Insert on insertconflicttest + 
Conflict Resolution: UPDATE + Conflict Arbiter Indexes: key_index + Conflict Filter: (excluded.fruit <> 'Elderberry'::text) + -> Result +(5 rows) + +-- Does the same, but JSON format shows "Conflict Arbiter Index" as JSON array: +explain (costs off, format json) insert into insertconflicttest values (0, 'Bilberry') on conflict (key) do update set fruit = excluded.fruit where insertconflicttest.fruit != 'Lime' returning *; + QUERY PLAN +------------------------------------------------------------------------ + [ + + { + + "Plan": { + + "Node Type": "ModifyTable", + + "Operation": "Insert", + + "Parallel Aware": false, + + "Async Capable": false, + + "Relation Name": "insertconflicttest", + + "Alias": "insertconflicttest", + + "Conflict Resolution": "UPDATE", + + "Conflict Arbiter Indexes": ["key_index"], + + "Conflict Filter": "(insertconflicttest.fruit <> 'Lime'::text)",+ + "Plans": [ + + { + + "Node Type": "Result", + + "Parent Relationship": "Outer", + + "Parallel Aware": false, + + "Async Capable": false + + } + + ] + + } + + } + + ] +(1 row) + +-- Fails (no unique index inference specification, required for do update variant): +insert into insertconflicttest values (1, 'Apple') on conflict do update set fruit = excluded.fruit; +ERROR: ON CONFLICT DO UPDATE requires inference specification or constraint name +LINE 1: ...nsert into insertconflicttest values (1, 'Apple') on conflic... + ^ +HINT: For example, ON CONFLICT (column_name). +-- inference succeeds: +insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruit; +insert into insertconflicttest values (2, 'Orange') on conflict (key, key, key) do update set fruit = excluded.fruit; +-- Succeed, since multi-assignment does not involve subquery: +insert into insertconflicttest +values (1, 'Apple'), (2, 'Orange') +on conflict (key) do update set (fruit, key) = (excluded.fruit, excluded.key); +-- Give good diagnostic message when EXCLUDED.* spuriously referenced from +-- RETURNING: +insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruit RETURNING excluded.fruit; +ERROR: invalid reference to FROM-clause entry for table "excluded" +LINE 1: ...y) do update set fruit = excluded.fruit RETURNING excluded.f... + ^ +DETAIL: There is an entry for table "excluded", but it cannot be referenced from this part of the query. +-- Only suggest .* column when inference element misspelled: +insert into insertconflicttest values (1, 'Apple') on conflict (keyy) do update set fruit = excluded.fruit; +ERROR: column "keyy" does not exist +LINE 1: ...nsertconflicttest values (1, 'Apple') on conflict (keyy) do ... + ^ +HINT: Perhaps you meant to reference the column "insertconflicttest.key" or the column "excluded.key". +-- Have useful HINT for EXCLUDED.* RTE within UPDATE: +insert into insertconflicttest values (1, 'Apple') on conflict (key) do update set fruit = excluded.fruitt; +ERROR: column excluded.fruitt does not exist +LINE 1: ... 'Apple') on conflict (key) do update set fruit = excluded.f... + ^ +HINT: Perhaps you meant to reference the column "excluded.fruit". 
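Unique index inference succeeds only when the listed columns and expressions (plus any collation or operator class qualifiers) match an existing unique index on the target table, so the failures that follow are expected while key_index, a single-column index on key, is the only unique index present. An illustrative way to list the candidate indexes that inference is matched against (output omitted here):

select indexname, indexdef
from pg_indexes
where tablename = 'insertconflicttest';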
+-- inference fails: +insert into insertconflicttest values (3, 'Kiwi') on conflict (key, fruit) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (4, 'Mango') on conflict (fruit, key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (5, 'Lemon') on conflict (fruit) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (6, 'Passionfruit') on conflict (lower(fruit)) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +-- Check the target relation can be aliased +insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = excluded.fruit; -- ok, no reference to target table +insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = ict.fruit; -- ok, alias +insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = insertconflicttest.fruit; -- error, references aliased away name +ERROR: invalid reference to FROM-clause entry for table "insertconflicttest" +LINE 1: ...onfruit') on conflict (key) do update set fruit = insertconf... + ^ +HINT: Perhaps you meant to reference the table alias "ict". +drop index key_index; +-- +-- Composite key tests +-- +create unique index comp_key_index on insertconflicttest(key, fruit); +-- inference succeeds: +insert into insertconflicttest values (7, 'Raspberry') on conflict (key, fruit) do update set fruit = excluded.fruit; +insert into insertconflicttest values (8, 'Lime') on conflict (fruit, key) do update set fruit = excluded.fruit; +-- inference fails: +insert into insertconflicttest values (9, 'Banana') on conflict (key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (10, 'Blueberry') on conflict (key, key, key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (11, 'Cherry') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop index comp_key_index; +-- +-- Partial index tests, no inference predicate specified +-- +create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5; +create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5; +-- inference fails: +insert into insertconflicttest values (13, 'Grape') on conflict (key, fruit) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (14, 'Raisin') on conflict (fruit, key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into 
insertconflicttest values (15, 'Cranberry') on conflict (key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (16, 'Melon') on conflict (key, key, key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (17, 'Mulberry') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (18, 'Pineapple') on conflict (lower(fruit), key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop index part_comp_key_index; +drop index expr_part_comp_key_index; +-- +-- Expression index tests +-- +create unique index expr_key_index on insertconflicttest(lower(fruit)); +-- inference succeeds: +insert into insertconflicttest values (20, 'Quince') on conflict (lower(fruit)) do update set fruit = excluded.fruit; +insert into insertconflicttest values (21, 'Pomegranate') on conflict (lower(fruit), lower(fruit)) do update set fruit = excluded.fruit; +-- inference fails: +insert into insertconflicttest values (22, 'Apricot') on conflict (upper(fruit)) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (23, 'Blackberry') on conflict (fruit) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop index expr_key_index; +-- +-- Expression index tests (with regular column) +-- +create unique index expr_comp_key_index on insertconflicttest(key, lower(fruit)); +create unique index tricky_expr_comp_key_index on insertconflicttest(key, lower(fruit), upper(fruit)); +-- inference succeeds: +insert into insertconflicttest values (24, 'Plum') on conflict (key, lower(fruit)) do update set fruit = excluded.fruit; +insert into insertconflicttest values (25, 'Peach') on conflict (lower(fruit), key) do update set fruit = excluded.fruit; +-- Should not infer "tricky_expr_comp_key_index" index: +explain (costs off) insert into insertconflicttest values (26, 'Fig') on conflict (lower(fruit), key, lower(fruit), key) do update set fruit = excluded.fruit; + QUERY PLAN +------------------------------------------------- + Insert on insertconflicttest + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: expr_comp_key_index + -> Result +(4 rows) + +-- inference fails: +insert into insertconflicttest values (27, 'Prune') on conflict (key, upper(fruit)) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (28, 'Redcurrant') on conflict (fruit, key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (29, 'Nectarine') on conflict (key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop index expr_comp_key_index; +drop index tricky_expr_comp_key_index; +-- +-- Non-spurious duplicate violation tests +-- +create unique index key_index on insertconflicttest(key); +create unique index fruit_index on 
insertconflicttest(fruit); +-- succeeds, since UPDATE happens to update "fruit" to existing value: +insert into insertconflicttest values (26, 'Fig') on conflict (key) do update set fruit = excluded.fruit; +-- fails, since UPDATE is to row with key value 26, and we're updating "fruit" +-- to a value that happens to exist in another row ('peach'): +insert into insertconflicttest values (26, 'Peach') on conflict (key) do update set fruit = excluded.fruit; +ERROR: duplicate key value violates unique constraint "fruit_index" +DETAIL: Key (fruit)=(Peach) already exists. +-- succeeds, since "key" isn't repeated/referenced in UPDATE, and "fruit" +-- arbitrates that statement updates existing "Fig" row: +insert into insertconflicttest values (25, 'Fig') on conflict (fruit) do update set fruit = excluded.fruit; +drop index key_index; +drop index fruit_index; +-- +-- Test partial unique index inference +-- +create unique index partial_key_index on insertconflicttest(key) where fruit like '%berry'; +-- Succeeds +insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' do update set fruit = excluded.fruit; +insert into insertconflicttest as t values (23, 'Blackberry') on conflict (key) where fruit like '%berry' and t.fruit = 'inconsequential' do nothing; +-- fails +insert into insertconflicttest values (23, 'Blackberry') on conflict (key) do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (23, 'Blackberry') on conflict (key) where fruit like '%berry' or fruit = 'consequential' do nothing; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +insert into insertconflicttest values (23, 'Blackberry') on conflict (fruit) where fruit like '%berry' do update set fruit = excluded.fruit; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop index partial_key_index; +-- +-- Test that wholerow references to ON CONFLICT's EXCLUDED work +-- +create unique index plain on insertconflicttest(key); +-- Succeeds, updates existing row: +insert into insertconflicttest as i values (23, 'Jackfruit') on conflict (key) do update set fruit = excluded.fruit + where i.* != excluded.* returning *; + key | fruit +-----+----------- + 23 | Jackfruit +(1 row) + +-- No update this time, though: +insert into insertconflicttest as i values (23, 'Jackfruit') on conflict (key) do update set fruit = excluded.fruit + where i.* != excluded.* returning *; + key | fruit +-----+------- +(0 rows) + +-- Predicate changed to require match rather than non-match, so updates once more: +insert into insertconflicttest as i values (23, 'Jackfruit') on conflict (key) do update set fruit = excluded.fruit + where i.* = excluded.* returning *; + key | fruit +-----+----------- + 23 | Jackfruit +(1 row) + +-- Assign: +insert into insertconflicttest as i values (23, 'Avocado') on conflict (key) do update set fruit = excluded.*::text + returning *; + key | fruit +-----+-------------- + 23 | (23,Avocado) +(1 row) + +-- deparse whole row var in WHERE and SET clauses: +explain (costs off) insert into insertconflicttest as i values (23, 'Avocado') on conflict (key) do update set fruit = excluded.fruit where excluded.* is null; + QUERY PLAN +----------------------------------------- + Insert on insertconflicttest i + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: plain + Conflict Filter: (excluded.* IS NULL) + -> Result 
+(5 rows) + +explain (costs off) insert into insertconflicttest as i values (23, 'Avocado') on conflict (key) do update set fruit = excluded.*::text; + QUERY PLAN +----------------------------------- + Insert on insertconflicttest i + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: plain + -> Result +(4 rows) + +drop index plain; +-- Cleanup +drop table insertconflicttest; +-- +-- Verify that EXCLUDED does not allow system column references. These +-- do not make sense because EXCLUDED isn't an already stored tuple +-- (and thus doesn't have a ctid etc). +-- +create table syscolconflicttest(key int4, data text); +insert into syscolconflicttest values (1); +insert into syscolconflicttest values (1) on conflict (key) do update set data = excluded.ctid::text; +ERROR: column excluded.ctid does not exist +LINE 1: ...values (1) on conflict (key) do update set data = excluded.c... + ^ +drop table syscolconflicttest; +-- +-- Previous tests all managed to not test any expressions requiring +-- planner preprocessing ... +-- +create table insertconflict (a bigint, b bigint); +create unique index insertconflicti1 on insertconflict(coalesce(a, 0)); +create unique index insertconflicti2 on insertconflict(b) + where coalesce(a, 1) > 0; +insert into insertconflict values (1, 2) +on conflict (coalesce(a, 0)) do nothing; +insert into insertconflict values (1, 2) +on conflict (b) where coalesce(a, 1) > 0 do nothing; +insert into insertconflict values (1, 2) +on conflict (b) where coalesce(a, 1) > 1 do nothing; +drop table insertconflict; +-- +-- test insertion through view +-- +create table insertconflict (f1 int primary key, f2 text); +create view insertconflictv as + select * from insertconflict with cascaded check option; +insert into insertconflictv values (1,'foo') + on conflict (f1) do update set f2 = excluded.f2; +select * from insertconflict; + f1 | f2 +----+----- + 1 | foo +(1 row) + +insert into insertconflictv values (1,'bar') + on conflict (f1) do update set f2 = excluded.f2; +select * from insertconflict; + f1 | f2 +----+----- + 1 | bar +(1 row) + +drop view insertconflictv; +drop table insertconflict; +-- ****************************************************************** +-- * * +-- * Test inheritance (example taken from tutorial) * +-- * * +-- ****************************************************************** +create table cities ( + name text, + population float8, + altitude int -- (in ft) +); +create table capitals ( + state char(2) +) inherits (cities); +-- Create unique indexes. Due to a general limitation of inheritance, +-- uniqueness is only enforced per-relation. Unique index inference +-- specification will do the right thing, though. +create unique index cities_names_unique on cities (name); +create unique index capitals_names_unique on capitals (name); +-- prepopulate the tables. 
+insert into cities values ('San Francisco', 7.24E+5, 63); +insert into cities values ('Las Vegas', 2.583E+5, 2174); +insert into cities values ('Mariposa', 1200, 1953); +insert into capitals values ('Sacramento', 3.694E+5, 30, 'CA'); +insert into capitals values ('Madison', 1.913E+5, 845, 'WI'); +-- Tests proper for inheritance: +select * from capitals; + name | population | altitude | state +------------+------------+----------+------- + Sacramento | 369400 | 30 | CA + Madison | 191300 | 845 | WI +(2 rows) + +-- Succeeds: +insert into cities values ('Las Vegas', 2.583E+5, 2174) on conflict do nothing; +insert into capitals values ('Sacramento', 4664.E+5, 30, 'CA') on conflict (name) do update set population = excluded.population; +-- Wrong "Sacramento", so do nothing: +insert into capitals values ('Sacramento', 50, 2267, 'NE') on conflict (name) do nothing; +select * from capitals; + name | population | altitude | state +------------+------------+----------+------- + Madison | 191300 | 845 | WI + Sacramento | 466400000 | 30 | CA +(2 rows) + +insert into cities values ('Las Vegas', 5.83E+5, 2001) on conflict (name) do update set population = excluded.population, altitude = excluded.altitude; +select tableoid::regclass, * from cities; + tableoid | name | population | altitude +----------+---------------+------------+---------- + cities | San Francisco | 724000 | 63 + cities | Mariposa | 1200 | 1953 + cities | Las Vegas | 583000 | 2001 + capitals | Madison | 191300 | 845 + capitals | Sacramento | 466400000 | 30 +(5 rows) + +insert into capitals values ('Las Vegas', 5.83E+5, 2222, 'NV') on conflict (name) do update set population = excluded.population; +-- Capitals will contain new capital, Las Vegas: +select * from capitals; + name | population | altitude | state +------------+------------+----------+------- + Madison | 191300 | 845 | WI + Sacramento | 466400000 | 30 | CA + Las Vegas | 583000 | 2222 | NV +(3 rows) + +-- Cities contains two instances of "Las Vegas", since unique constraints don't +-- work across inheritance: +select tableoid::regclass, * from cities; + tableoid | name | population | altitude +----------+---------------+------------+---------- + cities | San Francisco | 724000 | 63 + cities | Mariposa | 1200 | 1953 + cities | Las Vegas | 583000 | 2001 + capitals | Madison | 191300 | 845 + capitals | Sacramento | 466400000 | 30 + capitals | Las Vegas | 583000 | 2222 +(6 rows) + +-- This only affects "cities" version of "Las Vegas": +insert into cities values ('Las Vegas', 5.86E+5, 2223) on conflict (name) do update set population = excluded.population, altitude = excluded.altitude; +select tableoid::regclass, * from cities; + tableoid | name | population | altitude +----------+---------------+------------+---------- + cities | San Francisco | 724000 | 63 + cities | Mariposa | 1200 | 1953 + cities | Las Vegas | 586000 | 2223 + capitals | Madison | 191300 | 845 + capitals | Sacramento | 466400000 | 30 + capitals | Las Vegas | 583000 | 2222 +(6 rows) + +-- clean up +drop table capitals; +drop table cities; +-- Make sure a table named excluded is handled properly +create table excluded(key int primary key, data text); +insert into excluded values(1, '1'); +-- error, ambiguous +insert into excluded values(1, '2') on conflict (key) do update set data = excluded.data RETURNING *; +ERROR: table reference "excluded" is ambiguous +LINE 1: ...es(1, '2') on conflict (key) do update set data = excluded.d... 
+ ^ +-- ok, aliased +insert into excluded AS target values(1, '2') on conflict (key) do update set data = excluded.data RETURNING *; + key | data +-----+------ + 1 | 2 +(1 row) + +-- ok, aliased +insert into excluded AS target values(1, '2') on conflict (key) do update set data = target.data RETURNING *; + key | data +-----+------ + 1 | 2 +(1 row) + +-- make sure excluded isn't a problem in returning clause +insert into excluded values(1, '2') on conflict (key) do update set data = 3 RETURNING excluded.*; + key | data +-----+------ + 1 | 3 +(1 row) + +-- clean up +drop table excluded; +-- check that references to columns after dropped columns are handled correctly +create table dropcol(key int primary key, drop1 int, keep1 text, drop2 numeric, keep2 float); +insert into dropcol(key, drop1, keep1, drop2, keep2) values(1, 1, '1', '1', 1); +-- set using excluded +insert into dropcol(key, drop1, keep1, drop2, keep2) values(1, 2, '2', '2', 2) on conflict(key) + do update set drop1 = excluded.drop1, keep1 = excluded.keep1, drop2 = excluded.drop2, keep2 = excluded.keep2 + where excluded.drop1 is not null and excluded.keep1 is not null and excluded.drop2 is not null and excluded.keep2 is not null + and dropcol.drop1 is not null and dropcol.keep1 is not null and dropcol.drop2 is not null and dropcol.keep2 is not null + returning *; + key | drop1 | keep1 | drop2 | keep2 +-----+-------+-------+-------+------- + 1 | 2 | 2 | 2 | 2 +(1 row) + +; +-- set using existing table +insert into dropcol(key, drop1, keep1, drop2, keep2) values(1, 3, '3', '3', 3) on conflict(key) + do update set drop1 = dropcol.drop1, keep1 = dropcol.keep1, drop2 = dropcol.drop2, keep2 = dropcol.keep2 + returning *; + key | drop1 | keep1 | drop2 | keep2 +-----+-------+-------+-------+------- + 1 | 2 | 2 | 2 | 2 +(1 row) + +; +alter table dropcol drop column drop1, drop column drop2; +-- set using excluded +insert into dropcol(key, keep1, keep2) values(1, '4', 4) on conflict(key) + do update set keep1 = excluded.keep1, keep2 = excluded.keep2 + where excluded.keep1 is not null and excluded.keep2 is not null + and dropcol.keep1 is not null and dropcol.keep2 is not null + returning *; + key | keep1 | keep2 +-----+-------+------- + 1 | 4 | 4 +(1 row) + +; +-- set using existing table +insert into dropcol(key, keep1, keep2) values(1, '5', 5) on conflict(key) + do update set keep1 = dropcol.keep1, keep2 = dropcol.keep2 + returning *; + key | keep1 | keep2 +-----+-------+------- + 1 | 4 | 4 +(1 row) + +; +DROP TABLE dropcol; +-- check handling of regular btree constraint along with gist constraint +create table twoconstraints (f1 int unique, f2 box, + exclude using gist(f2 with &&)); +insert into twoconstraints values(1, '((0,0),(1,1))'); +insert into twoconstraints values(1, '((2,2),(3,3))'); -- fail on f1 +ERROR: duplicate key value violates unique constraint "twoconstraints_f1_key" +DETAIL: Key (f1)=(1) already exists. +insert into twoconstraints values(2, '((0,0),(1,2))'); -- fail on f2 +ERROR: conflicting key value violates exclusion constraint "twoconstraints_f2_excl" +DETAIL: Key (f2)=((1,2),(0,0)) conflicts with existing key (f2)=((1,1),(0,0)). +insert into twoconstraints values(2, '((0,0),(1,2))') + on conflict on constraint twoconstraints_f1_key do nothing; -- fail on f2 +ERROR: conflicting key value violates exclusion constraint "twoconstraints_f2_excl" +DETAIL: Key (f2)=((1,2),(0,0)) conflicts with existing key (f2)=((1,1),(0,0)). 
+insert into twoconstraints values(2, '((0,0),(1,2))') + on conflict on constraint twoconstraints_f2_excl do nothing; -- do nothing +select * from twoconstraints; + f1 | f2 +----+------------- + 1 | (1,1),(0,0) +(1 row) + +drop table twoconstraints; +-- check handling of self-conflicts at various isolation levels +create table selfconflict (f1 int primary key, f2 int); +begin transaction isolation level read committed; +insert into selfconflict values (1,1), (1,2) on conflict do nothing; +commit; +begin transaction isolation level repeatable read; +insert into selfconflict values (2,1), (2,2) on conflict do nothing; +commit; +begin transaction isolation level serializable; +insert into selfconflict values (3,1), (3,2) on conflict do nothing; +commit; +begin transaction isolation level read committed; +insert into selfconflict values (4,1), (4,2) on conflict(f1) do update set f2 = 0; +ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time +HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values. +commit; +begin transaction isolation level repeatable read; +insert into selfconflict values (5,1), (5,2) on conflict(f1) do update set f2 = 0; +ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time +HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values. +commit; +begin transaction isolation level serializable; +insert into selfconflict values (6,1), (6,2) on conflict(f1) do update set f2 = 0; +ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time +HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values. +commit; +select * from selfconflict; + f1 | f2 +----+---- + 1 | 1 + 2 | 1 + 3 | 1 +(3 rows) + +drop table selfconflict; +-- check ON CONFLICT handling with partitioned tables +create table parted_conflict_test (a int unique, b char) partition by list (a); +create table parted_conflict_test_1 partition of parted_conflict_test (b unique) for values in (1, 2); +-- no indexes required here +insert into parted_conflict_test values (1, 'a') on conflict do nothing; +-- index on a required, which does exist in parent +insert into parted_conflict_test values (1, 'a') on conflict (a) do nothing; +insert into parted_conflict_test values (1, 'a') on conflict (a) do update set b = excluded.b; +-- targeting partition directly will work +insert into parted_conflict_test_1 values (1, 'a') on conflict (a) do nothing; +insert into parted_conflict_test_1 values (1, 'b') on conflict (a) do update set b = excluded.b; +-- index on b required, which doesn't exist in parent +insert into parted_conflict_test values (2, 'b') on conflict (b) do update set a = excluded.a; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +-- targeting partition directly will work +insert into parted_conflict_test_1 values (2, 'b') on conflict (b) do update set a = excluded.a; +-- should see (2, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 2 | b +(1 row) + +-- now check that DO UPDATE works correctly for target partition with +-- different attribute numbers +create table parted_conflict_test_2 (b char, a int unique); +alter table parted_conflict_test attach partition parted_conflict_test_2 for values in (3); +truncate parted_conflict_test; +insert into parted_conflict_test values (3, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test values (3, 
'b') on conflict (a) do update set b = excluded.b; +-- should see (3, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 3 | b +(1 row) + +-- case where parent will have a dropped column, but the partition won't +alter table parted_conflict_test drop b, add b char; +create table parted_conflict_test_3 partition of parted_conflict_test for values in (4); +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (4, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test (a, b) values (4, 'b') on conflict (a) do update set b = excluded.b where parted_conflict_test.b = 'a'; +-- should see (4, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 4 | b +(1 row) + +-- case with multi-level partitioning +create table parted_conflict_test_4 partition of parted_conflict_test for values in (5) partition by list (a); +create table parted_conflict_test_4_1 partition of parted_conflict_test_4 for values in (5); +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (5, 'a') on conflict (a) do update set b = excluded.b; +insert into parted_conflict_test (a, b) values (5, 'b') on conflict (a) do update set b = excluded.b where parted_conflict_test.b = 'a'; +-- should see (5, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 5 | b +(1 row) + +-- test with multiple rows +truncate parted_conflict_test; +insert into parted_conflict_test (a, b) values (1, 'a'), (2, 'a'), (4, 'a') on conflict (a) do update set b = excluded.b where excluded.b = 'b'; +insert into parted_conflict_test (a, b) values (1, 'b'), (2, 'c'), (4, 'b') on conflict (a) do update set b = excluded.b where excluded.b = 'b'; +-- should see (1, 'b'), (2, 'a'), (4, 'b') +select * from parted_conflict_test order by a; + a | b +---+--- + 1 | b + 2 | a + 4 | b +(3 rows) + +drop table parted_conflict_test; +-- test behavior of inserting a conflicting tuple into an intermediate +-- partitioning level +create table parted_conflict (a int primary key, b text) partition by range (a); +create table parted_conflict_1 partition of parted_conflict for values from (0) to (1000) partition by range (a); +create table parted_conflict_1_1 partition of parted_conflict_1 for values from (0) to (500); +insert into parted_conflict values (40, 'forty'); +insert into parted_conflict_1 values (40, 'cuarenta') + on conflict (a) do update set b = excluded.b; +drop table parted_conflict; +-- same thing, but this time try to use an index that's created not in the +-- partition +create table parted_conflict (a int, b text) partition by range (a); +create table parted_conflict_1 partition of parted_conflict for values from (0) to (1000) partition by range (a); +create table parted_conflict_1_1 partition of parted_conflict_1 for values from (0) to (500); +create unique index on only parted_conflict_1 (a); +create unique index on only parted_conflict (a); +alter index parted_conflict_a_idx attach partition parted_conflict_1_a_idx; +insert into parted_conflict values (40, 'forty'); +insert into parted_conflict_1 values (40, 'cuarenta') + on conflict (a) do update set b = excluded.b; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +drop table parted_conflict; +-- test whole-row Vars in ON CONFLICT expressions +create table parted_conflict (a int, b text, c int) partition by range (a); +create table parted_conflict_1 (drp text, c int, a int, b text); +alter table parted_conflict_1 drop column drp; 
+create unique index on parted_conflict (a, b); +alter table parted_conflict attach partition parted_conflict_1 for values from (0) to (1000); +truncate parted_conflict; +insert into parted_conflict values (50, 'cincuenta', 1); +insert into parted_conflict values (50, 'cincuenta', 2) + on conflict (a, b) do update set (a, b, c) = row(excluded.*) + where parted_conflict = (50, text 'cincuenta', 1) and + excluded = (50, text 'cincuenta', 2); +-- should see (50, 'cincuenta', 2) +select * from parted_conflict order by a; + a | b | c +----+-----------+--- + 50 | cincuenta | 2 +(1 row) + +-- test with statement level triggers +create or replace function parted_conflict_update_func() returns trigger as $$ +declare + r record; +begin + for r in select * from inserted loop + raise notice 'a = %, b = %, c = %', r.a, r.b, r.c; + end loop; + return new; +end; +$$ language plpgsql; +create trigger parted_conflict_update + after update on parted_conflict + referencing new table as inserted + for each statement + execute procedure parted_conflict_update_func(); +truncate parted_conflict; +insert into parted_conflict values (0, 'cero', 1); +insert into parted_conflict values(0, 'cero', 1) + on conflict (a,b) do update set c = parted_conflict.c + 1; +NOTICE: a = 0, b = cero, c = 2 +drop table parted_conflict; +drop function parted_conflict_update_func(); diff --git a/src/test/regress/expected/int2.out b/src/test/regress/expected/int2.out new file mode 100644 index 0000000..4e03a5f --- /dev/null +++ b/src/test/regress/expected/int2.out @@ -0,0 +1,486 @@ +-- +-- INT2 +-- +-- int2_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. +INSERT INTO INT2_TBL(f1) VALUES ('34.5'); +ERROR: invalid input syntax for type smallint: "34.5" +LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('34.5'); + ^ +INSERT INTO INT2_TBL(f1) VALUES ('100000'); +ERROR: value "100000" is out of range for type smallint +LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('100000'); + ^ +INSERT INTO INT2_TBL(f1) VALUES ('asdf'); +ERROR: invalid input syntax for type smallint: "asdf" +LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('asdf'); + ^ +INSERT INTO INT2_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type smallint: " " +LINE 1: INSERT INTO INT2_TBL(f1) VALUES (' '); + ^ +INSERT INTO INT2_TBL(f1) VALUES ('- 1234'); +ERROR: invalid input syntax for type smallint: "- 1234" +LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('- 1234'); + ^ +INSERT INTO INT2_TBL(f1) VALUES ('4 444'); +ERROR: invalid input syntax for type smallint: "4 444" +LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('4 444'); + ^ +INSERT INTO INT2_TBL(f1) VALUES ('123 dt'); +ERROR: invalid input syntax for type smallint: "123 dt" +LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('123 dt'); + ^ +INSERT INTO INT2_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type smallint: "" +LINE 1: INSERT INTO INT2_TBL(f1) VALUES (''); + ^ +SELECT * FROM INT2_TBL; + f1 +-------- + 0 + 1234 + -1234 + 32767 + -32767 +(5 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34', 'int2'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('asdf', 'int2'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('50000', 'int2'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('50000', 'int2'); + message | detail | hint | sql_error_code +-------------------------------------------------+--------+------+---------------- + value "50000" is out of range for 
type smallint | | | 22003 +(1 row) + +-- While we're here, check int2vector as well +SELECT pg_input_is_valid(' 1 3 5 ', 'int2vector'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT * FROM pg_input_error_info('1 asdf', 'int2vector'); + message | detail | hint | sql_error_code +------------------------------------------------+--------+------+---------------- + invalid input syntax for type smallint: "asdf" | | | 22P02 +(1 row) + +SELECT * FROM pg_input_error_info('50000', 'int2vector'); + message | detail | hint | sql_error_code +-------------------------------------------------+--------+------+---------------- + value "50000" is out of range for type smallint | | | 22003 +(1 row) + +SELECT * FROM INT2_TBL AS f(a, b); +ERROR: table "f" has 1 columns available but 2 columns specified +SELECT * FROM (TABLE int2_tbl) AS s (a, b); +ERROR: table "s" has 1 columns available but 2 columns specified +SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int2 '0'; + f1 +-------- + 1234 + -1234 + 32767 + -32767 +(4 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 <> int4 '0'; + f1 +-------- + 1234 + -1234 + 32767 + -32767 +(4 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 = int2 '0'; + f1 +---- + 0 +(1 row) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 = int4 '0'; + f1 +---- + 0 +(1 row) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 < int2 '0'; + f1 +-------- + -1234 + -32767 +(2 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 < int4 '0'; + f1 +-------- + -1234 + -32767 +(2 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int2 '0'; + f1 +-------- + 0 + -1234 + -32767 +(3 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 <= int4 '0'; + f1 +-------- + 0 + -1234 + -32767 +(3 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 > int2 '0'; + f1 +------- + 1234 + 32767 +(2 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 > int4 '0'; + f1 +------- + 1234 + 32767 +(2 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int2 '0'; + f1 +------- + 0 + 1234 + 32767 +(3 rows) + +SELECT i.* FROM INT2_TBL i WHERE i.f1 >= int4 '0'; + f1 +------- + 0 + 1234 + 32767 +(3 rows) + +-- positive odds +SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int2 '2') = int2 '1'; + f1 +------- + 32767 +(1 row) + +-- any evens +SELECT i.* FROM INT2_TBL i WHERE (i.f1 % int4 '2') = int2 '0'; + f1 +------- + 0 + 1234 + -1234 +(3 rows) + +SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i; +ERROR: smallint out of range +SELECT i.f1, i.f1 * int2 '2' AS x FROM INT2_TBL i +WHERE abs(f1) < 16384; + f1 | x +-------+------- + 0 | 0 + 1234 | 2468 + -1234 | -2468 +(3 rows) + +SELECT i.f1, i.f1 * int4 '2' AS x FROM INT2_TBL i; + f1 | x +--------+-------- + 0 | 0 + 1234 | 2468 + -1234 | -2468 + 32767 | 65534 + -32767 | -65534 +(5 rows) + +SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i; +ERROR: smallint out of range +SELECT i.f1, i.f1 + int2 '2' AS x FROM INT2_TBL i +WHERE f1 < 32766; + f1 | x +--------+-------- + 0 | 2 + 1234 | 1236 + -1234 | -1232 + -32767 | -32765 +(4 rows) + +SELECT i.f1, i.f1 + int4 '2' AS x FROM INT2_TBL i; + f1 | x +--------+-------- + 0 | 2 + 1234 | 1236 + -1234 | -1232 + 32767 | 32769 + -32767 | -32765 +(5 rows) + +SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i; +ERROR: smallint out of range +SELECT i.f1, i.f1 - int2 '2' AS x FROM INT2_TBL i +WHERE f1 > -32767; + f1 | x +-------+------- + 0 | -2 + 1234 | 1232 + -1234 | -1236 + 32767 | 32765 +(4 rows) + +SELECT i.f1, i.f1 - int4 '2' AS x FROM INT2_TBL i; + f1 | x +--------+-------- + 0 | -2 + 1234 | 1232 + -1234 | -1236 + 32767 | 32765 + -32767 | -32769 +(5 rows) + +SELECT i.f1, i.f1 / int2 '2' 
AS x FROM INT2_TBL i; + f1 | x +--------+-------- + 0 | 0 + 1234 | 617 + -1234 | -617 + 32767 | 16383 + -32767 | -16383 +(5 rows) + +SELECT i.f1, i.f1 / int4 '2' AS x FROM INT2_TBL i; + f1 | x +--------+-------- + 0 | 0 + 1234 | 617 + -1234 | -617 + 32767 | 16383 + -32767 | -16383 +(5 rows) + +-- corner cases +SELECT (-1::int2<<15)::text; + text +-------- + -32768 +(1 row) + +SELECT ((-1::int2<<15)+1::int2)::text; + text +-------- + -32767 +(1 row) + +-- check sane handling of INT16_MIN overflow cases +SELECT (-32768)::int2 * (-1)::int2; +ERROR: smallint out of range +SELECT (-32768)::int2 / (-1)::int2; +ERROR: smallint out of range +SELECT (-32768)::int2 % (-1)::int2; + ?column? +---------- + 0 +(1 row) + +-- check rounding when casting from float +SELECT x, x::int2 AS int2_value +FROM (VALUES (-2.5::float8), + (-1.5::float8), + (-0.5::float8), + (0.0::float8), + (0.5::float8), + (1.5::float8), + (2.5::float8)) t(x); + x | int2_value +------+------------ + -2.5 | -2 + -1.5 | -2 + -0.5 | 0 + 0 | 0 + 0.5 | 0 + 1.5 | 2 + 2.5 | 2 +(7 rows) + +-- check rounding when casting from numeric +SELECT x, x::int2 AS int2_value +FROM (VALUES (-2.5::numeric), + (-1.5::numeric), + (-0.5::numeric), + (0.0::numeric), + (0.5::numeric), + (1.5::numeric), + (2.5::numeric)) t(x); + x | int2_value +------+------------ + -2.5 | -3 + -1.5 | -2 + -0.5 | -1 + 0.0 | 0 + 0.5 | 1 + 1.5 | 2 + 2.5 | 3 +(7 rows) + +-- non-decimal literals +SELECT int2 '0b100101'; + int2 +------ + 37 +(1 row) + +SELECT int2 '0o273'; + int2 +------ + 187 +(1 row) + +SELECT int2 '0x42F'; + int2 +------ + 1071 +(1 row) + +SELECT int2 '0b'; +ERROR: invalid input syntax for type smallint: "0b" +LINE 1: SELECT int2 '0b'; + ^ +SELECT int2 '0o'; +ERROR: invalid input syntax for type smallint: "0o" +LINE 1: SELECT int2 '0o'; + ^ +SELECT int2 '0x'; +ERROR: invalid input syntax for type smallint: "0x" +LINE 1: SELECT int2 '0x'; + ^ +-- cases near overflow +SELECT int2 '0b111111111111111'; + int2 +------- + 32767 +(1 row) + +SELECT int2 '0b1000000000000000'; +ERROR: value "0b1000000000000000" is out of range for type smallint +LINE 1: SELECT int2 '0b1000000000000000'; + ^ +SELECT int2 '0o77777'; + int2 +------- + 32767 +(1 row) + +SELECT int2 '0o100000'; +ERROR: value "0o100000" is out of range for type smallint +LINE 1: SELECT int2 '0o100000'; + ^ +SELECT int2 '0x7FFF'; + int2 +------- + 32767 +(1 row) + +SELECT int2 '0x8000'; +ERROR: value "0x8000" is out of range for type smallint +LINE 1: SELECT int2 '0x8000'; + ^ +SELECT int2 '-0b1000000000000000'; + int2 +-------- + -32768 +(1 row) + +SELECT int2 '-0b1000000000000001'; +ERROR: value "-0b1000000000000001" is out of range for type smallint +LINE 1: SELECT int2 '-0b1000000000000001'; + ^ +SELECT int2 '-0o100000'; + int2 +-------- + -32768 +(1 row) + +SELECT int2 '-0o100001'; +ERROR: value "-0o100001" is out of range for type smallint +LINE 1: SELECT int2 '-0o100001'; + ^ +SELECT int2 '-0x8000'; + int2 +-------- + -32768 +(1 row) + +SELECT int2 '-0x8001'; +ERROR: value "-0x8001" is out of range for type smallint +LINE 1: SELECT int2 '-0x8001'; + ^ +-- underscores +SELECT int2 '1_000'; + int2 +------ + 1000 +(1 row) + +SELECT int2 '1_2_3'; + int2 +------ + 123 +(1 row) + +SELECT int2 '0xE_FF'; + int2 +------ + 3839 +(1 row) + +SELECT int2 '0o2_73'; + int2 +------ + 187 +(1 row) + +SELECT int2 '0b_10_0101'; + int2 +------ + 37 +(1 row) + +-- error cases +SELECT int2 '_100'; +ERROR: invalid input syntax for type smallint: "_100" +LINE 1: SELECT int2 '_100'; + ^ +SELECT int2 '100_'; +ERROR: invalid 
input syntax for type smallint: "100_" +LINE 1: SELECT int2 '100_'; + ^ +SELECT int2 '10__000'; +ERROR: invalid input syntax for type smallint: "10__000" +LINE 1: SELECT int2 '10__000'; + ^ diff --git a/src/test/regress/expected/int4.out b/src/test/regress/expected/int4.out new file mode 100644 index 0000000..b1a1588 --- /dev/null +++ b/src/test/regress/expected/int4.out @@ -0,0 +1,594 @@ +-- +-- INT4 +-- +-- int4_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. +INSERT INTO INT4_TBL(f1) VALUES ('34.5'); +ERROR: invalid input syntax for type integer: "34.5" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('34.5'); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('1000000000000'); +ERROR: value "1000000000000" is out of range for type integer +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('1000000000000'); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('asdf'); +ERROR: invalid input syntax for type integer: "asdf" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('asdf'); + ^ +INSERT INTO INT4_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type integer: " " +LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' '); + ^ +INSERT INTO INT4_TBL(f1) VALUES (' asdf '); +ERROR: invalid input syntax for type integer: " asdf " +LINE 1: INSERT INTO INT4_TBL(f1) VALUES (' asdf '); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('- 1234'); +ERROR: invalid input syntax for type integer: "- 1234" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('- 1234'); + ^ +INSERT INTO INT4_TBL(f1) VALUES ('123 5'); +ERROR: invalid input syntax for type integer: "123 5" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('123 5'); + ^ +INSERT INTO INT4_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type integer: "" +LINE 1: INSERT INTO INT4_TBL(f1) VALUES (''); + ^ +SELECT * FROM INT4_TBL; + f1 +------------- + 0 + 123456 + -123456 + 2147483647 + -2147483647 +(5 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34', 'int4'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('asdf', 'int4'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('1000000000000', 'int4'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1000000000000', 'int4'); + message | detail | hint | sql_error_code +--------------------------------------------------------+--------+------+---------------- + value "1000000000000" is out of range for type integer | | | 22003 +(1 row) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int2 '0'; + f1 +------------- + 123456 + -123456 + 2147483647 + -2147483647 +(4 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 <> int4 '0'; + f1 +------------- + 123456 + -123456 + 2147483647 + -2147483647 +(4 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 = int2 '0'; + f1 +---- + 0 +(1 row) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 = int4 '0'; + f1 +---- + 0 +(1 row) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 < int2 '0'; + f1 +------------- + -123456 + -2147483647 +(2 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 < int4 '0'; + f1 +------------- + -123456 + -2147483647 +(2 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int2 '0'; + f1 +------------- + 0 + -123456 + -2147483647 +(3 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 <= int4 '0'; + f1 +------------- + 0 + -123456 + -2147483647 +(3 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 > int2 '0'; + f1 +------------ + 123456 + 2147483647 +(2 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 > int4 '0'; + f1 +------------ + 123456 + 2147483647 +(2 rows) + +SELECT i.* 
FROM INT4_TBL i WHERE i.f1 >= int2 '0'; + f1 +------------ + 0 + 123456 + 2147483647 +(3 rows) + +SELECT i.* FROM INT4_TBL i WHERE i.f1 >= int4 '0'; + f1 +------------ + 0 + 123456 + 2147483647 +(3 rows) + +-- positive odds +SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int2 '2') = int2 '1'; + f1 +------------ + 2147483647 +(1 row) + +-- any evens +SELECT i.* FROM INT4_TBL i WHERE (i.f1 % int4 '2') = int2 '0'; + f1 +--------- + 0 + 123456 + -123456 +(3 rows) + +SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i; +ERROR: integer out of range +SELECT i.f1, i.f1 * int2 '2' AS x FROM INT4_TBL i +WHERE abs(f1) < 1073741824; + f1 | x +---------+--------- + 0 | 0 + 123456 | 246912 + -123456 | -246912 +(3 rows) + +SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i; +ERROR: integer out of range +SELECT i.f1, i.f1 * int4 '2' AS x FROM INT4_TBL i +WHERE abs(f1) < 1073741824; + f1 | x +---------+--------- + 0 | 0 + 123456 | 246912 + -123456 | -246912 +(3 rows) + +SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i; +ERROR: integer out of range +SELECT i.f1, i.f1 + int2 '2' AS x FROM INT4_TBL i +WHERE f1 < 2147483646; + f1 | x +-------------+------------- + 0 | 2 + 123456 | 123458 + -123456 | -123454 + -2147483647 | -2147483645 +(4 rows) + +SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i; +ERROR: integer out of range +SELECT i.f1, i.f1 + int4 '2' AS x FROM INT4_TBL i +WHERE f1 < 2147483646; + f1 | x +-------------+------------- + 0 | 2 + 123456 | 123458 + -123456 | -123454 + -2147483647 | -2147483645 +(4 rows) + +SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i; +ERROR: integer out of range +SELECT i.f1, i.f1 - int2 '2' AS x FROM INT4_TBL i +WHERE f1 > -2147483647; + f1 | x +------------+------------ + 0 | -2 + 123456 | 123454 + -123456 | -123458 + 2147483647 | 2147483645 +(4 rows) + +SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i; +ERROR: integer out of range +SELECT i.f1, i.f1 - int4 '2' AS x FROM INT4_TBL i +WHERE f1 > -2147483647; + f1 | x +------------+------------ + 0 | -2 + 123456 | 123454 + -123456 | -123458 + 2147483647 | 2147483645 +(4 rows) + +SELECT i.f1, i.f1 / int2 '2' AS x FROM INT4_TBL i; + f1 | x +-------------+------------- + 0 | 0 + 123456 | 61728 + -123456 | -61728 + 2147483647 | 1073741823 + -2147483647 | -1073741823 +(5 rows) + +SELECT i.f1, i.f1 / int4 '2' AS x FROM INT4_TBL i; + f1 | x +-------------+------------- + 0 | 0 + 123456 | 61728 + -123456 | -61728 + 2147483647 | 1073741823 + -2147483647 | -1073741823 +(5 rows) + +-- +-- more complex expressions +-- +-- variations on unary minus parsing +SELECT -2+3 AS one; + one +----- + 1 +(1 row) + +SELECT 4-2 AS two; + two +----- + 2 +(1 row) + +SELECT 2- -1 AS three; + three +------- + 3 +(1 row) + +SELECT 2 - -2 AS four; + four +------ + 4 +(1 row) + +SELECT int2 '2' * int2 '2' = int2 '16' / int2 '4' AS true; + true +------ + t +(1 row) + +SELECT int4 '2' * int2 '2' = int2 '16' / int4 '4' AS true; + true +------ + t +(1 row) + +SELECT int2 '2' * int4 '2' = int4 '16' / int2 '4' AS true; + true +------ + t +(1 row) + +SELECT int4 '1000' < int4 '999' AS false; + false +------- + f +(1 row) + +SELECT 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS ten; + ten +----- + 10 +(1 row) + +SELECT 2 + 2 / 2 AS three; + three +------- + 3 +(1 row) + +SELECT (2 + 2) / 2 AS two; + two +----- + 2 +(1 row) + +-- corner case +SELECT (-1::int4<<31)::text; + text +------------- + -2147483648 +(1 row) + +SELECT ((-1::int4<<31)+1)::text; + text +------------- + -2147483647 +(1 row) + +-- check sane handling of INT_MIN overflow cases +SELECT (-2147483648)::int4 * 
(-1)::int4; +ERROR: integer out of range +SELECT (-2147483648)::int4 / (-1)::int4; +ERROR: integer out of range +SELECT (-2147483648)::int4 % (-1)::int4; + ?column? +---------- + 0 +(1 row) + +SELECT (-2147483648)::int4 * (-1)::int2; +ERROR: integer out of range +SELECT (-2147483648)::int4 / (-1)::int2; +ERROR: integer out of range +SELECT (-2147483648)::int4 % (-1)::int2; + ?column? +---------- + 0 +(1 row) + +-- check rounding when casting from float +SELECT x, x::int4 AS int4_value +FROM (VALUES (-2.5::float8), + (-1.5::float8), + (-0.5::float8), + (0.0::float8), + (0.5::float8), + (1.5::float8), + (2.5::float8)) t(x); + x | int4_value +------+------------ + -2.5 | -2 + -1.5 | -2 + -0.5 | 0 + 0 | 0 + 0.5 | 0 + 1.5 | 2 + 2.5 | 2 +(7 rows) + +-- check rounding when casting from numeric +SELECT x, x::int4 AS int4_value +FROM (VALUES (-2.5::numeric), + (-1.5::numeric), + (-0.5::numeric), + (0.0::numeric), + (0.5::numeric), + (1.5::numeric), + (2.5::numeric)) t(x); + x | int4_value +------+------------ + -2.5 | -3 + -1.5 | -2 + -0.5 | -1 + 0.0 | 0 + 0.5 | 1 + 1.5 | 2 + 2.5 | 3 +(7 rows) + +-- test gcd() +SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a) +FROM (VALUES (0::int4, 0::int4), + (0::int4, 6410818::int4), + (61866666::int4, 6410818::int4), + (-61866666::int4, 6410818::int4), + ((-2147483648)::int4, 1::int4), + ((-2147483648)::int4, 2147483647::int4), + ((-2147483648)::int4, 1073741824::int4)) AS v(a, b); + a | b | gcd | gcd | gcd | gcd +-------------+------------+------------+------------+------------+------------ + 0 | 0 | 0 | 0 | 0 | 0 + 0 | 6410818 | 6410818 | 6410818 | 6410818 | 6410818 + 61866666 | 6410818 | 1466 | 1466 | 1466 | 1466 + -61866666 | 6410818 | 1466 | 1466 | 1466 | 1466 + -2147483648 | 1 | 1 | 1 | 1 | 1 + -2147483648 | 2147483647 | 1 | 1 | 1 | 1 + -2147483648 | 1073741824 | 1073741824 | 1073741824 | 1073741824 | 1073741824 +(7 rows) + +SELECT gcd((-2147483648)::int4, 0::int4); -- overflow +ERROR: integer out of range +SELECT gcd((-2147483648)::int4, (-2147483648)::int4); -- overflow +ERROR: integer out of range +-- test lcm() +SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a) +FROM (VALUES (0::int4, 0::int4), + (0::int4, 42::int4), + (42::int4, 42::int4), + (330::int4, 462::int4), + (-330::int4, 462::int4), + ((-2147483648)::int4, 0::int4)) AS v(a, b); + a | b | lcm | lcm | lcm | lcm +-------------+-----+------+------+------+------ + 0 | 0 | 0 | 0 | 0 | 0 + 0 | 42 | 0 | 0 | 0 | 0 + 42 | 42 | 42 | 42 | 42 | 42 + 330 | 462 | 2310 | 2310 | 2310 | 2310 + -330 | 462 | 2310 | 2310 | 2310 | 2310 + -2147483648 | 0 | 0 | 0 | 0 | 0 +(6 rows) + +SELECT lcm((-2147483648)::int4, 1::int4); -- overflow +ERROR: integer out of range +SELECT lcm(2147483647::int4, 2147483646::int4); -- overflow +ERROR: integer out of range +-- non-decimal literals +SELECT int4 '0b100101'; + int4 +------ + 37 +(1 row) + +SELECT int4 '0o273'; + int4 +------ + 187 +(1 row) + +SELECT int4 '0x42F'; + int4 +------ + 1071 +(1 row) + +SELECT int4 '0b'; +ERROR: invalid input syntax for type integer: "0b" +LINE 1: SELECT int4 '0b'; + ^ +SELECT int4 '0o'; +ERROR: invalid input syntax for type integer: "0o" +LINE 1: SELECT int4 '0o'; + ^ +SELECT int4 '0x'; +ERROR: invalid input syntax for type integer: "0x" +LINE 1: SELECT int4 '0x'; + ^ +-- cases near overflow +SELECT int4 '0b1111111111111111111111111111111'; + int4 +------------ + 2147483647 +(1 row) + +SELECT int4 '0b10000000000000000000000000000000'; +ERROR: value "0b10000000000000000000000000000000" is out of range for type integer +LINE 1: 
SELECT int4 '0b10000000000000000000000000000000'; + ^ +SELECT int4 '0o17777777777'; + int4 +------------ + 2147483647 +(1 row) + +SELECT int4 '0o20000000000'; +ERROR: value "0o20000000000" is out of range for type integer +LINE 1: SELECT int4 '0o20000000000'; + ^ +SELECT int4 '0x7FFFFFFF'; + int4 +------------ + 2147483647 +(1 row) + +SELECT int4 '0x80000000'; +ERROR: value "0x80000000" is out of range for type integer +LINE 1: SELECT int4 '0x80000000'; + ^ +SELECT int4 '-0b10000000000000000000000000000000'; + int4 +------------- + -2147483648 +(1 row) + +SELECT int4 '-0b10000000000000000000000000000001'; +ERROR: value "-0b10000000000000000000000000000001" is out of range for type integer +LINE 1: SELECT int4 '-0b10000000000000000000000000000001'; + ^ +SELECT int4 '-0o20000000000'; + int4 +------------- + -2147483648 +(1 row) + +SELECT int4 '-0o20000000001'; +ERROR: value "-0o20000000001" is out of range for type integer +LINE 1: SELECT int4 '-0o20000000001'; + ^ +SELECT int4 '-0x80000000'; + int4 +------------- + -2147483648 +(1 row) + +SELECT int4 '-0x80000001'; +ERROR: value "-0x80000001" is out of range for type integer +LINE 1: SELECT int4 '-0x80000001'; + ^ +-- underscores +SELECT int4 '1_000_000'; + int4 +--------- + 1000000 +(1 row) + +SELECT int4 '1_2_3'; + int4 +------ + 123 +(1 row) + +SELECT int4 '0x1EEE_FFFF'; + int4 +----------- + 518979583 +(1 row) + +SELECT int4 '0o2_73'; + int4 +------ + 187 +(1 row) + +SELECT int4 '0b_10_0101'; + int4 +------ + 37 +(1 row) + +-- error cases +SELECT int4 '_100'; +ERROR: invalid input syntax for type integer: "_100" +LINE 1: SELECT int4 '_100'; + ^ +SELECT int4 '100_'; +ERROR: invalid input syntax for type integer: "100_" +LINE 1: SELECT int4 '100_'; + ^ +SELECT int4 '100__000'; +ERROR: invalid input syntax for type integer: "100__000" +LINE 1: SELECT int4 '100__000'; + ^ diff --git a/src/test/regress/expected/int8.out b/src/test/regress/expected/int8.out new file mode 100644 index 0000000..fddc09f --- /dev/null +++ b/src/test/regress/expected/int8.out @@ -0,0 +1,1092 @@ +-- +-- INT8 +-- Test int8 64-bit integers. +-- +-- int8_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. +INSERT INTO INT8_TBL(q1) VALUES (' '); +ERROR: invalid input syntax for type bigint: " " +LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' '); + ^ +INSERT INTO INT8_TBL(q1) VALUES ('xxx'); +ERROR: invalid input syntax for type bigint: "xxx" +LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('xxx'); + ^ +INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485'); +ERROR: value "3908203590239580293850293850329485" is out of range for type bigint +LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('39082035902395802938502938... + ^ +INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340329840934'); +ERROR: value "-1204982019841029840928340329840934" is out of range for type bigint +LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('-1204982019841029840928340... 
+ ^ +INSERT INTO INT8_TBL(q1) VALUES ('- 123'); +ERROR: invalid input syntax for type bigint: "- 123" +LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('- 123'); + ^ +INSERT INTO INT8_TBL(q1) VALUES (' 345 5'); +ERROR: invalid input syntax for type bigint: " 345 5" +LINE 1: INSERT INTO INT8_TBL(q1) VALUES (' 345 5'); + ^ +INSERT INTO INT8_TBL(q1) VALUES (''); +ERROR: invalid input syntax for type bigint: "" +LINE 1: INSERT INTO INT8_TBL(q1) VALUES (''); + ^ +SELECT * FROM INT8_TBL; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34', 'int8'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('asdf', 'int8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('10000000000000000000', 'int8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('10000000000000000000', 'int8'); + message | detail | hint | sql_error_code +--------------------------------------------------------------+--------+------+---------------- + value "10000000000000000000" is out of range for type bigint | | | 22003 +(1 row) + +-- int8/int8 cmp +SELECT * FROM INT8_TBL WHERE q2 = 4567890123456789; + q1 | q2 +------------------+------------------ + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE q2 <> 4567890123456789; + q1 | q2 +------------------+------------------- + 123 | 456 + 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE q2 < 4567890123456789; + q1 | q2 +------------------+------------------- + 123 | 456 + 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE q2 > 4567890123456789; + q1 | q2 +----+---- +(0 rows) + +SELECT * FROM INT8_TBL WHERE q2 <= 4567890123456789; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +SELECT * FROM INT8_TBL WHERE q2 >= 4567890123456789; + q1 | q2 +------------------+------------------ + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(2 rows) + +-- int8/int4 cmp +SELECT * FROM INT8_TBL WHERE q2 = 456; + q1 | q2 +-----+----- + 123 | 456 +(1 row) + +SELECT * FROM INT8_TBL WHERE q2 <> 456; + q1 | q2 +------------------+------------------- + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(4 rows) + +SELECT * FROM INT8_TBL WHERE q2 < 456; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE q2 > 456; + q1 | q2 +------------------+------------------ + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE q2 <= 456; + q1 | q2 +------------------+------------------- + 123 | 456 + 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE q2 >= 456; + q1 | q2 +------------------+------------------ + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(3 rows) + +-- int4/int8 cmp +SELECT * FROM INT8_TBL WHERE 123 = q1; + q1 | q2 +-----+------------------ + 123 | 456 + 
123 | 4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE 123 <> q1; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE 123 < q1; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE 123 > q1; + q1 | q2 +----+---- +(0 rows) + +SELECT * FROM INT8_TBL WHERE 123 <= q1; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +SELECT * FROM INT8_TBL WHERE 123 >= q1; + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +-- int8/int2 cmp +SELECT * FROM INT8_TBL WHERE q2 = '456'::int2; + q1 | q2 +-----+----- + 123 | 456 +(1 row) + +SELECT * FROM INT8_TBL WHERE q2 <> '456'::int2; + q1 | q2 +------------------+------------------- + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(4 rows) + +SELECT * FROM INT8_TBL WHERE q2 < '456'::int2; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE q2 > '456'::int2; + q1 | q2 +------------------+------------------ + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE q2 <= '456'::int2; + q1 | q2 +------------------+------------------- + 123 | 456 + 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE q2 >= '456'::int2; + q1 | q2 +------------------+------------------ + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(3 rows) + +-- int2/int8 cmp +SELECT * FROM INT8_TBL WHERE '123'::int2 = q1; + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +SELECT * FROM INT8_TBL WHERE '123'::int2 <> q1; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE '123'::int2 < q1; + q1 | q2 +------------------+------------------- + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(3 rows) + +SELECT * FROM INT8_TBL WHERE '123'::int2 > q1; + q1 | q2 +----+---- +(0 rows) + +SELECT * FROM INT8_TBL WHERE '123'::int2 <= q1; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +SELECT * FROM INT8_TBL WHERE '123'::int2 >= q1; + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +SELECT q1 AS plus, -q1 AS minus FROM INT8_TBL; + plus | minus +------------------+------------------- + 123 | -123 + 123 | -123 + 4567890123456789 | -4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +SELECT q1, q2, q1 + q2 AS plus FROM INT8_TBL; + q1 | q2 | plus +------------------+-------------------+------------------ + 123 | 456 | 579 + 123 | 4567890123456789 | 4567890123456912 + 4567890123456789 | 123 | 4567890123456912 + 4567890123456789 | 4567890123456789 | 9135780246913578 + 
4567890123456789 | -4567890123456789 | 0 +(5 rows) + +SELECT q1, q2, q1 - q2 AS minus FROM INT8_TBL; + q1 | q2 | minus +------------------+-------------------+------------------- + 123 | 456 | -333 + 123 | 4567890123456789 | -4567890123456666 + 4567890123456789 | 123 | 4567890123456666 + 4567890123456789 | 4567890123456789 | 0 + 4567890123456789 | -4567890123456789 | 9135780246913578 +(5 rows) + +SELECT q1, q2, q1 * q2 AS multiply FROM INT8_TBL; +ERROR: bigint out of range +SELECT q1, q2, q1 * q2 AS multiply FROM INT8_TBL + WHERE q1 < 1000 or (q2 > 0 and q2 < 1000); + q1 | q2 | multiply +------------------+------------------+-------------------- + 123 | 456 | 56088 + 123 | 4567890123456789 | 561850485185185047 + 4567890123456789 | 123 | 561850485185185047 +(3 rows) + +SELECT q1, q2, q1 / q2 AS divide, q1 % q2 AS mod FROM INT8_TBL; + q1 | q2 | divide | mod +------------------+-------------------+----------------+----- + 123 | 456 | 0 | 123 + 123 | 4567890123456789 | 0 | 123 + 4567890123456789 | 123 | 37137318076884 | 57 + 4567890123456789 | 4567890123456789 | 1 | 0 + 4567890123456789 | -4567890123456789 | -1 | 0 +(5 rows) + +SELECT q1, float8(q1) FROM INT8_TBL; + q1 | float8 +------------------+----------------------- + 123 | 123 + 123 | 123 + 4567890123456789 | 4.567890123456789e+15 + 4567890123456789 | 4.567890123456789e+15 + 4567890123456789 | 4.567890123456789e+15 +(5 rows) + +SELECT q2, float8(q2) FROM INT8_TBL; + q2 | float8 +-------------------+------------------------ + 456 | 456 + 4567890123456789 | 4.567890123456789e+15 + 123 | 123 + 4567890123456789 | 4.567890123456789e+15 + -4567890123456789 | -4.567890123456789e+15 +(5 rows) + +SELECT 37 + q1 AS plus4 FROM INT8_TBL; + plus4 +------------------ + 160 + 160 + 4567890123456826 + 4567890123456826 + 4567890123456826 +(5 rows) + +SELECT 37 - q1 AS minus4 FROM INT8_TBL; + minus4 +------------------- + -86 + -86 + -4567890123456752 + -4567890123456752 + -4567890123456752 +(5 rows) + +SELECT 2 * q1 AS "twice int4" FROM INT8_TBL; + twice int4 +------------------ + 246 + 246 + 9135780246913578 + 9135780246913578 + 9135780246913578 +(5 rows) + +SELECT q1 * 2 AS "twice int4" FROM INT8_TBL; + twice int4 +------------------ + 246 + 246 + 9135780246913578 + 9135780246913578 + 9135780246913578 +(5 rows) + +-- int8 op int4 +SELECT q1 + 42::int4 AS "8plus4", q1 - 42::int4 AS "8minus4", q1 * 42::int4 AS "8mul4", q1 / 42::int4 AS "8div4" FROM INT8_TBL; + 8plus4 | 8minus4 | 8mul4 | 8div4 +------------------+------------------+--------------------+----------------- + 165 | 81 | 5166 | 2 + 165 | 81 | 5166 | 2 + 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 + 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 + 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 +(5 rows) + +-- int4 op int8 +SELECT 246::int4 + q1 AS "4plus8", 246::int4 - q1 AS "4minus8", 246::int4 * q1 AS "4mul8", 246::int4 / q1 AS "4div8" FROM INT8_TBL; + 4plus8 | 4minus8 | 4mul8 | 4div8 +------------------+-------------------+---------------------+------- + 369 | 123 | 30258 | 2 + 369 | 123 | 30258 | 2 + 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 + 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 + 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 +(5 rows) + +-- int8 op int2 +SELECT q1 + 42::int2 AS "8plus2", q1 - 42::int2 AS "8minus2", q1 * 42::int2 AS "8mul2", q1 / 42::int2 AS "8div2" FROM INT8_TBL; + 8plus2 | 8minus2 | 8mul2 | 8div2 
+------------------+------------------+--------------------+----------------- + 165 | 81 | 5166 | 2 + 165 | 81 | 5166 | 2 + 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 + 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 + 4567890123456831 | 4567890123456747 | 191851385185185138 | 108759288653733 +(5 rows) + +-- int2 op int8 +SELECT 246::int2 + q1 AS "2plus8", 246::int2 - q1 AS "2minus8", 246::int2 * q1 AS "2mul8", 246::int2 / q1 AS "2div8" FROM INT8_TBL; + 2plus8 | 2minus8 | 2mul8 | 2div8 +------------------+-------------------+---------------------+------- + 369 | 123 | 30258 | 2 + 369 | 123 | 30258 | 2 + 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 + 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 + 4567890123457035 | -4567890123456543 | 1123700970370370094 | 0 +(5 rows) + +SELECT q2, abs(q2) FROM INT8_TBL; + q2 | abs +-------------------+------------------ + 456 | 456 + 4567890123456789 | 4567890123456789 + 123 | 123 + 4567890123456789 | 4567890123456789 + -4567890123456789 | 4567890123456789 +(5 rows) + +SELECT min(q1), min(q2) FROM INT8_TBL; + min | min +-----+------------------- + 123 | -4567890123456789 +(1 row) + +SELECT max(q1), max(q2) FROM INT8_TBL; + max | max +------------------+------------------ + 4567890123456789 | 4567890123456789 +(1 row) + +-- TO_CHAR() +-- +SELECT to_char(q1, '9G999G999G999G999G999'), to_char(q2, '9,999,999,999,999,999') + FROM INT8_TBL; + to_char | to_char +------------------------+------------------------ + 123 | 456 + 123 | 4,567,890,123,456,789 + 4,567,890,123,456,789 | 123 + 4,567,890,123,456,789 | 4,567,890,123,456,789 + 4,567,890,123,456,789 | -4,567,890,123,456,789 +(5 rows) + +SELECT to_char(q1, '9G999G999G999G999G999D999G999'), to_char(q2, '9,999,999,999,999,999.999,999') + FROM INT8_TBL; + to_char | to_char +--------------------------------+-------------------------------- + 123.000,000 | 456.000,000 + 123.000,000 | 4,567,890,123,456,789.000,000 + 4,567,890,123,456,789.000,000 | 123.000,000 + 4,567,890,123,456,789.000,000 | 4,567,890,123,456,789.000,000 + 4,567,890,123,456,789.000,000 | -4,567,890,123,456,789.000,000 +(5 rows) + +SELECT to_char( (q1 * -1), '9999999999999999PR'), to_char( (q2 * -1), '9999999999999999.999PR') + FROM INT8_TBL; + to_char | to_char +--------------------+------------------------ + <123> | <456.000> + <123> | <4567890123456789.000> + <4567890123456789> | <123.000> + <4567890123456789> | <4567890123456789.000> + <4567890123456789> | 4567890123456789.000 +(5 rows) + +SELECT to_char( (q1 * -1), '9999999999999999S'), to_char( (q2 * -1), 'S9999999999999999') + FROM INT8_TBL; + to_char | to_char +-------------------+------------------- + 123- | -456 + 123- | -4567890123456789 + 4567890123456789- | -123 + 4567890123456789- | -4567890123456789 + 4567890123456789- | +4567890123456789 +(5 rows) + +SELECT to_char(q2, 'MI9999999999999999') FROM INT8_TBL; + to_char +------------------- + 456 + 4567890123456789 + 123 + 4567890123456789 + -4567890123456789 +(5 rows) + +SELECT to_char(q2, 'FMS9999999999999999') FROM INT8_TBL; + to_char +------------------- + +456 + +4567890123456789 + +123 + +4567890123456789 + -4567890123456789 +(5 rows) + +SELECT to_char(q2, 'FM9999999999999999THPR') FROM INT8_TBL; + to_char +-------------------- + 456TH + 4567890123456789TH + 123RD + 4567890123456789TH + <4567890123456789> +(5 rows) + +SELECT to_char(q2, 'SG9999999999999999th') FROM INT8_TBL; + to_char +--------------------- + + 456th + +4567890123456789th + 
+ 123rd + +4567890123456789th + -4567890123456789 +(5 rows) + +SELECT to_char(q2, '0999999999999999') FROM INT8_TBL; + to_char +------------------- + 0000000000000456 + 4567890123456789 + 0000000000000123 + 4567890123456789 + -4567890123456789 +(5 rows) + +SELECT to_char(q2, 'S0999999999999999') FROM INT8_TBL; + to_char +------------------- + +0000000000000456 + +4567890123456789 + +0000000000000123 + +4567890123456789 + -4567890123456789 +(5 rows) + +SELECT to_char(q2, 'FM0999999999999999') FROM INT8_TBL; + to_char +------------------- + 0000000000000456 + 4567890123456789 + 0000000000000123 + 4567890123456789 + -4567890123456789 +(5 rows) + +SELECT to_char(q2, 'FM9999999999999999.000') FROM INT8_TBL; + to_char +----------------------- + 456.000 + 4567890123456789.000 + 123.000 + 4567890123456789.000 + -4567890123456789.000 +(5 rows) + +SELECT to_char(q2, 'L9999999999999999.000') FROM INT8_TBL; + to_char +------------------------ + 456.000 + 4567890123456789.000 + 123.000 + 4567890123456789.000 + -4567890123456789.000 +(5 rows) + +SELECT to_char(q2, 'FM9999999999999999.999') FROM INT8_TBL; + to_char +-------------------- + 456. + 4567890123456789. + 123. + 4567890123456789. + -4567890123456789. +(5 rows) + +SELECT to_char(q2, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9') FROM INT8_TBL; + to_char +------------------------------------------- + +4 5 6 . 0 0 0 + +4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0 + +1 2 3 . 0 0 0 + +4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0 + -4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 . 0 0 0 +(5 rows) + +SELECT to_char(q2, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM INT8_TBL; + to_char +----------------------------------------------------------- + text 9999 "text between quote marks" 456 + 45678 text 9012 9999 345 "text between quote marks" 6789 + text 9999 "text between quote marks" 123 + 45678 text 9012 9999 345 "text between quote marks" 6789 + -45678 text 9012 9999 345 "text between quote marks" 6789 +(5 rows) + +SELECT to_char(q2, '999999SG9999999999') FROM INT8_TBL; + to_char +------------------- + + 456 + 456789+0123456789 + + 123 + 456789+0123456789 + 456789-0123456789 +(5 rows) + +-- check min/max values and overflow behavior +select '-9223372036854775808'::int8; + int8 +---------------------- + -9223372036854775808 +(1 row) + +select '-9223372036854775809'::int8; +ERROR: value "-9223372036854775809" is out of range for type bigint +LINE 1: select '-9223372036854775809'::int8; + ^ +select '9223372036854775807'::int8; + int8 +--------------------- + 9223372036854775807 +(1 row) + +select '9223372036854775808'::int8; +ERROR: value "9223372036854775808" is out of range for type bigint +LINE 1: select '9223372036854775808'::int8; + ^ +select -('-9223372036854775807'::int8); + ?column? 
+--------------------- + 9223372036854775807 +(1 row) + +select -('-9223372036854775808'::int8); +ERROR: bigint out of range +select 0::int8 - '-9223372036854775808'::int8; +ERROR: bigint out of range +select '9223372036854775800'::int8 + '9223372036854775800'::int8; +ERROR: bigint out of range +select '-9223372036854775800'::int8 + '-9223372036854775800'::int8; +ERROR: bigint out of range +select '9223372036854775800'::int8 - '-9223372036854775800'::int8; +ERROR: bigint out of range +select '-9223372036854775800'::int8 - '9223372036854775800'::int8; +ERROR: bigint out of range +select '9223372036854775800'::int8 * '9223372036854775800'::int8; +ERROR: bigint out of range +select '9223372036854775800'::int8 / '0'::int8; +ERROR: division by zero +select '9223372036854775800'::int8 % '0'::int8; +ERROR: division by zero +select abs('-9223372036854775808'::int8); +ERROR: bigint out of range +select '9223372036854775800'::int8 + '100'::int4; +ERROR: bigint out of range +select '-9223372036854775800'::int8 - '100'::int4; +ERROR: bigint out of range +select '9223372036854775800'::int8 * '100'::int4; +ERROR: bigint out of range +select '100'::int4 + '9223372036854775800'::int8; +ERROR: bigint out of range +select '-100'::int4 - '9223372036854775800'::int8; +ERROR: bigint out of range +select '100'::int4 * '9223372036854775800'::int8; +ERROR: bigint out of range +select '9223372036854775800'::int8 + '100'::int2; +ERROR: bigint out of range +select '-9223372036854775800'::int8 - '100'::int2; +ERROR: bigint out of range +select '9223372036854775800'::int8 * '100'::int2; +ERROR: bigint out of range +select '-9223372036854775808'::int8 / '0'::int2; +ERROR: division by zero +select '100'::int2 + '9223372036854775800'::int8; +ERROR: bigint out of range +select '-100'::int2 - '9223372036854775800'::int8; +ERROR: bigint out of range +select '100'::int2 * '9223372036854775800'::int8; +ERROR: bigint out of range +select '100'::int2 / '0'::int8; +ERROR: division by zero +SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 = 456; + q1 +----- + 123 +(1 row) + +SELECT CAST(q1 AS int4) FROM int8_tbl WHERE q2 <> 456; +ERROR: integer out of range +SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 = 456; + q1 +----- + 123 +(1 row) + +SELECT CAST(q1 AS int2) FROM int8_tbl WHERE q2 <> 456; +ERROR: smallint out of range +SELECT CAST('42'::int2 AS int8), CAST('-37'::int2 AS int8); + int8 | int8 +------+------ + 42 | -37 +(1 row) + +SELECT CAST(q1 AS float4), CAST(q2 AS float8) FROM INT8_TBL; + q1 | q2 +-------------+------------------------ + 123 | 456 + 123 | 4.567890123456789e+15 + 4.56789e+15 | 123 + 4.56789e+15 | 4.567890123456789e+15 + 4.56789e+15 | -4.567890123456789e+15 +(5 rows) + +SELECT CAST('36854775807.0'::float4 AS int8); + int8 +------------- + 36854775808 +(1 row) + +SELECT CAST('922337203685477580700.0'::float8 AS int8); +ERROR: bigint out of range +SELECT CAST(q1 AS oid) FROM INT8_TBL; +ERROR: OID out of range +SELECT oid::int8 FROM pg_class WHERE relname = 'pg_class'; + oid +------ + 1259 +(1 row) + +-- bit operations +SELECT q1, q2, q1 & q2 AS "and", q1 | q2 AS "or", q1 # q2 AS "xor", ~q1 AS "not" FROM INT8_TBL; + q1 | q2 | and | or | xor | not +------------------+-------------------+------------------+------------------+------------------+------------------- + 123 | 456 | 72 | 507 | 435 | -124 + 123 | 4567890123456789 | 17 | 4567890123456895 | 4567890123456878 | -124 + 4567890123456789 | 123 | 17 | 4567890123456895 | 4567890123456878 | -4567890123456790 + 4567890123456789 | 4567890123456789 | 4567890123456789 
| 4567890123456789 | 0 | -4567890123456790 + 4567890123456789 | -4567890123456789 | 1 | -1 | -2 | -4567890123456790 +(5 rows) + +SELECT q1, q1 << 2 AS "shl", q1 >> 3 AS "shr" FROM INT8_TBL; + q1 | shl | shr +------------------+-------------------+----------------- + 123 | 492 | 15 + 123 | 492 | 15 + 4567890123456789 | 18271560493827156 | 570986265432098 + 4567890123456789 | 18271560493827156 | 570986265432098 + 4567890123456789 | 18271560493827156 | 570986265432098 +(5 rows) + +-- generate_series +SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8); + generate_series +------------------ + 4567890123456789 + 4567890123456790 + 4567890123456791 + 4567890123456792 + 4567890123456793 + 4567890123456794 + 4567890123456795 + 4567890123456796 + 4567890123456797 + 4567890123456798 + 4567890123456799 +(11 rows) + +SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 0); +ERROR: step size cannot equal zero +SELECT * FROM generate_series('+4567890123456789'::int8, '+4567890123456799'::int8, 2); + generate_series +------------------ + 4567890123456789 + 4567890123456791 + 4567890123456793 + 4567890123456795 + 4567890123456797 + 4567890123456799 +(6 rows) + +-- corner case +SELECT (-1::int8<<63)::text; + text +---------------------- + -9223372036854775808 +(1 row) + +SELECT ((-1::int8<<63)+1)::text; + text +---------------------- + -9223372036854775807 +(1 row) + +-- check sane handling of INT64_MIN overflow cases +SELECT (-9223372036854775808)::int8 * (-1)::int8; +ERROR: bigint out of range +SELECT (-9223372036854775808)::int8 / (-1)::int8; +ERROR: bigint out of range +SELECT (-9223372036854775808)::int8 % (-1)::int8; + ?column? +---------- + 0 +(1 row) + +SELECT (-9223372036854775808)::int8 * (-1)::int4; +ERROR: bigint out of range +SELECT (-9223372036854775808)::int8 / (-1)::int4; +ERROR: bigint out of range +SELECT (-9223372036854775808)::int8 % (-1)::int4; + ?column? +---------- + 0 +(1 row) + +SELECT (-9223372036854775808)::int8 * (-1)::int2; +ERROR: bigint out of range +SELECT (-9223372036854775808)::int8 / (-1)::int2; +ERROR: bigint out of range +SELECT (-9223372036854775808)::int8 % (-1)::int2; + ?column? 
+---------- + 0 +(1 row) + +-- check rounding when casting from float +SELECT x, x::int8 AS int8_value +FROM (VALUES (-2.5::float8), + (-1.5::float8), + (-0.5::float8), + (0.0::float8), + (0.5::float8), + (1.5::float8), + (2.5::float8)) t(x); + x | int8_value +------+------------ + -2.5 | -2 + -1.5 | -2 + -0.5 | 0 + 0 | 0 + 0.5 | 0 + 1.5 | 2 + 2.5 | 2 +(7 rows) + +-- check rounding when casting from numeric +SELECT x, x::int8 AS int8_value +FROM (VALUES (-2.5::numeric), + (-1.5::numeric), + (-0.5::numeric), + (0.0::numeric), + (0.5::numeric), + (1.5::numeric), + (2.5::numeric)) t(x); + x | int8_value +------+------------ + -2.5 | -3 + -1.5 | -2 + -0.5 | -1 + 0.0 | 0 + 0.5 | 1 + 1.5 | 2 + 2.5 | 3 +(7 rows) + +-- test gcd() +SELECT a, b, gcd(a, b), gcd(a, -b), gcd(b, a), gcd(-b, a) +FROM (VALUES (0::int8, 0::int8), + (0::int8, 29893644334::int8), + (288484263558::int8, 29893644334::int8), + (-288484263558::int8, 29893644334::int8), + ((-9223372036854775808)::int8, 1::int8), + ((-9223372036854775808)::int8, 9223372036854775807::int8), + ((-9223372036854775808)::int8, 4611686018427387904::int8)) AS v(a, b); + a | b | gcd | gcd | gcd | gcd +----------------------+---------------------+---------------------+---------------------+---------------------+--------------------- + 0 | 0 | 0 | 0 | 0 | 0 + 0 | 29893644334 | 29893644334 | 29893644334 | 29893644334 | 29893644334 + 288484263558 | 29893644334 | 6835958 | 6835958 | 6835958 | 6835958 + -288484263558 | 29893644334 | 6835958 | 6835958 | 6835958 | 6835958 + -9223372036854775808 | 1 | 1 | 1 | 1 | 1 + -9223372036854775808 | 9223372036854775807 | 1 | 1 | 1 | 1 + -9223372036854775808 | 4611686018427387904 | 4611686018427387904 | 4611686018427387904 | 4611686018427387904 | 4611686018427387904 +(7 rows) + +SELECT gcd((-9223372036854775808)::int8, 0::int8); -- overflow +ERROR: bigint out of range +SELECT gcd((-9223372036854775808)::int8, (-9223372036854775808)::int8); -- overflow +ERROR: bigint out of range +-- test lcm() +SELECT a, b, lcm(a, b), lcm(a, -b), lcm(b, a), lcm(-b, a) +FROM (VALUES (0::int8, 0::int8), + (0::int8, 29893644334::int8), + (29893644334::int8, 29893644334::int8), + (288484263558::int8, 29893644334::int8), + (-288484263558::int8, 29893644334::int8), + ((-9223372036854775808)::int8, 0::int8)) AS v(a, b); + a | b | lcm | lcm | lcm | lcm +----------------------+-------------+------------------+------------------+------------------+------------------ + 0 | 0 | 0 | 0 | 0 | 0 + 0 | 29893644334 | 0 | 0 | 0 | 0 + 29893644334 | 29893644334 | 29893644334 | 29893644334 | 29893644334 | 29893644334 + 288484263558 | 29893644334 | 1261541684539134 | 1261541684539134 | 1261541684539134 | 1261541684539134 + -288484263558 | 29893644334 | 1261541684539134 | 1261541684539134 | 1261541684539134 | 1261541684539134 + -9223372036854775808 | 0 | 0 | 0 | 0 | 0 +(6 rows) + +SELECT lcm((-9223372036854775808)::int8, 1::int8); -- overflow +ERROR: bigint out of range +SELECT lcm(9223372036854775807::int8, 9223372036854775806::int8); -- overflow +ERROR: bigint out of range +-- non-decimal literals +SELECT int8 '0b100101'; + int8 +------ + 37 +(1 row) + +SELECT int8 '0o273'; + int8 +------ + 187 +(1 row) + +SELECT int8 '0x42F'; + int8 +------ + 1071 +(1 row) + +SELECT int8 '0b'; +ERROR: invalid input syntax for type bigint: "0b" +LINE 1: SELECT int8 '0b'; + ^ +SELECT int8 '0o'; +ERROR: invalid input syntax for type bigint: "0o" +LINE 1: SELECT int8 '0o'; + ^ +SELECT int8 '0x'; +ERROR: invalid input syntax for type bigint: "0x" +LINE 1: SELECT int8 '0x'; + ^ +-- 
cases near overflow +SELECT int8 '0b111111111111111111111111111111111111111111111111111111111111111'; + int8 +--------------------- + 9223372036854775807 +(1 row) + +SELECT int8 '0b1000000000000000000000000000000000000000000000000000000000000000'; +ERROR: value "0b1000000000000000000000000000000000000000000000000000000000000000" is out of range for type bigint +LINE 1: SELECT int8 '0b100000000000000000000000000000000000000000000... + ^ +SELECT int8 '0o777777777777777777777'; + int8 +--------------------- + 9223372036854775807 +(1 row) + +SELECT int8 '0o1000000000000000000000'; +ERROR: value "0o1000000000000000000000" is out of range for type bigint +LINE 1: SELECT int8 '0o1000000000000000000000'; + ^ +SELECT int8 '0x7FFFFFFFFFFFFFFF'; + int8 +--------------------- + 9223372036854775807 +(1 row) + +SELECT int8 '0x8000000000000000'; +ERROR: value "0x8000000000000000" is out of range for type bigint +LINE 1: SELECT int8 '0x8000000000000000'; + ^ +SELECT int8 '-0b1000000000000000000000000000000000000000000000000000000000000000'; + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT int8 '-0b1000000000000000000000000000000000000000000000000000000000000001'; +ERROR: value "-0b1000000000000000000000000000000000000000000000000000000000000001" is out of range for type bigint +LINE 1: SELECT int8 '-0b10000000000000000000000000000000000000000000... + ^ +SELECT int8 '-0o1000000000000000000000'; + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT int8 '-0o1000000000000000000001'; +ERROR: value "-0o1000000000000000000001" is out of range for type bigint +LINE 1: SELECT int8 '-0o1000000000000000000001'; + ^ +SELECT int8 '-0x8000000000000000'; + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT int8 '-0x8000000000000001'; +ERROR: value "-0x8000000000000001" is out of range for type bigint +LINE 1: SELECT int8 '-0x8000000000000001'; + ^ +-- underscores +SELECT int8 '1_000_000'; + int8 +--------- + 1000000 +(1 row) + +SELECT int8 '1_2_3'; + int8 +------ + 123 +(1 row) + +SELECT int8 '0x1EEE_FFFF'; + int8 +----------- + 518979583 +(1 row) + +SELECT int8 '0o2_73'; + int8 +------ + 187 +(1 row) + +SELECT int8 '0b_10_0101'; + int8 +------ + 37 +(1 row) + +-- error cases +SELECT int8 '_100'; +ERROR: invalid input syntax for type bigint: "_100" +LINE 1: SELECT int8 '_100'; + ^ +SELECT int8 '100_'; +ERROR: invalid input syntax for type bigint: "100_" +LINE 1: SELECT int8 '100_'; + ^ +SELECT int8 '100__000'; +ERROR: invalid input syntax for type bigint: "100__000" +LINE 1: SELECT int8 '100__000'; + ^ diff --git a/src/test/regress/expected/interval.out b/src/test/regress/expected/interval.out new file mode 100644 index 0000000..9fc9b0f --- /dev/null +++ b/src/test/regress/expected/interval.out @@ -0,0 +1,1802 @@ +-- +-- INTERVAL +-- +SET DATESTYLE = 'ISO'; +SET IntervalStyle to postgres; +-- check acceptance of "time zone style" +SELECT INTERVAL '01:00' AS "One hour"; + One hour +---------- + 01:00:00 +(1 row) + +SELECT INTERVAL '+02:00' AS "Two hours"; + Two hours +----------- + 02:00:00 +(1 row) + +SELECT INTERVAL '-08:00' AS "Eight hours"; + Eight hours +------------- + -08:00:00 +(1 row) + +SELECT INTERVAL '-1 +02:03' AS "22 hours ago..."; + 22 hours ago... +------------------- + -1 days +02:03:00 +(1 row) + +SELECT INTERVAL '-1 days +02:03' AS "22 hours ago..."; + 22 hours ago... 
+------------------- + -1 days +02:03:00 +(1 row) + +SELECT INTERVAL '1.5 weeks' AS "Ten days twelve hours"; + Ten days twelve hours +----------------------- + 10 days 12:00:00 +(1 row) + +SELECT INTERVAL '1.5 months' AS "One month 15 days"; + One month 15 days +------------------- + 1 mon 15 days +(1 row) + +SELECT INTERVAL '10 years -11 month -12 days +13:14' AS "9 years..."; + 9 years... +---------------------------------- + 9 years 1 mon -12 days +13:14:00 +(1 row) + +CREATE TABLE INTERVAL_TBL (f1 interval); +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 1 minute'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 5 hour'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 10 day'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 34 year'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 3 months'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 14 seconds ago'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('1 day 2 hours 3 minutes 4 seconds'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('6 years'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months'); +INSERT INTO INTERVAL_TBL (f1) VALUES ('5 months 12 hours'); +-- badly formatted interval +INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted interval'); +ERROR: invalid input syntax for type interval: "badly formatted interval" +LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('badly formatted inter... + ^ +INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago'); +ERROR: invalid input syntax for type interval: "@ 30 eons ago" +LINE 1: INSERT INTO INTERVAL_TBL (f1) VALUES ('@ 30 eons ago'); + ^ +-- Test non-error-throwing API +SELECT pg_input_is_valid('1.5 weeks', 'interval'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('garbage', 'interval'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('@ 30 eons ago', 'interval'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('garbage', 'interval'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + invalid input syntax for type interval: "garbage" | | | 22007 +(1 row) + +SELECT * FROM pg_input_error_info('@ 30 eons ago', 'interval'); + message | detail | hint | sql_error_code +---------------------------------------------------------+--------+------+---------------- + invalid input syntax for type interval: "@ 30 eons ago" | | | 22007 +(1 row) + +-- test interval operators +SELECT * FROM INTERVAL_TBL; + f1 +----------------- + 00:01:00 + 05:00:00 + 10 days + 34 years + 3 mons + -00:00:14 + 1 day 02:03:04 + 6 years + 5 mons + 5 mons 12:00:00 +(10 rows) + +SELECT * FROM INTERVAL_TBL + WHERE INTERVAL_TBL.f1 <> interval '@ 10 days'; + f1 +----------------- + 00:01:00 + 05:00:00 + 34 years + 3 mons + -00:00:14 + 1 day 02:03:04 + 6 years + 5 mons + 5 mons 12:00:00 +(9 rows) + +SELECT * FROM INTERVAL_TBL + WHERE INTERVAL_TBL.f1 <= interval '@ 5 hours'; + f1 +----------- + 00:01:00 + 05:00:00 + -00:00:14 +(3 rows) + +SELECT * FROM INTERVAL_TBL + WHERE INTERVAL_TBL.f1 < interval '@ 1 day'; + f1 +----------- + 00:01:00 + 05:00:00 + -00:00:14 +(3 rows) + +SELECT * FROM INTERVAL_TBL + WHERE INTERVAL_TBL.f1 = interval '@ 34 years'; + f1 +---------- + 34 years +(1 row) + +SELECT * FROM INTERVAL_TBL + WHERE INTERVAL_TBL.f1 >= interval '@ 1 month'; + f1 +----------------- + 34 years + 3 mons + 6 years + 5 mons + 5 mons 12:00:00 +(5 rows) + +SELECT * FROM INTERVAL_TBL + WHERE INTERVAL_TBL.f1 > interval '@ 3 seconds ago'; + f1 +----------------- + 00:01:00 + 05:00:00 + 
10 days + 34 years + 3 mons + 1 day 02:03:04 + 6 years + 5 mons + 5 mons 12:00:00 +(9 rows) + +SELECT r1.*, r2.* + FROM INTERVAL_TBL r1, INTERVAL_TBL r2 + WHERE r1.f1 > r2.f1 + ORDER BY r1.f1, r2.f1; + f1 | f1 +-----------------+----------------- + 00:01:00 | -00:00:14 + 05:00:00 | -00:00:14 + 05:00:00 | 00:01:00 + 1 day 02:03:04 | -00:00:14 + 1 day 02:03:04 | 00:01:00 + 1 day 02:03:04 | 05:00:00 + 10 days | -00:00:14 + 10 days | 00:01:00 + 10 days | 05:00:00 + 10 days | 1 day 02:03:04 + 3 mons | -00:00:14 + 3 mons | 00:01:00 + 3 mons | 05:00:00 + 3 mons | 1 day 02:03:04 + 3 mons | 10 days + 5 mons | -00:00:14 + 5 mons | 00:01:00 + 5 mons | 05:00:00 + 5 mons | 1 day 02:03:04 + 5 mons | 10 days + 5 mons | 3 mons + 5 mons 12:00:00 | -00:00:14 + 5 mons 12:00:00 | 00:01:00 + 5 mons 12:00:00 | 05:00:00 + 5 mons 12:00:00 | 1 day 02:03:04 + 5 mons 12:00:00 | 10 days + 5 mons 12:00:00 | 3 mons + 5 mons 12:00:00 | 5 mons + 6 years | -00:00:14 + 6 years | 00:01:00 + 6 years | 05:00:00 + 6 years | 1 day 02:03:04 + 6 years | 10 days + 6 years | 3 mons + 6 years | 5 mons + 6 years | 5 mons 12:00:00 + 34 years | -00:00:14 + 34 years | 00:01:00 + 34 years | 05:00:00 + 34 years | 1 day 02:03:04 + 34 years | 10 days + 34 years | 3 mons + 34 years | 5 mons + 34 years | 5 mons 12:00:00 + 34 years | 6 years +(45 rows) + +-- Test intervals that are large enough to overflow 64 bits in comparisons +CREATE TEMP TABLE INTERVAL_TBL_OF (f1 interval); +INSERT INTO INTERVAL_TBL_OF (f1) VALUES + ('2147483647 days 2147483647 months'), + ('2147483647 days -2147483648 months'), + ('1 year'), + ('-2147483648 days 2147483647 months'), + ('-2147483648 days -2147483648 months'); +-- these should fail as out-of-range +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days'); +ERROR: interval field value out of range: "2147483648 days" +LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483648 days'); + ^ +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days'); +ERROR: interval field value out of range: "-2147483649 days" +LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483649 days')... + ^ +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years'); +ERROR: interval out of range +LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('2147483647 years')... + ^ +INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years'); +ERROR: interval out of range +LINE 1: INSERT INTO INTERVAL_TBL_OF (f1) VALUES ('-2147483648 years'... 
+ ^ +-- Test edge-case overflow detection in interval multiplication +select extract(epoch from '256 microseconds'::interval * (2^55)::float8); +ERROR: interval out of range +SELECT r1.*, r2.* + FROM INTERVAL_TBL_OF r1, INTERVAL_TBL_OF r2 + WHERE r1.f1 > r2.f1 + ORDER BY r1.f1, r2.f1; + f1 | f1 +-------------------------------------------+------------------------------------------- + -178956970 years -8 mons +2147483647 days | -178956970 years -8 mons -2147483648 days + 1 year | -178956970 years -8 mons -2147483648 days + 1 year | -178956970 years -8 mons +2147483647 days + 178956970 years 7 mons -2147483648 days | -178956970 years -8 mons -2147483648 days + 178956970 years 7 mons -2147483648 days | -178956970 years -8 mons +2147483647 days + 178956970 years 7 mons -2147483648 days | 1 year + 178956970 years 7 mons 2147483647 days | -178956970 years -8 mons -2147483648 days + 178956970 years 7 mons 2147483647 days | -178956970 years -8 mons +2147483647 days + 178956970 years 7 mons 2147483647 days | 1 year + 178956970 years 7 mons 2147483647 days | 178956970 years 7 mons -2147483648 days +(10 rows) + +CREATE INDEX ON INTERVAL_TBL_OF USING btree (f1); +SET enable_seqscan TO false; +EXPLAIN (COSTS OFF) +SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1; + QUERY PLAN +-------------------------------------------------------------------- + Index Only Scan using interval_tbl_of_f1_idx on interval_tbl_of r1 +(1 row) + +SELECT f1 FROM INTERVAL_TBL_OF r1 ORDER BY f1; + f1 +------------------------------------------- + -178956970 years -8 mons -2147483648 days + -178956970 years -8 mons +2147483647 days + 1 year + 178956970 years 7 mons -2147483648 days + 178956970 years 7 mons 2147483647 days +(5 rows) + +RESET enable_seqscan; +DROP TABLE INTERVAL_TBL_OF; +-- Test multiplication and division with intervals. +-- Floating point arithmetic rounding errors can lead to unexpected results, +-- though the code attempts to do the right thing and round up to days and +-- minutes to avoid results such as '3 days 24:00 hours' or '14:20:60'. +-- Note that it is expected for some day components to be greater than 29 and +-- some time components be greater than 23:59:59 due to how intervals are +-- stored internally. 
+CREATE TABLE INTERVAL_MULDIV_TBL (span interval); +COPY INTERVAL_MULDIV_TBL FROM STDIN; +SELECT span * 0.3 AS product +FROM INTERVAL_MULDIV_TBL; + product +------------------------------------ + 1 year 12 days 122:24:00 + -1 years -12 days +93:36:00 + -3 days -14:24:00 + 2 mons 13 days 01:22:28.8 + -10 mons +120 days 37:28:21.6567 + 1 mon 6 days + 4 mons 6 days + 24 years 11 mons 320 days 16:48:00 +(8 rows) + +SELECT span * 8.2 AS product +FROM INTERVAL_MULDIV_TBL; + product +--------------------------------------------- + 28 years 104 days 2961:36:00 + -28 years -104 days +2942:24:00 + -98 days -09:36:00 + 6 years 1 mon -197 days +93:34:27.2 + -24 years -7 mons +3946 days 640:15:11.9498 + 2 years 8 mons 24 days + 9 years 6 mons 24 days + 682 years 7 mons 8215 days 19:12:00 +(8 rows) + +SELECT span / 10 AS quotient +FROM INTERVAL_MULDIV_TBL; + quotient +---------------------------------- + 4 mons 4 days 40:48:00 + -4 mons -4 days +31:12:00 + -1 days -04:48:00 + 25 days -15:32:30.4 + -3 mons +30 days 12:29:27.2189 + 12 days + 1 mon 12 days + 8 years 3 mons 126 days 21:36:00 +(8 rows) + +SELECT span / 100 AS quotient +FROM INTERVAL_MULDIV_TBL; + quotient +------------------------- + 12 days 13:40:48 + -12 days -06:28:48 + -02:52:48 + 2 days 10:26:44.96 + -6 days +01:14:56.72189 + 1 day 04:48:00 + 4 days 04:48:00 + 9 mons 39 days 16:33:36 +(8 rows) + +DROP TABLE INTERVAL_MULDIV_TBL; +SET DATESTYLE = 'postgres'; +SET IntervalStyle to postgres_verbose; +SELECT * FROM INTERVAL_TBL; + f1 +------------------------------- + @ 1 min + @ 5 hours + @ 10 days + @ 34 years + @ 3 mons + @ 14 secs ago + @ 1 day 2 hours 3 mins 4 secs + @ 6 years + @ 5 mons + @ 5 mons 12 hours +(10 rows) + +-- multiplication and division overflow test cases +SELECT '3000000 months'::interval * 1000; +ERROR: interval out of range +SELECT '3000000 months'::interval / 0.001; +ERROR: interval out of range +SELECT '3000000 days'::interval * 1000; +ERROR: interval out of range +SELECT '3000000 days'::interval / 0.001; +ERROR: interval out of range +SELECT '1 month 2146410 days'::interval * 1000.5002; +ERROR: interval out of range +SELECT '4611686018427387904 usec'::interval / 0.1; +ERROR: interval out of range +-- test avg(interval), which is somewhat fragile since people have been +-- known to change the allowed input syntax for type interval without +-- updating pg_aggregate.agginitval +select avg(f1) from interval_tbl; + avg +------------------------------------------------- + @ 4 years 1 mon 10 days 4 hours 18 mins 23 secs +(1 row) + +-- test long interval input +select '4 millenniums 5 centuries 4 decades 1 year 4 months 4 days 17 minutes 31 seconds'::interval; + interval +-------------------------------------------- + @ 4541 years 4 mons 4 days 17 mins 31 secs +(1 row) + +-- test long interval output +-- Note: the actual maximum length of the interval output is longer, +-- but we need the test to work for both integer and floating-point +-- timestamps. 
+select '100000000y 10mon -1000000000d -100000h -10min -10.000001s ago'::interval; + interval +--------------------------------------------------------------------------------------- + @ 100000000 years 10 mons -1000000000 days -100000 hours -10 mins -10.000001 secs ago +(1 row) + +-- test justify_hours() and justify_days() +SELECT justify_hours(interval '6 months 3 days 52 hours 3 minutes 2 seconds') as "6 mons 5 days 4 hours 3 mins 2 seconds"; + 6 mons 5 days 4 hours 3 mins 2 seconds +---------------------------------------- + @ 6 mons 5 days 4 hours 3 mins 2 secs +(1 row) + +SELECT justify_days(interval '6 months 36 days 5 hours 4 minutes 3 seconds') as "7 mons 6 days 5 hours 4 mins 3 seconds"; + 7 mons 6 days 5 hours 4 mins 3 seconds +---------------------------------------- + @ 7 mons 6 days 5 hours 4 mins 3 secs +(1 row) + +SELECT justify_hours(interval '2147483647 days 24 hrs'); +ERROR: interval out of range +SELECT justify_days(interval '2147483647 months 30 days'); +ERROR: interval out of range +-- test justify_interval() +SELECT justify_interval(interval '1 month -1 hour') as "1 month -1 hour"; + 1 month -1 hour +-------------------- + @ 29 days 23 hours +(1 row) + +SELECT justify_interval(interval '2147483647 days 24 hrs'); + justify_interval +------------------------------- + @ 5965232 years 4 mons 8 days +(1 row) + +SELECT justify_interval(interval '-2147483648 days -24 hrs'); + justify_interval +----------------------------------- + @ 5965232 years 4 mons 9 days ago +(1 row) + +SELECT justify_interval(interval '2147483647 months 30 days'); +ERROR: interval out of range +SELECT justify_interval(interval '-2147483648 months -30 days'); +ERROR: interval out of range +SELECT justify_interval(interval '2147483647 months 30 days -24 hrs'); + justify_interval +---------------------------------- + @ 178956970 years 7 mons 29 days +(1 row) + +SELECT justify_interval(interval '-2147483648 months -30 days 24 hrs'); + justify_interval +-------------------------------------- + @ 178956970 years 8 mons 29 days ago +(1 row) + +SELECT justify_interval(interval '2147483647 months -30 days 1440 hrs'); +ERROR: interval out of range +SELECT justify_interval(interval '-2147483648 months 30 days -1440 hrs'); +ERROR: interval out of range +-- test fractional second input, and detection of duplicate units +SET DATESTYLE = 'ISO'; +SET IntervalStyle TO postgres; +SELECT '1 millisecond'::interval, '1 microsecond'::interval, + '500 seconds 99 milliseconds 51 microseconds'::interval; + interval | interval | interval +--------------+-----------------+----------------- + 00:00:00.001 | 00:00:00.000001 | 00:08:20.099051 +(1 row) + +SELECT '3 days 5 milliseconds'::interval; + interval +--------------------- + 3 days 00:00:00.005 +(1 row) + +SELECT '1 second 2 seconds'::interval; -- error +ERROR: invalid input syntax for type interval: "1 second 2 seconds" +LINE 1: SELECT '1 second 2 seconds'::interval; + ^ +SELECT '10 milliseconds 20 milliseconds'::interval; -- error +ERROR: invalid input syntax for type interval: "10 milliseconds 20 milliseconds" +LINE 1: SELECT '10 milliseconds 20 milliseconds'::interval; + ^ +SELECT '5.5 seconds 3 milliseconds'::interval; -- error +ERROR: invalid input syntax for type interval: "5.5 seconds 3 milliseconds" +LINE 1: SELECT '5.5 seconds 3 milliseconds'::interval; + ^ +SELECT '1:20:05 5 microseconds'::interval; -- error +ERROR: invalid input syntax for type interval: "1:20:05 5 microseconds" +LINE 1: SELECT '1:20:05 5 microseconds'::interval; + ^ +SELECT '1 day 1 
day'::interval; -- error +ERROR: invalid input syntax for type interval: "1 day 1 day" +LINE 1: SELECT '1 day 1 day'::interval; + ^ +SELECT interval '1-2'; -- SQL year-month literal + interval +--------------- + 1 year 2 mons +(1 row) + +SELECT interval '999' second; -- oversize leading field is ok + interval +---------- + 00:16:39 +(1 row) + +SELECT interval '999' minute; + interval +---------- + 16:39:00 +(1 row) + +SELECT interval '999' hour; + interval +----------- + 999:00:00 +(1 row) + +SELECT interval '999' day; + interval +---------- + 999 days +(1 row) + +SELECT interval '999' month; + interval +----------------- + 83 years 3 mons +(1 row) + +-- test SQL-spec syntaxes for restricted field sets +SELECT interval '1' year; + interval +---------- + 1 year +(1 row) + +SELECT interval '2' month; + interval +---------- + 2 mons +(1 row) + +SELECT interval '3' day; + interval +---------- + 3 days +(1 row) + +SELECT interval '4' hour; + interval +---------- + 04:00:00 +(1 row) + +SELECT interval '5' minute; + interval +---------- + 00:05:00 +(1 row) + +SELECT interval '6' second; + interval +---------- + 00:00:06 +(1 row) + +SELECT interval '1' year to month; + interval +---------- + 1 mon +(1 row) + +SELECT interval '1-2' year to month; + interval +--------------- + 1 year 2 mons +(1 row) + +SELECT interval '1 2' day to hour; + interval +---------------- + 1 day 02:00:00 +(1 row) + +SELECT interval '1 2:03' day to hour; + interval +---------------- + 1 day 02:00:00 +(1 row) + +SELECT interval '1 2:03:04' day to hour; + interval +---------------- + 1 day 02:00:00 +(1 row) + +SELECT interval '1 2' day to minute; +ERROR: invalid input syntax for type interval: "1 2" +LINE 1: SELECT interval '1 2' day to minute; + ^ +SELECT interval '1 2:03' day to minute; + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2:03:04' day to minute; + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2' day to second; +ERROR: invalid input syntax for type interval: "1 2" +LINE 1: SELECT interval '1 2' day to second; + ^ +SELECT interval '1 2:03' day to second; + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2:03:04' day to second; + interval +---------------- + 1 day 02:03:04 +(1 row) + +SELECT interval '1 2' hour to minute; +ERROR: invalid input syntax for type interval: "1 2" +LINE 1: SELECT interval '1 2' hour to minute; + ^ +SELECT interval '1 2:03' hour to minute; + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2:03:04' hour to minute; + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2' hour to second; +ERROR: invalid input syntax for type interval: "1 2" +LINE 1: SELECT interval '1 2' hour to second; + ^ +SELECT interval '1 2:03' hour to second; + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2:03:04' hour to second; + interval +---------------- + 1 day 02:03:04 +(1 row) + +SELECT interval '1 2' minute to second; +ERROR: invalid input syntax for type interval: "1 2" +LINE 1: SELECT interval '1 2' minute to second; + ^ +SELECT interval '1 2:03' minute to second; + interval +---------------- + 1 day 00:02:03 +(1 row) + +SELECT interval '1 2:03:04' minute to second; + interval +---------------- + 1 day 02:03:04 +(1 row) + +SELECT interval '1 +2:03' minute to second; + interval +---------------- + 1 day 00:02:03 +(1 row) + +SELECT interval '1 +2:03:04' minute to second; + interval +---------------- + 1 day 02:03:04 +(1 row) + +SELECT interval '1 
-2:03' minute to second; + interval +----------------- + 1 day -00:02:03 +(1 row) + +SELECT interval '1 -2:03:04' minute to second; + interval +----------------- + 1 day -02:03:04 +(1 row) + +SELECT interval '123 11' day to hour; -- ok + interval +------------------- + 123 days 11:00:00 +(1 row) + +SELECT interval '123 11' day; -- not ok +ERROR: invalid input syntax for type interval: "123 11" +LINE 1: SELECT interval '123 11' day; + ^ +SELECT interval '123 11'; -- not ok, too ambiguous +ERROR: invalid input syntax for type interval: "123 11" +LINE 1: SELECT interval '123 11'; + ^ +SELECT interval '123 2:03 -2:04'; -- not ok, redundant hh:mm fields +ERROR: invalid input syntax for type interval: "123 2:03 -2:04" +LINE 1: SELECT interval '123 2:03 -2:04'; + ^ +-- test syntaxes for restricted precision +SELECT interval(0) '1 day 01:23:45.6789'; + interval +---------------- + 1 day 01:23:46 +(1 row) + +SELECT interval(2) '1 day 01:23:45.6789'; + interval +------------------- + 1 day 01:23:45.68 +(1 row) + +SELECT interval '12:34.5678' minute to second(2); -- per SQL spec + interval +------------- + 00:12:34.57 +(1 row) + +SELECT interval '1.234' second; + interval +-------------- + 00:00:01.234 +(1 row) + +SELECT interval '1.234' second(2); + interval +------------- + 00:00:01.23 +(1 row) + +SELECT interval '1 2.345' day to second(2); +ERROR: invalid input syntax for type interval: "1 2.345" +LINE 1: SELECT interval '1 2.345' day to second(2); + ^ +SELECT interval '1 2:03' day to second(2); + interval +---------------- + 1 day 02:03:00 +(1 row) + +SELECT interval '1 2:03.4567' day to second(2); + interval +------------------- + 1 day 00:02:03.46 +(1 row) + +SELECT interval '1 2:03:04.5678' day to second(2); + interval +------------------- + 1 day 02:03:04.57 +(1 row) + +SELECT interval '1 2.345' hour to second(2); +ERROR: invalid input syntax for type interval: "1 2.345" +LINE 1: SELECT interval '1 2.345' hour to second(2); + ^ +SELECT interval '1 2:03.45678' hour to second(2); + interval +------------------- + 1 day 00:02:03.46 +(1 row) + +SELECT interval '1 2:03:04.5678' hour to second(2); + interval +------------------- + 1 day 02:03:04.57 +(1 row) + +SELECT interval '1 2.3456' minute to second(2); +ERROR: invalid input syntax for type interval: "1 2.3456" +LINE 1: SELECT interval '1 2.3456' minute to second(2); + ^ +SELECT interval '1 2:03.5678' minute to second(2); + interval +------------------- + 1 day 00:02:03.57 +(1 row) + +SELECT interval '1 2:03:04.5678' minute to second(2); + interval +------------------- + 1 day 02:03:04.57 +(1 row) + +-- test casting to restricted precision (bug #14479) +SELECT f1, f1::INTERVAL DAY TO MINUTE AS "minutes", + (f1 + INTERVAL '1 month')::INTERVAL MONTH::INTERVAL YEAR AS "years" + FROM interval_tbl; + f1 | minutes | years +-----------------+-----------------+---------- + 00:01:00 | 00:01:00 | 00:00:00 + 05:00:00 | 05:00:00 | 00:00:00 + 10 days | 10 days | 00:00:00 + 34 years | 34 years | 34 years + 3 mons | 3 mons | 00:00:00 + -00:00:14 | 00:00:00 | 00:00:00 + 1 day 02:03:04 | 1 day 02:03:00 | 00:00:00 + 6 years | 6 years | 6 years + 5 mons | 5 mons | 00:00:00 + 5 mons 12:00:00 | 5 mons 12:00:00 | 00:00:00 +(10 rows) + +-- test inputting and outputting SQL standard interval literals +SET IntervalStyle TO sql_standard; +SELECT interval '0' AS "zero", + interval '1-2' year to month AS "year-month", + interval '1 2:03:04' day to second AS "day-time", + - interval '1-2' AS "negative year-month", + - interval '1 2:03:04' AS "negative day-time"; + zero | 
year-month | day-time | negative year-month | negative day-time +------+------------+-----------+---------------------+------------------- + 0 | 1-2 | 1 2:03:04 | -1-2 | -1 2:03:04 +(1 row) + +-- test input of some not-quite-standard interval values in the sql style +SET IntervalStyle TO postgres; +SELECT interval '+1 -1:00:00', + interval '-1 +1:00:00', + interval '+1-2 -3 +4:05:06.789', + interval '-1-2 +3 -4:05:06.789'; + interval | interval | interval | interval +-----------------+-------------------+-------------------------------------+---------------------------------------- + 1 day -01:00:00 | -1 days +01:00:00 | 1 year 2 mons -3 days +04:05:06.789 | -1 years -2 mons +3 days -04:05:06.789 +(1 row) + +-- cases that trigger sign-matching rules in the sql style +SELECT interval '-23 hours 45 min 12.34 sec', + interval '-1 day 23 hours 45 min 12.34 sec', + interval '-1 year 2 months 1 day 23 hours 45 min 12.34 sec', + interval '-1 year 2 months 1 day 23 hours 45 min +12.34 sec'; + interval | interval | interval | interval +--------------+----------------------+-----------------------------+----------------------------- + -22:14:47.66 | -1 days +23:45:12.34 | -10 mons +1 day 23:45:12.34 | -10 mons +1 day 23:45:12.34 +(1 row) + +-- test output of couple non-standard interval values in the sql style +SET IntervalStyle TO sql_standard; +SELECT interval '1 day -1 hours', + interval '-1 days +1 hours', + interval '1 years 2 months -3 days 4 hours 5 minutes 6.789 seconds', + - interval '1 years 2 months -3 days 4 hours 5 minutes 6.789 seconds'; + interval | interval | interval | ?column? +------------------+------------------+----------------------+---------------------- + +0-0 +1 -1:00:00 | +0-0 -1 +1:00:00 | +1-2 -3 +4:05:06.789 | -1-2 +3 -4:05:06.789 +(1 row) + +-- cases that trigger sign-matching rules in the sql style +SELECT interval '-23 hours 45 min 12.34 sec', + interval '-1 day 23 hours 45 min 12.34 sec', + interval '-1 year 2 months 1 day 23 hours 45 min 12.34 sec', + interval '-1 year 2 months 1 day 23 hours 45 min +12.34 sec'; + interval | interval | interval | interval +--------------+----------------+----------------------+----------------------- + -23:45:12.34 | -1 23:45:12.34 | -1-2 -1 -23:45:12.34 | -0-10 +1 +23:45:12.34 +(1 row) + +-- edge case for sign-matching rules +SELECT interval ''; -- error +ERROR: invalid input syntax for type interval: "" +LINE 1: SELECT interval ''; + ^ +-- test outputting iso8601 intervals +SET IntervalStyle to iso_8601; +select interval '0' AS "zero", + interval '1-2' AS "a year 2 months", + interval '1 2:03:04' AS "a bit over a day", + interval '2:03:04.45679' AS "a bit over 2 hours", + (interval '1-2' + interval '3 4:05:06.7') AS "all fields", + (interval '1-2' - interval '3 4:05:06.7') AS "mixed sign", + (- interval '1-2' + interval '3 4:05:06.7') AS "negative"; + zero | a year 2 months | a bit over a day | a bit over 2 hours | all fields | mixed sign | negative +------+-----------------+------------------+--------------------+------------------+----------------------+-------------------- + PT0S | P1Y2M | P1DT2H3M4S | PT2H3M4.45679S | P1Y2M3DT4H5M6.7S | P1Y2M-3DT-4H-5M-6.7S | P-1Y-2M3DT4H5M6.7S +(1 row) + +-- test inputting ISO 8601 4.4.2.1 "Format With Time Unit Designators" +SET IntervalStyle to sql_standard; +select interval 'P0Y' AS "zero", + interval 'P1Y2M' AS "a year 2 months", + interval 'P1W' AS "a week", + interval 'P1DT2H3M4S' AS "a bit over a day", + interval 'P1Y2M3DT4H5M6.7S' AS "all fields", + interval 'P-1Y-2M-3DT-4H-5M-6.7S' 
AS "negative", + interval 'PT-0.1S' AS "fractional second"; + zero | a year 2 months | a week | a bit over a day | all fields | negative | fractional second +------+-----------------+-----------+------------------+--------------------+--------------------+------------------- + 0 | 1-2 | 7 0:00:00 | 1 2:03:04 | +1-2 +3 +4:05:06.7 | -1-2 -3 -4:05:06.7 | -0:00:00.1 +(1 row) + +-- test inputting ISO 8601 4.4.2.2 "Alternative Format" +SET IntervalStyle to postgres; +select interval 'P00021015T103020' AS "ISO8601 Basic Format", + interval 'P0002-10-15T10:30:20' AS "ISO8601 Extended Format"; + ISO8601 Basic Format | ISO8601 Extended Format +----------------------------------+---------------------------------- + 2 years 10 mons 15 days 10:30:20 | 2 years 10 mons 15 days 10:30:20 +(1 row) + +-- Make sure optional ISO8601 alternative format fields are optional. +select interval 'P0002' AS "year only", + interval 'P0002-10' AS "year month", + interval 'P0002-10-15' AS "year month day", + interval 'P0002T1S' AS "year only plus time", + interval 'P0002-10T1S' AS "year month plus time", + interval 'P0002-10-15T1S' AS "year month day plus time", + interval 'PT10' AS "hour only", + interval 'PT10:30' AS "hour minute"; + year only | year month | year month day | year only plus time | year month plus time | year month day plus time | hour only | hour minute +-----------+-----------------+-------------------------+---------------------+--------------------------+----------------------------------+-----------+------------- + 2 years | 2 years 10 mons | 2 years 10 mons 15 days | 2 years 00:00:01 | 2 years 10 mons 00:00:01 | 2 years 10 mons 15 days 00:00:01 | 10:00:00 | 10:30:00 +(1 row) + +-- Check handling of fractional fields in ISO8601 format. +select interval 'P1Y0M3DT4H5M6S'; + interval +------------------------ + 1 year 3 days 04:05:06 +(1 row) + +select interval 'P1.0Y0M3DT4H5M6S'; + interval +------------------------ + 1 year 3 days 04:05:06 +(1 row) + +select interval 'P1.1Y0M3DT4H5M6S'; + interval +------------------------------ + 1 year 1 mon 3 days 04:05:06 +(1 row) + +select interval 'P1.Y0M3DT4H5M6S'; + interval +------------------------ + 1 year 3 days 04:05:06 +(1 row) + +select interval 'P.1Y0M3DT4H5M6S'; + interval +----------------------- + 1 mon 3 days 04:05:06 +(1 row) + +select interval 'P10.5e4Y'; -- not per spec, but we've historically taken it + interval +-------------- + 105000 years +(1 row) + +select interval 'P.Y0M3DT4H5M6S'; -- error +ERROR: invalid input syntax for type interval: "P.Y0M3DT4H5M6S" +LINE 1: select interval 'P.Y0M3DT4H5M6S'; + ^ +-- test a couple rounding cases that changed since 8.3 w/ HAVE_INT64_TIMESTAMP. 
+SET IntervalStyle to postgres_verbose; +select interval '-10 mons -3 days +03:55:06.70'; + interval +-------------------------------------------------- + @ 10 mons 3 days -3 hours -55 mins -6.7 secs ago +(1 row) + +select interval '1 year 2 mons 3 days 04:05:06.699999'; + interval +----------------------------------------------------- + @ 1 year 2 mons 3 days 4 hours 5 mins 6.699999 secs +(1 row) + +select interval '0:0:0.7', interval '@ 0.70 secs', interval '0.7 seconds'; + interval | interval | interval +------------+------------+------------ + @ 0.7 secs | @ 0.7 secs | @ 0.7 secs +(1 row) + +-- test time fields using entire 64 bit microseconds range +select interval '2562047788.01521550194 hours'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval '-2562047788.01521550222 hours'; + interval +--------------------------------------- + @ 2562047788 hours 54.775808 secs ago +(1 row) + +select interval '153722867280.912930117 minutes'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval '-153722867280.912930133 minutes'; + interval +--------------------------------------- + @ 2562047788 hours 54.775808 secs ago +(1 row) + +select interval '9223372036854.775807 seconds'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval '-9223372036854.775808 seconds'; + interval +--------------------------------------- + @ 2562047788 hours 54.775808 secs ago +(1 row) + +select interval '9223372036854775.807 milliseconds'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval '-9223372036854775.808 milliseconds'; + interval +--------------------------------------- + @ 2562047788 hours 54.775808 secs ago +(1 row) + +select interval '9223372036854775807 microseconds'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval '-9223372036854775808 microseconds'; + interval +--------------------------------------- + @ 2562047788 hours 54.775808 secs ago +(1 row) + +select interval 'PT2562047788H54.775807S'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval 'PT-2562047788H-54.775808S'; + interval +--------------------------------------- + @ 2562047788 hours 54.775808 secs ago +(1 row) + +select interval 'PT2562047788:00:54.775807'; + interval +----------------------------------- + @ 2562047788 hours 54.775807 secs +(1 row) + +select interval 'PT2562047788.0152155019444'; + interval +----------------------------------- + @ 2562047788 hours 54.775429 secs +(1 row) + +select interval 'PT-2562047788.0152155022222'; + interval +--------------------------------------- + @ 2562047788 hours 54.775429 secs ago +(1 row) + +-- overflow each date/time field +select interval '2147483648 years'; +ERROR: interval field value out of range: "2147483648 years" +LINE 1: select interval '2147483648 years'; + ^ +select interval '-2147483649 years'; +ERROR: interval field value out of range: "-2147483649 years" +LINE 1: select interval '-2147483649 years'; + ^ +select interval '2147483648 months'; +ERROR: interval field value out of range: "2147483648 months" +LINE 1: select interval '2147483648 months'; + ^ +select interval '-2147483649 months'; +ERROR: interval field value out of range: "-2147483649 months" +LINE 1: select interval '-2147483649 months'; + ^ +select interval 
'2147483648 days'; +ERROR: interval field value out of range: "2147483648 days" +LINE 1: select interval '2147483648 days'; + ^ +select interval '-2147483649 days'; +ERROR: interval field value out of range: "-2147483649 days" +LINE 1: select interval '-2147483649 days'; + ^ +select interval '2562047789 hours'; +ERROR: interval field value out of range: "2562047789 hours" +LINE 1: select interval '2562047789 hours'; + ^ +select interval '-2562047789 hours'; +ERROR: interval field value out of range: "-2562047789 hours" +LINE 1: select interval '-2562047789 hours'; + ^ +select interval '153722867281 minutes'; +ERROR: interval field value out of range: "153722867281 minutes" +LINE 1: select interval '153722867281 minutes'; + ^ +select interval '-153722867281 minutes'; +ERROR: interval field value out of range: "-153722867281 minutes" +LINE 1: select interval '-153722867281 minutes'; + ^ +select interval '9223372036855 seconds'; +ERROR: interval field value out of range: "9223372036855 seconds" +LINE 1: select interval '9223372036855 seconds'; + ^ +select interval '-9223372036855 seconds'; +ERROR: interval field value out of range: "-9223372036855 seconds" +LINE 1: select interval '-9223372036855 seconds'; + ^ +select interval '9223372036854777 millisecond'; +ERROR: interval field value out of range: "9223372036854777 millisecond" +LINE 1: select interval '9223372036854777 millisecond'; + ^ +select interval '-9223372036854777 millisecond'; +ERROR: interval field value out of range: "-9223372036854777 millisecond" +LINE 1: select interval '-9223372036854777 millisecond'; + ^ +select interval '9223372036854775808 microsecond'; +ERROR: interval field value out of range: "9223372036854775808 microsecond" +LINE 1: select interval '9223372036854775808 microsecond'; + ^ +select interval '-9223372036854775809 microsecond'; +ERROR: interval field value out of range: "-9223372036854775809 microsecond" +LINE 1: select interval '-9223372036854775809 microsecond'; + ^ +select interval 'P2147483648'; +ERROR: interval field value out of range: "P2147483648" +LINE 1: select interval 'P2147483648'; + ^ +select interval 'P-2147483649'; +ERROR: interval field value out of range: "P-2147483649" +LINE 1: select interval 'P-2147483649'; + ^ +select interval 'P1-2147483647-2147483647'; +ERROR: interval out of range +LINE 1: select interval 'P1-2147483647-2147483647'; + ^ +select interval 'PT2562047789'; +ERROR: interval field value out of range: "PT2562047789" +LINE 1: select interval 'PT2562047789'; + ^ +select interval 'PT-2562047789'; +ERROR: interval field value out of range: "PT-2562047789" +LINE 1: select interval 'PT-2562047789'; + ^ +-- overflow with date/time unit aliases +select interval '2147483647 weeks'; +ERROR: interval field value out of range: "2147483647 weeks" +LINE 1: select interval '2147483647 weeks'; + ^ +select interval '-2147483648 weeks'; +ERROR: interval field value out of range: "-2147483648 weeks" +LINE 1: select interval '-2147483648 weeks'; + ^ +select interval '2147483647 decades'; +ERROR: interval field value out of range: "2147483647 decades" +LINE 1: select interval '2147483647 decades'; + ^ +select interval '-2147483648 decades'; +ERROR: interval field value out of range: "-2147483648 decades" +LINE 1: select interval '-2147483648 decades'; + ^ +select interval '2147483647 centuries'; +ERROR: interval field value out of range: "2147483647 centuries" +LINE 1: select interval '2147483647 centuries'; + ^ +select interval '-2147483648 centuries'; +ERROR: interval field value out of 
range: "-2147483648 centuries" +LINE 1: select interval '-2147483648 centuries'; + ^ +select interval '2147483647 millennium'; +ERROR: interval field value out of range: "2147483647 millennium" +LINE 1: select interval '2147483647 millennium'; + ^ +select interval '-2147483648 millennium'; +ERROR: interval field value out of range: "-2147483648 millennium" +LINE 1: select interval '-2147483648 millennium'; + ^ +select interval '1 week 2147483647 days'; +ERROR: interval field value out of range: "1 week 2147483647 days" +LINE 1: select interval '1 week 2147483647 days'; + ^ +select interval '-1 week -2147483648 days'; +ERROR: interval field value out of range: "-1 week -2147483648 days" +LINE 1: select interval '-1 week -2147483648 days'; + ^ +select interval '2147483647 days 1 week'; +ERROR: interval field value out of range: "2147483647 days 1 week" +LINE 1: select interval '2147483647 days 1 week'; + ^ +select interval '-2147483648 days -1 week'; +ERROR: interval field value out of range: "-2147483648 days -1 week" +LINE 1: select interval '-2147483648 days -1 week'; + ^ +select interval 'P1W2147483647D'; +ERROR: interval field value out of range: "P1W2147483647D" +LINE 1: select interval 'P1W2147483647D'; + ^ +select interval 'P-1W-2147483648D'; +ERROR: interval field value out of range: "P-1W-2147483648D" +LINE 1: select interval 'P-1W-2147483648D'; + ^ +select interval 'P2147483647D1W'; +ERROR: interval field value out of range: "P2147483647D1W" +LINE 1: select interval 'P2147483647D1W'; + ^ +select interval 'P-2147483648D-1W'; +ERROR: interval field value out of range: "P-2147483648D-1W" +LINE 1: select interval 'P-2147483648D-1W'; + ^ +select interval '1 decade 2147483647 years'; +ERROR: interval field value out of range: "1 decade 2147483647 years" +LINE 1: select interval '1 decade 2147483647 years'; + ^ +select interval '1 century 2147483647 years'; +ERROR: interval field value out of range: "1 century 2147483647 years" +LINE 1: select interval '1 century 2147483647 years'; + ^ +select interval '1 millennium 2147483647 years'; +ERROR: interval field value out of range: "1 millennium 2147483647 years" +LINE 1: select interval '1 millennium 2147483647 years'; + ^ +select interval '-1 decade -2147483648 years'; +ERROR: interval field value out of range: "-1 decade -2147483648 years" +LINE 1: select interval '-1 decade -2147483648 years'; + ^ +select interval '-1 century -2147483648 years'; +ERROR: interval field value out of range: "-1 century -2147483648 years" +LINE 1: select interval '-1 century -2147483648 years'; + ^ +select interval '-1 millennium -2147483648 years'; +ERROR: interval field value out of range: "-1 millennium -2147483648 years" +LINE 1: select interval '-1 millennium -2147483648 years'; + ^ +select interval '2147483647 years 1 decade'; +ERROR: interval field value out of range: "2147483647 years 1 decade" +LINE 1: select interval '2147483647 years 1 decade'; + ^ +select interval '2147483647 years 1 century'; +ERROR: interval field value out of range: "2147483647 years 1 century" +LINE 1: select interval '2147483647 years 1 century'; + ^ +select interval '2147483647 years 1 millennium'; +ERROR: interval field value out of range: "2147483647 years 1 millennium" +LINE 1: select interval '2147483647 years 1 millennium'; + ^ +select interval '-2147483648 years -1 decade'; +ERROR: interval field value out of range: "-2147483648 years -1 decade" +LINE 1: select interval '-2147483648 years -1 decade'; + ^ +select interval '-2147483648 years -1 century'; +ERROR: interval 
field value out of range: "-2147483648 years -1 century" +LINE 1: select interval '-2147483648 years -1 century'; + ^ +select interval '-2147483648 years -1 millennium'; +ERROR: interval field value out of range: "-2147483648 years -1 millennium" +LINE 1: select interval '-2147483648 years -1 millennium'; + ^ +-- overflowing with fractional fields - postgres format +select interval '0.1 millennium 2147483647 months'; +ERROR: interval field value out of range: "0.1 millennium 2147483647 months" +LINE 1: select interval '0.1 millennium 2147483647 months'; + ^ +select interval '0.1 centuries 2147483647 months'; +ERROR: interval field value out of range: "0.1 centuries 2147483647 months" +LINE 1: select interval '0.1 centuries 2147483647 months'; + ^ +select interval '0.1 decades 2147483647 months'; +ERROR: interval field value out of range: "0.1 decades 2147483647 months" +LINE 1: select interval '0.1 decades 2147483647 months'; + ^ +select interval '0.1 yrs 2147483647 months'; +ERROR: interval field value out of range: "0.1 yrs 2147483647 months" +LINE 1: select interval '0.1 yrs 2147483647 months'; + ^ +select interval '-0.1 millennium -2147483648 months'; +ERROR: interval field value out of range: "-0.1 millennium -2147483648 months" +LINE 1: select interval '-0.1 millennium -2147483648 months'; + ^ +select interval '-0.1 centuries -2147483648 months'; +ERROR: interval field value out of range: "-0.1 centuries -2147483648 months" +LINE 1: select interval '-0.1 centuries -2147483648 months'; + ^ +select interval '-0.1 decades -2147483648 months'; +ERROR: interval field value out of range: "-0.1 decades -2147483648 months" +LINE 1: select interval '-0.1 decades -2147483648 months'; + ^ +select interval '-0.1 yrs -2147483648 months'; +ERROR: interval field value out of range: "-0.1 yrs -2147483648 months" +LINE 1: select interval '-0.1 yrs -2147483648 months'; + ^ +select interval '2147483647 months 0.1 millennium'; +ERROR: interval field value out of range: "2147483647 months 0.1 millennium" +LINE 1: select interval '2147483647 months 0.1 millennium'; + ^ +select interval '2147483647 months 0.1 centuries'; +ERROR: interval field value out of range: "2147483647 months 0.1 centuries" +LINE 1: select interval '2147483647 months 0.1 centuries'; + ^ +select interval '2147483647 months 0.1 decades'; +ERROR: interval field value out of range: "2147483647 months 0.1 decades" +LINE 1: select interval '2147483647 months 0.1 decades'; + ^ +select interval '2147483647 months 0.1 yrs'; +ERROR: interval field value out of range: "2147483647 months 0.1 yrs" +LINE 1: select interval '2147483647 months 0.1 yrs'; + ^ +select interval '-2147483648 months -0.1 millennium'; +ERROR: interval field value out of range: "-2147483648 months -0.1 millennium" +LINE 1: select interval '-2147483648 months -0.1 millennium'; + ^ +select interval '-2147483648 months -0.1 centuries'; +ERROR: interval field value out of range: "-2147483648 months -0.1 centuries" +LINE 1: select interval '-2147483648 months -0.1 centuries'; + ^ +select interval '-2147483648 months -0.1 decades'; +ERROR: interval field value out of range: "-2147483648 months -0.1 decades" +LINE 1: select interval '-2147483648 months -0.1 decades'; + ^ +select interval '-2147483648 months -0.1 yrs'; +ERROR: interval field value out of range: "-2147483648 months -0.1 yrs" +LINE 1: select interval '-2147483648 months -0.1 yrs'; + ^ +select interval '0.1 months 2147483647 days'; +ERROR: interval field value out of range: "0.1 months 2147483647 days" +LINE 1: select 
interval '0.1 months 2147483647 days'; + ^ +select interval '-0.1 months -2147483648 days'; +ERROR: interval field value out of range: "-0.1 months -2147483648 days" +LINE 1: select interval '-0.1 months -2147483648 days'; + ^ +select interval '2147483647 days 0.1 months'; +ERROR: interval field value out of range: "2147483647 days 0.1 months" +LINE 1: select interval '2147483647 days 0.1 months'; + ^ +select interval '-2147483648 days -0.1 months'; +ERROR: interval field value out of range: "-2147483648 days -0.1 months" +LINE 1: select interval '-2147483648 days -0.1 months'; + ^ +select interval '0.5 weeks 2147483647 days'; +ERROR: interval field value out of range: "0.5 weeks 2147483647 days" +LINE 1: select interval '0.5 weeks 2147483647 days'; + ^ +select interval '-0.5 weeks -2147483648 days'; +ERROR: interval field value out of range: "-0.5 weeks -2147483648 days" +LINE 1: select interval '-0.5 weeks -2147483648 days'; + ^ +select interval '2147483647 days 0.5 weeks'; +ERROR: interval field value out of range: "2147483647 days 0.5 weeks" +LINE 1: select interval '2147483647 days 0.5 weeks'; + ^ +select interval '-2147483648 days -0.5 weeks'; +ERROR: interval field value out of range: "-2147483648 days -0.5 weeks" +LINE 1: select interval '-2147483648 days -0.5 weeks'; + ^ +select interval '0.01 months 9223372036854775807 microseconds'; +ERROR: interval field value out of range: "0.01 months 9223372036854775807 microseconds" +LINE 1: select interval '0.01 months 9223372036854775807 microsecond... + ^ +select interval '-0.01 months -9223372036854775808 microseconds'; +ERROR: interval field value out of range: "-0.01 months -9223372036854775808 microseconds" +LINE 1: select interval '-0.01 months -9223372036854775808 microseco... + ^ +select interval '9223372036854775807 microseconds 0.01 months'; +ERROR: interval field value out of range: "9223372036854775807 microseconds 0.01 months" +LINE 1: select interval '9223372036854775807 microseconds 0.01 month... + ^ +select interval '-9223372036854775808 microseconds -0.01 months'; +ERROR: interval field value out of range: "-9223372036854775808 microseconds -0.01 months" +LINE 1: select interval '-9223372036854775808 microseconds -0.01 mon... + ^ +select interval '0.1 weeks 9223372036854775807 microseconds'; +ERROR: interval field value out of range: "0.1 weeks 9223372036854775807 microseconds" +LINE 1: select interval '0.1 weeks 9223372036854775807 microseconds'... + ^ +select interval '-0.1 weeks -9223372036854775808 microseconds'; +ERROR: interval field value out of range: "-0.1 weeks -9223372036854775808 microseconds" +LINE 1: select interval '-0.1 weeks -9223372036854775808 microsecond... + ^ +select interval '9223372036854775807 microseconds 0.1 weeks'; +ERROR: interval field value out of range: "9223372036854775807 microseconds 0.1 weeks" +LINE 1: select interval '9223372036854775807 microseconds 0.1 weeks'... + ^ +select interval '-9223372036854775808 microseconds -0.1 weeks'; +ERROR: interval field value out of range: "-9223372036854775808 microseconds -0.1 weeks" +LINE 1: select interval '-9223372036854775808 microseconds -0.1 week... 
+ ^ +select interval '0.1 days 9223372036854775807 microseconds'; +ERROR: interval field value out of range: "0.1 days 9223372036854775807 microseconds" +LINE 1: select interval '0.1 days 9223372036854775807 microseconds'; + ^ +select interval '-0.1 days -9223372036854775808 microseconds'; +ERROR: interval field value out of range: "-0.1 days -9223372036854775808 microseconds" +LINE 1: select interval '-0.1 days -9223372036854775808 microseconds... + ^ +select interval '9223372036854775807 microseconds 0.1 days'; +ERROR: interval field value out of range: "9223372036854775807 microseconds 0.1 days" +LINE 1: select interval '9223372036854775807 microseconds 0.1 days'; + ^ +select interval '-9223372036854775808 microseconds -0.1 days'; +ERROR: interval field value out of range: "-9223372036854775808 microseconds -0.1 days" +LINE 1: select interval '-9223372036854775808 microseconds -0.1 days... + ^ +-- overflowing with fractional fields - ISO8601 format +select interval 'P0.1Y2147483647M'; +ERROR: interval field value out of range: "P0.1Y2147483647M" +LINE 1: select interval 'P0.1Y2147483647M'; + ^ +select interval 'P-0.1Y-2147483648M'; +ERROR: interval field value out of range: "P-0.1Y-2147483648M" +LINE 1: select interval 'P-0.1Y-2147483648M'; + ^ +select interval 'P2147483647M0.1Y'; +ERROR: interval field value out of range: "P2147483647M0.1Y" +LINE 1: select interval 'P2147483647M0.1Y'; + ^ +select interval 'P-2147483648M-0.1Y'; +ERROR: interval field value out of range: "P-2147483648M-0.1Y" +LINE 1: select interval 'P-2147483648M-0.1Y'; + ^ +select interval 'P0.1M2147483647D'; +ERROR: interval field value out of range: "P0.1M2147483647D" +LINE 1: select interval 'P0.1M2147483647D'; + ^ +select interval 'P-0.1M-2147483648D'; +ERROR: interval field value out of range: "P-0.1M-2147483648D" +LINE 1: select interval 'P-0.1M-2147483648D'; + ^ +select interval 'P2147483647D0.1M'; +ERROR: interval field value out of range: "P2147483647D0.1M" +LINE 1: select interval 'P2147483647D0.1M'; + ^ +select interval 'P-2147483648D-0.1M'; +ERROR: interval field value out of range: "P-2147483648D-0.1M" +LINE 1: select interval 'P-2147483648D-0.1M'; + ^ +select interval 'P0.5W2147483647D'; +ERROR: interval field value out of range: "P0.5W2147483647D" +LINE 1: select interval 'P0.5W2147483647D'; + ^ +select interval 'P-0.5W-2147483648D'; +ERROR: interval field value out of range: "P-0.5W-2147483648D" +LINE 1: select interval 'P-0.5W-2147483648D'; + ^ +select interval 'P2147483647D0.5W'; +ERROR: interval field value out of range: "P2147483647D0.5W" +LINE 1: select interval 'P2147483647D0.5W'; + ^ +select interval 'P-2147483648D-0.5W'; +ERROR: interval field value out of range: "P-2147483648D-0.5W" +LINE 1: select interval 'P-2147483648D-0.5W'; + ^ +select interval 'P0.01MT2562047788H54.775807S'; +ERROR: interval field value out of range: "P0.01MT2562047788H54.775807S" +LINE 1: select interval 'P0.01MT2562047788H54.775807S'; + ^ +select interval 'P-0.01MT-2562047788H-54.775808S'; +ERROR: interval field value out of range: "P-0.01MT-2562047788H-54.775808S" +LINE 1: select interval 'P-0.01MT-2562047788H-54.775808S'; + ^ +select interval 'P0.1DT2562047788H54.775807S'; +ERROR: interval field value out of range: "P0.1DT2562047788H54.775807S" +LINE 1: select interval 'P0.1DT2562047788H54.775807S'; + ^ +select interval 'P-0.1DT-2562047788H-54.775808S'; +ERROR: interval field value out of range: "P-0.1DT-2562047788H-54.775808S" +LINE 1: select interval 'P-0.1DT-2562047788H-54.775808S'; + ^ +select interval 
'PT2562047788.1H54.775807S'; +ERROR: interval field value out of range: "PT2562047788.1H54.775807S" +LINE 1: select interval 'PT2562047788.1H54.775807S'; + ^ +select interval 'PT-2562047788.1H-54.775808S'; +ERROR: interval field value out of range: "PT-2562047788.1H-54.775808S" +LINE 1: select interval 'PT-2562047788.1H-54.775808S'; + ^ +select interval 'PT2562047788H0.1M54.775807S'; +ERROR: interval field value out of range: "PT2562047788H0.1M54.775807S" +LINE 1: select interval 'PT2562047788H0.1M54.775807S'; + ^ +select interval 'PT-2562047788H-0.1M-54.775808S'; +ERROR: interval field value out of range: "PT-2562047788H-0.1M-54.775808S" +LINE 1: select interval 'PT-2562047788H-0.1M-54.775808S'; + ^ +-- overflowing with fractional fields - ISO8601 alternative format +select interval 'P0.1-2147483647-00'; +ERROR: interval field value out of range: "P0.1-2147483647-00" +LINE 1: select interval 'P0.1-2147483647-00'; + ^ +select interval 'P00-0.1-2147483647'; +ERROR: interval field value out of range: "P00-0.1-2147483647" +LINE 1: select interval 'P00-0.1-2147483647'; + ^ +select interval 'P00-0.01-00T2562047788:00:54.775807'; +ERROR: interval field value out of range: "P00-0.01-00T2562047788:00:54.775807" +LINE 1: select interval 'P00-0.01-00T2562047788:00:54.775807'; + ^ +select interval 'P00-00-0.1T2562047788:00:54.775807'; +ERROR: interval field value out of range: "P00-00-0.1T2562047788:00:54.775807" +LINE 1: select interval 'P00-00-0.1T2562047788:00:54.775807'; + ^ +select interval 'PT2562047788.1:00:54.775807'; +ERROR: interval field value out of range: "PT2562047788.1:00:54.775807" +LINE 1: select interval 'PT2562047788.1:00:54.775807'; + ^ +select interval 'PT2562047788:01.:54.775807'; +ERROR: interval field value out of range: "PT2562047788:01.:54.775807" +LINE 1: select interval 'PT2562047788:01.:54.775807'; + ^ +-- overflowing with fractional fields - SQL standard format +select interval '0.1 2562047788:0:54.775807'; +ERROR: interval field value out of range: "0.1 2562047788:0:54.775807" +LINE 1: select interval '0.1 2562047788:0:54.775807'; + ^ +select interval '0.1 2562047788:0:54.775808 ago'; +ERROR: interval field value out of range: "0.1 2562047788:0:54.775808 ago" +LINE 1: select interval '0.1 2562047788:0:54.775808 ago'; + ^ +select interval '2562047788.1:0:54.775807'; +ERROR: interval field value out of range: "2562047788.1:0:54.775807" +LINE 1: select interval '2562047788.1:0:54.775807'; + ^ +select interval '2562047788.1:0:54.775808 ago'; +ERROR: interval field value out of range: "2562047788.1:0:54.775808 ago" +LINE 1: select interval '2562047788.1:0:54.775808 ago'; + ^ +select interval '2562047788:0.1:54.775807'; +ERROR: invalid input syntax for type interval: "2562047788:0.1:54.775807" +LINE 1: select interval '2562047788:0.1:54.775807'; + ^ +select interval '2562047788:0.1:54.775808 ago'; +ERROR: invalid input syntax for type interval: "2562047788:0.1:54.775808 ago" +LINE 1: select interval '2562047788:0.1:54.775808 ago'; + ^ +-- overflowing using AGO with INT_MIN +select interval '-2147483648 months ago'; +ERROR: interval field value out of range: "-2147483648 months ago" +LINE 1: select interval '-2147483648 months ago'; + ^ +select interval '-2147483648 days ago'; +ERROR: interval field value out of range: "-2147483648 days ago" +LINE 1: select interval '-2147483648 days ago'; + ^ +select interval '-9223372036854775808 microseconds ago'; +ERROR: interval field value out of range: "-9223372036854775808 microseconds ago" +LINE 1: select interval 
'-9223372036854775808 microseconds ago'; + ^ +select interval '-2147483648 months -2147483648 days -9223372036854775808 microseconds ago'; +ERROR: interval field value out of range: "-2147483648 months -2147483648 days -9223372036854775808 microseconds ago" +LINE 1: select interval '-2147483648 months -2147483648 days -922337... + ^ +-- test that INT_MIN number is formatted properly +SET IntervalStyle to postgres; +select interval '-2147483648 months -2147483648 days -9223372036854775808 us'; + interval +-------------------------------------------------------------------- + -178956970 years -8 mons -2147483648 days -2562047788:00:54.775808 +(1 row) + +SET IntervalStyle to sql_standard; +select interval '-2147483648 months -2147483648 days -9223372036854775808 us'; + interval +--------------------------------------------------- + -178956970-8 -2147483648 -2562047788:00:54.775808 +(1 row) + +SET IntervalStyle to iso_8601; +select interval '-2147483648 months -2147483648 days -9223372036854775808 us'; + interval +----------------------------------------------------- + P-178956970Y-8M-2147483648DT-2562047788H-54.775808S +(1 row) + +SET IntervalStyle to postgres_verbose; +select interval '-2147483648 months -2147483648 days -9223372036854775808 us'; + interval +------------------------------------------------------------------------------ + @ 178956970 years 8 mons 2147483648 days 2562047788 hours 54.775808 secs ago +(1 row) + +-- check that '30 days' equals '1 month' according to the hash function +select '30 days'::interval = '1 month'::interval as t; + t +--- + t +(1 row) + +select interval_hash('30 days'::interval) = interval_hash('1 month'::interval) as t; + t +--- + t +(1 row) + +-- numeric constructor +select make_interval(years := 2); + make_interval +--------------- + @ 2 years +(1 row) + +select make_interval(years := 1, months := 6); + make_interval +----------------- + @ 1 year 6 mons +(1 row) + +select make_interval(years := 1, months := -1, weeks := 5, days := -7, hours := 25, mins := -180); + make_interval +---------------------------- + @ 11 mons 28 days 22 hours +(1 row) + +select make_interval() = make_interval(years := 0, months := 0, weeks := 0, days := 0, mins := 0, secs := 0.0); + ?column? 
+---------- + t +(1 row) + +select make_interval(hours := -2, mins := -10, secs := -25.3); + make_interval +--------------------------------- + @ 2 hours 10 mins 25.3 secs ago +(1 row) + +select make_interval(years := 'inf'::float::int); +ERROR: integer out of range +select make_interval(months := 'NaN'::float::int); +ERROR: integer out of range +select make_interval(secs := 'inf'); +ERROR: interval out of range +select make_interval(secs := 'NaN'); +ERROR: interval out of range +select make_interval(secs := 7e12); + make_interval +------------------------------------ + @ 1944444444 hours 26 mins 40 secs +(1 row) + +-- +-- test EXTRACT +-- +SELECT f1, + EXTRACT(MICROSECOND FROM f1) AS MICROSECOND, + EXTRACT(MILLISECOND FROM f1) AS MILLISECOND, + EXTRACT(SECOND FROM f1) AS SECOND, + EXTRACT(MINUTE FROM f1) AS MINUTE, + EXTRACT(HOUR FROM f1) AS HOUR, + EXTRACT(DAY FROM f1) AS DAY, + EXTRACT(MONTH FROM f1) AS MONTH, + EXTRACT(QUARTER FROM f1) AS QUARTER, + EXTRACT(YEAR FROM f1) AS YEAR, + EXTRACT(DECADE FROM f1) AS DECADE, + EXTRACT(CENTURY FROM f1) AS CENTURY, + EXTRACT(MILLENNIUM FROM f1) AS MILLENNIUM, + EXTRACT(EPOCH FROM f1) AS EPOCH + FROM INTERVAL_TBL; + f1 | microsecond | millisecond | second | minute | hour | day | month | quarter | year | decade | century | millennium | epoch +-------------------------------+-------------+-------------+------------+--------+------+-----+-------+---------+------+--------+---------+------------+------------------- + @ 1 min | 0 | 0.000 | 0.000000 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 60.000000 + @ 5 hours | 0 | 0.000 | 0.000000 | 0 | 5 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 18000.000000 + @ 10 days | 0 | 0.000 | 0.000000 | 0 | 0 | 10 | 0 | 1 | 0 | 0 | 0 | 0 | 864000.000000 + @ 34 years | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 0 | 1 | 34 | 3 | 0 | 0 | 1072958400.000000 + @ 3 mons | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 3 | 2 | 0 | 0 | 0 | 0 | 7776000.000000 + @ 14 secs ago | -14000000 | -14000.000 | -14.000000 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | -14.000000 + @ 1 day 2 hours 3 mins 4 secs | 4000000 | 4000.000 | 4.000000 | 3 | 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 93784.000000 + @ 6 years | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 0 | 1 | 6 | 0 | 0 | 0 | 189345600.000000 + @ 5 mons | 0 | 0.000 | 0.000000 | 0 | 0 | 0 | 5 | 2 | 0 | 0 | 0 | 0 | 12960000.000000 + @ 5 mons 12 hours | 0 | 0.000 | 0.000000 | 0 | 12 | 0 | 5 | 2 | 0 | 0 | 0 | 0 | 13003200.000000 +(10 rows) + +SELECT EXTRACT(FORTNIGHT FROM INTERVAL '2 days'); -- error +ERROR: unit "fortnight" not recognized for type interval +SELECT EXTRACT(TIMEZONE FROM INTERVAL '2 days'); -- error +ERROR: unit "timezone" not supported for type interval +SELECT EXTRACT(DECADE FROM INTERVAL '100 y'); + extract +--------- + 10 +(1 row) + +SELECT EXTRACT(DECADE FROM INTERVAL '99 y'); + extract +--------- + 9 +(1 row) + +SELECT EXTRACT(DECADE FROM INTERVAL '-99 y'); + extract +--------- + -9 +(1 row) + +SELECT EXTRACT(DECADE FROM INTERVAL '-100 y'); + extract +--------- + -10 +(1 row) + +SELECT EXTRACT(CENTURY FROM INTERVAL '100 y'); + extract +--------- + 1 +(1 row) + +SELECT EXTRACT(CENTURY FROM INTERVAL '99 y'); + extract +--------- + 0 +(1 row) + +SELECT EXTRACT(CENTURY FROM INTERVAL '-99 y'); + extract +--------- + 0 +(1 row) + +SELECT EXTRACT(CENTURY FROM INTERVAL '-100 y'); + extract +--------- + -1 +(1 row) + +-- date_part implementation is mostly the same as extract, so only +-- test a few cases for additional coverage. 
+SELECT f1, + date_part('microsecond', f1) AS microsecond, + date_part('millisecond', f1) AS millisecond, + date_part('second', f1) AS second, + date_part('epoch', f1) AS epoch + FROM INTERVAL_TBL; + f1 | microsecond | millisecond | second | epoch +-------------------------------+-------------+-------------+--------+------------ + @ 1 min | 0 | 0 | 0 | 60 + @ 5 hours | 0 | 0 | 0 | 18000 + @ 10 days | 0 | 0 | 0 | 864000 + @ 34 years | 0 | 0 | 0 | 1072958400 + @ 3 mons | 0 | 0 | 0 | 7776000 + @ 14 secs ago | -14000000 | -14000 | -14 | -14 + @ 1 day 2 hours 3 mins 4 secs | 4000000 | 4000 | 4 | 93784 + @ 6 years | 0 | 0 | 0 | 189345600 + @ 5 mons | 0 | 0 | 0 | 12960000 + @ 5 mons 12 hours | 0 | 0 | 0 | 13003200 +(10 rows) + +-- internal overflow test case +SELECT extract(epoch from interval '1000000000 days'); + extract +----------------------- + 86400000000000.000000 +(1 row) + diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out new file mode 100644 index 0000000..3715ae4 --- /dev/null +++ b/src/test/regress/expected/join.out @@ -0,0 +1,7929 @@ +-- +-- JOIN +-- Test JOIN clauses +-- +CREATE TABLE J1_TBL ( + i integer, + j integer, + t text +); +CREATE TABLE J2_TBL ( + i integer, + k integer +); +INSERT INTO J1_TBL VALUES (1, 4, 'one'); +INSERT INTO J1_TBL VALUES (2, 3, 'two'); +INSERT INTO J1_TBL VALUES (3, 2, 'three'); +INSERT INTO J1_TBL VALUES (4, 1, 'four'); +INSERT INTO J1_TBL VALUES (5, 0, 'five'); +INSERT INTO J1_TBL VALUES (6, 6, 'six'); +INSERT INTO J1_TBL VALUES (7, 7, 'seven'); +INSERT INTO J1_TBL VALUES (8, 8, 'eight'); +INSERT INTO J1_TBL VALUES (0, NULL, 'zero'); +INSERT INTO J1_TBL VALUES (NULL, NULL, 'null'); +INSERT INTO J1_TBL VALUES (NULL, 0, 'zero'); +INSERT INTO J2_TBL VALUES (1, -1); +INSERT INTO J2_TBL VALUES (2, 2); +INSERT INTO J2_TBL VALUES (3, -3); +INSERT INTO J2_TBL VALUES (2, 4); +INSERT INTO J2_TBL VALUES (5, -5); +INSERT INTO J2_TBL VALUES (5, -5); +INSERT INTO J2_TBL VALUES (0, NULL); +INSERT INTO J2_TBL VALUES (NULL, NULL); +INSERT INTO J2_TBL VALUES (NULL, 0); +-- useful in some tests below +create temp table onerow(); +insert into onerow default values; +analyze onerow; +-- +-- CORRELATION NAMES +-- Make sure that table/column aliases are supported +-- before diving into more complex join syntax. 
+-- +SELECT * + FROM J1_TBL AS tx; + i | j | t +---+---+------- + 1 | 4 | one + 2 | 3 | two + 3 | 2 | three + 4 | 1 | four + 5 | 0 | five + 6 | 6 | six + 7 | 7 | seven + 8 | 8 | eight + 0 | | zero + | | null + | 0 | zero +(11 rows) + +SELECT * + FROM J1_TBL tx; + i | j | t +---+---+------- + 1 | 4 | one + 2 | 3 | two + 3 | 2 | three + 4 | 1 | four + 5 | 0 | five + 6 | 6 | six + 7 | 7 | seven + 8 | 8 | eight + 0 | | zero + | | null + | 0 | zero +(11 rows) + +SELECT * + FROM J1_TBL AS t1 (a, b, c); + a | b | c +---+---+------- + 1 | 4 | one + 2 | 3 | two + 3 | 2 | three + 4 | 1 | four + 5 | 0 | five + 6 | 6 | six + 7 | 7 | seven + 8 | 8 | eight + 0 | | zero + | | null + | 0 | zero +(11 rows) + +SELECT * + FROM J1_TBL t1 (a, b, c); + a | b | c +---+---+------- + 1 | 4 | one + 2 | 3 | two + 3 | 2 | three + 4 | 1 | four + 5 | 0 | five + 6 | 6 | six + 7 | 7 | seven + 8 | 8 | eight + 0 | | zero + | | null + | 0 | zero +(11 rows) + +SELECT * + FROM J1_TBL t1 (a, b, c), J2_TBL t2 (d, e); + a | b | c | d | e +---+---+-------+---+---- + 1 | 4 | one | 1 | -1 + 2 | 3 | two | 1 | -1 + 3 | 2 | three | 1 | -1 + 4 | 1 | four | 1 | -1 + 5 | 0 | five | 1 | -1 + 6 | 6 | six | 1 | -1 + 7 | 7 | seven | 1 | -1 + 8 | 8 | eight | 1 | -1 + 0 | | zero | 1 | -1 + | | null | 1 | -1 + | 0 | zero | 1 | -1 + 1 | 4 | one | 2 | 2 + 2 | 3 | two | 2 | 2 + 3 | 2 | three | 2 | 2 + 4 | 1 | four | 2 | 2 + 5 | 0 | five | 2 | 2 + 6 | 6 | six | 2 | 2 + 7 | 7 | seven | 2 | 2 + 8 | 8 | eight | 2 | 2 + 0 | | zero | 2 | 2 + | | null | 2 | 2 + | 0 | zero | 2 | 2 + 1 | 4 | one | 3 | -3 + 2 | 3 | two | 3 | -3 + 3 | 2 | three | 3 | -3 + 4 | 1 | four | 3 | -3 + 5 | 0 | five | 3 | -3 + 6 | 6 | six | 3 | -3 + 7 | 7 | seven | 3 | -3 + 8 | 8 | eight | 3 | -3 + 0 | | zero | 3 | -3 + | | null | 3 | -3 + | 0 | zero | 3 | -3 + 1 | 4 | one | 2 | 4 + 2 | 3 | two | 2 | 4 + 3 | 2 | three | 2 | 4 + 4 | 1 | four | 2 | 4 + 5 | 0 | five | 2 | 4 + 6 | 6 | six | 2 | 4 + 7 | 7 | seven | 2 | 4 + 8 | 8 | eight | 2 | 4 + 0 | | zero | 2 | 4 + | | null | 2 | 4 + | 0 | zero | 2 | 4 + 1 | 4 | one | 5 | -5 + 2 | 3 | two | 5 | -5 + 3 | 2 | three | 5 | -5 + 4 | 1 | four | 5 | -5 + 5 | 0 | five | 5 | -5 + 6 | 6 | six | 5 | -5 + 7 | 7 | seven | 5 | -5 + 8 | 8 | eight | 5 | -5 + 0 | | zero | 5 | -5 + | | null | 5 | -5 + | 0 | zero | 5 | -5 + 1 | 4 | one | 5 | -5 + 2 | 3 | two | 5 | -5 + 3 | 2 | three | 5 | -5 + 4 | 1 | four | 5 | -5 + 5 | 0 | five | 5 | -5 + 6 | 6 | six | 5 | -5 + 7 | 7 | seven | 5 | -5 + 8 | 8 | eight | 5 | -5 + 0 | | zero | 5 | -5 + | | null | 5 | -5 + | 0 | zero | 5 | -5 + 1 | 4 | one | 0 | + 2 | 3 | two | 0 | + 3 | 2 | three | 0 | + 4 | 1 | four | 0 | + 5 | 0 | five | 0 | + 6 | 6 | six | 0 | + 7 | 7 | seven | 0 | + 8 | 8 | eight | 0 | + 0 | | zero | 0 | + | | null | 0 | + | 0 | zero | 0 | + 1 | 4 | one | | + 2 | 3 | two | | + 3 | 2 | three | | + 4 | 1 | four | | + 5 | 0 | five | | + 6 | 6 | six | | + 7 | 7 | seven | | + 8 | 8 | eight | | + 0 | | zero | | + | | null | | + | 0 | zero | | + 1 | 4 | one | | 0 + 2 | 3 | two | | 0 + 3 | 2 | three | | 0 + 4 | 1 | four | | 0 + 5 | 0 | five | | 0 + 6 | 6 | six | | 0 + 7 | 7 | seven | | 0 + 8 | 8 | eight | | 0 + 0 | | zero | | 0 + | | null | | 0 + | 0 | zero | | 0 +(99 rows) + +SELECT t1.a, t2.e + FROM J1_TBL t1 (a, b, c), J2_TBL t2 (d, e) + WHERE t1.a = t2.d; + a | e +---+---- + 0 | + 1 | -1 + 2 | 2 + 2 | 4 + 3 | -3 + 5 | -5 + 5 | -5 +(7 rows) + +-- +-- CROSS JOIN +-- Qualifications are not allowed on cross joins, +-- which degenerate into a standard unqualified inner join. 
+-- +SELECT * + FROM J1_TBL CROSS JOIN J2_TBL; + i | j | t | i | k +---+---+-------+---+---- + 1 | 4 | one | 1 | -1 + 2 | 3 | two | 1 | -1 + 3 | 2 | three | 1 | -1 + 4 | 1 | four | 1 | -1 + 5 | 0 | five | 1 | -1 + 6 | 6 | six | 1 | -1 + 7 | 7 | seven | 1 | -1 + 8 | 8 | eight | 1 | -1 + 0 | | zero | 1 | -1 + | | null | 1 | -1 + | 0 | zero | 1 | -1 + 1 | 4 | one | 2 | 2 + 2 | 3 | two | 2 | 2 + 3 | 2 | three | 2 | 2 + 4 | 1 | four | 2 | 2 + 5 | 0 | five | 2 | 2 + 6 | 6 | six | 2 | 2 + 7 | 7 | seven | 2 | 2 + 8 | 8 | eight | 2 | 2 + 0 | | zero | 2 | 2 + | | null | 2 | 2 + | 0 | zero | 2 | 2 + 1 | 4 | one | 3 | -3 + 2 | 3 | two | 3 | -3 + 3 | 2 | three | 3 | -3 + 4 | 1 | four | 3 | -3 + 5 | 0 | five | 3 | -3 + 6 | 6 | six | 3 | -3 + 7 | 7 | seven | 3 | -3 + 8 | 8 | eight | 3 | -3 + 0 | | zero | 3 | -3 + | | null | 3 | -3 + | 0 | zero | 3 | -3 + 1 | 4 | one | 2 | 4 + 2 | 3 | two | 2 | 4 + 3 | 2 | three | 2 | 4 + 4 | 1 | four | 2 | 4 + 5 | 0 | five | 2 | 4 + 6 | 6 | six | 2 | 4 + 7 | 7 | seven | 2 | 4 + 8 | 8 | eight | 2 | 4 + 0 | | zero | 2 | 4 + | | null | 2 | 4 + | 0 | zero | 2 | 4 + 1 | 4 | one | 5 | -5 + 2 | 3 | two | 5 | -5 + 3 | 2 | three | 5 | -5 + 4 | 1 | four | 5 | -5 + 5 | 0 | five | 5 | -5 + 6 | 6 | six | 5 | -5 + 7 | 7 | seven | 5 | -5 + 8 | 8 | eight | 5 | -5 + 0 | | zero | 5 | -5 + | | null | 5 | -5 + | 0 | zero | 5 | -5 + 1 | 4 | one | 5 | -5 + 2 | 3 | two | 5 | -5 + 3 | 2 | three | 5 | -5 + 4 | 1 | four | 5 | -5 + 5 | 0 | five | 5 | -5 + 6 | 6 | six | 5 | -5 + 7 | 7 | seven | 5 | -5 + 8 | 8 | eight | 5 | -5 + 0 | | zero | 5 | -5 + | | null | 5 | -5 + | 0 | zero | 5 | -5 + 1 | 4 | one | 0 | + 2 | 3 | two | 0 | + 3 | 2 | three | 0 | + 4 | 1 | four | 0 | + 5 | 0 | five | 0 | + 6 | 6 | six | 0 | + 7 | 7 | seven | 0 | + 8 | 8 | eight | 0 | + 0 | | zero | 0 | + | | null | 0 | + | 0 | zero | 0 | + 1 | 4 | one | | + 2 | 3 | two | | + 3 | 2 | three | | + 4 | 1 | four | | + 5 | 0 | five | | + 6 | 6 | six | | + 7 | 7 | seven | | + 8 | 8 | eight | | + 0 | | zero | | + | | null | | + | 0 | zero | | + 1 | 4 | one | | 0 + 2 | 3 | two | | 0 + 3 | 2 | three | | 0 + 4 | 1 | four | | 0 + 5 | 0 | five | | 0 + 6 | 6 | six | | 0 + 7 | 7 | seven | | 0 + 8 | 8 | eight | | 0 + 0 | | zero | | 0 + | | null | | 0 + | 0 | zero | | 0 +(99 rows) + +-- ambiguous column +SELECT i, k, t + FROM J1_TBL CROSS JOIN J2_TBL; +ERROR: column reference "i" is ambiguous +LINE 1: SELECT i, k, t + ^ +-- resolve previous ambiguity by specifying the table name +SELECT t1.i, k, t + FROM J1_TBL t1 CROSS JOIN J2_TBL t2; + i | k | t +---+----+------- + 1 | -1 | one + 2 | -1 | two + 3 | -1 | three + 4 | -1 | four + 5 | -1 | five + 6 | -1 | six + 7 | -1 | seven + 8 | -1 | eight + 0 | -1 | zero + | -1 | null + | -1 | zero + 1 | 2 | one + 2 | 2 | two + 3 | 2 | three + 4 | 2 | four + 5 | 2 | five + 6 | 2 | six + 7 | 2 | seven + 8 | 2 | eight + 0 | 2 | zero + | 2 | null + | 2 | zero + 1 | -3 | one + 2 | -3 | two + 3 | -3 | three + 4 | -3 | four + 5 | -3 | five + 6 | -3 | six + 7 | -3 | seven + 8 | -3 | eight + 0 | -3 | zero + | -3 | null + | -3 | zero + 1 | 4 | one + 2 | 4 | two + 3 | 4 | three + 4 | 4 | four + 5 | 4 | five + 6 | 4 | six + 7 | 4 | seven + 8 | 4 | eight + 0 | 4 | zero + | 4 | null + | 4 | zero + 1 | -5 | one + 2 | -5 | two + 3 | -5 | three + 4 | -5 | four + 5 | -5 | five + 6 | -5 | six + 7 | -5 | seven + 8 | -5 | eight + 0 | -5 | zero + | -5 | null + | -5 | zero + 1 | -5 | one + 2 | -5 | two + 3 | -5 | three + 4 | -5 | four + 5 | -5 | five + 6 | -5 | six + 7 | -5 | seven + 8 | -5 | eight + 0 | -5 | zero + | -5 | null + | 
-5 | zero + 1 | | one + 2 | | two + 3 | | three + 4 | | four + 5 | | five + 6 | | six + 7 | | seven + 8 | | eight + 0 | | zero + | | null + | | zero + 1 | | one + 2 | | two + 3 | | three + 4 | | four + 5 | | five + 6 | | six + 7 | | seven + 8 | | eight + 0 | | zero + | | null + | | zero + 1 | 0 | one + 2 | 0 | two + 3 | 0 | three + 4 | 0 | four + 5 | 0 | five + 6 | 0 | six + 7 | 0 | seven + 8 | 0 | eight + 0 | 0 | zero + | 0 | null + | 0 | zero +(99 rows) + +SELECT ii, tt, kk + FROM (J1_TBL CROSS JOIN J2_TBL) + AS tx (ii, jj, tt, ii2, kk); + ii | tt | kk +----+-------+---- + 1 | one | -1 + 2 | two | -1 + 3 | three | -1 + 4 | four | -1 + 5 | five | -1 + 6 | six | -1 + 7 | seven | -1 + 8 | eight | -1 + 0 | zero | -1 + | null | -1 + | zero | -1 + 1 | one | 2 + 2 | two | 2 + 3 | three | 2 + 4 | four | 2 + 5 | five | 2 + 6 | six | 2 + 7 | seven | 2 + 8 | eight | 2 + 0 | zero | 2 + | null | 2 + | zero | 2 + 1 | one | -3 + 2 | two | -3 + 3 | three | -3 + 4 | four | -3 + 5 | five | -3 + 6 | six | -3 + 7 | seven | -3 + 8 | eight | -3 + 0 | zero | -3 + | null | -3 + | zero | -3 + 1 | one | 4 + 2 | two | 4 + 3 | three | 4 + 4 | four | 4 + 5 | five | 4 + 6 | six | 4 + 7 | seven | 4 + 8 | eight | 4 + 0 | zero | 4 + | null | 4 + | zero | 4 + 1 | one | -5 + 2 | two | -5 + 3 | three | -5 + 4 | four | -5 + 5 | five | -5 + 6 | six | -5 + 7 | seven | -5 + 8 | eight | -5 + 0 | zero | -5 + | null | -5 + | zero | -5 + 1 | one | -5 + 2 | two | -5 + 3 | three | -5 + 4 | four | -5 + 5 | five | -5 + 6 | six | -5 + 7 | seven | -5 + 8 | eight | -5 + 0 | zero | -5 + | null | -5 + | zero | -5 + 1 | one | + 2 | two | + 3 | three | + 4 | four | + 5 | five | + 6 | six | + 7 | seven | + 8 | eight | + 0 | zero | + | null | + | zero | + 1 | one | + 2 | two | + 3 | three | + 4 | four | + 5 | five | + 6 | six | + 7 | seven | + 8 | eight | + 0 | zero | + | null | + | zero | + 1 | one | 0 + 2 | two | 0 + 3 | three | 0 + 4 | four | 0 + 5 | five | 0 + 6 | six | 0 + 7 | seven | 0 + 8 | eight | 0 + 0 | zero | 0 + | null | 0 + | zero | 0 +(99 rows) + +SELECT tx.ii, tx.jj, tx.kk + FROM (J1_TBL t1 (a, b, c) CROSS JOIN J2_TBL t2 (d, e)) + AS tx (ii, jj, tt, ii2, kk); + ii | jj | kk +----+----+---- + 1 | 4 | -1 + 2 | 3 | -1 + 3 | 2 | -1 + 4 | 1 | -1 + 5 | 0 | -1 + 6 | 6 | -1 + 7 | 7 | -1 + 8 | 8 | -1 + 0 | | -1 + | | -1 + | 0 | -1 + 1 | 4 | 2 + 2 | 3 | 2 + 3 | 2 | 2 + 4 | 1 | 2 + 5 | 0 | 2 + 6 | 6 | 2 + 7 | 7 | 2 + 8 | 8 | 2 + 0 | | 2 + | | 2 + | 0 | 2 + 1 | 4 | -3 + 2 | 3 | -3 + 3 | 2 | -3 + 4 | 1 | -3 + 5 | 0 | -3 + 6 | 6 | -3 + 7 | 7 | -3 + 8 | 8 | -3 + 0 | | -3 + | | -3 + | 0 | -3 + 1 | 4 | 4 + 2 | 3 | 4 + 3 | 2 | 4 + 4 | 1 | 4 + 5 | 0 | 4 + 6 | 6 | 4 + 7 | 7 | 4 + 8 | 8 | 4 + 0 | | 4 + | | 4 + | 0 | 4 + 1 | 4 | -5 + 2 | 3 | -5 + 3 | 2 | -5 + 4 | 1 | -5 + 5 | 0 | -5 + 6 | 6 | -5 + 7 | 7 | -5 + 8 | 8 | -5 + 0 | | -5 + | | -5 + | 0 | -5 + 1 | 4 | -5 + 2 | 3 | -5 + 3 | 2 | -5 + 4 | 1 | -5 + 5 | 0 | -5 + 6 | 6 | -5 + 7 | 7 | -5 + 8 | 8 | -5 + 0 | | -5 + | | -5 + | 0 | -5 + 1 | 4 | + 2 | 3 | + 3 | 2 | + 4 | 1 | + 5 | 0 | + 6 | 6 | + 7 | 7 | + 8 | 8 | + 0 | | + | | + | 0 | + 1 | 4 | + 2 | 3 | + 3 | 2 | + 4 | 1 | + 5 | 0 | + 6 | 6 | + 7 | 7 | + 8 | 8 | + 0 | | + | | + | 0 | + 1 | 4 | 0 + 2 | 3 | 0 + 3 | 2 | 0 + 4 | 1 | 0 + 5 | 0 | 0 + 6 | 6 | 0 + 7 | 7 | 0 + 8 | 8 | 0 + 0 | | 0 + | | 0 + | 0 | 0 +(99 rows) + +SELECT * + FROM J1_TBL CROSS JOIN J2_TBL a CROSS JOIN J2_TBL b; + i | j | t | i | k | i | k +---+---+-------+---+----+---+---- + 1 | 4 | one | 1 | -1 | 1 | -1 + 1 | 4 | one | 1 | -1 | 2 | 2 + 1 | 4 | one | 1 | -1 | 3 | -3 + 1 | 4 | one 
| 1 | -1 | 2 | 4 + 1 | 4 | one | 1 | -1 | 5 | -5 + 1 | 4 | one | 1 | -1 | 5 | -5 + 1 | 4 | one | 1 | -1 | 0 | + 1 | 4 | one | 1 | -1 | | + 1 | 4 | one | 1 | -1 | | 0 + 2 | 3 | two | 1 | -1 | 1 | -1 + 2 | 3 | two | 1 | -1 | 2 | 2 + 2 | 3 | two | 1 | -1 | 3 | -3 + 2 | 3 | two | 1 | -1 | 2 | 4 + 2 | 3 | two | 1 | -1 | 5 | -5 + 2 | 3 | two | 1 | -1 | 5 | -5 + 2 | 3 | two | 1 | -1 | 0 | + 2 | 3 | two | 1 | -1 | | + 2 | 3 | two | 1 | -1 | | 0 + 3 | 2 | three | 1 | -1 | 1 | -1 + 3 | 2 | three | 1 | -1 | 2 | 2 + 3 | 2 | three | 1 | -1 | 3 | -3 + 3 | 2 | three | 1 | -1 | 2 | 4 + 3 | 2 | three | 1 | -1 | 5 | -5 + 3 | 2 | three | 1 | -1 | 5 | -5 + 3 | 2 | three | 1 | -1 | 0 | + 3 | 2 | three | 1 | -1 | | + 3 | 2 | three | 1 | -1 | | 0 + 4 | 1 | four | 1 | -1 | 1 | -1 + 4 | 1 | four | 1 | -1 | 2 | 2 + 4 | 1 | four | 1 | -1 | 3 | -3 + 4 | 1 | four | 1 | -1 | 2 | 4 + 4 | 1 | four | 1 | -1 | 5 | -5 + 4 | 1 | four | 1 | -1 | 5 | -5 + 4 | 1 | four | 1 | -1 | 0 | + 4 | 1 | four | 1 | -1 | | + 4 | 1 | four | 1 | -1 | | 0 + 5 | 0 | five | 1 | -1 | 1 | -1 + 5 | 0 | five | 1 | -1 | 2 | 2 + 5 | 0 | five | 1 | -1 | 3 | -3 + 5 | 0 | five | 1 | -1 | 2 | 4 + 5 | 0 | five | 1 | -1 | 5 | -5 + 5 | 0 | five | 1 | -1 | 5 | -5 + 5 | 0 | five | 1 | -1 | 0 | + 5 | 0 | five | 1 | -1 | | + 5 | 0 | five | 1 | -1 | | 0 + 6 | 6 | six | 1 | -1 | 1 | -1 + 6 | 6 | six | 1 | -1 | 2 | 2 + 6 | 6 | six | 1 | -1 | 3 | -3 + 6 | 6 | six | 1 | -1 | 2 | 4 + 6 | 6 | six | 1 | -1 | 5 | -5 + 6 | 6 | six | 1 | -1 | 5 | -5 + 6 | 6 | six | 1 | -1 | 0 | + 6 | 6 | six | 1 | -1 | | + 6 | 6 | six | 1 | -1 | | 0 + 7 | 7 | seven | 1 | -1 | 1 | -1 + 7 | 7 | seven | 1 | -1 | 2 | 2 + 7 | 7 | seven | 1 | -1 | 3 | -3 + 7 | 7 | seven | 1 | -1 | 2 | 4 + 7 | 7 | seven | 1 | -1 | 5 | -5 + 7 | 7 | seven | 1 | -1 | 5 | -5 + 7 | 7 | seven | 1 | -1 | 0 | + 7 | 7 | seven | 1 | -1 | | + 7 | 7 | seven | 1 | -1 | | 0 + 8 | 8 | eight | 1 | -1 | 1 | -1 + 8 | 8 | eight | 1 | -1 | 2 | 2 + 8 | 8 | eight | 1 | -1 | 3 | -3 + 8 | 8 | eight | 1 | -1 | 2 | 4 + 8 | 8 | eight | 1 | -1 | 5 | -5 + 8 | 8 | eight | 1 | -1 | 5 | -5 + 8 | 8 | eight | 1 | -1 | 0 | + 8 | 8 | eight | 1 | -1 | | + 8 | 8 | eight | 1 | -1 | | 0 + 0 | | zero | 1 | -1 | 1 | -1 + 0 | | zero | 1 | -1 | 2 | 2 + 0 | | zero | 1 | -1 | 3 | -3 + 0 | | zero | 1 | -1 | 2 | 4 + 0 | | zero | 1 | -1 | 5 | -5 + 0 | | zero | 1 | -1 | 5 | -5 + 0 | | zero | 1 | -1 | 0 | + 0 | | zero | 1 | -1 | | + 0 | | zero | 1 | -1 | | 0 + | | null | 1 | -1 | 1 | -1 + | | null | 1 | -1 | 2 | 2 + | | null | 1 | -1 | 3 | -3 + | | null | 1 | -1 | 2 | 4 + | | null | 1 | -1 | 5 | -5 + | | null | 1 | -1 | 5 | -5 + | | null | 1 | -1 | 0 | + | | null | 1 | -1 | | + | | null | 1 | -1 | | 0 + | 0 | zero | 1 | -1 | 1 | -1 + | 0 | zero | 1 | -1 | 2 | 2 + | 0 | zero | 1 | -1 | 3 | -3 + | 0 | zero | 1 | -1 | 2 | 4 + | 0 | zero | 1 | -1 | 5 | -5 + | 0 | zero | 1 | -1 | 5 | -5 + | 0 | zero | 1 | -1 | 0 | + | 0 | zero | 1 | -1 | | + | 0 | zero | 1 | -1 | | 0 + 1 | 4 | one | 2 | 2 | 1 | -1 + 1 | 4 | one | 2 | 2 | 2 | 2 + 1 | 4 | one | 2 | 2 | 3 | -3 + 1 | 4 | one | 2 | 2 | 2 | 4 + 1 | 4 | one | 2 | 2 | 5 | -5 + 1 | 4 | one | 2 | 2 | 5 | -5 + 1 | 4 | one | 2 | 2 | 0 | + 1 | 4 | one | 2 | 2 | | + 1 | 4 | one | 2 | 2 | | 0 + 2 | 3 | two | 2 | 2 | 1 | -1 + 2 | 3 | two | 2 | 2 | 2 | 2 + 2 | 3 | two | 2 | 2 | 3 | -3 + 2 | 3 | two | 2 | 2 | 2 | 4 + 2 | 3 | two | 2 | 2 | 5 | -5 + 2 | 3 | two | 2 | 2 | 5 | -5 + 2 | 3 | two | 2 | 2 | 0 | + 2 | 3 | two | 2 | 2 | | + 2 | 3 | two | 2 | 2 | | 0 + 3 | 2 | three | 2 | 2 | 1 | -1 + 3 | 2 | three | 2 | 2 | 2 | 2 + 3 | 2 | 
three | 2 | 2 | 3 | -3 + 3 | 2 | three | 2 | 2 | 2 | 4 + 3 | 2 | three | 2 | 2 | 5 | -5 + 3 | 2 | three | 2 | 2 | 5 | -5 + 3 | 2 | three | 2 | 2 | 0 | + 3 | 2 | three | 2 | 2 | | + 3 | 2 | three | 2 | 2 | | 0 + 4 | 1 | four | 2 | 2 | 1 | -1 + 4 | 1 | four | 2 | 2 | 2 | 2 + 4 | 1 | four | 2 | 2 | 3 | -3 + 4 | 1 | four | 2 | 2 | 2 | 4 + 4 | 1 | four | 2 | 2 | 5 | -5 + 4 | 1 | four | 2 | 2 | 5 | -5 + 4 | 1 | four | 2 | 2 | 0 | + 4 | 1 | four | 2 | 2 | | + 4 | 1 | four | 2 | 2 | | 0 + 5 | 0 | five | 2 | 2 | 1 | -1 + 5 | 0 | five | 2 | 2 | 2 | 2 + 5 | 0 | five | 2 | 2 | 3 | -3 + 5 | 0 | five | 2 | 2 | 2 | 4 + 5 | 0 | five | 2 | 2 | 5 | -5 + 5 | 0 | five | 2 | 2 | 5 | -5 + 5 | 0 | five | 2 | 2 | 0 | + 5 | 0 | five | 2 | 2 | | + 5 | 0 | five | 2 | 2 | | 0 + 6 | 6 | six | 2 | 2 | 1 | -1 + 6 | 6 | six | 2 | 2 | 2 | 2 + 6 | 6 | six | 2 | 2 | 3 | -3 + 6 | 6 | six | 2 | 2 | 2 | 4 + 6 | 6 | six | 2 | 2 | 5 | -5 + 6 | 6 | six | 2 | 2 | 5 | -5 + 6 | 6 | six | 2 | 2 | 0 | + 6 | 6 | six | 2 | 2 | | + 6 | 6 | six | 2 | 2 | | 0 + 7 | 7 | seven | 2 | 2 | 1 | -1 + 7 | 7 | seven | 2 | 2 | 2 | 2 + 7 | 7 | seven | 2 | 2 | 3 | -3 + 7 | 7 | seven | 2 | 2 | 2 | 4 + 7 | 7 | seven | 2 | 2 | 5 | -5 + 7 | 7 | seven | 2 | 2 | 5 | -5 + 7 | 7 | seven | 2 | 2 | 0 | + 7 | 7 | seven | 2 | 2 | | + 7 | 7 | seven | 2 | 2 | | 0 + 8 | 8 | eight | 2 | 2 | 1 | -1 + 8 | 8 | eight | 2 | 2 | 2 | 2 + 8 | 8 | eight | 2 | 2 | 3 | -3 + 8 | 8 | eight | 2 | 2 | 2 | 4 + 8 | 8 | eight | 2 | 2 | 5 | -5 + 8 | 8 | eight | 2 | 2 | 5 | -5 + 8 | 8 | eight | 2 | 2 | 0 | + 8 | 8 | eight | 2 | 2 | | + 8 | 8 | eight | 2 | 2 | | 0 + 0 | | zero | 2 | 2 | 1 | -1 + 0 | | zero | 2 | 2 | 2 | 2 + 0 | | zero | 2 | 2 | 3 | -3 + 0 | | zero | 2 | 2 | 2 | 4 + 0 | | zero | 2 | 2 | 5 | -5 + 0 | | zero | 2 | 2 | 5 | -5 + 0 | | zero | 2 | 2 | 0 | + 0 | | zero | 2 | 2 | | + 0 | | zero | 2 | 2 | | 0 + | | null | 2 | 2 | 1 | -1 + | | null | 2 | 2 | 2 | 2 + | | null | 2 | 2 | 3 | -3 + | | null | 2 | 2 | 2 | 4 + | | null | 2 | 2 | 5 | -5 + | | null | 2 | 2 | 5 | -5 + | | null | 2 | 2 | 0 | + | | null | 2 | 2 | | + | | null | 2 | 2 | | 0 + | 0 | zero | 2 | 2 | 1 | -1 + | 0 | zero | 2 | 2 | 2 | 2 + | 0 | zero | 2 | 2 | 3 | -3 + | 0 | zero | 2 | 2 | 2 | 4 + | 0 | zero | 2 | 2 | 5 | -5 + | 0 | zero | 2 | 2 | 5 | -5 + | 0 | zero | 2 | 2 | 0 | + | 0 | zero | 2 | 2 | | + | 0 | zero | 2 | 2 | | 0 + 1 | 4 | one | 3 | -3 | 1 | -1 + 1 | 4 | one | 3 | -3 | 2 | 2 + 1 | 4 | one | 3 | -3 | 3 | -3 + 1 | 4 | one | 3 | -3 | 2 | 4 + 1 | 4 | one | 3 | -3 | 5 | -5 + 1 | 4 | one | 3 | -3 | 5 | -5 + 1 | 4 | one | 3 | -3 | 0 | + 1 | 4 | one | 3 | -3 | | + 1 | 4 | one | 3 | -3 | | 0 + 2 | 3 | two | 3 | -3 | 1 | -1 + 2 | 3 | two | 3 | -3 | 2 | 2 + 2 | 3 | two | 3 | -3 | 3 | -3 + 2 | 3 | two | 3 | -3 | 2 | 4 + 2 | 3 | two | 3 | -3 | 5 | -5 + 2 | 3 | two | 3 | -3 | 5 | -5 + 2 | 3 | two | 3 | -3 | 0 | + 2 | 3 | two | 3 | -3 | | + 2 | 3 | two | 3 | -3 | | 0 + 3 | 2 | three | 3 | -3 | 1 | -1 + 3 | 2 | three | 3 | -3 | 2 | 2 + 3 | 2 | three | 3 | -3 | 3 | -3 + 3 | 2 | three | 3 | -3 | 2 | 4 + 3 | 2 | three | 3 | -3 | 5 | -5 + 3 | 2 | three | 3 | -3 | 5 | -5 + 3 | 2 | three | 3 | -3 | 0 | + 3 | 2 | three | 3 | -3 | | + 3 | 2 | three | 3 | -3 | | 0 + 4 | 1 | four | 3 | -3 | 1 | -1 + 4 | 1 | four | 3 | -3 | 2 | 2 + 4 | 1 | four | 3 | -3 | 3 | -3 + 4 | 1 | four | 3 | -3 | 2 | 4 + 4 | 1 | four | 3 | -3 | 5 | -5 + 4 | 1 | four | 3 | -3 | 5 | -5 + 4 | 1 | four | 3 | -3 | 0 | + 4 | 1 | four | 3 | -3 | | + 4 | 1 | four | 3 | -3 | | 0 + 5 | 0 | five | 3 | -3 | 1 | -1 + 5 | 0 | five | 3 | -3 | 2 | 2 + 5 | 0 | five | 
3 | -3 | 3 | -3 + 5 | 0 | five | 3 | -3 | 2 | 4 + 5 | 0 | five | 3 | -3 | 5 | -5 + 5 | 0 | five | 3 | -3 | 5 | -5 + 5 | 0 | five | 3 | -3 | 0 | + 5 | 0 | five | 3 | -3 | | + 5 | 0 | five | 3 | -3 | | 0 + 6 | 6 | six | 3 | -3 | 1 | -1 + 6 | 6 | six | 3 | -3 | 2 | 2 + 6 | 6 | six | 3 | -3 | 3 | -3 + 6 | 6 | six | 3 | -3 | 2 | 4 + 6 | 6 | six | 3 | -3 | 5 | -5 + 6 | 6 | six | 3 | -3 | 5 | -5 + 6 | 6 | six | 3 | -3 | 0 | + 6 | 6 | six | 3 | -3 | | + 6 | 6 | six | 3 | -3 | | 0 + 7 | 7 | seven | 3 | -3 | 1 | -1 + 7 | 7 | seven | 3 | -3 | 2 | 2 + 7 | 7 | seven | 3 | -3 | 3 | -3 + 7 | 7 | seven | 3 | -3 | 2 | 4 + 7 | 7 | seven | 3 | -3 | 5 | -5 + 7 | 7 | seven | 3 | -3 | 5 | -5 + 7 | 7 | seven | 3 | -3 | 0 | + 7 | 7 | seven | 3 | -3 | | + 7 | 7 | seven | 3 | -3 | | 0 + 8 | 8 | eight | 3 | -3 | 1 | -1 + 8 | 8 | eight | 3 | -3 | 2 | 2 + 8 | 8 | eight | 3 | -3 | 3 | -3 + 8 | 8 | eight | 3 | -3 | 2 | 4 + 8 | 8 | eight | 3 | -3 | 5 | -5 + 8 | 8 | eight | 3 | -3 | 5 | -5 + 8 | 8 | eight | 3 | -3 | 0 | + 8 | 8 | eight | 3 | -3 | | + 8 | 8 | eight | 3 | -3 | | 0 + 0 | | zero | 3 | -3 | 1 | -1 + 0 | | zero | 3 | -3 | 2 | 2 + 0 | | zero | 3 | -3 | 3 | -3 + 0 | | zero | 3 | -3 | 2 | 4 + 0 | | zero | 3 | -3 | 5 | -5 + 0 | | zero | 3 | -3 | 5 | -5 + 0 | | zero | 3 | -3 | 0 | + 0 | | zero | 3 | -3 | | + 0 | | zero | 3 | -3 | | 0 + | | null | 3 | -3 | 1 | -1 + | | null | 3 | -3 | 2 | 2 + | | null | 3 | -3 | 3 | -3 + | | null | 3 | -3 | 2 | 4 + | | null | 3 | -3 | 5 | -5 + | | null | 3 | -3 | 5 | -5 + | | null | 3 | -3 | 0 | + | | null | 3 | -3 | | + | | null | 3 | -3 | | 0 + | 0 | zero | 3 | -3 | 1 | -1 + | 0 | zero | 3 | -3 | 2 | 2 + | 0 | zero | 3 | -3 | 3 | -3 + | 0 | zero | 3 | -3 | 2 | 4 + | 0 | zero | 3 | -3 | 5 | -5 + | 0 | zero | 3 | -3 | 5 | -5 + | 0 | zero | 3 | -3 | 0 | + | 0 | zero | 3 | -3 | | + | 0 | zero | 3 | -3 | | 0 + 1 | 4 | one | 2 | 4 | 1 | -1 + 1 | 4 | one | 2 | 4 | 2 | 2 + 1 | 4 | one | 2 | 4 | 3 | -3 + 1 | 4 | one | 2 | 4 | 2 | 4 + 1 | 4 | one | 2 | 4 | 5 | -5 + 1 | 4 | one | 2 | 4 | 5 | -5 + 1 | 4 | one | 2 | 4 | 0 | + 1 | 4 | one | 2 | 4 | | + 1 | 4 | one | 2 | 4 | | 0 + 2 | 3 | two | 2 | 4 | 1 | -1 + 2 | 3 | two | 2 | 4 | 2 | 2 + 2 | 3 | two | 2 | 4 | 3 | -3 + 2 | 3 | two | 2 | 4 | 2 | 4 + 2 | 3 | two | 2 | 4 | 5 | -5 + 2 | 3 | two | 2 | 4 | 5 | -5 + 2 | 3 | two | 2 | 4 | 0 | + 2 | 3 | two | 2 | 4 | | + 2 | 3 | two | 2 | 4 | | 0 + 3 | 2 | three | 2 | 4 | 1 | -1 + 3 | 2 | three | 2 | 4 | 2 | 2 + 3 | 2 | three | 2 | 4 | 3 | -3 + 3 | 2 | three | 2 | 4 | 2 | 4 + 3 | 2 | three | 2 | 4 | 5 | -5 + 3 | 2 | three | 2 | 4 | 5 | -5 + 3 | 2 | three | 2 | 4 | 0 | + 3 | 2 | three | 2 | 4 | | + 3 | 2 | three | 2 | 4 | | 0 + 4 | 1 | four | 2 | 4 | 1 | -1 + 4 | 1 | four | 2 | 4 | 2 | 2 + 4 | 1 | four | 2 | 4 | 3 | -3 + 4 | 1 | four | 2 | 4 | 2 | 4 + 4 | 1 | four | 2 | 4 | 5 | -5 + 4 | 1 | four | 2 | 4 | 5 | -5 + 4 | 1 | four | 2 | 4 | 0 | + 4 | 1 | four | 2 | 4 | | + 4 | 1 | four | 2 | 4 | | 0 + 5 | 0 | five | 2 | 4 | 1 | -1 + 5 | 0 | five | 2 | 4 | 2 | 2 + 5 | 0 | five | 2 | 4 | 3 | -3 + 5 | 0 | five | 2 | 4 | 2 | 4 + 5 | 0 | five | 2 | 4 | 5 | -5 + 5 | 0 | five | 2 | 4 | 5 | -5 + 5 | 0 | five | 2 | 4 | 0 | + 5 | 0 | five | 2 | 4 | | + 5 | 0 | five | 2 | 4 | | 0 + 6 | 6 | six | 2 | 4 | 1 | -1 + 6 | 6 | six | 2 | 4 | 2 | 2 + 6 | 6 | six | 2 | 4 | 3 | -3 + 6 | 6 | six | 2 | 4 | 2 | 4 + 6 | 6 | six | 2 | 4 | 5 | -5 + 6 | 6 | six | 2 | 4 | 5 | -5 + 6 | 6 | six | 2 | 4 | 0 | + 6 | 6 | six | 2 | 4 | | + 6 | 6 | six | 2 | 4 | | 0 + 7 | 7 | seven | 2 | 4 | 1 | -1 + 7 | 7 | seven | 2 | 4 | 2 | 2 + 7 | 7 | 
seven | 2 | 4 | 3 | -3 + 7 | 7 | seven | 2 | 4 | 2 | 4 + 7 | 7 | seven | 2 | 4 | 5 | -5 + 7 | 7 | seven | 2 | 4 | 5 | -5 + 7 | 7 | seven | 2 | 4 | 0 | + 7 | 7 | seven | 2 | 4 | | + 7 | 7 | seven | 2 | 4 | | 0 + 8 | 8 | eight | 2 | 4 | 1 | -1 + 8 | 8 | eight | 2 | 4 | 2 | 2 + 8 | 8 | eight | 2 | 4 | 3 | -3 + 8 | 8 | eight | 2 | 4 | 2 | 4 + 8 | 8 | eight | 2 | 4 | 5 | -5 + 8 | 8 | eight | 2 | 4 | 5 | -5 + 8 | 8 | eight | 2 | 4 | 0 | + 8 | 8 | eight | 2 | 4 | | + 8 | 8 | eight | 2 | 4 | | 0 + 0 | | zero | 2 | 4 | 1 | -1 + 0 | | zero | 2 | 4 | 2 | 2 + 0 | | zero | 2 | 4 | 3 | -3 + 0 | | zero | 2 | 4 | 2 | 4 + 0 | | zero | 2 | 4 | 5 | -5 + 0 | | zero | 2 | 4 | 5 | -5 + 0 | | zero | 2 | 4 | 0 | + 0 | | zero | 2 | 4 | | + 0 | | zero | 2 | 4 | | 0 + | | null | 2 | 4 | 1 | -1 + | | null | 2 | 4 | 2 | 2 + | | null | 2 | 4 | 3 | -3 + | | null | 2 | 4 | 2 | 4 + | | null | 2 | 4 | 5 | -5 + | | null | 2 | 4 | 5 | -5 + | | null | 2 | 4 | 0 | + | | null | 2 | 4 | | + | | null | 2 | 4 | | 0 + | 0 | zero | 2 | 4 | 1 | -1 + | 0 | zero | 2 | 4 | 2 | 2 + | 0 | zero | 2 | 4 | 3 | -3 + | 0 | zero | 2 | 4 | 2 | 4 + | 0 | zero | 2 | 4 | 5 | -5 + | 0 | zero | 2 | 4 | 5 | -5 + | 0 | zero | 2 | 4 | 0 | + | 0 | zero | 2 | 4 | | + | 0 | zero | 2 | 4 | | 0 + 1 | 4 | one | 5 | -5 | 1 | -1 + 1 | 4 | one | 5 | -5 | 2 | 2 + 1 | 4 | one | 5 | -5 | 3 | -3 + 1 | 4 | one | 5 | -5 | 2 | 4 + 1 | 4 | one | 5 | -5 | 5 | -5 + 1 | 4 | one | 5 | -5 | 5 | -5 + 1 | 4 | one | 5 | -5 | 0 | + 1 | 4 | one | 5 | -5 | | + 1 | 4 | one | 5 | -5 | | 0 + 2 | 3 | two | 5 | -5 | 1 | -1 + 2 | 3 | two | 5 | -5 | 2 | 2 + 2 | 3 | two | 5 | -5 | 3 | -3 + 2 | 3 | two | 5 | -5 | 2 | 4 + 2 | 3 | two | 5 | -5 | 5 | -5 + 2 | 3 | two | 5 | -5 | 5 | -5 + 2 | 3 | two | 5 | -5 | 0 | + 2 | 3 | two | 5 | -5 | | + 2 | 3 | two | 5 | -5 | | 0 + 3 | 2 | three | 5 | -5 | 1 | -1 + 3 | 2 | three | 5 | -5 | 2 | 2 + 3 | 2 | three | 5 | -5 | 3 | -3 + 3 | 2 | three | 5 | -5 | 2 | 4 + 3 | 2 | three | 5 | -5 | 5 | -5 + 3 | 2 | three | 5 | -5 | 5 | -5 + 3 | 2 | three | 5 | -5 | 0 | + 3 | 2 | three | 5 | -5 | | + 3 | 2 | three | 5 | -5 | | 0 + 4 | 1 | four | 5 | -5 | 1 | -1 + 4 | 1 | four | 5 | -5 | 2 | 2 + 4 | 1 | four | 5 | -5 | 3 | -3 + 4 | 1 | four | 5 | -5 | 2 | 4 + 4 | 1 | four | 5 | -5 | 5 | -5 + 4 | 1 | four | 5 | -5 | 5 | -5 + 4 | 1 | four | 5 | -5 | 0 | + 4 | 1 | four | 5 | -5 | | + 4 | 1 | four | 5 | -5 | | 0 + 5 | 0 | five | 5 | -5 | 1 | -1 + 5 | 0 | five | 5 | -5 | 2 | 2 + 5 | 0 | five | 5 | -5 | 3 | -3 + 5 | 0 | five | 5 | -5 | 2 | 4 + 5 | 0 | five | 5 | -5 | 5 | -5 + 5 | 0 | five | 5 | -5 | 5 | -5 + 5 | 0 | five | 5 | -5 | 0 | + 5 | 0 | five | 5 | -5 | | + 5 | 0 | five | 5 | -5 | | 0 + 6 | 6 | six | 5 | -5 | 1 | -1 + 6 | 6 | six | 5 | -5 | 2 | 2 + 6 | 6 | six | 5 | -5 | 3 | -3 + 6 | 6 | six | 5 | -5 | 2 | 4 + 6 | 6 | six | 5 | -5 | 5 | -5 + 6 | 6 | six | 5 | -5 | 5 | -5 + 6 | 6 | six | 5 | -5 | 0 | + 6 | 6 | six | 5 | -5 | | + 6 | 6 | six | 5 | -5 | | 0 + 7 | 7 | seven | 5 | -5 | 1 | -1 + 7 | 7 | seven | 5 | -5 | 2 | 2 + 7 | 7 | seven | 5 | -5 | 3 | -3 + 7 | 7 | seven | 5 | -5 | 2 | 4 + 7 | 7 | seven | 5 | -5 | 5 | -5 + 7 | 7 | seven | 5 | -5 | 5 | -5 + 7 | 7 | seven | 5 | -5 | 0 | + 7 | 7 | seven | 5 | -5 | | + 7 | 7 | seven | 5 | -5 | | 0 + 8 | 8 | eight | 5 | -5 | 1 | -1 + 8 | 8 | eight | 5 | -5 | 2 | 2 + 8 | 8 | eight | 5 | -5 | 3 | -3 + 8 | 8 | eight | 5 | -5 | 2 | 4 + 8 | 8 | eight | 5 | -5 | 5 | -5 + 8 | 8 | eight | 5 | -5 | 5 | -5 + 8 | 8 | eight | 5 | -5 | 0 | + 8 | 8 | eight | 5 | -5 | | + 8 | 8 | eight | 5 | -5 | | 0 + 0 | | zero | 5 | -5 | 1 | -1 + 0 | 
| zero | 5 | -5 | 2 | 2 + 0 | | zero | 5 | -5 | 3 | -3 + 0 | | zero | 5 | -5 | 2 | 4 + 0 | | zero | 5 | -5 | 5 | -5 + 0 | | zero | 5 | -5 | 5 | -5 + 0 | | zero | 5 | -5 | 0 | + 0 | | zero | 5 | -5 | | + 0 | | zero | 5 | -5 | | 0 + | | null | 5 | -5 | 1 | -1 + | | null | 5 | -5 | 2 | 2 + | | null | 5 | -5 | 3 | -3 + | | null | 5 | -5 | 2 | 4 + | | null | 5 | -5 | 5 | -5 + | | null | 5 | -5 | 5 | -5 + | | null | 5 | -5 | 0 | + | | null | 5 | -5 | | + | | null | 5 | -5 | | 0 + | 0 | zero | 5 | -5 | 1 | -1 + | 0 | zero | 5 | -5 | 2 | 2 + | 0 | zero | 5 | -5 | 3 | -3 + | 0 | zero | 5 | -5 | 2 | 4 + | 0 | zero | 5 | -5 | 5 | -5 + | 0 | zero | 5 | -5 | 5 | -5 + | 0 | zero | 5 | -5 | 0 | + | 0 | zero | 5 | -5 | | + | 0 | zero | 5 | -5 | | 0 + 1 | 4 | one | 5 | -5 | 1 | -1 + 1 | 4 | one | 5 | -5 | 2 | 2 + 1 | 4 | one | 5 | -5 | 3 | -3 + 1 | 4 | one | 5 | -5 | 2 | 4 + 1 | 4 | one | 5 | -5 | 5 | -5 + 1 | 4 | one | 5 | -5 | 5 | -5 + 1 | 4 | one | 5 | -5 | 0 | + 1 | 4 | one | 5 | -5 | | + 1 | 4 | one | 5 | -5 | | 0 + 2 | 3 | two | 5 | -5 | 1 | -1 + 2 | 3 | two | 5 | -5 | 2 | 2 + 2 | 3 | two | 5 | -5 | 3 | -3 + 2 | 3 | two | 5 | -5 | 2 | 4 + 2 | 3 | two | 5 | -5 | 5 | -5 + 2 | 3 | two | 5 | -5 | 5 | -5 + 2 | 3 | two | 5 | -5 | 0 | + 2 | 3 | two | 5 | -5 | | + 2 | 3 | two | 5 | -5 | | 0 + 3 | 2 | three | 5 | -5 | 1 | -1 + 3 | 2 | three | 5 | -5 | 2 | 2 + 3 | 2 | three | 5 | -5 | 3 | -3 + 3 | 2 | three | 5 | -5 | 2 | 4 + 3 | 2 | three | 5 | -5 | 5 | -5 + 3 | 2 | three | 5 | -5 | 5 | -5 + 3 | 2 | three | 5 | -5 | 0 | + 3 | 2 | three | 5 | -5 | | + 3 | 2 | three | 5 | -5 | | 0 + 4 | 1 | four | 5 | -5 | 1 | -1 + 4 | 1 | four | 5 | -5 | 2 | 2 + 4 | 1 | four | 5 | -5 | 3 | -3 + 4 | 1 | four | 5 | -5 | 2 | 4 + 4 | 1 | four | 5 | -5 | 5 | -5 + 4 | 1 | four | 5 | -5 | 5 | -5 + 4 | 1 | four | 5 | -5 | 0 | + 4 | 1 | four | 5 | -5 | | + 4 | 1 | four | 5 | -5 | | 0 + 5 | 0 | five | 5 | -5 | 1 | -1 + 5 | 0 | five | 5 | -5 | 2 | 2 + 5 | 0 | five | 5 | -5 | 3 | -3 + 5 | 0 | five | 5 | -5 | 2 | 4 + 5 | 0 | five | 5 | -5 | 5 | -5 + 5 | 0 | five | 5 | -5 | 5 | -5 + 5 | 0 | five | 5 | -5 | 0 | + 5 | 0 | five | 5 | -5 | | + 5 | 0 | five | 5 | -5 | | 0 + 6 | 6 | six | 5 | -5 | 1 | -1 + 6 | 6 | six | 5 | -5 | 2 | 2 + 6 | 6 | six | 5 | -5 | 3 | -3 + 6 | 6 | six | 5 | -5 | 2 | 4 + 6 | 6 | six | 5 | -5 | 5 | -5 + 6 | 6 | six | 5 | -5 | 5 | -5 + 6 | 6 | six | 5 | -5 | 0 | + 6 | 6 | six | 5 | -5 | | + 6 | 6 | six | 5 | -5 | | 0 + 7 | 7 | seven | 5 | -5 | 1 | -1 + 7 | 7 | seven | 5 | -5 | 2 | 2 + 7 | 7 | seven | 5 | -5 | 3 | -3 + 7 | 7 | seven | 5 | -5 | 2 | 4 + 7 | 7 | seven | 5 | -5 | 5 | -5 + 7 | 7 | seven | 5 | -5 | 5 | -5 + 7 | 7 | seven | 5 | -5 | 0 | + 7 | 7 | seven | 5 | -5 | | + 7 | 7 | seven | 5 | -5 | | 0 + 8 | 8 | eight | 5 | -5 | 1 | -1 + 8 | 8 | eight | 5 | -5 | 2 | 2 + 8 | 8 | eight | 5 | -5 | 3 | -3 + 8 | 8 | eight | 5 | -5 | 2 | 4 + 8 | 8 | eight | 5 | -5 | 5 | -5 + 8 | 8 | eight | 5 | -5 | 5 | -5 + 8 | 8 | eight | 5 | -5 | 0 | + 8 | 8 | eight | 5 | -5 | | + 8 | 8 | eight | 5 | -5 | | 0 + 0 | | zero | 5 | -5 | 1 | -1 + 0 | | zero | 5 | -5 | 2 | 2 + 0 | | zero | 5 | -5 | 3 | -3 + 0 | | zero | 5 | -5 | 2 | 4 + 0 | | zero | 5 | -5 | 5 | -5 + 0 | | zero | 5 | -5 | 5 | -5 + 0 | | zero | 5 | -5 | 0 | + 0 | | zero | 5 | -5 | | + 0 | | zero | 5 | -5 | | 0 + | | null | 5 | -5 | 1 | -1 + | | null | 5 | -5 | 2 | 2 + | | null | 5 | -5 | 3 | -3 + | | null | 5 | -5 | 2 | 4 + | | null | 5 | -5 | 5 | -5 + | | null | 5 | -5 | 5 | -5 + | | null | 5 | -5 | 0 | + | | null | 5 | -5 | | + | | null | 5 | -5 | | 0 + | 0 | zero | 5 | -5 | 1 
| -1 + | 0 | zero | 5 | -5 | 2 | 2 + | 0 | zero | 5 | -5 | 3 | -3 + | 0 | zero | 5 | -5 | 2 | 4 + | 0 | zero | 5 | -5 | 5 | -5 + | 0 | zero | 5 | -5 | 5 | -5 + | 0 | zero | 5 | -5 | 0 | + | 0 | zero | 5 | -5 | | + | 0 | zero | 5 | -5 | | 0 + 1 | 4 | one | 0 | | 1 | -1 + 1 | 4 | one | 0 | | 2 | 2 + 1 | 4 | one | 0 | | 3 | -3 + 1 | 4 | one | 0 | | 2 | 4 + 1 | 4 | one | 0 | | 5 | -5 + 1 | 4 | one | 0 | | 5 | -5 + 1 | 4 | one | 0 | | 0 | + 1 | 4 | one | 0 | | | + 1 | 4 | one | 0 | | | 0 + 2 | 3 | two | 0 | | 1 | -1 + 2 | 3 | two | 0 | | 2 | 2 + 2 | 3 | two | 0 | | 3 | -3 + 2 | 3 | two | 0 | | 2 | 4 + 2 | 3 | two | 0 | | 5 | -5 + 2 | 3 | two | 0 | | 5 | -5 + 2 | 3 | two | 0 | | 0 | + 2 | 3 | two | 0 | | | + 2 | 3 | two | 0 | | | 0 + 3 | 2 | three | 0 | | 1 | -1 + 3 | 2 | three | 0 | | 2 | 2 + 3 | 2 | three | 0 | | 3 | -3 + 3 | 2 | three | 0 | | 2 | 4 + 3 | 2 | three | 0 | | 5 | -5 + 3 | 2 | three | 0 | | 5 | -5 + 3 | 2 | three | 0 | | 0 | + 3 | 2 | three | 0 | | | + 3 | 2 | three | 0 | | | 0 + 4 | 1 | four | 0 | | 1 | -1 + 4 | 1 | four | 0 | | 2 | 2 + 4 | 1 | four | 0 | | 3 | -3 + 4 | 1 | four | 0 | | 2 | 4 + 4 | 1 | four | 0 | | 5 | -5 + 4 | 1 | four | 0 | | 5 | -5 + 4 | 1 | four | 0 | | 0 | + 4 | 1 | four | 0 | | | + 4 | 1 | four | 0 | | | 0 + 5 | 0 | five | 0 | | 1 | -1 + 5 | 0 | five | 0 | | 2 | 2 + 5 | 0 | five | 0 | | 3 | -3 + 5 | 0 | five | 0 | | 2 | 4 + 5 | 0 | five | 0 | | 5 | -5 + 5 | 0 | five | 0 | | 5 | -5 + 5 | 0 | five | 0 | | 0 | + 5 | 0 | five | 0 | | | + 5 | 0 | five | 0 | | | 0 + 6 | 6 | six | 0 | | 1 | -1 + 6 | 6 | six | 0 | | 2 | 2 + 6 | 6 | six | 0 | | 3 | -3 + 6 | 6 | six | 0 | | 2 | 4 + 6 | 6 | six | 0 | | 5 | -5 + 6 | 6 | six | 0 | | 5 | -5 + 6 | 6 | six | 0 | | 0 | + 6 | 6 | six | 0 | | | + 6 | 6 | six | 0 | | | 0 + 7 | 7 | seven | 0 | | 1 | -1 + 7 | 7 | seven | 0 | | 2 | 2 + 7 | 7 | seven | 0 | | 3 | -3 + 7 | 7 | seven | 0 | | 2 | 4 + 7 | 7 | seven | 0 | | 5 | -5 + 7 | 7 | seven | 0 | | 5 | -5 + 7 | 7 | seven | 0 | | 0 | + 7 | 7 | seven | 0 | | | + 7 | 7 | seven | 0 | | | 0 + 8 | 8 | eight | 0 | | 1 | -1 + 8 | 8 | eight | 0 | | 2 | 2 + 8 | 8 | eight | 0 | | 3 | -3 + 8 | 8 | eight | 0 | | 2 | 4 + 8 | 8 | eight | 0 | | 5 | -5 + 8 | 8 | eight | 0 | | 5 | -5 + 8 | 8 | eight | 0 | | 0 | + 8 | 8 | eight | 0 | | | + 8 | 8 | eight | 0 | | | 0 + 0 | | zero | 0 | | 1 | -1 + 0 | | zero | 0 | | 2 | 2 + 0 | | zero | 0 | | 3 | -3 + 0 | | zero | 0 | | 2 | 4 + 0 | | zero | 0 | | 5 | -5 + 0 | | zero | 0 | | 5 | -5 + 0 | | zero | 0 | | 0 | + 0 | | zero | 0 | | | + 0 | | zero | 0 | | | 0 + | | null | 0 | | 1 | -1 + | | null | 0 | | 2 | 2 + | | null | 0 | | 3 | -3 + | | null | 0 | | 2 | 4 + | | null | 0 | | 5 | -5 + | | null | 0 | | 5 | -5 + | | null | 0 | | 0 | + | | null | 0 | | | + | | null | 0 | | | 0 + | 0 | zero | 0 | | 1 | -1 + | 0 | zero | 0 | | 2 | 2 + | 0 | zero | 0 | | 3 | -3 + | 0 | zero | 0 | | 2 | 4 + | 0 | zero | 0 | | 5 | -5 + | 0 | zero | 0 | | 5 | -5 + | 0 | zero | 0 | | 0 | + | 0 | zero | 0 | | | + | 0 | zero | 0 | | | 0 + 1 | 4 | one | | | 1 | -1 + 1 | 4 | one | | | 2 | 2 + 1 | 4 | one | | | 3 | -3 + 1 | 4 | one | | | 2 | 4 + 1 | 4 | one | | | 5 | -5 + 1 | 4 | one | | | 5 | -5 + 1 | 4 | one | | | 0 | + 1 | 4 | one | | | | + 1 | 4 | one | | | | 0 + 2 | 3 | two | | | 1 | -1 + 2 | 3 | two | | | 2 | 2 + 2 | 3 | two | | | 3 | -3 + 2 | 3 | two | | | 2 | 4 + 2 | 3 | two | | | 5 | -5 + 2 | 3 | two | | | 5 | -5 + 2 | 3 | two | | | 0 | + 2 | 3 | two | | | | + 2 | 3 | two | | | | 0 + 3 | 2 | three | | | 1 | -1 + 3 | 2 | three | | | 2 | 2 + 3 | 2 | three | | | 3 | -3 + 3 | 2 | 
three | | | 2 | 4 + 3 | 2 | three | | | 5 | -5 + 3 | 2 | three | | | 5 | -5 + 3 | 2 | three | | | 0 | + 3 | 2 | three | | | | + 3 | 2 | three | | | | 0 + 4 | 1 | four | | | 1 | -1 + 4 | 1 | four | | | 2 | 2 + 4 | 1 | four | | | 3 | -3 + 4 | 1 | four | | | 2 | 4 + 4 | 1 | four | | | 5 | -5 + 4 | 1 | four | | | 5 | -5 + 4 | 1 | four | | | 0 | + 4 | 1 | four | | | | + 4 | 1 | four | | | | 0 + 5 | 0 | five | | | 1 | -1 + 5 | 0 | five | | | 2 | 2 + 5 | 0 | five | | | 3 | -3 + 5 | 0 | five | | | 2 | 4 + 5 | 0 | five | | | 5 | -5 + 5 | 0 | five | | | 5 | -5 + 5 | 0 | five | | | 0 | + 5 | 0 | five | | | | + 5 | 0 | five | | | | 0 + 6 | 6 | six | | | 1 | -1 + 6 | 6 | six | | | 2 | 2 + 6 | 6 | six | | | 3 | -3 + 6 | 6 | six | | | 2 | 4 + 6 | 6 | six | | | 5 | -5 + 6 | 6 | six | | | 5 | -5 + 6 | 6 | six | | | 0 | + 6 | 6 | six | | | | + 6 | 6 | six | | | | 0 + 7 | 7 | seven | | | 1 | -1 + 7 | 7 | seven | | | 2 | 2 + 7 | 7 | seven | | | 3 | -3 + 7 | 7 | seven | | | 2 | 4 + 7 | 7 | seven | | | 5 | -5 + 7 | 7 | seven | | | 5 | -5 + 7 | 7 | seven | | | 0 | + 7 | 7 | seven | | | | + 7 | 7 | seven | | | | 0 + 8 | 8 | eight | | | 1 | -1 + 8 | 8 | eight | | | 2 | 2 + 8 | 8 | eight | | | 3 | -3 + 8 | 8 | eight | | | 2 | 4 + 8 | 8 | eight | | | 5 | -5 + 8 | 8 | eight | | | 5 | -5 + 8 | 8 | eight | | | 0 | + 8 | 8 | eight | | | | + 8 | 8 | eight | | | | 0 + 0 | | zero | | | 1 | -1 + 0 | | zero | | | 2 | 2 + 0 | | zero | | | 3 | -3 + 0 | | zero | | | 2 | 4 + 0 | | zero | | | 5 | -5 + 0 | | zero | | | 5 | -5 + 0 | | zero | | | 0 | + 0 | | zero | | | | + 0 | | zero | | | | 0 + | | null | | | 1 | -1 + | | null | | | 2 | 2 + | | null | | | 3 | -3 + | | null | | | 2 | 4 + | | null | | | 5 | -5 + | | null | | | 5 | -5 + | | null | | | 0 | + | | null | | | | + | | null | | | | 0 + | 0 | zero | | | 1 | -1 + | 0 | zero | | | 2 | 2 + | 0 | zero | | | 3 | -3 + | 0 | zero | | | 2 | 4 + | 0 | zero | | | 5 | -5 + | 0 | zero | | | 5 | -5 + | 0 | zero | | | 0 | + | 0 | zero | | | | + | 0 | zero | | | | 0 + 1 | 4 | one | | 0 | 1 | -1 + 1 | 4 | one | | 0 | 2 | 2 + 1 | 4 | one | | 0 | 3 | -3 + 1 | 4 | one | | 0 | 2 | 4 + 1 | 4 | one | | 0 | 5 | -5 + 1 | 4 | one | | 0 | 5 | -5 + 1 | 4 | one | | 0 | 0 | + 1 | 4 | one | | 0 | | + 1 | 4 | one | | 0 | | 0 + 2 | 3 | two | | 0 | 1 | -1 + 2 | 3 | two | | 0 | 2 | 2 + 2 | 3 | two | | 0 | 3 | -3 + 2 | 3 | two | | 0 | 2 | 4 + 2 | 3 | two | | 0 | 5 | -5 + 2 | 3 | two | | 0 | 5 | -5 + 2 | 3 | two | | 0 | 0 | + 2 | 3 | two | | 0 | | + 2 | 3 | two | | 0 | | 0 + 3 | 2 | three | | 0 | 1 | -1 + 3 | 2 | three | | 0 | 2 | 2 + 3 | 2 | three | | 0 | 3 | -3 + 3 | 2 | three | | 0 | 2 | 4 + 3 | 2 | three | | 0 | 5 | -5 + 3 | 2 | three | | 0 | 5 | -5 + 3 | 2 | three | | 0 | 0 | + 3 | 2 | three | | 0 | | + 3 | 2 | three | | 0 | | 0 + 4 | 1 | four | | 0 | 1 | -1 + 4 | 1 | four | | 0 | 2 | 2 + 4 | 1 | four | | 0 | 3 | -3 + 4 | 1 | four | | 0 | 2 | 4 + 4 | 1 | four | | 0 | 5 | -5 + 4 | 1 | four | | 0 | 5 | -5 + 4 | 1 | four | | 0 | 0 | + 4 | 1 | four | | 0 | | + 4 | 1 | four | | 0 | | 0 + 5 | 0 | five | | 0 | 1 | -1 + 5 | 0 | five | | 0 | 2 | 2 + 5 | 0 | five | | 0 | 3 | -3 + 5 | 0 | five | | 0 | 2 | 4 + 5 | 0 | five | | 0 | 5 | -5 + 5 | 0 | five | | 0 | 5 | -5 + 5 | 0 | five | | 0 | 0 | + 5 | 0 | five | | 0 | | + 5 | 0 | five | | 0 | | 0 + 6 | 6 | six | | 0 | 1 | -1 + 6 | 6 | six | | 0 | 2 | 2 + 6 | 6 | six | | 0 | 3 | -3 + 6 | 6 | six | | 0 | 2 | 4 + 6 | 6 | six | | 0 | 5 | -5 + 6 | 6 | six | | 0 | 5 | -5 + 6 | 6 | six | | 0 | 0 | + 6 | 6 | six | | 0 | | + 6 | 6 | six | | 0 | | 0 + 7 | 7 | seven | | 0 | 1 | 
-1 + 7 | 7 | seven | | 0 | 2 | 2 + 7 | 7 | seven | | 0 | 3 | -3 + 7 | 7 | seven | | 0 | 2 | 4 + 7 | 7 | seven | | 0 | 5 | -5 + 7 | 7 | seven | | 0 | 5 | -5 + 7 | 7 | seven | | 0 | 0 | + 7 | 7 | seven | | 0 | | + 7 | 7 | seven | | 0 | | 0 + 8 | 8 | eight | | 0 | 1 | -1 + 8 | 8 | eight | | 0 | 2 | 2 + 8 | 8 | eight | | 0 | 3 | -3 + 8 | 8 | eight | | 0 | 2 | 4 + 8 | 8 | eight | | 0 | 5 | -5 + 8 | 8 | eight | | 0 | 5 | -5 + 8 | 8 | eight | | 0 | 0 | + 8 | 8 | eight | | 0 | | + 8 | 8 | eight | | 0 | | 0 + 0 | | zero | | 0 | 1 | -1 + 0 | | zero | | 0 | 2 | 2 + 0 | | zero | | 0 | 3 | -3 + 0 | | zero | | 0 | 2 | 4 + 0 | | zero | | 0 | 5 | -5 + 0 | | zero | | 0 | 5 | -5 + 0 | | zero | | 0 | 0 | + 0 | | zero | | 0 | | + 0 | | zero | | 0 | | 0 + | | null | | 0 | 1 | -1 + | | null | | 0 | 2 | 2 + | | null | | 0 | 3 | -3 + | | null | | 0 | 2 | 4 + | | null | | 0 | 5 | -5 + | | null | | 0 | 5 | -5 + | | null | | 0 | 0 | + | | null | | 0 | | + | | null | | 0 | | 0 + | 0 | zero | | 0 | 1 | -1 + | 0 | zero | | 0 | 2 | 2 + | 0 | zero | | 0 | 3 | -3 + | 0 | zero | | 0 | 2 | 4 + | 0 | zero | | 0 | 5 | -5 + | 0 | zero | | 0 | 5 | -5 + | 0 | zero | | 0 | 0 | + | 0 | zero | | 0 | | + | 0 | zero | | 0 | | 0 +(891 rows) + +-- +-- +-- Inner joins (equi-joins) +-- +-- +-- +-- Inner joins (equi-joins) with USING clause +-- The USING syntax changes the shape of the resulting table +-- by including a column in the USING clause only once in the result. +-- +-- Inner equi-join on specified column +SELECT * + FROM J1_TBL INNER JOIN J2_TBL USING (i); + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 +(7 rows) + +-- Same as above, slightly different syntax +SELECT * + FROM J1_TBL JOIN J2_TBL USING (i); + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 +(7 rows) + +SELECT * + FROM J1_TBL t1 (a, b, c) JOIN J2_TBL t2 (a, d) USING (a) + ORDER BY a, d; + a | b | c | d +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 +(7 rows) + +SELECT * + FROM J1_TBL t1 (a, b, c) JOIN J2_TBL t2 (a, b) USING (b) + ORDER BY b, t1.a; + b | a | c | a +---+---+-------+--- + 0 | 5 | five | + 0 | | zero | + 2 | 3 | three | 2 + 4 | 1 | one | 2 +(4 rows) + +-- test join using aliases +SELECT * FROM J1_TBL JOIN J2_TBL USING (i) WHERE J1_TBL.t = 'one'; -- ok + i | j | t | k +---+---+-----+---- + 1 | 4 | one | -1 +(1 row) + +SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one'; -- ok + i | j | t | k +---+---+-----+---- + 1 | 4 | one | -1 +(1 row) + +SELECT * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t = 'one'; -- error +ERROR: invalid reference to FROM-clause entry for table "j1_tbl" +LINE 1: ... * FROM (J1_TBL JOIN J2_TBL USING (i)) AS x WHERE J1_TBL.t =... + ^ +DETAIL: There is an entry for table "j1_tbl", but it cannot be referenced from this part of the query. +SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.i = 1; -- ok + i | j | t | k +---+---+-----+---- + 1 | 4 | one | -1 +(1 row) + +SELECT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one'; -- error +ERROR: column x.t does not exist +LINE 1: ...CT * FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE x.t = 'one... 
+ ^ +SELECT * FROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1; -- error (XXX could use better hint) +ERROR: missing FROM-clause entry for table "x" +LINE 1: ...ROM (J1_TBL JOIN J2_TBL USING (i) AS x) AS xx WHERE x.i = 1; + ^ +SELECT * FROM J1_TBL a1 JOIN J2_TBL a2 USING (i) AS a1; -- error +ERROR: table name "a1" specified more than once +SELECT x.* FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one'; + i +--- + 1 +(1 row) + +SELECT ROW(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one'; + row +----- + (1) +(1 row) + +SELECT row_to_json(x.*) FROM J1_TBL JOIN J2_TBL USING (i) AS x WHERE J1_TBL.t = 'one'; + row_to_json +------------- + {"i":1} +(1 row) + +-- +-- NATURAL JOIN +-- Inner equi-join on all columns with the same name +-- +SELECT * + FROM J1_TBL NATURAL JOIN J2_TBL; + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 +(7 rows) + +SELECT * + FROM J1_TBL t1 (a, b, c) NATURAL JOIN J2_TBL t2 (a, d); + a | b | c | d +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 +(7 rows) + +SELECT * + FROM J1_TBL t1 (a, b, c) NATURAL JOIN J2_TBL t2 (d, a); + a | b | c | d +---+---+------+--- + 0 | | zero | + 2 | 3 | two | 2 + 4 | 1 | four | 2 +(3 rows) + +-- mismatch number of columns +-- currently, Postgres will fill in with underlying names +SELECT * + FROM J1_TBL t1 (a, b) NATURAL JOIN J2_TBL t2 (a); + a | b | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 +(7 rows) + +-- +-- Inner joins (equi-joins) +-- +SELECT * + FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i = J2_TBL.i); + i | j | t | i | k +---+---+-------+---+---- + 0 | | zero | 0 | + 1 | 4 | one | 1 | -1 + 2 | 3 | two | 2 | 2 + 2 | 3 | two | 2 | 4 + 3 | 2 | three | 3 | -3 + 5 | 0 | five | 5 | -5 + 5 | 0 | five | 5 | -5 +(7 rows) + +SELECT * + FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i = J2_TBL.k); + i | j | t | i | k +---+---+------+---+--- + 0 | | zero | | 0 + 2 | 3 | two | 2 | 2 + 4 | 1 | four | 2 | 4 +(3 rows) + +-- +-- Non-equi-joins +-- +SELECT * + FROM J1_TBL JOIN J2_TBL ON (J1_TBL.i <= J2_TBL.k); + i | j | t | i | k +---+---+-------+---+--- + 1 | 4 | one | 2 | 2 + 2 | 3 | two | 2 | 2 + 0 | | zero | 2 | 2 + 1 | 4 | one | 2 | 4 + 2 | 3 | two | 2 | 4 + 3 | 2 | three | 2 | 4 + 4 | 1 | four | 2 | 4 + 0 | | zero | 2 | 4 + 0 | | zero | | 0 +(9 rows) + +-- +-- Outer joins +-- Note that OUTER is a noise word +-- +SELECT * + FROM J1_TBL LEFT OUTER JOIN J2_TBL USING (i) + ORDER BY i, k, t; + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 4 | 1 | four | + 5 | 0 | five | -5 + 5 | 0 | five | -5 + 6 | 6 | six | + 7 | 7 | seven | + 8 | 8 | eight | + | | null | + | 0 | zero | +(13 rows) + +SELECT * + FROM J1_TBL LEFT JOIN J2_TBL USING (i) + ORDER BY i, k, t; + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 4 | 1 | four | + 5 | 0 | five | -5 + 5 | 0 | five | -5 + 6 | 6 | six | + 7 | 7 | seven | + 8 | 8 | eight | + | | null | + | 0 | zero | +(13 rows) + +SELECT * + FROM J1_TBL RIGHT OUTER JOIN J2_TBL USING (i); + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 
| 0 | five | -5 + 5 | 0 | five | -5 + | | | + | | | 0 +(9 rows) + +SELECT * + FROM J1_TBL RIGHT JOIN J2_TBL USING (i); + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 5 | 0 | five | -5 + 5 | 0 | five | -5 + | | | + | | | 0 +(9 rows) + +SELECT * + FROM J1_TBL FULL OUTER JOIN J2_TBL USING (i) + ORDER BY i, k, t; + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 4 | 1 | four | + 5 | 0 | five | -5 + 5 | 0 | five | -5 + 6 | 6 | six | + 7 | 7 | seven | + 8 | 8 | eight | + | | | 0 + | | null | + | 0 | zero | + | | | +(15 rows) + +SELECT * + FROM J1_TBL FULL JOIN J2_TBL USING (i) + ORDER BY i, k, t; + i | j | t | k +---+---+-------+---- + 0 | | zero | + 1 | 4 | one | -1 + 2 | 3 | two | 2 + 2 | 3 | two | 4 + 3 | 2 | three | -3 + 4 | 1 | four | + 5 | 0 | five | -5 + 5 | 0 | five | -5 + 6 | 6 | six | + 7 | 7 | seven | + 8 | 8 | eight | + | | | 0 + | | null | + | 0 | zero | + | | | +(15 rows) + +SELECT * + FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (k = 1); + i | j | t | k +---+---+---+--- +(0 rows) + +SELECT * + FROM J1_TBL LEFT JOIN J2_TBL USING (i) WHERE (i = 1); + i | j | t | k +---+---+-----+---- + 1 | 4 | one | -1 +(1 row) + +-- +-- semijoin selectivity for <> +-- +explain (costs off) +select * from int4_tbl i4, tenk1 a +where exists(select * from tenk1 b + where a.twothousand = b.twothousand and a.fivethous <> b.fivethous) + and i4.f1 = a.tenthous; + QUERY PLAN +---------------------------------------------- + Hash Semi Join + Hash Cond: (a.twothousand = b.twothousand) + Join Filter: (a.fivethous <> b.fivethous) + -> Hash Join + Hash Cond: (a.tenthous = i4.f1) + -> Seq Scan on tenk1 a + -> Hash + -> Seq Scan on int4_tbl i4 + -> Hash + -> Seq Scan on tenk1 b +(10 rows) + +-- +-- More complicated constructs +-- +-- +-- Multiway full join +-- +CREATE TABLE t1 (name TEXT, n INTEGER); +CREATE TABLE t2 (name TEXT, n INTEGER); +CREATE TABLE t3 (name TEXT, n INTEGER); +INSERT INTO t1 VALUES ( 'bb', 11 ); +INSERT INTO t2 VALUES ( 'bb', 12 ); +INSERT INTO t2 VALUES ( 'cc', 22 ); +INSERT INTO t2 VALUES ( 'ee', 42 ); +INSERT INTO t3 VALUES ( 'bb', 13 ); +INSERT INTO t3 VALUES ( 'cc', 23 ); +INSERT INTO t3 VALUES ( 'dd', 33 ); +SELECT * FROM t1 FULL JOIN t2 USING (name) FULL JOIN t3 USING (name); + name | n | n | n +------+----+----+---- + bb | 11 | 12 | 13 + cc | | 22 | 23 + dd | | | 33 + ee | | 42 | +(4 rows) + +-- +-- Test interactions of join syntax and subqueries +-- +-- Basic cases (we expect planner to pull up the subquery here) +SELECT * FROM +(SELECT * FROM t2) as s2 +INNER JOIN +(SELECT * FROM t3) s3 +USING (name); + name | n | n +------+----+---- + bb | 12 | 13 + cc | 22 | 23 +(2 rows) + +SELECT * FROM +(SELECT * FROM t2) as s2 +LEFT JOIN +(SELECT * FROM t3) s3 +USING (name); + name | n | n +------+----+---- + bb | 12 | 13 + cc | 22 | 23 + ee | 42 | +(3 rows) + +SELECT * FROM +(SELECT * FROM t2) as s2 +FULL JOIN +(SELECT * FROM t3) s3 +USING (name); + name | n | n +------+----+---- + bb | 12 | 13 + cc | 22 | 23 + dd | | 33 + ee | 42 | +(4 rows) + +-- Cases with non-nullable expressions in subquery results; +-- make sure these go to null as expected +SELECT * FROM +(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 +NATURAL INNER JOIN +(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; + name | s2_n | s2_2 | s3_n | s3_2 +------+------+------+------+------ + bb | 12 | 2 | 13 | 3 + cc | 22 | 2 | 23 | 3 +(2 rows) + +SELECT * FROM +(SELECT 
name, n as s2_n, 2 as s2_2 FROM t2) as s2 +NATURAL LEFT JOIN +(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; + name | s2_n | s2_2 | s3_n | s3_2 +------+------+------+------+------ + bb | 12 | 2 | 13 | 3 + cc | 22 | 2 | 23 | 3 + ee | 42 | 2 | | +(3 rows) + +SELECT * FROM +(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 +NATURAL FULL JOIN +(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; + name | s2_n | s2_2 | s3_n | s3_2 +------+------+------+------+------ + bb | 12 | 2 | 13 | 3 + cc | 22 | 2 | 23 | 3 + dd | | | 33 | 3 + ee | 42 | 2 | | +(4 rows) + +SELECT * FROM +(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1 +NATURAL INNER JOIN +(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 +NATURAL INNER JOIN +(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; + name | s1_n | s1_1 | s2_n | s2_2 | s3_n | s3_2 +------+------+------+------+------+------+------ + bb | 11 | 1 | 12 | 2 | 13 | 3 +(1 row) + +SELECT * FROM +(SELECT name, n as s1_n, 1 as s1_1 FROM t1) as s1 +NATURAL FULL JOIN +(SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 +NATURAL FULL JOIN +(SELECT name, n as s3_n, 3 as s3_2 FROM t3) s3; + name | s1_n | s1_1 | s2_n | s2_2 | s3_n | s3_2 +------+------+------+------+------+------+------ + bb | 11 | 1 | 12 | 2 | 13 | 3 + cc | | | 22 | 2 | 23 | 3 + dd | | | | | 33 | 3 + ee | | | 42 | 2 | | +(4 rows) + +SELECT * FROM +(SELECT name, n as s1_n FROM t1) as s1 +NATURAL FULL JOIN + (SELECT * FROM + (SELECT name, n as s2_n FROM t2) as s2 + NATURAL FULL JOIN + (SELECT name, n as s3_n FROM t3) as s3 + ) ss2; + name | s1_n | s2_n | s3_n +------+------+------+------ + bb | 11 | 12 | 13 + cc | | 22 | 23 + dd | | | 33 + ee | | 42 | +(4 rows) + +SELECT * FROM +(SELECT name, n as s1_n FROM t1) as s1 +NATURAL FULL JOIN + (SELECT * FROM + (SELECT name, n as s2_n, 2 as s2_2 FROM t2) as s2 + NATURAL FULL JOIN + (SELECT name, n as s3_n FROM t3) as s3 + ) ss2; + name | s1_n | s2_n | s2_2 | s3_n +------+------+------+------+------ + bb | 11 | 12 | 2 | 13 + cc | | 22 | 2 | 23 + dd | | | | 33 + ee | | 42 | 2 | +(4 rows) + +-- Constants as join keys can also be problematic +SELECT * FROM + (SELECT name, n as s1_n FROM t1) as s1 +FULL JOIN + (SELECT name, 2 as s2_n FROM t2) as s2 +ON (s1_n = s2_n); + name | s1_n | name | s2_n +------+------+------+------ + | | bb | 2 + | | cc | 2 + | | ee | 2 + bb | 11 | | +(4 rows) + +-- Test for propagation of nullability constraints into sub-joins +create temp table x (x1 int, x2 int); +insert into x values (1,11); +insert into x values (2,22); +insert into x values (3,null); +insert into x values (4,44); +insert into x values (5,null); +create temp table y (y1 int, y2 int); +insert into y values (1,111); +insert into y values (2,222); +insert into y values (3,333); +insert into y values (4,null); +select * from x; + x1 | x2 +----+---- + 1 | 11 + 2 | 22 + 3 | + 4 | 44 + 5 | +(5 rows) + +select * from y; + y1 | y2 +----+----- + 1 | 111 + 2 | 222 + 3 | 333 + 4 | +(4 rows) + +select * from x left join y on (x1 = y1 and x2 is not null); + x1 | x2 | y1 | y2 +----+----+----+----- + 1 | 11 | 1 | 111 + 2 | 22 | 2 | 222 + 3 | | | + 4 | 44 | 4 | + 5 | | | +(5 rows) + +select * from x left join y on (x1 = y1 and y2 is not null); + x1 | x2 | y1 | y2 +----+----+----+----- + 1 | 11 | 1 | 111 + 2 | 22 | 2 | 222 + 3 | | 3 | 333 + 4 | 44 | | + 5 | | | +(5 rows) + +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 3 | | 3 | 333 | 3 | + 4 | 44 
| 4 | | 4 | 44 + 5 | | | | 5 | +(5 rows) + +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1 and x2 is not null); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 3 | | 3 | 333 | | + 4 | 44 | 4 | | 4 | 44 + 5 | | | | | +(5 rows) + +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1 and y2 is not null); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 3 | | 3 | 333 | 3 | + 4 | 44 | 4 | | | + 5 | | | | | +(5 rows) + +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1 and xx2 is not null); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 3 | | 3 | 333 | | + 4 | 44 | 4 | | 4 | 44 + 5 | | | | | +(5 rows) + +-- these should NOT give the same answers as above +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1) where (x2 is not null); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 4 | 44 | 4 | | 4 | 44 +(3 rows) + +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1) where (y2 is not null); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 3 | | 3 | 333 | 3 | +(3 rows) + +select * from (x left join y on (x1 = y1)) left join x xx(xx1,xx2) +on (x1 = xx1) where (xx2 is not null); + x1 | x2 | y1 | y2 | xx1 | xx2 +----+----+----+-----+-----+----- + 1 | 11 | 1 | 111 | 1 | 11 + 2 | 22 | 2 | 222 | 2 | 22 + 4 | 44 | 4 | | 4 | 44 +(3 rows) + +-- +-- regression test: check for bug with propagation of implied equality +-- to outside an IN +-- +select count(*) from tenk1 a where unique1 in + (select unique1 from tenk1 b join tenk1 c using (unique1) + where b.unique2 = 42); + count +------- + 1 +(1 row) + +-- +-- regression test: check for failure to generate a plan with multiple +-- degenerate IN clauses +-- +select count(*) from tenk1 x where + x.unique1 in (select a.f1 from int4_tbl a,float8_tbl b where a.f1=b.f1) and + x.unique1 = 0 and + x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1); + count +------- + 1 +(1 row) + +-- try that with GEQO too +begin; +set geqo = on; +set geqo_threshold = 2; +select count(*) from tenk1 x where + x.unique1 in (select a.f1 from int4_tbl a,float8_tbl b where a.f1=b.f1) and + x.unique1 = 0 and + x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1); + count +------- + 1 +(1 row) + +rollback; +-- +-- regression test: be sure we cope with proven-dummy append rels +-- +explain (costs off) +select aa, bb, unique1, unique1 + from tenk1 right join b_star on aa = unique1 + where bb < bb and bb is null; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +select aa, bb, unique1, unique1 + from tenk1 right join b_star on aa = unique1 + where bb < bb and bb is null; + aa | bb | unique1 | unique1 +----+----+---------+--------- +(0 rows) + +-- +-- regression test: check handling of empty-FROM subquery underneath outer join +-- +explain (costs off) +select * from int8_tbl i1 left join (int8_tbl i2 join + (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 +order by 1, 2; + QUERY PLAN +------------------------------------------- + Sort + Sort Key: i1.q1, i1.q2 + -> Hash Left Join + Hash 
Cond: (i1.q2 = i2.q2) + -> Seq Scan on int8_tbl i1 + -> Hash + -> Seq Scan on int8_tbl i2 + Filter: (q1 = 123) +(8 rows) + +select * from int8_tbl i1 left join (int8_tbl i2 join + (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 +order by 1, 2; + q1 | q2 | q1 | q2 | x +------------------+-------------------+-----+------------------+----- + 123 | 456 | 123 | 456 | 123 + 123 | 4567890123456789 | 123 | 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 | | | + 4567890123456789 | 123 | | | + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 123 +(5 rows) + +-- +-- regression test: check a case where join_clause_is_movable_into() +-- used to give an imprecise result, causing an assertion failure +-- +select count(*) +from + (select t3.tenthous as x1, coalesce(t1.stringu1, t2.stringu1) as x2 + from tenk1 t1 + left join tenk1 t2 on t1.unique1 = t2.unique1 + join tenk1 t3 on t1.unique2 = t3.unique2) ss, + tenk1 t4, + tenk1 t5 +where t4.thousand = t5.unique1 and ss.x1 = t4.tenthous and ss.x2 = t5.stringu1; + count +------- + 1000 +(1 row) + +-- +-- regression test: check a case where we formerly missed including an EC +-- enforcement clause because it was expected to be handled at scan level +-- +explain (costs off) +select a.f1, b.f1, t.thousand, t.tenthous from + tenk1 t, + (select sum(f1)+1 as f1 from int4_tbl i4a) a, + (select sum(f1) as f1 from int4_tbl i4b) b +where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: ((sum(i4b.f1)) = ((sum(i4a.f1) + 1))) + -> Aggregate + -> Seq Scan on int4_tbl i4a + -> Aggregate + -> Seq Scan on int4_tbl i4b + -> Index Only Scan using tenk1_thous_tenthous on tenk1 t + Index Cond: ((thousand = (sum(i4b.f1))) AND (tenthous = ((((sum(i4a.f1) + 1)) + (sum(i4b.f1))) + 999))) +(9 rows) + +select a.f1, b.f1, t.thousand, t.tenthous from + tenk1 t, + (select sum(f1)+1 as f1 from int4_tbl i4a) a, + (select sum(f1) as f1 from int4_tbl i4b) b +where b.f1 = t.thousand and a.f1 = b.f1 and (a.f1+b.f1+999) = t.tenthous; + f1 | f1 | thousand | tenthous +----+----+----------+---------- +(0 rows) + +-- +-- checks for correct handling of quals in multiway outer joins +-- +explain (costs off) +select t1.f1 +from int4_tbl t1, int4_tbl t2 + left join int4_tbl t3 on t3.f1 > 0 + left join int4_tbl t4 on t3.f1 > 1 +where t4.f1 is null; + QUERY PLAN +------------------------------------------------------- + Nested Loop + -> Nested Loop Left Join + Filter: (t4.f1 IS NULL) + -> Seq Scan on int4_tbl t2 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t3.f1 > 1) + -> Seq Scan on int4_tbl t3 + Filter: (f1 > 0) + -> Materialize + -> Seq Scan on int4_tbl t4 + -> Seq Scan on int4_tbl t1 +(12 rows) + +select t1.f1 +from int4_tbl t1, int4_tbl t2 + left join int4_tbl t3 on t3.f1 > 0 + left join int4_tbl t4 on t3.f1 > 1 +where t4.f1 is null; + f1 +---- +(0 rows) + +explain (costs off) +select * +from int4_tbl t1 left join int4_tbl t2 on true + left join int4_tbl t3 on t2.f1 > 0 + left join int4_tbl t4 on t3.f1 > 0; + QUERY PLAN +------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int4_tbl t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t3.f1 > 0) + -> Nested Loop Left Join + Join Filter: (t2.f1 > 0) + -> Seq Scan on int4_tbl t2 + -> Materialize + -> Seq Scan on int4_tbl t3 + -> Materialize + -> Seq Scan on 
int4_tbl t4 +(12 rows) + +explain (costs off) +select * from onek t1 + left join onek t2 on t1.unique1 = t2.unique1 + left join onek t3 on t2.unique1 != t3.unique1 + left join onek t4 on t3.unique1 = t4.unique1; + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + Join Filter: (t2.unique1 <> t3.unique1) + -> Hash Left Join + Hash Cond: (t1.unique1 = t2.unique1) + -> Seq Scan on onek t1 + -> Hash + -> Seq Scan on onek t2 + -> Materialize + -> Hash Left Join + Hash Cond: (t3.unique1 = t4.unique1) + -> Seq Scan on onek t3 + -> Hash + -> Seq Scan on onek t4 +(13 rows) + +explain (costs off) +select * from int4_tbl t1 + left join (select now() from int4_tbl t2 + left join int4_tbl t3 on t2.f1 = t3.f1 + left join int4_tbl t4 on t3.f1 = t4.f1) s on true + inner join int4_tbl t5 on true; + QUERY PLAN +------------------------------------------------------------- + Nested Loop + -> Nested Loop Left Join + -> Seq Scan on int4_tbl t1 + -> Materialize + -> Hash Left Join + Hash Cond: (t3.f1 = t4.f1) + -> Hash Left Join + Hash Cond: (t2.f1 = t3.f1) + -> Seq Scan on int4_tbl t2 + -> Hash + -> Seq Scan on int4_tbl t3 + -> Hash + -> Seq Scan on int4_tbl t4 + -> Materialize + -> Seq Scan on int4_tbl t5 +(15 rows) + +explain (costs off) +select * from int4_tbl t1 + left join int4_tbl t2 on true + left join int4_tbl t3 on true + left join int4_tbl t4 on t2.f1 = t3.f1; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + Join Filter: (t2.f1 = t3.f1) + -> Nested Loop Left Join + -> Nested Loop Left Join + -> Seq Scan on int4_tbl t1 + -> Materialize + -> Seq Scan on int4_tbl t2 + -> Materialize + -> Seq Scan on int4_tbl t3 + -> Materialize + -> Seq Scan on int4_tbl t4 +(11 rows) + +explain (costs off) +select * from int4_tbl t1 + left join int4_tbl t2 on true + left join int4_tbl t3 on t2.f1 = t3.f1 + left join int4_tbl t4 on t3.f1 != t4.f1; + QUERY PLAN +------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int4_tbl t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t3.f1 <> t4.f1) + -> Hash Left Join + Hash Cond: (t2.f1 = t3.f1) + -> Seq Scan on int4_tbl t2 + -> Hash + -> Seq Scan on int4_tbl t3 + -> Materialize + -> Seq Scan on int4_tbl t4 +(12 rows) + +explain (costs off) +select * from int4_tbl t1 + left join (int4_tbl t2 left join int4_tbl t3 on t2.f1 > 0) on t2.f1 > 1 + left join int4_tbl t4 on t2.f1 > 2 and t3.f1 > 3 +where t1.f1 = coalesce(t2.f1, 1); + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + Join Filter: ((t2.f1 > 2) AND (t3.f1 > 3)) + -> Nested Loop Left Join + Join Filter: (t2.f1 > 0) + -> Nested Loop Left Join + Filter: (t1.f1 = COALESCE(t2.f1, 1)) + -> Seq Scan on int4_tbl t1 + -> Materialize + -> Seq Scan on int4_tbl t2 + Filter: (f1 > 1) + -> Seq Scan on int4_tbl t3 + -> Materialize + -> Seq Scan on int4_tbl t4 +(13 rows) + +explain (costs off) +select * from int4_tbl t1 + left join ((select t2.f1 from int4_tbl t2 + left join int4_tbl t3 on t2.f1 > 0 + where t3.f1 is null) s + left join tenk1 t4 on s.f1 > 1) + on s.f1 = t1.f1; + QUERY PLAN +------------------------------------------------- + Hash Right Join + Hash Cond: (t2.f1 = t1.f1) + -> Nested Loop Left Join + Join Filter: (t2.f1 > 1) + -> Nested Loop Left Join + Join Filter: (t2.f1 > 0) + Filter: (t3.f1 IS NULL) + -> Seq Scan on int4_tbl t2 + -> Materialize + -> Seq Scan on int4_tbl t3 + -> Seq Scan on tenk1 t4 + -> Hash + -> Seq Scan on int4_tbl t1 +(13 rows) + 
+explain (costs off) +select * from int4_tbl t1 + left join ((select t2.f1 from int4_tbl t2 + left join int4_tbl t3 on t2.f1 > 0 + where t2.f1 <> coalesce(t3.f1, -1)) s + left join tenk1 t4 on s.f1 > 1) + on s.f1 = t1.f1; + QUERY PLAN +----------------------------------------------------------------- + Nested Loop Left Join + Join Filter: (t2.f1 > 1) + -> Hash Right Join + Hash Cond: (t2.f1 = t1.f1) + -> Nested Loop Left Join + Join Filter: (t2.f1 > 0) + Filter: (t2.f1 <> COALESCE(t3.f1, '-1'::integer)) + -> Seq Scan on int4_tbl t2 + -> Materialize + -> Seq Scan on int4_tbl t3 + -> Hash + -> Seq Scan on int4_tbl t1 + -> Materialize + -> Seq Scan on tenk1 t4 +(14 rows) + +explain (costs off) +select * from onek t1 + left join onek t2 on t1.unique1 = t2.unique1 + left join onek t3 on t2.unique1 = t3.unique1 + left join onek t4 on t3.unique1 = t4.unique1 and t2.unique2 = t4.unique2; + QUERY PLAN +------------------------------------------------------------------------ + Hash Left Join + Hash Cond: ((t3.unique1 = t4.unique1) AND (t2.unique2 = t4.unique2)) + -> Hash Left Join + Hash Cond: (t2.unique1 = t3.unique1) + -> Hash Left Join + Hash Cond: (t1.unique1 = t2.unique1) + -> Seq Scan on onek t1 + -> Hash + -> Seq Scan on onek t2 + -> Hash + -> Seq Scan on onek t3 + -> Hash + -> Seq Scan on onek t4 +(13 rows) + +explain (costs off) +select * from int8_tbl t1 left join + (int8_tbl t2 left join int8_tbl t3 full join int8_tbl t4 on false on false) + left join int8_tbl t5 on t2.q1 = t5.q1 +on t2.q2 = 123; + QUERY PLAN +-------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int8_tbl t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t2.q1 = t5.q1) + -> Nested Loop Left Join + Join Filter: false + -> Seq Scan on int8_tbl t2 + Filter: (q2 = 123) + -> Result + One-Time Filter: false + -> Seq Scan on int8_tbl t5 +(12 rows) + +explain (costs off) +select * from int8_tbl t1 + left join int8_tbl t2 on true + left join lateral + (select * from int8_tbl t3 where t3.q1 = t2.q1 offset 0) s + on t2.q1 = 1; + QUERY PLAN +------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int8_tbl t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t2.q1 = 1) + -> Seq Scan on int8_tbl t2 + -> Seq Scan on int8_tbl t3 + Filter: (q1 = t2.q1) +(8 rows) + +explain (costs off) +select * from int8_tbl t1 + left join int8_tbl t2 on true + left join lateral + (select * from generate_series(t2.q1, 100)) s + on t2.q1 = 1; + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int8_tbl t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t2.q1 = 1) + -> Seq Scan on int8_tbl t2 + -> Function Scan on generate_series +(7 rows) + +explain (costs off) +select * from int8_tbl t1 + left join int8_tbl t2 on true + left join lateral + (select t2.q1 from int8_tbl t3) s + on t2.q1 = 1; + QUERY PLAN +------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int8_tbl t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t2.q1 = 1) + -> Seq Scan on int8_tbl t2 + -> Seq Scan on int8_tbl t3 +(7 rows) + +explain (costs off) +select * from onek t1 + left join onek t2 on true + left join lateral + (select * from onek t3 where t3.two = t2.two offset 0) s + on t2.unique1 = 1; + QUERY PLAN +-------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on onek t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t2.unique1 = 1) + -> Seq 
Scan on onek t2 + -> Memoize + Cache Key: t2.two + Cache Mode: binary + -> Seq Scan on onek t3 + Filter: (two = t2.two) +(11 rows) + +-- +-- check a case where we formerly got confused by conflicting sort orders +-- in redundant merge join path keys +-- +explain (costs off) +select * from + j1_tbl full join + (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl + on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k; + QUERY PLAN +----------------------------------------------------------------- + Merge Full Join + Merge Cond: ((j2_tbl.i = j1_tbl.i) AND (j2_tbl.k = j1_tbl.i)) + -> Sort + Sort Key: j2_tbl.i DESC, j2_tbl.k + -> Seq Scan on j2_tbl + -> Sort + Sort Key: j1_tbl.i DESC + -> Seq Scan on j1_tbl +(8 rows) + +select * from + j1_tbl full join + (select * from j2_tbl order by j2_tbl.i desc, j2_tbl.k asc) j2_tbl + on j1_tbl.i = j2_tbl.i and j1_tbl.i = j2_tbl.k; + i | j | t | i | k +---+---+-------+---+---- + | | | | 0 + | | | | + | 0 | zero | | + | | null | | + 8 | 8 | eight | | + 7 | 7 | seven | | + 6 | 6 | six | | + | | | 5 | -5 + | | | 5 | -5 + 5 | 0 | five | | + 4 | 1 | four | | + | | | 3 | -3 + 3 | 2 | three | | + 2 | 3 | two | 2 | 2 + | | | 2 | 4 + | | | 1 | -1 + | | | 0 | + 1 | 4 | one | | + 0 | | zero | | +(19 rows) + +-- +-- a different check for handling of redundant sort keys in merge joins +-- +explain (costs off) +select count(*) from + (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x + left join + (select * from tenk1 y order by y.unique2) y + on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2; + QUERY PLAN +---------------------------------------------------------------------------------- + Aggregate + -> Merge Left Join + Merge Cond: (x.thousand = y.unique2) + Join Filter: ((x.twothousand = y.hundred) AND (x.fivethous = y.unique2)) + -> Sort + Sort Key: x.thousand, x.twothousand, x.fivethous + -> Seq Scan on tenk1 x + -> Materialize + -> Index Scan using tenk1_unique2 on tenk1 y +(9 rows) + +select count(*) from + (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x + left join + (select * from tenk1 y order by y.unique2) y + on x.thousand = y.unique2 and x.twothousand = y.hundred and x.fivethous = y.unique2; + count +------- + 10000 +(1 row) + +set enable_hashjoin = 0; +set enable_nestloop = 0; +set enable_hashagg = 0; +-- +-- Check that we use the pathkeys from a prefix of the group by / order by +-- clause for the join pathkeys when that prefix covers all join quals. We +-- expect this to lead to an incremental sort for the group by / order by. 
+-- +explain (costs off) +select x.thousand, x.twothousand, count(*) +from tenk1 x inner join tenk1 y on x.thousand = y.thousand +group by x.thousand, x.twothousand +order by x.thousand desc, x.twothousand; + QUERY PLAN +---------------------------------------------------------------------------------- + GroupAggregate + Group Key: x.thousand, x.twothousand + -> Incremental Sort + Sort Key: x.thousand DESC, x.twothousand + Presorted Key: x.thousand + -> Merge Join + Merge Cond: (y.thousand = x.thousand) + -> Index Only Scan Backward using tenk1_thous_tenthous on tenk1 y + -> Sort + Sort Key: x.thousand DESC + -> Seq Scan on tenk1 x +(11 rows) + +reset enable_hashagg; +reset enable_nestloop; +reset enable_hashjoin; +-- +-- Clean up +-- +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE J1_TBL; +DROP TABLE J2_TBL; +-- Both DELETE and UPDATE allow the specification of additional tables +-- to "join" against to determine which rows should be modified. +CREATE TEMP TABLE t1 (a int, b int); +CREATE TEMP TABLE t2 (a int, b int); +CREATE TEMP TABLE t3 (x int, y int); +INSERT INTO t1 VALUES (5, 10); +INSERT INTO t1 VALUES (15, 20); +INSERT INTO t1 VALUES (100, 100); +INSERT INTO t1 VALUES (200, 1000); +INSERT INTO t2 VALUES (200, 2000); +INSERT INTO t3 VALUES (5, 20); +INSERT INTO t3 VALUES (6, 7); +INSERT INTO t3 VALUES (7, 8); +INSERT INTO t3 VALUES (500, 100); +DELETE FROM t3 USING t1 table1 WHERE t3.x = table1.a; +SELECT * FROM t3; + x | y +-----+----- + 6 | 7 + 7 | 8 + 500 | 100 +(3 rows) + +DELETE FROM t3 USING t1 JOIN t2 USING (a) WHERE t3.x > t1.a; +SELECT * FROM t3; + x | y +---+--- + 6 | 7 + 7 | 8 +(2 rows) + +DELETE FROM t3 USING t3 t3_other WHERE t3.x = t3_other.x AND t3.y = t3_other.y; +SELECT * FROM t3; + x | y +---+--- +(0 rows) + +-- Test join against inheritance tree +create temp table t2a () inherits (t2); +insert into t2a values (200, 2001); +select * from t1 left join t2 on (t1.a = t2.a); + a | b | a | b +-----+------+-----+------ + 5 | 10 | | + 15 | 20 | | + 100 | 100 | | + 200 | 1000 | 200 | 2000 + 200 | 1000 | 200 | 2001 +(5 rows) + +-- Test matching of column name with wrong alias +select t1.x from t1 join t3 on (t1.a = t3.x); +ERROR: column t1.x does not exist +LINE 1: select t1.x from t1 join t3 on (t1.a = t3.x); + ^ +HINT: Perhaps you meant to reference the column "t3.x". 
+-- Test matching of locking clause with wrong alias +select t1.*, t2.*, unnamed_join.* from + t1 join t2 on (t1.a = t2.a), t3 as unnamed_join + for update of unnamed_join; + a | b | a | b | x | y +---+---+---+---+---+--- +(0 rows) + +select foo.*, unnamed_join.* from + t1 join t2 using (a) as foo, t3 as unnamed_join + for update of unnamed_join; + a | x | y +---+---+--- +(0 rows) + +select foo.*, unnamed_join.* from + t1 join t2 using (a) as foo, t3 as unnamed_join + for update of foo; +ERROR: FOR UPDATE cannot be applied to a join +LINE 3: for update of foo; + ^ +select bar.*, unnamed_join.* from + (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join + for update of foo; +ERROR: relation "foo" in FOR UPDATE clause not found in FROM clause +LINE 3: for update of foo; + ^ +select bar.*, unnamed_join.* from + (t1 join t2 using (a) as foo) as bar, t3 as unnamed_join + for update of bar; +ERROR: FOR UPDATE cannot be applied to a join +LINE 3: for update of bar; + ^ +-- +-- regression test for 8.1 merge right join bug +-- +CREATE TEMP TABLE tt1 ( tt1_id int4, joincol int4 ); +INSERT INTO tt1 VALUES (1, 11); +INSERT INTO tt1 VALUES (2, NULL); +CREATE TEMP TABLE tt2 ( tt2_id int4, joincol int4 ); +INSERT INTO tt2 VALUES (21, 11); +INSERT INTO tt2 VALUES (22, 11); +set enable_hashjoin to off; +set enable_nestloop to off; +-- these should give the same results +select tt1.*, tt2.* from tt1 left join tt2 on tt1.joincol = tt2.joincol; + tt1_id | joincol | tt2_id | joincol +--------+---------+--------+--------- + 1 | 11 | 21 | 11 + 1 | 11 | 22 | 11 + 2 | | | +(3 rows) + +select tt1.*, tt2.* from tt2 right join tt1 on tt1.joincol = tt2.joincol; + tt1_id | joincol | tt2_id | joincol +--------+---------+--------+--------- + 1 | 11 | 21 | 11 + 1 | 11 | 22 | 11 + 2 | | | +(3 rows) + +reset enable_hashjoin; +reset enable_nestloop; +-- +-- regression test for bug #13908 (hash join with skew tuples & nbatch increase) +-- +set work_mem to '64kB'; +set enable_mergejoin to off; +set enable_memoize to off; +explain (costs off) +select count(*) from tenk1 a, tenk1 b + where a.hundred = b.thousand and (b.fivethous % 10) < 10; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Hash Join + Hash Cond: (a.hundred = b.thousand) + -> Index Only Scan using tenk1_hundred on tenk1 a + -> Hash + -> Seq Scan on tenk1 b + Filter: ((fivethous % 10) < 10) +(7 rows) + +select count(*) from tenk1 a, tenk1 b + where a.hundred = b.thousand and (b.fivethous % 10) < 10; + count +-------- + 100000 +(1 row) + +reset work_mem; +reset enable_mergejoin; +reset enable_memoize; +-- +-- regression test for 8.2 bug with improper re-ordering of left joins +-- +create temp table tt3(f1 int, f2 text); +insert into tt3 select x, repeat('xyzzy', 100) from generate_series(1,10000) x; +analyze tt3; +create temp table tt4(f1 int); +insert into tt4 values (0),(1),(9999); +analyze tt4; +set enable_nestloop to off; +EXPLAIN (COSTS OFF) +SELECT a.f1 +FROM tt4 a +LEFT JOIN ( + SELECT b.f1 + FROM tt3 b LEFT JOIN tt3 c ON (b.f1 = c.f1) + WHERE COALESCE(c.f1, 0) = 0 +) AS d ON (a.f1 = d.f1) +WHERE COALESCE(d.f1, 0) = 0 +ORDER BY 1; + QUERY PLAN +----------------------------------------------- + Sort + Sort Key: a.f1 + -> Hash Right Join + Hash Cond: (b.f1 = a.f1) + Filter: (COALESCE(b.f1, 0) = 0) + -> Hash Left Join + Hash Cond: (b.f1 = c.f1) + Filter: (COALESCE(c.f1, 0) = 0) + -> Seq Scan on tt3 b + -> Hash + -> Seq Scan on tt3 c + -> Hash + -> Seq Scan on tt4 a +(13 rows) + +SELECT a.f1 +FROM tt4 a +LEFT JOIN ( 
+ SELECT b.f1 + FROM tt3 b LEFT JOIN tt3 c ON (b.f1 = c.f1) + WHERE COALESCE(c.f1, 0) = 0 +) AS d ON (a.f1 = d.f1) +WHERE COALESCE(d.f1, 0) = 0 +ORDER BY 1; + f1 +------ + 0 + 1 + 9999 +(3 rows) + +reset enable_nestloop; +-- +-- basic semijoin and antijoin recognition tests +-- +explain (costs off) +select a.* from tenk1 a +where unique1 in (select unique2 from tenk1 b); + QUERY PLAN +------------------------------------------------------------ + Hash Semi Join + Hash Cond: (a.unique1 = b.unique2) + -> Seq Scan on tenk1 a + -> Hash + -> Index Only Scan using tenk1_unique2 on tenk1 b +(5 rows) + +-- sadly, this is not an antijoin +explain (costs off) +select a.* from tenk1 a +where unique1 not in (select unique2 from tenk1 b); + QUERY PLAN +-------------------------------------------------------- + Seq Scan on tenk1 a + Filter: (NOT (hashed SubPlan 1)) + SubPlan 1 + -> Index Only Scan using tenk1_unique2 on tenk1 b +(4 rows) + +explain (costs off) +select a.* from tenk1 a +where exists (select 1 from tenk1 b where a.unique1 = b.unique2); + QUERY PLAN +------------------------------------------------------------ + Hash Semi Join + Hash Cond: (a.unique1 = b.unique2) + -> Seq Scan on tenk1 a + -> Hash + -> Index Only Scan using tenk1_unique2 on tenk1 b +(5 rows) + +explain (costs off) +select a.* from tenk1 a +where not exists (select 1 from tenk1 b where a.unique1 = b.unique2); + QUERY PLAN +------------------------------------------------------------ + Hash Anti Join + Hash Cond: (a.unique1 = b.unique2) + -> Seq Scan on tenk1 a + -> Hash + -> Index Only Scan using tenk1_unique2 on tenk1 b +(5 rows) + +explain (costs off) +select a.* from tenk1 a left join tenk1 b on a.unique1 = b.unique2 +where b.unique2 is null; + QUERY PLAN +------------------------------------------------------------ + Hash Anti Join + Hash Cond: (a.unique1 = b.unique2) + -> Seq Scan on tenk1 a + -> Hash + -> Index Only Scan using tenk1_unique2 on tenk1 b +(5 rows) + +-- +-- regression test for proper handling of outer joins within antijoins +-- +create temp table tt4x(c1 int, c2 int, c3 int); +explain (costs off) +select * from tt4x t1 +where not exists ( + select 1 from tt4x t2 + left join tt4x t3 on t2.c3 = t3.c1 + left join ( select t5.c1 as c1 + from tt4x t4 left join tt4x t5 on t4.c2 = t5.c1 + ) a1 on t3.c2 = a1.c1 + where t1.c1 = t2.c2 +); + QUERY PLAN +--------------------------------------------------------- + Hash Anti Join + Hash Cond: (t1.c1 = t2.c2) + -> Seq Scan on tt4x t1 + -> Hash + -> Merge Right Join + Merge Cond: (t5.c1 = t3.c2) + -> Merge Join + Merge Cond: (t4.c2 = t5.c1) + -> Sort + Sort Key: t4.c2 + -> Seq Scan on tt4x t4 + -> Sort + Sort Key: t5.c1 + -> Seq Scan on tt4x t5 + -> Sort + Sort Key: t3.c2 + -> Merge Left Join + Merge Cond: (t2.c3 = t3.c1) + -> Sort + Sort Key: t2.c3 + -> Seq Scan on tt4x t2 + -> Sort + Sort Key: t3.c1 + -> Seq Scan on tt4x t3 +(24 rows) + +-- +-- regression test for problems of the sort depicted in bug #3494 +-- +create temp table tt5(f1 int, f2 int); +create temp table tt6(f1 int, f2 int); +insert into tt5 values(1, 10); +insert into tt5 values(1, 11); +insert into tt6 values(1, 9); +insert into tt6 values(1, 2); +insert into tt6 values(2, 9); +select * from tt5,tt6 where tt5.f1 = tt6.f1 and tt5.f1 = tt5.f2 - tt6.f2; + f1 | f2 | f1 | f2 +----+----+----+---- + 1 | 10 | 1 | 9 +(1 row) + +-- +-- regression test for problems of the sort depicted in bug #3588 +-- +create temp table xx (pkxx int); +create temp table yy (pkyy int, pkxx int); +insert into xx values (1); 
+insert into xx values (2); +insert into xx values (3); +insert into yy values (101, 1); +insert into yy values (201, 2); +insert into yy values (301, NULL); +select yy.pkyy as yy_pkyy, yy.pkxx as yy_pkxx, yya.pkyy as yya_pkyy, + xxa.pkxx as xxa_pkxx, xxb.pkxx as xxb_pkxx +from yy + left join (SELECT * FROM yy where pkyy = 101) as yya ON yy.pkyy = yya.pkyy + left join xx xxa on yya.pkxx = xxa.pkxx + left join xx xxb on coalesce (xxa.pkxx, 1) = xxb.pkxx; + yy_pkyy | yy_pkxx | yya_pkyy | xxa_pkxx | xxb_pkxx +---------+---------+----------+----------+---------- + 101 | 1 | 101 | 1 | 1 + 201 | 2 | | | 1 + 301 | | | | 1 +(3 rows) + +-- +-- regression test for improper pushing of constants across outer-join clauses +-- (as seen in early 8.2.x releases) +-- +create temp table zt1 (f1 int primary key); +create temp table zt2 (f2 int primary key); +create temp table zt3 (f3 int primary key); +insert into zt1 values(53); +insert into zt2 values(53); +select * from + zt2 left join zt3 on (f2 = f3) + left join zt1 on (f3 = f1) +where f2 = 53; + f2 | f3 | f1 +----+----+---- + 53 | | +(1 row) + +create temp view zv1 as select *,'dummy'::text AS junk from zt1; +select * from + zt2 left join zt3 on (f2 = f3) + left join zv1 on (f3 = f1) +where f2 = 53; + f2 | f3 | f1 | junk +----+----+----+------ + 53 | | | +(1 row) + +-- +-- regression test for improper extraction of OR indexqual conditions +-- (as seen in early 8.3.x releases) +-- +select a.unique2, a.ten, b.tenthous, b.unique2, b.hundred +from tenk1 a left join tenk1 b on a.unique2 = b.tenthous +where a.unique1 = 42 and + ((b.unique2 is null and a.ten = 2) or b.hundred = 3); + unique2 | ten | tenthous | unique2 | hundred +---------+-----+----------+---------+--------- +(0 rows) + +-- +-- test proper positioning of one-time quals in EXISTS (8.4devel bug) +-- +prepare foo(bool) as + select count(*) from tenk1 a left join tenk1 b + on (a.unique2 = b.unique1 and exists + (select 1 from tenk1 c where c.thousand = b.unique2 and $1)); +execute foo(true); + count +------- + 10000 +(1 row) + +execute foo(false); + count +------- + 10000 +(1 row) + +-- +-- test for sane behavior with noncanonical merge clauses, per bug #4926 +-- +begin; +set enable_mergejoin = 1; +set enable_hashjoin = 0; +set enable_nestloop = 0; +create temp table a (i integer); +create temp table b (x integer, y integer); +select * from a left join b on i = x and i = y and x = i; + i | x | y +---+---+--- +(0 rows) + +rollback; +-- +-- test handling of merge clauses using record_ops +-- +begin; +create type mycomptype as (id int, v bigint); +create temp table tidv (idv mycomptype); +create index on tidv (idv); +explain (costs off) +select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; + QUERY PLAN +---------------------------------------------------------- + Merge Join + Merge Cond: (a.idv = b.idv) + -> Index Only Scan using tidv_idv_idx on tidv a + -> Materialize + -> Index Only Scan using tidv_idv_idx on tidv b +(5 rows) + +set enable_mergejoin = 0; +set enable_hashjoin = 0; +explain (costs off) +select a.idv, b.idv from tidv a, tidv b where a.idv = b.idv; + QUERY PLAN +---------------------------------------------------- + Nested Loop + -> Seq Scan on tidv a + -> Index Only Scan using tidv_idv_idx on tidv b + Index Cond: (idv = a.idv) +(4 rows) + +rollback; +-- +-- test NULL behavior of whole-row Vars, per bug #5025 +-- +select t1.q2, count(t2.*) +from int8_tbl t1 left join int8_tbl t2 on (t1.q2 = t2.q1) +group by t1.q2 order by 1; + q2 | count +-------------------+------- + 
-4567890123456789 | 0 + 123 | 2 + 456 | 0 + 4567890123456789 | 6 +(4 rows) + +select t1.q2, count(t2.*) +from int8_tbl t1 left join (select * from int8_tbl) t2 on (t1.q2 = t2.q1) +group by t1.q2 order by 1; + q2 | count +-------------------+------- + -4567890123456789 | 0 + 123 | 2 + 456 | 0 + 4567890123456789 | 6 +(4 rows) + +select t1.q2, count(t2.*) +from int8_tbl t1 left join (select * from int8_tbl offset 0) t2 on (t1.q2 = t2.q1) +group by t1.q2 order by 1; + q2 | count +-------------------+------- + -4567890123456789 | 0 + 123 | 2 + 456 | 0 + 4567890123456789 | 6 +(4 rows) + +select t1.q2, count(t2.*) +from int8_tbl t1 left join + (select q1, case when q2=1 then 1 else q2 end as q2 from int8_tbl) t2 + on (t1.q2 = t2.q1) +group by t1.q2 order by 1; + q2 | count +-------------------+------- + -4567890123456789 | 0 + 123 | 2 + 456 | 0 + 4567890123456789 | 6 +(4 rows) + +-- +-- test incorrect failure to NULL pulled-up subexpressions +-- +begin; +create temp table a ( + code char not null, + constraint a_pk primary key (code) +); +create temp table b ( + a char not null, + num integer not null, + constraint b_pk primary key (a, num) +); +create temp table c ( + name char not null, + a char, + constraint c_pk primary key (name) +); +insert into a (code) values ('p'); +insert into a (code) values ('q'); +insert into b (a, num) values ('p', 1); +insert into b (a, num) values ('p', 2); +insert into c (name, a) values ('A', 'p'); +insert into c (name, a) values ('B', 'q'); +insert into c (name, a) values ('C', null); +select c.name, ss.code, ss.b_cnt, ss.const +from c left join + (select a.code, coalesce(b_grp.cnt, 0) as b_cnt, -1 as const + from a left join + (select count(1) as cnt, b.a from b group by b.a) as b_grp + on a.code = b_grp.a + ) as ss + on (c.a = ss.code) +order by c.name; + name | code | b_cnt | const +------+------+-------+------- + A | p | 2 | -1 + B | q | 0 | -1 + C | | | +(3 rows) + +rollback; +-- +-- test incorrect handling of placeholders that only appear in targetlists, +-- per bug #6154 +-- +SELECT * FROM +( SELECT 1 as key1 ) sub1 +LEFT JOIN +( SELECT sub3.key3, sub4.value2, COALESCE(sub4.value2, 66) as value3 FROM + ( SELECT 1 as key3 ) sub3 + LEFT JOIN + ( SELECT sub5.key5, COALESCE(sub6.value1, 1) as value2 FROM + ( SELECT 1 as key5 ) sub5 + LEFT JOIN + ( SELECT 2 as key6, 42 as value1 ) sub6 + ON sub5.key5 = sub6.key6 + ) sub4 + ON sub4.key5 = sub3.key3 +) sub2 +ON sub1.key1 = sub2.key3; + key1 | key3 | value2 | value3 +------+------+--------+-------- + 1 | 1 | 1 | 1 +(1 row) + +-- test the path using join aliases, too +SELECT * FROM +( SELECT 1 as key1 ) sub1 +LEFT JOIN +( SELECT sub3.key3, value2, COALESCE(value2, 66) as value3 FROM + ( SELECT 1 as key3 ) sub3 + LEFT JOIN + ( SELECT sub5.key5, COALESCE(sub6.value1, 1) as value2 FROM + ( SELECT 1 as key5 ) sub5 + LEFT JOIN + ( SELECT 2 as key6, 42 as value1 ) sub6 + ON sub5.key5 = sub6.key6 + ) sub4 + ON sub4.key5 = sub3.key3 +) sub2 +ON sub1.key1 = sub2.key3; + key1 | key3 | value2 | value3 +------+------+--------+-------- + 1 | 1 | 1 | 1 +(1 row) + +-- +-- test case where a PlaceHolderVar is used as a nestloop parameter +-- +EXPLAIN (COSTS OFF) +SELECT qq, unique1 + FROM + ( SELECT COALESCE(q1, 0) AS qq FROM int8_tbl a ) AS ss1 + FULL OUTER JOIN + ( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2 + USING (qq) + INNER JOIN tenk1 c ON qq = unique2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Nested Loop + -> Hash Full Join + Hash 
Cond: ((COALESCE(a.q1, '0'::bigint)) = (COALESCE(b.q2, '-1'::bigint))) + -> Seq Scan on int8_tbl a + -> Hash + -> Seq Scan on int8_tbl b + -> Index Scan using tenk1_unique2 on tenk1 c + Index Cond: (unique2 = COALESCE((COALESCE(a.q1, '0'::bigint)), (COALESCE(b.q2, '-1'::bigint)))) +(8 rows) + +SELECT qq, unique1 + FROM + ( SELECT COALESCE(q1, 0) AS qq FROM int8_tbl a ) AS ss1 + FULL OUTER JOIN + ( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2 + USING (qq) + INNER JOIN tenk1 c ON qq = unique2; + qq | unique1 +-----+--------- + 123 | 4596 + 123 | 4596 + 456 | 7318 +(3 rows) + +-- +-- nested nestloops can require nested PlaceHolderVars +-- +create temp table nt1 ( + id int primary key, + a1 boolean, + a2 boolean +); +create temp table nt2 ( + id int primary key, + nt1_id int, + b1 boolean, + b2 boolean, + foreign key (nt1_id) references nt1(id) +); +create temp table nt3 ( + id int primary key, + nt2_id int, + c1 boolean, + foreign key (nt2_id) references nt2(id) +); +insert into nt1 values (1,true,true); +insert into nt1 values (2,true,false); +insert into nt1 values (3,false,false); +insert into nt2 values (1,1,true,true); +insert into nt2 values (2,2,true,false); +insert into nt2 values (3,3,false,false); +insert into nt3 values (1,1,true); +insert into nt3 values (2,2,false); +insert into nt3 values (3,3,true); +explain (costs off) +select nt3.id +from nt3 as nt3 + left join + (select nt2.*, (nt2.b1 and ss1.a3) AS b3 + from nt2 as nt2 + left join + (select nt1.*, (nt1.id is not null) as a3 from nt1) as ss1 + on ss1.id = nt2.nt1_id + ) as ss2 + on ss2.id = nt3.nt2_id +where nt3.id = 1 and ss2.b3; + QUERY PLAN +----------------------------------------------- + Nested Loop + -> Nested Loop + -> Index Scan using nt3_pkey on nt3 + Index Cond: (id = 1) + -> Index Scan using nt2_pkey on nt2 + Index Cond: (id = nt3.nt2_id) + -> Index Only Scan using nt1_pkey on nt1 + Index Cond: (id = nt2.nt1_id) + Filter: (nt2.b1 AND (id IS NOT NULL)) +(9 rows) + +select nt3.id +from nt3 as nt3 + left join + (select nt2.*, (nt2.b1 and ss1.a3) AS b3 + from nt2 as nt2 + left join + (select nt1.*, (nt1.id is not null) as a3 from nt1) as ss1 + on ss1.id = nt2.nt1_id + ) as ss2 + on ss2.id = nt3.nt2_id +where nt3.id = 1 and ss2.b3; + id +---- + 1 +(1 row) + +-- +-- test case where a PlaceHolderVar is propagated into a subquery +-- +explain (costs off) +select * from + int8_tbl t1 left join + (select q1 as x, 42 as y from int8_tbl t2) ss + on t1.q2 = ss.x +where + 1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1) +order by 1,2; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: t1.q1, t1.q2 + -> Hash Left Join + Hash Cond: (t1.q2 = t2.q1) + Filter: (1 = (SubPlan 1)) + -> Seq Scan on int8_tbl t1 + -> Hash + -> Seq Scan on int8_tbl t2 + SubPlan 1 + -> Limit + -> Result + One-Time Filter: ((42) IS NOT NULL) + -> Seq Scan on int8_tbl t3 +(13 rows) + +select * from + int8_tbl t1 left join + (select q1 as x, 42 as y from int8_tbl t2) ss + on t1.q2 = ss.x +where + 1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1) +order by 1,2; + q1 | q2 | x | y +------------------+------------------+------------------+---- + 123 | 4567890123456789 | 4567890123456789 | 42 + 123 | 4567890123456789 | 4567890123456789 | 42 + 123 | 4567890123456789 | 4567890123456789 | 42 + 4567890123456789 | 123 | 123 | 42 + 4567890123456789 | 123 | 123 | 42 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 42 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 42 + 
4567890123456789 | 4567890123456789 | 4567890123456789 | 42 +(8 rows) + +-- +-- variant where a PlaceHolderVar is needed at a join, but not above the join +-- +explain (costs off) +select * from + int4_tbl as i41, + lateral + (select 1 as x from + (select i41.f1 as lat, + i42.f1 as loc from + int8_tbl as i81, int4_tbl as i42) as ss1 + right join int4_tbl as i43 on (i43.f1 > 1) + where ss1.loc = ss1.lat) as ss2 +where i41.f1 > 0; + QUERY PLAN +-------------------------------------------------- + Nested Loop + -> Nested Loop + -> Seq Scan on int4_tbl i41 + Filter: (f1 > 0) + -> Nested Loop + Join Filter: (i42.f1 = i41.f1) + -> Seq Scan on int8_tbl i81 + -> Materialize + -> Seq Scan on int4_tbl i42 + -> Materialize + -> Seq Scan on int4_tbl i43 + Filter: (f1 > 1) +(12 rows) + +select * from + int4_tbl as i41, + lateral + (select 1 as x from + (select i41.f1 as lat, + i42.f1 as loc from + int8_tbl as i81, int4_tbl as i42) as ss1 + right join int4_tbl as i43 on (i43.f1 > 1) + where ss1.loc = ss1.lat) as ss2 +where i41.f1 > 0; + f1 | x +------------+--- + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 123456 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 + 2147483647 | 1 +(20 rows) + +-- +-- test the corner cases FULL JOIN ON TRUE and FULL JOIN ON FALSE +-- +select * from int4_tbl a full join int4_tbl b on true; + f1 | f1 +-------------+------------- + 0 | 0 + 0 | 123456 + 0 | -123456 + 0 | 2147483647 + 0 | -2147483647 + 123456 | 0 + 123456 | 123456 + 123456 | -123456 + 123456 | 2147483647 + 123456 | -2147483647 + -123456 | 0 + -123456 | 123456 + -123456 | -123456 + -123456 | 2147483647 + -123456 | -2147483647 + 2147483647 | 0 + 2147483647 | 123456 + 2147483647 | -123456 + 2147483647 | 2147483647 + 2147483647 | -2147483647 + -2147483647 | 0 + -2147483647 | 123456 + -2147483647 | -123456 + -2147483647 | 2147483647 + -2147483647 | -2147483647 +(25 rows) + +select * from int4_tbl a full join int4_tbl b on false; + f1 | f1 +-------------+------------- + | 0 + | 123456 + | -123456 + | 2147483647 + | -2147483647 + 0 | + 123456 | + -123456 | + 2147483647 | + -2147483647 | +(10 rows) + +-- +-- test for ability to use a cartesian join when necessary +-- +create temp table q1 as select 1 as q1; +create temp table q2 as select 0 as q2; +analyze q1; +analyze q2; +explain (costs off) +select * from + tenk1 join int4_tbl on f1 = twothousand, + q1, q2 +where q1 = thousand or q2 = thousand; + QUERY PLAN +------------------------------------------------------------------------ + Hash Join + Hash Cond: (tenk1.twothousand = int4_tbl.f1) + -> Nested Loop + -> Nested Loop + -> Seq Scan on q1 + -> Seq Scan on q2 + -> Bitmap Heap Scan on tenk1 + Recheck Cond: ((q1.q1 = thousand) OR (q2.q2 = thousand)) + -> BitmapOr + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = q1.q1) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = q2.q2) + -> Hash + -> Seq Scan on int4_tbl +(15 rows) + +explain (costs off) +select * from + tenk1 join int4_tbl on f1 = twothousand, + q1, q2 +where thousand = (q1 + q2); + QUERY PLAN +-------------------------------------------------------------- + Hash Join + Hash Cond: (tenk1.twothousand = int4_tbl.f1) + -> Nested Loop + -> Nested Loop + -> Seq Scan on q1 + -> Seq Scan on q2 + -> Bitmap Heap Scan on tenk1 + Recheck Cond: (thousand = (q1.q1 + q2.q2)) + -> Bitmap Index Scan on 
tenk1_thous_tenthous + Index Cond: (thousand = (q1.q1 + q2.q2)) + -> Hash + -> Seq Scan on int4_tbl +(12 rows) + +-- +-- test ability to generate a suitable plan for a star-schema query +-- +explain (costs off) +select * from + tenk1, int8_tbl a, int8_tbl b +where thousand = a.q1 and tenthous = b.q1 and a.q2 = 1 and b.q2 = 2; + QUERY PLAN +--------------------------------------------------------------------- + Nested Loop + -> Seq Scan on int8_tbl b + Filter: (q2 = 2) + -> Nested Loop + -> Seq Scan on int8_tbl a + Filter: (q2 = 1) + -> Index Scan using tenk1_thous_tenthous on tenk1 + Index Cond: ((thousand = a.q1) AND (tenthous = b.q1)) +(8 rows) + +-- +-- test a corner case in which we shouldn't apply the star-schema optimization +-- +explain (costs off) +select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from + tenk1 t1 + inner join int4_tbl i1 + left join (select v1.x2, v2.y1, 11 AS d1 + from (select 1,0 from onerow) v1(x1,x2) + left join (select 3,1 from onerow) v2(y1,y2) + on v1.x1 = v2.y2) subq1 + on (i1.f1 = subq1.x2) + on (t1.unique2 = subq1.d1) + left join tenk1 t2 + on (subq1.y1 = t2.unique1) +where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop + -> Nested Loop + Join Filter: (t1.stringu1 > t2.stringu2) + -> Nested Loop + -> Nested Loop + -> Seq Scan on onerow + -> Seq Scan on onerow onerow_1 + -> Index Scan using tenk1_unique2 on tenk1 t1 + Index Cond: ((unique2 = (11)) AND (unique2 < 42)) + -> Index Scan using tenk1_unique1 on tenk1 t2 + Index Cond: (unique1 = (3)) + -> Seq Scan on int4_tbl i1 + Filter: (f1 = 0) +(13 rows) + +select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from + tenk1 t1 + inner join int4_tbl i1 + left join (select v1.x2, v2.y1, 11 AS d1 + from (select 1,0 from onerow) v1(x1,x2) + left join (select 3,1 from onerow) v2(y1,y2) + on v1.x1 = v2.y2) subq1 + on (i1.f1 = subq1.x2) + on (t1.unique2 = subq1.d1) + left join tenk1 t2 + on (subq1.y1 = t2.unique1) +where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; + unique2 | stringu1 | unique1 | stringu2 +---------+----------+---------+---------- + 11 | WFAAAA | 3 | LKIAAA +(1 row) + +-- variant that isn't quite a star-schema case +select ss1.d1 from + tenk1 as t1 + inner join tenk1 as t2 + on t1.tenthous = t2.ten + inner join + int8_tbl as i8 + left join int4_tbl as i4 + inner join (select 64::information_schema.cardinal_number as d1 + from tenk1 t3, + lateral (select abs(t3.unique1) + random()) ss0(x) + where t3.fivethous < 0) as ss1 + on i4.f1 = ss1.d1 + on i8.q1 = i4.f1 + on t1.tenthous = ss1.d1 +where t1.unique1 < i4.f1; + d1 +---- +(0 rows) + +-- this variant is foldable by the remove-useless-RESULT-RTEs code +explain (costs off) +select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from + tenk1 t1 + inner join int4_tbl i1 + left join (select v1.x2, v2.y1, 11 AS d1 + from (values(1,0)) v1(x1,x2) + left join (values(3,1)) v2(y1,y2) + on v1.x1 = v2.y2) subq1 + on (i1.f1 = subq1.x2) + on (t1.unique2 = subq1.d1) + left join tenk1 t2 + on (subq1.y1 = t2.unique1) +where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; + QUERY PLAN +----------------------------------------------------------------- + Nested Loop + Join Filter: (t1.stringu1 > t2.stringu2) + -> Nested Loop + -> Seq Scan on int4_tbl i1 + Filter: (f1 = 0) + -> Index Scan using tenk1_unique2 on tenk1 t1 + Index Cond: ((unique2 = (11)) AND (unique2 < 42)) + -> Index Scan using tenk1_unique1 on tenk1 t2 + Index Cond: (unique1 = (3)) +(9 rows) + 
+select t1.unique2, t1.stringu1, t2.unique1, t2.stringu2 from + tenk1 t1 + inner join int4_tbl i1 + left join (select v1.x2, v2.y1, 11 AS d1 + from (values(1,0)) v1(x1,x2) + left join (values(3,1)) v2(y1,y2) + on v1.x1 = v2.y2) subq1 + on (i1.f1 = subq1.x2) + on (t1.unique2 = subq1.d1) + left join tenk1 t2 + on (subq1.y1 = t2.unique1) +where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; + unique2 | stringu1 | unique1 | stringu2 +---------+----------+---------+---------- + 11 | WFAAAA | 3 | LKIAAA +(1 row) + +-- Here's a variant that we can't fold too aggressively, though, +-- or we end up with noplace to evaluate the lateral PHV +explain (verbose, costs off) +select * from + (select 1 as x) ss1 left join (select 2 as y) ss2 on (true), + lateral (select ss2.y as z limit 1) ss3; + QUERY PLAN +--------------------------- + Nested Loop + Output: 1, (2), ((2)) + -> Result + Output: 2 + -> Limit + Output: ((2)) + -> Result + Output: (2) +(8 rows) + +select * from + (select 1 as x) ss1 left join (select 2 as y) ss2 on (true), + lateral (select ss2.y as z limit 1) ss3; + x | y | z +---+---+--- + 1 | 2 | 2 +(1 row) + +-- Test proper handling of appendrel PHVs during useless-RTE removal +explain (costs off) +select * from + (select 0 as z) as t1 + left join + (select true as a) as t2 + on true, + lateral (select true as b + union all + select a as b) as t3 +where b; + QUERY PLAN +--------------------------------------- + Nested Loop + -> Result + -> Append + -> Result + -> Result + One-Time Filter: (true) +(6 rows) + +select * from + (select 0 as z) as t1 + left join + (select true as a) as t2 + on true, + lateral (select true as b + union all + select a as b) as t3 +where b; + z | a | b +---+---+--- + 0 | t | t + 0 | t | t +(2 rows) + +-- Test PHV in a semijoin qual, which confused useless-RTE removal (bug #17700) +explain (verbose, costs off) +with ctetable as not materialized ( select 1 as f1 ) +select * from ctetable c1 +where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true ); + QUERY PLAN +---------------------------- + Result + Output: 1 + One-Time Filter: (1 = 1) +(3 rows) + +with ctetable as not materialized ( select 1 as f1 ) +select * from ctetable c1 +where f1 in ( select c3.f1 from ctetable c2 full join ctetable c3 on true ); + f1 +---- + 1 +(1 row) + +-- Test PHV that winds up in a Result node, despite having nonempty nullingrels +explain (verbose, costs off) +select table_catalog, table_name +from int4_tbl t1 + inner join (int8_tbl t2 + left join information_schema.column_udt_usage on null) + on null; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Result + Output: (current_database())::information_schema.sql_identifier, (c.relname)::information_schema.sql_identifier + One-Time Filter: false +(3 rows) + +-- Test handling of qual pushdown to appendrel members with non-Var outputs +explain (verbose, costs off) +select * from int4_tbl left join ( + select text 'foo' union all select text 'bar' +) ss(x) on true +where ss.x is null; + QUERY PLAN +----------------------------------------- + Nested Loop Left Join + Output: int4_tbl.f1, ('foo'::text) + Filter: (('foo'::text) IS NULL) + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 + -> Materialize + Output: ('foo'::text) + -> Append + -> Result + Output: 'foo'::text + -> Result + Output: 'bar'::text +(12 rows) + +-- +-- test inlining of immutable functions +-- +create function f_immutable_int4(i integer) returns integer as +$$ begin 
return i; end; $$ language plpgsql immutable; +-- check optimization of function scan with join +explain (costs off) +select unique1 from tenk1, (select * from f_immutable_int4(1) x) x +where x = unique1; + QUERY PLAN +---------------------------------------------- + Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = 1) +(2 rows) + +explain (verbose, costs off) +select unique1, x.* +from tenk1, (select *, random() from f_immutable_int4(1) x) x +where x = unique1; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + Output: tenk1.unique1, (1), (random()) + -> Result + Output: 1, random() + -> Index Only Scan using tenk1_unique1 on public.tenk1 + Output: tenk1.unique1 + Index Cond: (tenk1.unique1 = (1)) +(7 rows) + +explain (costs off) +select unique1 from tenk1, f_immutable_int4(1) x where x = unique1; + QUERY PLAN +---------------------------------------------- + Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = 1) +(2 rows) + +explain (costs off) +select unique1 from tenk1, lateral f_immutable_int4(1) x where x = unique1; + QUERY PLAN +---------------------------------------------- + Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = 1) +(2 rows) + +explain (costs off) +select unique1 from tenk1, lateral f_immutable_int4(1) x where x in (select 17); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) +select unique1, x from tenk1 join f_immutable_int4(1) x on unique1 = x; + QUERY PLAN +---------------------------------------------- + Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = 1) +(2 rows) + +explain (costs off) +select unique1, x from tenk1 left join f_immutable_int4(1) x on unique1 = x; + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + Join Filter: (tenk1.unique1 = 1) + -> Index Only Scan using tenk1_unique1 on tenk1 + -> Materialize + -> Result +(5 rows) + +explain (costs off) +select unique1, x from tenk1 right join f_immutable_int4(1) x on unique1 = x; + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + -> Result + -> Index Only Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = 1) +(4 rows) + +explain (costs off) +select unique1, x from tenk1 full join f_immutable_int4(1) x on unique1 = x; + QUERY PLAN +---------------------------------------------------- + Merge Full Join + Merge Cond: (tenk1.unique1 = (1)) + -> Index Only Scan using tenk1_unique1 on tenk1 + -> Sort + Sort Key: (1) + -> Result +(6 rows) + +-- check that pullup of a const function allows further const-folding +explain (costs off) +select unique1 from tenk1, f_immutable_int4(1) x where x = 42; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- test inlining of immutable functions with PlaceHolderVars +explain (costs off) +select nt3.id +from nt3 as nt3 + left join + (select nt2.*, (nt2.b1 or i4 = 42) AS b3 + from nt2 as nt2 + left join + f_immutable_int4(0) i4 + on i4 = nt2.nt1_id + ) as ss2 + on ss2.id = nt3.nt2_id +where nt3.id = 1 and ss2.b3; + QUERY PLAN +---------------------------------------------- + Nested Loop Left Join + Filter: ((nt2.b1 OR ((0) = 42))) + -> Index Scan using nt3_pkey on nt3 + Index Cond: (id = 1) + -> Nested Loop Left Join + Join Filter: (0 = nt2.nt1_id) + -> Index Scan using nt2_pkey on nt2 + Index Cond: (id = nt3.nt2_id) + -> Result +(9 rows) + +drop function f_immutable_int4(int); +-- 
test inlining when function returns composite +create function mki8(bigint, bigint) returns int8_tbl as +$$select row($1,$2)::int8_tbl$$ language sql; +create function mki4(int) returns int4_tbl as +$$select row($1)::int4_tbl$$ language sql; +explain (verbose, costs off) +select * from mki8(1,2); + QUERY PLAN +------------------------------------ + Function Scan on mki8 + Output: q1, q2 + Function Call: '(1,2)'::int8_tbl +(3 rows) + +select * from mki8(1,2); + q1 | q2 +----+---- + 1 | 2 +(1 row) + +explain (verbose, costs off) +select * from mki4(42); + QUERY PLAN +----------------------------------- + Function Scan on mki4 + Output: f1 + Function Call: '(42)'::int4_tbl +(3 rows) + +select * from mki4(42); + f1 +---- + 42 +(1 row) + +drop function mki8(bigint, bigint); +drop function mki4(int); +-- test const-folding of a whole-row Var into a per-field Var +-- (need to inline a function to reach this case, else parser does it) +create function f_field_select(t onek) returns int4 as +$$ select t.unique2; $$ language sql immutable; +explain (verbose, costs off) +select (t2.*).unique1, f_field_select(t2) from tenk1 t1 + left join onek t2 on t1.unique1 = t2.unique1 + left join int8_tbl t3 on true; + QUERY PLAN +-------------------------------------------------------------------- + Nested Loop Left Join + Output: t2.unique1, t2.unique2 + -> Hash Left Join + Output: t2.unique1, t2.unique2 + Hash Cond: (t1.unique1 = t2.unique1) + -> Index Only Scan using tenk1_unique1 on public.tenk1 t1 + Output: t1.unique1 + -> Hash + Output: t2.unique1, t2.unique2 + -> Seq Scan on public.onek t2 + Output: t2.unique1, t2.unique2 + -> Materialize + -> Seq Scan on public.int8_tbl t3 +(13 rows) + +drop function f_field_select(t onek); +-- +-- test extraction of restriction OR clauses from join OR clause +-- (we used to only do this for indexable clauses) +-- +explain (costs off) +select * from tenk1 a join tenk1 b on + (a.unique1 = 1 and b.unique1 = 2) or (a.unique2 = 3 and b.hundred = 4); + QUERY PLAN +------------------------------------------------------------------------------------------------- + Nested Loop + Join Filter: (((a.unique1 = 1) AND (b.unique1 = 2)) OR ((a.unique2 = 3) AND (b.hundred = 4))) + -> Bitmap Heap Scan on tenk1 b + Recheck Cond: ((unique1 = 2) OR (hundred = 4)) + -> BitmapOr + -> Bitmap Index Scan on tenk1_unique1 + Index Cond: (unique1 = 2) + -> Bitmap Index Scan on tenk1_hundred + Index Cond: (hundred = 4) + -> Materialize + -> Bitmap Heap Scan on tenk1 a + Recheck Cond: ((unique1 = 1) OR (unique2 = 3)) + -> BitmapOr + -> Bitmap Index Scan on tenk1_unique1 + Index Cond: (unique1 = 1) + -> Bitmap Index Scan on tenk1_unique2 + Index Cond: (unique2 = 3) +(17 rows) + +explain (costs off) +select * from tenk1 a join tenk1 b on + (a.unique1 = 1 and b.unique1 = 2) or (a.unique2 = 3 and b.ten = 4); + QUERY PLAN +--------------------------------------------------------------------------------------------- + Nested Loop + Join Filter: (((a.unique1 = 1) AND (b.unique1 = 2)) OR ((a.unique2 = 3) AND (b.ten = 4))) + -> Seq Scan on tenk1 b + Filter: ((unique1 = 2) OR (ten = 4)) + -> Materialize + -> Bitmap Heap Scan on tenk1 a + Recheck Cond: ((unique1 = 1) OR (unique2 = 3)) + -> BitmapOr + -> Bitmap Index Scan on tenk1_unique1 + Index Cond: (unique1 = 1) + -> Bitmap Index Scan on tenk1_unique2 + Index Cond: (unique2 = 3) +(12 rows) + +explain (costs off) +select * from tenk1 a join tenk1 b on + (a.unique1 = 1 and b.unique1 = 2) or + ((a.unique2 = 3 or a.unique2 = 7) and b.hundred = 4); + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------- + Nested Loop + Join Filter: (((a.unique1 = 1) AND (b.unique1 = 2)) OR (((a.unique2 = 3) OR (a.unique2 = 7)) AND (b.hundred = 4))) + -> Bitmap Heap Scan on tenk1 b + Recheck Cond: ((unique1 = 2) OR (hundred = 4)) + -> BitmapOr + -> Bitmap Index Scan on tenk1_unique1 + Index Cond: (unique1 = 2) + -> Bitmap Index Scan on tenk1_hundred + Index Cond: (hundred = 4) + -> Materialize + -> Bitmap Heap Scan on tenk1 a + Recheck Cond: ((unique1 = 1) OR (unique2 = 3) OR (unique2 = 7)) + -> BitmapOr + -> Bitmap Index Scan on tenk1_unique1 + Index Cond: (unique1 = 1) + -> Bitmap Index Scan on tenk1_unique2 + Index Cond: (unique2 = 3) + -> Bitmap Index Scan on tenk1_unique2 + Index Cond: (unique2 = 7) +(19 rows) + +-- +-- test placement of movable quals in a parameterized join tree +-- +explain (costs off) +select * from tenk1 t1 left join + (tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2) + on t1.hundred = t2.hundred and t1.ten = t3.ten +where t1.unique1 = 1; + QUERY PLAN +-------------------------------------------------------- + Nested Loop Left Join + -> Index Scan using tenk1_unique1 on tenk1 t1 + Index Cond: (unique1 = 1) + -> Nested Loop + Join Filter: (t1.ten = t3.ten) + -> Bitmap Heap Scan on tenk1 t2 + Recheck Cond: (t1.hundred = hundred) + -> Bitmap Index Scan on tenk1_hundred + Index Cond: (hundred = t1.hundred) + -> Index Scan using tenk1_unique2 on tenk1 t3 + Index Cond: (unique2 = t2.thousand) +(11 rows) + +explain (costs off) +select * from tenk1 t1 left join + (tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2) + on t1.hundred = t2.hundred and t1.ten + t2.ten = t3.ten +where t1.unique1 = 1; + QUERY PLAN +-------------------------------------------------------- + Nested Loop Left Join + -> Index Scan using tenk1_unique1 on tenk1 t1 + Index Cond: (unique1 = 1) + -> Nested Loop + Join Filter: ((t1.ten + t2.ten) = t3.ten) + -> Bitmap Heap Scan on tenk1 t2 + Recheck Cond: (t1.hundred = hundred) + -> Bitmap Index Scan on tenk1_hundred + Index Cond: (hundred = t1.hundred) + -> Index Scan using tenk1_unique2 on tenk1 t3 + Index Cond: (unique2 = t2.thousand) +(11 rows) + +explain (costs off) +select count(*) from + tenk1 a join tenk1 b on a.unique1 = b.unique2 + left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand + join int4_tbl on b.thousand = f1; + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Nested Loop Left Join + Join Filter: (a.unique2 = b.unique1) + -> Nested Loop + -> Nested Loop + -> Seq Scan on int4_tbl + -> Bitmap Heap Scan on tenk1 b + Recheck Cond: (thousand = int4_tbl.f1) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = int4_tbl.f1) + -> Index Scan using tenk1_unique1 on tenk1 a + Index Cond: (unique1 = b.unique2) + -> Index Only Scan using tenk1_thous_tenthous on tenk1 c + Index Cond: (thousand = a.thousand) +(14 rows) + +select count(*) from + tenk1 a join tenk1 b on a.unique1 = b.unique2 + left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand + join int4_tbl on b.thousand = f1; + count +------- + 10 +(1 row) + +explain (costs off) +select b.unique1 from + tenk1 a join tenk1 b on a.unique1 = b.unique2 + left join tenk1 c on b.unique1 = 42 and c.thousand = a.thousand + join int4_tbl i1 on b.thousand = f1 + right join int4_tbl i2 on i2.f1 = b.tenthous + order by 1; + QUERY PLAN 
+----------------------------------------------------------------------------------------- + Sort + Sort Key: b.unique1 + -> Nested Loop Left Join + -> Seq Scan on int4_tbl i2 + -> Nested Loop Left Join + Join Filter: (b.unique1 = 42) + -> Nested Loop + -> Nested Loop + -> Seq Scan on int4_tbl i1 + -> Index Scan using tenk1_thous_tenthous on tenk1 b + Index Cond: ((thousand = i1.f1) AND (tenthous = i2.f1)) + -> Index Scan using tenk1_unique1 on tenk1 a + Index Cond: (unique1 = b.unique2) + -> Index Only Scan using tenk1_thous_tenthous on tenk1 c + Index Cond: (thousand = a.thousand) +(15 rows) + +select b.unique1 from + tenk1 a join tenk1 b on a.unique1 = b.unique2 + left join tenk1 c on b.unique1 = 42 and c.thousand = a.thousand + join int4_tbl i1 on b.thousand = f1 + right join int4_tbl i2 on i2.f1 = b.tenthous + order by 1; + unique1 +--------- + 0 + + + + +(5 rows) + +explain (costs off) +select * from +( + select unique1, q1, coalesce(unique1, -1) + q1 as fault + from int8_tbl left join tenk1 on (q2 = unique2) +) ss +where fault = 122 +order by fault; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop Left Join + Filter: ((COALESCE(tenk1.unique1, '-1'::integer) + int8_tbl.q1) = 122) + -> Seq Scan on int8_tbl + -> Index Scan using tenk1_unique2 on tenk1 + Index Cond: (unique2 = int8_tbl.q2) +(5 rows) + +select * from +( + select unique1, q1, coalesce(unique1, -1) + q1 as fault + from int8_tbl left join tenk1 on (q2 = unique2) +) ss +where fault = 122 +order by fault; + unique1 | q1 | fault +---------+-----+------- + | 123 | 122 +(1 row) + +explain (costs off) +select * from +(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys) +left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x +left join unnest(v1ys) as u1(u1y) on u1y = v2y; + QUERY PLAN +------------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Hash Right Join + Hash Cond: (u1.u1y = "*VALUES*_1".column2) + Filter: ("*VALUES*_1".column1 = "*VALUES*".column1) + -> Function Scan on unnest u1 + -> Hash + -> Values Scan on "*VALUES*_1" +(8 rows) + +select * from +(values (1, array[10,20]), (2, array[20,30])) as v1(v1x,v1ys) +left join (values (1, 10), (2, 20)) as v2(v2x,v2y) on v2x = v1x +left join unnest(v1ys) as u1(u1y) on u1y = v2y; + v1x | v1ys | v2x | v2y | u1y +-----+---------+-----+-----+----- + 1 | {10,20} | 1 | 10 | 10 + 2 | {20,30} | 2 | 20 | 20 +(2 rows) + +-- +-- test handling of potential equivalence clauses above outer joins +-- +explain (costs off) +select q1, unique2, thousand, hundred + from int8_tbl a left join tenk1 b on q1 = unique2 + where coalesce(thousand,123) = q1 and q1 = coalesce(hundred,123); + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Nested Loop Left Join + Filter: ((COALESCE(b.thousand, 123) = COALESCE(b.hundred, 123)) AND (a.q1 = COALESCE(b.hundred, 123))) + -> Seq Scan on int8_tbl a + -> Index Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 = a.q1) +(5 rows) + +select q1, unique2, thousand, hundred + from int8_tbl a left join tenk1 b on q1 = unique2 + where coalesce(thousand,123) = q1 and q1 = coalesce(hundred,123); + q1 | unique2 | thousand | hundred +----+---------+----------+--------- +(0 rows) + +explain (costs off) +select f1, unique2, case when unique2 is null then f1 else 0 end + from int4_tbl a left join tenk1 b on f1 = unique2 + where (case when unique2 is null then f1 else 
0 end) = 0; + QUERY PLAN +-------------------------------------------------------------------- + Nested Loop Left Join + Filter: (CASE WHEN (b.unique2 IS NULL) THEN a.f1 ELSE 0 END = 0) + -> Seq Scan on int4_tbl a + -> Index Only Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 = a.f1) +(5 rows) + +select f1, unique2, case when unique2 is null then f1 else 0 end + from int4_tbl a left join tenk1 b on f1 = unique2 + where (case when unique2 is null then f1 else 0 end) = 0; + f1 | unique2 | case +----+---------+------ + 0 | 0 | 0 +(1 row) + +-- +-- another case with equivalence clauses above outer joins (bug #8591) +-- +explain (costs off) +select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand) + from tenk1 a left join tenk1 b on b.thousand = a.unique1 left join tenk1 c on c.unique2 = coalesce(b.twothousand, a.twothousand) + where a.unique2 < 10 and coalesce(b.twothousand, a.twothousand) = 44; + QUERY PLAN +--------------------------------------------------------------- + Nested Loop Left Join + -> Nested Loop Left Join + Filter: (COALESCE(b.twothousand, a.twothousand) = 44) + -> Index Scan using tenk1_unique2 on tenk1 a + Index Cond: (unique2 < 10) + -> Bitmap Heap Scan on tenk1 b + Recheck Cond: (thousand = a.unique1) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = a.unique1) + -> Index Scan using tenk1_unique2 on tenk1 c + Index Cond: (unique2 = 44) +(11 rows) + +select a.unique1, b.unique1, c.unique1, coalesce(b.twothousand, a.twothousand) + from tenk1 a left join tenk1 b on b.thousand = a.unique1 left join tenk1 c on c.unique2 = coalesce(b.twothousand, a.twothousand) + where a.unique2 < 10 and coalesce(b.twothousand, a.twothousand) = 44; + unique1 | unique1 | unique1 | coalesce +---------+---------+---------+---------- +(0 rows) + +-- related case +explain (costs off) +select * from int8_tbl t1 left join int8_tbl t2 on t1.q2 = t2.q1, + lateral (select * from int8_tbl t3 where t2.q1 = t2.q2) ss; + QUERY PLAN +------------------------------------------- + Nested Loop + -> Hash Left Join + Hash Cond: (t1.q2 = t2.q1) + Filter: (t2.q1 = t2.q2) + -> Seq Scan on int8_tbl t1 + -> Hash + -> Seq Scan on int8_tbl t2 + -> Seq Scan on int8_tbl t3 +(8 rows) + +select * from int8_tbl t1 left join int8_tbl t2 on t1.q2 = t2.q1, + lateral (select * from int8_tbl t3 where t2.q1 = t2.q2) ss; + q1 | q2 | q1 | q2 | q1 | q2 +------------------+------------------+------------------+------------------+------------------+------------------- + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 456 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 456 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 +(10 rows) + +-- +-- check handling of join aliases when 
flattening multiple levels of subquery +-- +explain (verbose, costs off) +select foo1.join_key as foo1_id, foo3.join_key AS foo3_id, bug_field from + (values (0),(1)) foo1(join_key) +left join + (select join_key, bug_field from + (select ss1.join_key, ss1.bug_field from + (select f1 as join_key, 666 as bug_field from int4_tbl i1) ss1 + ) foo2 + left join + (select unique2 as join_key from tenk1 i2) ss2 + using (join_key) + ) foo3 +using (join_key); + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop Left Join + Output: "*VALUES*".column1, i1.f1, (666) + Join Filter: ("*VALUES*".column1 = i1.f1) + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + -> Materialize + Output: i1.f1, (666) + -> Nested Loop Left Join + Output: i1.f1, 666 + -> Seq Scan on public.int4_tbl i1 + Output: i1.f1 + -> Index Only Scan using tenk1_unique2 on public.tenk1 i2 + Output: i2.unique2 + Index Cond: (i2.unique2 = i1.f1) +(14 rows) + +select foo1.join_key as foo1_id, foo3.join_key AS foo3_id, bug_field from + (values (0),(1)) foo1(join_key) +left join + (select join_key, bug_field from + (select ss1.join_key, ss1.bug_field from + (select f1 as join_key, 666 as bug_field from int4_tbl i1) ss1 + ) foo2 + left join + (select unique2 as join_key from tenk1 i2) ss2 + using (join_key) + ) foo3 +using (join_key); + foo1_id | foo3_id | bug_field +---------+---------+----------- + 0 | 0 | 666 + 1 | | +(2 rows) + +-- +-- check handling of a variable-free join alias +-- +explain (verbose, costs off) +select * from +int4_tbl i0 left join +( (select *, 123 as x from int4_tbl i1) ss1 + left join + (select *, q2 as x from int8_tbl i2) ss2 + using (x) +) ss0 +on (i0.f1 = ss0.f1) +order by i0.f1, x; + QUERY PLAN +------------------------------------------------------------- + Sort + Output: i0.f1, ('123'::bigint), i1.f1, i2.q1, i2.q2 + Sort Key: i0.f1, ('123'::bigint) + -> Hash Right Join + Output: i0.f1, ('123'::bigint), i1.f1, i2.q1, i2.q2 + Hash Cond: (i1.f1 = i0.f1) + -> Nested Loop Left Join + Output: i1.f1, i2.q1, i2.q2, '123'::bigint + -> Seq Scan on public.int4_tbl i1 + Output: i1.f1 + -> Materialize + Output: i2.q1, i2.q2 + -> Seq Scan on public.int8_tbl i2 + Output: i2.q1, i2.q2 + Filter: (123 = i2.q2) + -> Hash + Output: i0.f1 + -> Seq Scan on public.int4_tbl i0 + Output: i0.f1 +(19 rows) + +select * from +int4_tbl i0 left join +( (select *, 123 as x from int4_tbl i1) ss1 + left join + (select *, q2 as x from int8_tbl i2) ss2 + using (x) +) ss0 +on (i0.f1 = ss0.f1) +order by i0.f1, x; + f1 | x | f1 | q1 | q2 +-------------+-----+-------------+------------------+----- + -2147483647 | 123 | -2147483647 | 4567890123456789 | 123 + -123456 | 123 | -123456 | 4567890123456789 | 123 + 0 | 123 | 0 | 4567890123456789 | 123 + 123456 | 123 | 123456 | 4567890123456789 | 123 + 2147483647 | 123 | 2147483647 | 4567890123456789 | 123 +(5 rows) + +-- +-- test successful handling of nested outer joins with degenerate join quals +-- +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + QUERY PLAN +---------------------------------------------------------------------- + Hash Left Join + Output: t1.f1 + Hash Cond: (i8.q2 = i4.f1) + -> Nested Loop Left Join + Output: t1.f1, i8.q2 + Join Filter: (t1.f1 = '***'::text) + -> 
Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q2 + -> Hash Right Join + Output: i8.q2 + Hash Cond: ((NULL::integer) = i8b1.q2) + -> Hash Join + Output: i8.q2, (NULL::integer) + Hash Cond: (i8.q1 = i8b2.q1) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Hash + Output: i8b2.q1, (NULL::integer) + -> Seq Scan on public.int8_tbl i8b2 + Output: i8b2.q1, NULL::integer + -> Hash + Output: i8b1.q2 + -> Seq Scan on public.int8_tbl i8b1 + Output: i8b1.q2 + -> Hash + Output: i4.f1 + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 +(30 rows) + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + f1 +------------------- + doh! + hi de ho neighbor +(2 rows) + +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + QUERY PLAN +---------------------------------------------------------------------------- + Hash Left Join + Output: t1.f1 + Hash Cond: (i8.q2 = i4.f1) + -> Nested Loop Left Join + Output: t1.f1, i8.q2 + Join Filter: (t1.f1 = '***'::text) + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q2 + -> Hash Right Join + Output: i8.q2 + Hash Cond: ((NULL::integer) = i8b1.q2) + -> Hash Right Join + Output: i8.q2, (NULL::integer) + Hash Cond: (i8b2.q1 = i8.q1) + -> Nested Loop + Output: i8b2.q1, NULL::integer + -> Seq Scan on public.int8_tbl i8b2 + Output: i8b2.q1, i8b2.q2 + -> Materialize + -> Seq Scan on public.int4_tbl i4b2 + -> Hash + Output: i8.q1, i8.q2 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Hash + Output: i8b1.q2 + -> Seq Scan on public.int8_tbl i8b1 + Output: i8b1.q2 + -> Hash + Output: i4.f1 + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 +(34 rows) + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + f1 +------------------- + doh! 
+ hi de ho neighbor +(2 rows) + +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2 + where q1 = f1) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + QUERY PLAN +---------------------------------------------------------------------------- + Hash Left Join + Output: t1.f1 + Hash Cond: (i8.q2 = i4.f1) + -> Nested Loop Left Join + Output: t1.f1, i8.q2 + Join Filter: (t1.f1 = '***'::text) + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q2 + -> Hash Right Join + Output: i8.q2 + Hash Cond: ((NULL::integer) = i8b1.q2) + -> Hash Right Join + Output: i8.q2, (NULL::integer) + Hash Cond: (i8b2.q1 = i8.q1) + -> Hash Join + Output: i8b2.q1, NULL::integer + Hash Cond: (i8b2.q1 = i4b2.f1) + -> Seq Scan on public.int8_tbl i8b2 + Output: i8b2.q1, i8b2.q2 + -> Hash + Output: i4b2.f1 + -> Seq Scan on public.int4_tbl i4b2 + Output: i4b2.f1 + -> Hash + Output: i8.q1, i8.q2 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Hash + Output: i8b1.q2 + -> Seq Scan on public.int8_tbl i8b1 + Output: i8b1.q2 + -> Hash + Output: i4.f1 + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 +(37 rows) + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2 + where q1 = f1) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + f1 +------------------- + doh! + hi de ho neighbor +(2 rows) + +explain (verbose, costs off) +select * from + text_tbl t1 + inner join int8_tbl i8 + on i8.q2 = 456 + right join text_tbl t2 + on t1.f1 = 'doh!' + left join int4_tbl i4 + on i8.q1 = i4.f1; + QUERY PLAN +-------------------------------------------------------- + Nested Loop Left Join + Output: t1.f1, i8.q1, i8.q2, t2.f1, i4.f1 + -> Seq Scan on public.text_tbl t2 + Output: t2.f1 + -> Materialize + Output: i8.q1, i8.q2, i4.f1, t1.f1 + -> Nested Loop + Output: i8.q1, i8.q2, i4.f1, t1.f1 + -> Nested Loop Left Join + Output: i8.q1, i8.q2, i4.f1 + Join Filter: (i8.q1 = i4.f1) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q2 = 456) + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + Filter: (t1.f1 = 'doh!'::text) +(19 rows) + +select * from + text_tbl t1 + inner join int8_tbl i8 + on i8.q2 = 456 + right join text_tbl t2 + on t1.f1 = 'doh!' + left join int4_tbl i4 + on i8.q1 = i4.f1; + f1 | q1 | q2 | f1 | f1 +------+-----+-----+-------------------+---- + doh! | 123 | 456 | doh! | + doh! 
| 123 | 456 | hi de ho neighbor | +(2 rows) + +-- check handling of a variable-free qual for a non-commutable outer join +explain (costs off) +select nspname +from (select 1 as x) ss1 +left join +( select n.nspname, c.relname + from pg_class c left join pg_namespace n on n.oid = c.relnamespace + where c.relkind = 'r' +) ss2 on false; + QUERY PLAN +-------------------------------- + Nested Loop Left Join + Join Filter: false + -> Result + -> Result + One-Time Filter: false +(5 rows) + +-- check handling of apparently-commutable outer joins with non-commutable +-- joins between them +explain (costs off) +select 1 from + int4_tbl i4 + left join int8_tbl i8 on i4.f1 is not null + left join (select 1 as a) ss1 on null + join int4_tbl i42 on ss1.a is null or i8.q1 <> i8.q2 + right join (select 2 as b) ss2 + on ss2.b < i4.f1; + QUERY PLAN +----------------------------------------------------------- + Nested Loop Left Join + -> Result + -> Nested Loop + -> Nested Loop Left Join + Join Filter: NULL::boolean + Filter: (((1) IS NULL) OR (i8.q1 <> i8.q2)) + -> Nested Loop Left Join + Join Filter: (i4.f1 IS NOT NULL) + -> Seq Scan on int4_tbl i4 + Filter: (2 < f1) + -> Materialize + -> Seq Scan on int8_tbl i8 + -> Result + One-Time Filter: false + -> Materialize + -> Seq Scan on int4_tbl i42 +(16 rows) + +-- +-- test for appropriate join order in the presence of lateral references +-- +explain (verbose, costs off) +select * from + text_tbl t1 + left join int8_tbl i8 + on i8.q2 = 123, + lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss +where t1.f1 = ss.f1; + QUERY PLAN +-------------------------------------------------- + Nested Loop + Output: t1.f1, i8.q1, i8.q2, (i8.q1), t2.f1 + Join Filter: (t1.f1 = t2.f1) + -> Nested Loop Left Join + Output: t1.f1, i8.q1, i8.q2 + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q1, i8.q2 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q2 = 123) + -> Memoize + Output: (i8.q1), t2.f1 + Cache Key: i8.q1 + Cache Mode: binary + -> Limit + Output: (i8.q1), t2.f1 + -> Seq Scan on public.text_tbl t2 + Output: i8.q1, t2.f1 +(20 rows) + +select * from + text_tbl t1 + left join int8_tbl i8 + on i8.q2 = 123, + lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss +where t1.f1 = ss.f1; + f1 | q1 | q2 | q1 | f1 +------+------------------+-----+------------------+------ + doh! | 4567890123456789 | 123 | 4567890123456789 | doh! 
+(1 row) + +explain (verbose, costs off) +select * from + text_tbl t1 + left join int8_tbl i8 + on i8.q2 = 123, + lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss1, + lateral (select ss1.* from text_tbl t3 limit 1) as ss2 +where t1.f1 = ss2.f1; + QUERY PLAN +------------------------------------------------------------------- + Nested Loop + Output: t1.f1, i8.q1, i8.q2, (i8.q1), t2.f1, ((i8.q1)), (t2.f1) + Join Filter: (t1.f1 = (t2.f1)) + -> Nested Loop + Output: t1.f1, i8.q1, i8.q2, (i8.q1), t2.f1 + -> Nested Loop Left Join + Output: t1.f1, i8.q1, i8.q2 + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q1, i8.q2 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q2 = 123) + -> Memoize + Output: (i8.q1), t2.f1 + Cache Key: i8.q1 + Cache Mode: binary + -> Limit + Output: (i8.q1), t2.f1 + -> Seq Scan on public.text_tbl t2 + Output: i8.q1, t2.f1 + -> Memoize + Output: ((i8.q1)), (t2.f1) + Cache Key: (i8.q1), t2.f1 + Cache Mode: binary + -> Limit + Output: ((i8.q1)), (t2.f1) + -> Seq Scan on public.text_tbl t3 + Output: (i8.q1), t2.f1 +(30 rows) + +select * from + text_tbl t1 + left join int8_tbl i8 + on i8.q2 = 123, + lateral (select i8.q1, t2.f1 from text_tbl t2 limit 1) as ss1, + lateral (select ss1.* from text_tbl t3 limit 1) as ss2 +where t1.f1 = ss2.f1; + f1 | q1 | q2 | q1 | f1 | q1 | f1 +------+------------------+-----+------------------+------+------------------+------ + doh! | 4567890123456789 | 123 | 4567890123456789 | doh! | 4567890123456789 | doh! +(1 row) + +explain (verbose, costs off) +select 1 from + text_tbl as tt1 + inner join text_tbl as tt2 on (tt1.f1 = 'foo') + left join text_tbl as tt3 on (tt3.f1 = 'foo') + left join text_tbl as tt4 on (tt3.f1 = tt4.f1), + lateral (select tt4.f1 as c0 from text_tbl as tt5 limit 1) as ss1 +where tt1.f1 = ss1.c0; + QUERY PLAN +---------------------------------------------------------- + Nested Loop + Output: 1 + -> Nested Loop Left Join + Output: tt1.f1, tt4.f1 + -> Nested Loop + Output: tt1.f1 + -> Seq Scan on public.text_tbl tt1 + Output: tt1.f1 + Filter: (tt1.f1 = 'foo'::text) + -> Seq Scan on public.text_tbl tt2 + Output: tt2.f1 + -> Materialize + Output: tt4.f1 + -> Nested Loop Left Join + Output: tt4.f1 + -> Seq Scan on public.text_tbl tt3 + Output: tt3.f1 + Filter: (tt3.f1 = 'foo'::text) + -> Seq Scan on public.text_tbl tt4 + Output: tt4.f1 + Filter: (tt4.f1 = 'foo'::text) + -> Memoize + Output: ss1.c0 + Cache Key: tt4.f1 + Cache Mode: binary + -> Subquery Scan on ss1 + Output: ss1.c0 + Filter: (ss1.c0 = 'foo'::text) + -> Limit + Output: (tt4.f1) + -> Seq Scan on public.text_tbl tt5 + Output: tt4.f1 +(32 rows) + +select 1 from + text_tbl as tt1 + inner join text_tbl as tt2 on (tt1.f1 = 'foo') + left join text_tbl as tt3 on (tt3.f1 = 'foo') + left join text_tbl as tt4 on (tt3.f1 = tt4.f1), + lateral (select tt4.f1 as c0 from text_tbl as tt5 limit 1) as ss1 +where tt1.f1 = ss1.c0; + ?column? 
+---------- +(0 rows) + +explain (verbose, costs off) +select 1 from + int4_tbl as i4 + inner join + ((select 42 as n from int4_tbl x1 left join int8_tbl x2 on f1 = q1) as ss1 + right join (select 1 as z) as ss2 on true) + on false, + lateral (select i4.f1, ss1.n from int8_tbl as i8 limit 1) as ss3; + QUERY PLAN +-------------------------- + Result + Output: 1 + One-Time Filter: false +(3 rows) + +select 1 from + int4_tbl as i4 + inner join + ((select 42 as n from int4_tbl x1 left join int8_tbl x2 on f1 = q1) as ss1 + right join (select 1 as z) as ss2 on true) + on false, + lateral (select i4.f1, ss1.n from int8_tbl as i8 limit 1) as ss3; + ?column? +---------- +(0 rows) + +-- +-- check a case where we formerly generated invalid parameterized paths +-- +begin; +create temp table t (a int unique); +explain (costs off) +select 1 from t t1 + join lateral (select t1.a from (select 1) foo offset 0) as s1 on true + join + (select 1 from t t2 + inner join (t t3 + left join (t t4 left join t t5 on t4.a = 1) + on t3.a = t4.a) + on false + where t3.a = coalesce(t5.a,1)) as s2 + on true; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +rollback; +-- +-- check a case in which a PlaceHolderVar forces join order +-- +explain (verbose, costs off) +select ss2.* from + int4_tbl i41 + left join int8_tbl i8 + join (select i42.f1 as c1, i43.f1 as c2, 42 as c3 + from int4_tbl i42, int4_tbl i43) ss1 + on i8.q1 = ss1.c2 + on i41.f1 = ss1.c1, + lateral (select i41.*, i8.*, ss1.* from text_tbl limit 1) ss2 +where ss1.c2 = 0; + QUERY PLAN +------------------------------------------------------------------------ + Nested Loop + Output: (i41.f1), (i8.q1), (i8.q2), (i42.f1), (i43.f1), ((42)) + -> Hash Join + Output: i41.f1, i42.f1, i8.q1, i8.q2, i43.f1, 42 + Hash Cond: (i41.f1 = i42.f1) + -> Nested Loop + Output: i8.q1, i8.q2, i43.f1, i41.f1 + -> Nested Loop + Output: i8.q1, i8.q2, i43.f1 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q1 = 0) + -> Seq Scan on public.int4_tbl i43 + Output: i43.f1 + Filter: (i43.f1 = 0) + -> Seq Scan on public.int4_tbl i41 + Output: i41.f1 + -> Hash + Output: i42.f1 + -> Seq Scan on public.int4_tbl i42 + Output: i42.f1 + -> Limit + Output: (i41.f1), (i8.q1), (i8.q2), (i42.f1), (i43.f1), ((42)) + -> Seq Scan on public.text_tbl + Output: i41.f1, i8.q1, i8.q2, i42.f1, i43.f1, (42) +(25 rows) + +select ss2.* from + int4_tbl i41 + left join int8_tbl i8 + join (select i42.f1 as c1, i43.f1 as c2, 42 as c3 + from int4_tbl i42, int4_tbl i43) ss1 + on i8.q1 = ss1.c2 + on i41.f1 = ss1.c1, + lateral (select i41.*, i8.*, ss1.* from text_tbl limit 1) ss2 +where ss1.c2 = 0; + f1 | q1 | q2 | c1 | c2 | c3 +----+----+----+----+----+---- +(0 rows) + +-- +-- test successful handling of full join underneath left join (bug #14105) +-- +explain (costs off) +select * from + (select 1 as id) as xx + left join + (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id)) + on (xx.id = coalesce(yy.id)); + QUERY PLAN +--------------------------------------- + Nested Loop Left Join + -> Result + -> Hash Full Join + Hash Cond: (a1.unique1 = (1)) + Filter: (1 = COALESCE((1))) + -> Seq Scan on tenk1 a1 + -> Hash + -> Result +(8 rows) + +select * from + (select 1 as id) as xx + left join + (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id)) + on (xx.id = coalesce(yy.id)); + id | unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
| id +----+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+---- + 1 | 1 | 2838 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | EFEAAA | OOOOxx | 1 +(1 row) + +-- +-- test ability to push constants through outer join clauses +-- +explain (costs off) + select * from int4_tbl a left join tenk1 b on f1 = unique2 where f1 = 0; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int4_tbl a + Filter: (f1 = 0) + -> Index Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 = 0) +(5 rows) + +explain (costs off) + select * from tenk1 a full join tenk1 b using(unique2) where unique2 = 42; + QUERY PLAN +------------------------------------------------- + Merge Full Join + -> Index Scan using tenk1_unique2 on tenk1 a + Index Cond: (unique2 = 42) + -> Index Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 = 42) +(5 rows) + +-- +-- test that quals attached to an outer join have correct semantics, +-- specifically that they don't re-use expressions computed below the join; +-- we force a mergejoin so that coalesce(b.q1, 1) appears as a join input +-- +set enable_hashjoin to off; +set enable_nestloop to off; +explain (verbose, costs off) + select a.q2, b.q1 + from int8_tbl a left join int8_tbl b on a.q2 = coalesce(b.q1, 1) + where coalesce(b.q1, 1) > 0; + QUERY PLAN +--------------------------------------------------------- + Merge Left Join + Output: a.q2, b.q1 + Merge Cond: (a.q2 = (COALESCE(b.q1, '1'::bigint))) + Filter: (COALESCE(b.q1, '1'::bigint) > 0) + -> Sort + Output: a.q2 + Sort Key: a.q2 + -> Seq Scan on public.int8_tbl a + Output: a.q2 + -> Sort + Output: b.q1, (COALESCE(b.q1, '1'::bigint)) + Sort Key: (COALESCE(b.q1, '1'::bigint)) + -> Seq Scan on public.int8_tbl b + Output: b.q1, COALESCE(b.q1, '1'::bigint) +(14 rows) + +select a.q2, b.q1 + from int8_tbl a left join int8_tbl b on a.q2 = coalesce(b.q1, 1) + where coalesce(b.q1, 1) > 0; + q2 | q1 +-------------------+------------------ + -4567890123456789 | + 123 | 123 + 123 | 123 + 456 | + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(10 rows) + +reset enable_hashjoin; +reset enable_nestloop; +-- +-- test join strength reduction with a SubPlan providing the proof +-- +explain (costs off) +select a.unique1, b.unique2 + from onek a left join onek b on a.unique1 = b.unique2 + where b.unique2 = any (select q1 from int8_tbl c where c.q1 < b.unique1); + QUERY PLAN +---------------------------------------------------------- + Hash Join + Hash Cond: (b.unique2 = a.unique1) + -> Seq Scan on onek b + Filter: (SubPlan 1) + SubPlan 1 + -> Seq Scan on int8_tbl c + Filter: (q1 < b.unique1) + -> Hash + -> Index Only Scan using onek_unique1 on onek a +(9 rows) + +select a.unique1, b.unique2 + from onek a left join onek b on a.unique1 = b.unique2 + where b.unique2 = any (select q1 from int8_tbl c where c.q1 < b.unique1); + unique1 | unique2 +---------+--------- + 123 | 123 +(1 row) + +-- +-- test full-join strength reduction +-- +explain (costs off) +select a.unique1, b.unique2 + from onek a full join onek b on a.unique1 = b.unique2 + where a.unique1 = 42; + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + -> Index Only Scan using onek_unique1 on onek a + Index 
Cond: (unique1 = 42) + -> Index Only Scan using onek_unique2 on onek b + Index Cond: (unique2 = 42) +(5 rows) + +select a.unique1, b.unique2 + from onek a full join onek b on a.unique1 = b.unique2 + where a.unique1 = 42; + unique1 | unique2 +---------+--------- + 42 | 42 +(1 row) + +explain (costs off) +select a.unique1, b.unique2 + from onek a full join onek b on a.unique1 = b.unique2 + where b.unique2 = 43; + QUERY PLAN +---------------------------------------------------- + Nested Loop Left Join + -> Index Only Scan using onek_unique2 on onek b + Index Cond: (unique2 = 43) + -> Index Only Scan using onek_unique1 on onek a + Index Cond: (unique1 = 43) +(5 rows) + +select a.unique1, b.unique2 + from onek a full join onek b on a.unique1 = b.unique2 + where b.unique2 = 43; + unique1 | unique2 +---------+--------- + 43 | 43 +(1 row) + +explain (costs off) +select a.unique1, b.unique2 + from onek a full join onek b on a.unique1 = b.unique2 + where a.unique1 = 42 and b.unique2 = 42; + QUERY PLAN +---------------------------------------------------- + Nested Loop + -> Index Only Scan using onek_unique1 on onek a + Index Cond: (unique1 = 42) + -> Index Only Scan using onek_unique2 on onek b + Index Cond: (unique2 = 42) +(5 rows) + +select a.unique1, b.unique2 + from onek a full join onek b on a.unique1 = b.unique2 + where a.unique1 = 42 and b.unique2 = 42; + unique1 | unique2 +---------+--------- + 42 | 42 +(1 row) + +-- +-- test result-RTE removal underneath a full join +-- +explain (costs off) +select * from + (select * from int8_tbl i81 join (values(123,2)) v(v1,v2) on q2=v1) ss1 +full join + (select * from (values(456,2)) w(v1,v2) join int8_tbl i82 on q2=v1) ss2 +on true; + QUERY PLAN +-------------------------------------- + Merge Full Join + -> Seq Scan on int8_tbl i81 + Filter: (q2 = 123) + -> Materialize + -> Seq Scan on int8_tbl i82 + Filter: (q2 = 456) +(6 rows) + +select * from + (select * from int8_tbl i81 join (values(123,2)) v(v1,v2) on q2=v1) ss1 +full join + (select * from (values(456,2)) w(v1,v2) join int8_tbl i82 on q2=v1) ss2 +on true; + q1 | q2 | v1 | v2 | v1 | v2 | q1 | q2 +------------------+-----+-----+----+-----+----+-----+----- + 4567890123456789 | 123 | 123 | 2 | 456 | 2 | 123 | 456 +(1 row) + +-- +-- test join removal +-- +begin; +CREATE TEMP TABLE a (id int PRIMARY KEY, b_id int); +CREATE TEMP TABLE b (id int PRIMARY KEY, c_id int); +CREATE TEMP TABLE c (id int PRIMARY KEY); +CREATE TEMP TABLE d (a int, b int); +INSERT INTO a VALUES (0, 0), (1, NULL); +INSERT INTO b VALUES (0, 0), (1, NULL); +INSERT INTO c VALUES (0), (1); +INSERT INTO d VALUES (1,3), (2,2), (3,1); +-- all three cases should be optimizable into a simple seqscan +explain (costs off) SELECT a.* FROM a LEFT JOIN b ON a.b_id = b.id; + QUERY PLAN +--------------- + Seq Scan on a +(1 row) + +explain (costs off) SELECT b.* FROM b LEFT JOIN c ON b.c_id = c.id; + QUERY PLAN +--------------- + Seq Scan on b +(1 row) + +explain (costs off) + SELECT a.* FROM a LEFT JOIN (b left join c on b.c_id = c.id) + ON (a.b_id = b.id); + QUERY PLAN +--------------- + Seq Scan on a +(1 row) + +-- check optimization of outer join within another special join +explain (costs off) +select id from a where id in ( + select b.id from b left join c on b.id = c.id +); + QUERY PLAN +---------------------------- + Hash Join + Hash Cond: (a.id = b.id) + -> Seq Scan on a + -> Hash + -> Seq Scan on b +(5 rows) + +-- check optimization with oddly-nested outer joins +explain (costs off) +select a1.id from + (a a1 left join a a2 on true) + 
left join + (a a3 left join a a4 on a3.id = a4.id) + on a2.id = a3.id; + QUERY PLAN +------------------------------ + Nested Loop Left Join + -> Seq Scan on a a1 + -> Materialize + -> Seq Scan on a a2 +(4 rows) + +explain (costs off) +select a1.id from + (a a1 left join a a2 on a1.id = a2.id) + left join + (a a3 left join a a4 on a3.id = a4.id) + on a2.id = a3.id; + QUERY PLAN +------------------ + Seq Scan on a a1 +(1 row) + +explain (costs off) +select 1 from a t1 + left join a t2 on true + inner join a t3 on true + left join a t4 on t2.id = t4.id and t2.id = t3.id; + QUERY PLAN +------------------------------------ + Nested Loop + -> Nested Loop Left Join + -> Seq Scan on a t1 + -> Materialize + -> Seq Scan on a t2 + -> Materialize + -> Seq Scan on a t3 +(7 rows) + +-- another example (bug #17781) +explain (costs off) +select ss1.f1 +from int4_tbl as t1 + left join (int4_tbl as t2 + right join int4_tbl as t3 on null + left join (int4_tbl as t4 + right join int8_tbl as t5 on null) + on t2.f1 = t4.f1 + left join ((select null as f1 from int4_tbl as t6) as ss1 + inner join int8_tbl as t7 on null) + on t5.q1 = t7.q2) + on false; + QUERY PLAN +-------------------------------- + Nested Loop Left Join + Join Filter: false + -> Seq Scan on int4_tbl t1 + -> Result + One-Time Filter: false +(5 rows) + +-- variant with Var rather than PHV coming from t6 +explain (costs off) +select ss1.f1 +from int4_tbl as t1 + left join (int4_tbl as t2 + right join int4_tbl as t3 on null + left join (int4_tbl as t4 + right join int8_tbl as t5 on null) + on t2.f1 = t4.f1 + left join ((select f1 from int4_tbl as t6) as ss1 + inner join int8_tbl as t7 on null) + on t5.q1 = t7.q2) + on false; + QUERY PLAN +-------------------------------- + Nested Loop Left Join + Join Filter: false + -> Seq Scan on int4_tbl t1 + -> Result + One-Time Filter: false +(5 rows) + +-- per further discussion of bug #17781 +explain (costs off) +select ss1.x +from (select f1/2 as x from int4_tbl i4 left join a on a.id = i4.f1) ss1 + right join int8_tbl i8 on true +where current_user is not null; -- this is to add a Result node + QUERY PLAN +----------------------------------------------- + Result + One-Time Filter: (CURRENT_USER IS NOT NULL) + -> Nested Loop Left Join + -> Seq Scan on int8_tbl i8 + -> Materialize + -> Seq Scan on int4_tbl i4 +(6 rows) + +-- and further discussion of bug #17781 +explain (costs off) +select * +from int8_tbl t1 + left join (int8_tbl t2 left join onek t3 on t2.q1 > t3.unique1) + on t1.q2 = t2.q2 + left join onek t4 + on t2.q2 < t3.unique2; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + Join Filter: (t2.q2 < t3.unique2) + -> Nested Loop Left Join + Join Filter: (t2.q1 > t3.unique1) + -> Hash Left Join + Hash Cond: (t1.q2 = t2.q2) + -> Seq Scan on int8_tbl t1 + -> Hash + -> Seq Scan on int8_tbl t2 + -> Materialize + -> Seq Scan on onek t3 + -> Materialize + -> Seq Scan on onek t4 +(13 rows) + +-- More tests of correct placement of pseudoconstant quals +-- simple constant-false condition +explain (costs off) +select * from int8_tbl t1 left join + (int8_tbl t2 inner join int8_tbl t3 on false + left join int8_tbl t4 on t2.q2 = t4.q2) +on t1.q1 = t2.q1; + QUERY PLAN +-------------------------------------- + Hash Left Join + Hash Cond: (t1.q1 = q1) + -> Seq Scan on int8_tbl t1 + -> Hash + -> Result + One-Time Filter: false +(6 rows) + +-- deduce constant-false from an EquivalenceClass +explain (costs off) +select * from int8_tbl t1 left join + (int8_tbl t2 inner join int8_tbl 
t3 on (t2.q1-t3.q2) = 0 and (t2.q1-t3.q2) = 1 + left join int8_tbl t4 on t2.q2 = t4.q2) +on t1.q1 = t2.q1; + QUERY PLAN +-------------------------------------- + Hash Left Join + Hash Cond: (t1.q1 = q1) + -> Seq Scan on int8_tbl t1 + -> Hash + -> Result + One-Time Filter: false +(6 rows) + +-- pseudoconstant based on an outer-level Param +explain (costs off) +select exists( + select * from int8_tbl t1 left join + (int8_tbl t2 inner join int8_tbl t3 on x0.f1 = 1 + left join int8_tbl t4 on t2.q2 = t4.q2) + on t1.q1 = t2.q1 +) from int4_tbl x0; + QUERY PLAN +--------------------------------------------------------------------- + Seq Scan on int4_tbl x0 + SubPlan 1 + -> Nested Loop Left Join + Join Filter: (t2.q2 = t4.q2) + -> Nested Loop Left Join + Join Filter: (t1.q1 = t2.q1) + -> Seq Scan on int8_tbl t1 + -> Materialize + -> Result + One-Time Filter: (x0.f1 = 1) + -> Nested Loop + -> Seq Scan on int8_tbl t2 + -> Materialize + -> Seq Scan on int8_tbl t3 + -> Materialize + -> Seq Scan on int8_tbl t4 +(16 rows) + +-- check that join removal works for a left join when joining a subquery +-- that is guaranteed to be unique by its GROUP BY clause +explain (costs off) +select d.* from d left join (select * from b group by b.id, b.c_id) s + on d.a = s.id and d.b = s.c_id; + QUERY PLAN +--------------- + Seq Scan on d +(1 row) + +-- similarly, but keying off a DISTINCT clause +explain (costs off) +select d.* from d left join (select distinct * from b) s + on d.a = s.id and d.b = s.c_id; + QUERY PLAN +--------------- + Seq Scan on d +(1 row) + +-- join removal is not possible when the GROUP BY contains a column that is +-- not in the join condition. (Note: as of 9.6, we notice that b.id is a +-- primary key and so drop b.c_id from the GROUP BY of the resulting plan; +-- but this happens too late for join removal in the outer plan level.) 
+explain (costs off) +select d.* from d left join (select * from b group by b.id, b.c_id) s + on d.a = s.id; + QUERY PLAN +------------------------------------------ + Merge Right Join + Merge Cond: (b.id = d.a) + -> Group + Group Key: b.id + -> Index Scan using b_pkey on b + -> Sort + Sort Key: d.a + -> Seq Scan on d +(8 rows) + +-- similarly, but keying off a DISTINCT clause +explain (costs off) +select d.* from d left join (select distinct * from b) s + on d.a = s.id; + QUERY PLAN +-------------------------------------- + Merge Right Join + Merge Cond: (b.id = d.a) + -> Unique + -> Sort + Sort Key: b.id, b.c_id + -> Seq Scan on b + -> Sort + Sort Key: d.a + -> Seq Scan on d +(9 rows) + +-- join removal is not possible here +explain (costs off) +select 1 from a t1 + left join (a t2 left join a t3 on t2.id = 1) on t2.id = 1; + QUERY PLAN +-------------------------------------------------------- + Nested Loop Left Join + -> Seq Scan on a t1 + -> Materialize + -> Nested Loop Left Join + Join Filter: (t2.id = 1) + -> Index Only Scan using a_pkey on a t2 + Index Cond: (id = 1) + -> Seq Scan on a t3 +(8 rows) + +-- check join removal works when uniqueness of the join condition is enforced +-- by a UNION +explain (costs off) +select d.* from d left join (select id from a union select id from b) s + on d.a = s.id; + QUERY PLAN +--------------- + Seq Scan on d +(1 row) + +-- check join removal with a cross-type comparison operator +explain (costs off) +select i8.* from int8_tbl i8 left join (select f1 from int4_tbl group by f1) i4 + on i8.q1 = i4.f1; + QUERY PLAN +------------------------- + Seq Scan on int8_tbl i8 +(1 row) + +-- check join removal with lateral references +explain (costs off) +select 1 from (select a.id FROM a left join b on a.b_id = b.id) q, + lateral generate_series(1, q.id) gs(i) where q.id = gs.i; + QUERY PLAN +------------------------------------------- + Nested Loop + -> Seq Scan on a + -> Function Scan on generate_series gs + Filter: (a.id = i) +(4 rows) + +-- check join removal within RHS of an outer join +explain (costs off) +select c.id, ss.a from c + left join (select d.a from onerow, d left join b on d.a = b.id) ss + on c.id = ss.a; + QUERY PLAN +-------------------------------- + Hash Right Join + Hash Cond: (d.a = c.id) + -> Nested Loop + -> Seq Scan on onerow + -> Seq Scan on d + -> Hash + -> Seq Scan on c +(7 rows) + +CREATE TEMP TABLE parted_b (id int PRIMARY KEY) partition by range(id); +CREATE TEMP TABLE parted_b1 partition of parted_b for values from (0) to (10); +-- test join removals on a partitioned table +explain (costs off) +select a.* from a left join parted_b pb on a.b_id = pb.id; + QUERY PLAN +--------------- + Seq Scan on a +(1 row) + +rollback; +create temp table parent (k int primary key, pd int); +create temp table child (k int unique, cd int); +insert into parent values (1, 10), (2, 20), (3, 30); +insert into child values (1, 100), (4, 400); +-- this case is optimizable +select p.* from parent p left join child c on (p.k = c.k); + k | pd +---+---- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +explain (costs off) + select p.* from parent p left join child c on (p.k = c.k); + QUERY PLAN +---------------------- + Seq Scan on parent p +(1 row) + +-- this case is not +select p.*, linked from parent p + left join (select c.*, true as linked from child c) as ss + on (p.k = ss.k); + k | pd | linked +---+----+-------- + 1 | 10 | t + 2 | 20 | + 3 | 30 | +(3 rows) + +explain (costs off) + select p.*, linked from parent p + left join (select c.*, true as linked 
from child c) as ss + on (p.k = ss.k); + QUERY PLAN +--------------------------------- + Hash Left Join + Hash Cond: (p.k = c.k) + -> Seq Scan on parent p + -> Hash + -> Seq Scan on child c +(5 rows) + +-- check for a 9.0rc1 bug: join removal breaks pseudoconstant qual handling +select p.* from + parent p left join child c on (p.k = c.k) + where p.k = 1 and p.k = 2; + k | pd +---+---- +(0 rows) + +explain (costs off) +select p.* from + parent p left join child c on (p.k = c.k) + where p.k = 1 and p.k = 2; + QUERY PLAN +------------------------------------------------ + Result + One-Time Filter: false + -> Index Scan using parent_pkey on parent p + Index Cond: (k = 1) +(4 rows) + +select p.* from + (parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k + where p.k = 1 and p.k = 2; + k | pd +---+---- +(0 rows) + +explain (costs off) +select p.* from + (parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k + where p.k = 1 and p.k = 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- bug 5255: this is not optimizable by join removal +begin; +CREATE TEMP TABLE a (id int PRIMARY KEY); +CREATE TEMP TABLE b (id int PRIMARY KEY, a_id int); +INSERT INTO a VALUES (0), (1); +INSERT INTO b VALUES (0, 0), (1, NULL); +SELECT * FROM b LEFT JOIN a ON (b.a_id = a.id) WHERE (a.id IS NULL OR a.id > 0); + id | a_id | id +----+------+---- + 1 | | +(1 row) + +SELECT b.* FROM b LEFT JOIN a ON (b.a_id = a.id) WHERE (a.id IS NULL OR a.id > 0); + id | a_id +----+------ + 1 | +(1 row) + +rollback; +-- another join removal bug: this is not optimizable, either +begin; +create temp table innertab (id int8 primary key, dat1 int8); +insert into innertab values(123, 42); +SELECT * FROM + (SELECT 1 AS x) ss1 + LEFT JOIN + (SELECT q1, q2, COALESCE(dat1, q1) AS y + FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss2 + ON true; + x | q1 | q2 | y +---+------------------+-------------------+------------------ + 1 | 123 | 456 | 123 + 1 | 123 | 4567890123456789 | 123 + 1 | 4567890123456789 | 123 | 42 + 1 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 1 | 4567890123456789 | -4567890123456789 | 4567890123456789 +(5 rows) + +-- join removal bug #17769: can't remove if there's a pushed-down reference +EXPLAIN (COSTS OFF) +SELECT q2 FROM + (SELECT * + FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss + WHERE COALESCE(dat1, 0) = q1; + QUERY PLAN +---------------------------------------------------------------- + Nested Loop Left Join + Filter: (COALESCE(innertab.dat1, '0'::bigint) = int8_tbl.q1) + -> Seq Scan on int8_tbl + -> Index Scan using innertab_pkey on innertab + Index Cond: (id = int8_tbl.q2) +(5 rows) + +-- join removal bug #17773: otherwise-removable PHV appears in a qual condition +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q2 FROM + (SELECT q2, 'constant'::text AS x + FROM int8_tbl LEFT JOIN innertab ON q2 = id) ss + RIGHT JOIN int4_tbl ON NULL + WHERE x >= x; + QUERY PLAN +------------------------------------------------------ + Nested Loop Left Join + Output: q2 + Join Filter: NULL::boolean + Filter: (('constant'::text) >= ('constant'::text)) + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 + -> Result + Output: q2, 'constant'::text + One-Time Filter: false +(9 rows) + +-- join removal bug #17786: check that OR conditions are cleaned up +EXPLAIN (COSTS OFF) +SELECT f1, x +FROM int4_tbl + JOIN ((SELECT 42 AS x FROM int8_tbl LEFT JOIN innertab ON q1 = id) AS ss1 + RIGHT JOIN tenk1 ON NULL) + ON tenk1.unique1 = ss1.x OR tenk1.unique2 = ss1.x; + 
QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop + -> Seq Scan on int4_tbl + -> Materialize + -> Nested Loop Left Join + Join Filter: NULL::boolean + Filter: ((tenk1.unique1 = (42)) OR (tenk1.unique2 = (42))) + -> Seq Scan on tenk1 + -> Result + One-Time Filter: false +(9 rows) + +rollback; +-- another join removal bug: we must clean up correctly when removing a PHV +begin; +create temp table uniquetbl (f1 text unique); +explain (costs off) +select t1.* from + uniquetbl as t1 + left join (select *, '***'::text as d1 from uniquetbl) t2 + on t1.f1 = t2.f1 + left join uniquetbl t3 + on t2.d1 = t3.f1; + QUERY PLAN +-------------------------- + Seq Scan on uniquetbl t1 +(1 row) + +explain (costs off) +select t0.* +from + text_tbl t0 + left join + (select case t1.ten when 0 then 'doh!'::text else null::text end as case1, + t1.stringu2 + from tenk1 t1 + join int4_tbl i4 ON i4.f1 = t1.unique2 + left join uniquetbl u1 ON u1.f1 = t1.string4) ss + on t0.f1 = ss.case1 +where ss.stringu2 !~* ss.case1; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Nested Loop + Join Filter: (t0.f1 = CASE t1.ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END) + -> Nested Loop + -> Seq Scan on int4_tbl i4 + -> Index Scan using tenk1_unique2 on tenk1 t1 + Index Cond: (unique2 = i4.f1) + Filter: (stringu2 !~* CASE ten WHEN 0 THEN 'doh!'::text ELSE NULL::text END) + -> Materialize + -> Seq Scan on text_tbl t0 +(9 rows) + +select t0.* +from + text_tbl t0 + left join + (select case t1.ten when 0 then 'doh!'::text else null::text end as case1, + t1.stringu2 + from tenk1 t1 + join int4_tbl i4 ON i4.f1 = t1.unique2 + left join uniquetbl u1 ON u1.f1 = t1.string4) ss + on t0.f1 = ss.case1 +where ss.stringu2 !~* ss.case1; + f1 +------ + doh! +(1 row) + +rollback; +-- another join removal bug: we must clean up EquivalenceClasses too +begin; +create temp table t (a int unique); +insert into t values (1); +explain (costs off) +select 1 +from t t1 + left join (select 2 as c + from t t2 left join t t3 on t2.a = t3.a) s + on true +where t1.a = s.c; + QUERY PLAN +------------------------------ + Nested Loop Left Join + Filter: (t1.a = (2)) + -> Seq Scan on t t1 + -> Materialize + -> Seq Scan on t t2 +(5 rows) + +select 1 +from t t1 + left join (select 2 as c + from t t2 left join t t3 on t2.a = t3.a) s + on true +where t1.a = s.c; + ?column? 
+---------- +(0 rows) + +rollback; +-- test cases where we can remove a join, but not a PHV computed at it +begin; +create temp table t (a int unique, b int); +insert into t values (1,1), (2,2); +explain (costs off) +select 1 +from t t1 + left join (select t2.a, 1 as c + from t t2 left join t t3 on t2.a = t3.a) s + on true + left join t t4 on true +where s.a < s.c; + QUERY PLAN +------------------------------------- + Nested Loop Left Join + -> Nested Loop + -> Seq Scan on t t1 + -> Materialize + -> Seq Scan on t t2 + Filter: (a < 1) + -> Materialize + -> Seq Scan on t t4 +(8 rows) + +explain (costs off) +select t1.a, s.* +from t t1 + left join lateral (select t2.a, coalesce(t1.a, 1) as c + from t t2 left join t t3 on t2.a = t3.a) s + on true + left join t t4 on true +where s.a < s.c; + QUERY PLAN +----------------------------------------------- + Nested Loop Left Join + -> Nested Loop + -> Seq Scan on t t1 + -> Seq Scan on t t2 + Filter: (a < COALESCE(t1.a, 1)) + -> Materialize + -> Seq Scan on t t4 +(7 rows) + +select t1.a, s.* +from t t1 + left join lateral (select t2.a, coalesce(t1.a, 1) as c + from t t2 left join t t3 on t2.a = t3.a) s + on true + left join t t4 on true +where s.a < s.c; + a | a | c +---+---+--- + 2 | 1 | 2 + 2 | 1 | 2 +(2 rows) + +rollback; +-- test case to expose miscomputation of required relid set for a PHV +explain (verbose, costs off) +select i8.*, ss.v, t.unique2 + from int8_tbl i8 + left join int4_tbl i4 on i4.f1 = 1 + left join lateral (select i4.f1 + 1 as v) as ss on true + left join tenk1 t on t.unique2 = ss.v +where q2 = 456; + QUERY PLAN +------------------------------------------------------------- + Nested Loop Left Join + Output: i8.q1, i8.q2, ((i4.f1 + 1)), t.unique2 + -> Nested Loop Left Join + Output: i8.q1, i8.q2, (i4.f1 + 1) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + Filter: (i8.q2 = 456) + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 + Filter: (i4.f1 = 1) + -> Index Only Scan using tenk1_unique2 on public.tenk1 t + Output: t.unique2 + Index Cond: (t.unique2 = ((i4.f1 + 1))) +(13 rows) + +select i8.*, ss.v, t.unique2 + from int8_tbl i8 + left join int4_tbl i4 on i4.f1 = 1 + left join lateral (select i4.f1 + 1 as v) as ss on true + left join tenk1 t on t.unique2 = ss.v +where q2 = 456; + q1 | q2 | v | unique2 +-----+-----+---+--------- + 123 | 456 | | +(1 row) + +-- and check a related issue where we miscompute required relids for +-- a PHV that's been translated to a child rel +create temp table parttbl (a integer primary key) partition by range (a); +create temp table parttbl1 partition of parttbl for values from (1) to (100); +insert into parttbl values (11), (12); +explain (costs off) +select * from + (select *, 12 as phv from parttbl) as ss + right join int4_tbl on true +where ss.a = ss.phv and f1 = 0; + QUERY PLAN +------------------------------------ + Nested Loop + -> Seq Scan on int4_tbl + Filter: (f1 = 0) + -> Seq Scan on parttbl1 parttbl + Filter: (a = 12) +(5 rows) + +select * from + (select *, 12 as phv from parttbl) as ss + right join int4_tbl on true +where ss.a = ss.phv and f1 = 0; + a | phv | f1 +----+-----+---- + 12 | 12 | 0 +(1 row) + +-- bug #8444: we've historically allowed duplicate aliases within aliased JOINs +select * from + int8_tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = f1; -- error +ERROR: column reference "f1" is ambiguous +LINE 2: ..._tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = f1; + ^ +select * from + int8_tbl x join (int4_tbl x cross join int4_tbl y) j on q1 = y.f1; -- 
error +ERROR: invalid reference to FROM-clause entry for table "y" +LINE 2: ...bl x join (int4_tbl x cross join int4_tbl y) j on q1 = y.f1; + ^ +DETAIL: There is an entry for table "y", but it cannot be referenced from this part of the query. +select * from + int8_tbl x join (int4_tbl x cross join int4_tbl y(ff)) j on q1 = f1; -- ok + q1 | q2 | f1 | ff +----+----+----+---- +(0 rows) + +-- +-- Test hints given on incorrect column references are useful +-- +select t1.uunique1 from + tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion +ERROR: column t1.uunique1 does not exist +LINE 1: select t1.uunique1 from + ^ +HINT: Perhaps you meant to reference the column "t1.unique1". +select t2.uunique1 from + tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion +ERROR: column t2.uunique1 does not exist +LINE 1: select t2.uunique1 from + ^ +HINT: Perhaps you meant to reference the column "t2.unique1". +select uunique1 from + tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, suggest both at once +ERROR: column "uunique1" does not exist +LINE 1: select uunique1 from + ^ +HINT: Perhaps you meant to reference the column "t1.unique1" or the column "t2.unique1". +select ctid from + tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, need qualification +ERROR: column "ctid" does not exist +LINE 1: select ctid from + ^ +DETAIL: There are columns named "ctid", but they are in tables that cannot be referenced from this part of the query. +HINT: Try using a table-qualified name. +-- +-- Take care to reference the correct RTE +-- +select atts.relid::regclass, s.* from pg_stats s join + pg_attribute a on s.attname = a.attname and s.tablename = + a.attrelid::regclass::text join (select unnest(indkey) attnum, + indexrelid from pg_index i) atts on atts.attnum = a.attnum where + schemaname != 'pg_catalog'; +ERROR: column atts.relid does not exist +LINE 1: select atts.relid::regclass, s.* from pg_stats s join + ^ +-- Test bug in rangetable flattening +explain (verbose, costs off) +select 1 from + (select * from int8_tbl where q1 <> (select 42) offset 0) ss +where false; + QUERY PLAN +-------------------------- + Result + Output: 1 + One-Time Filter: false +(3 rows) + +-- +-- Test LATERAL +-- +select unique2, x.* +from tenk1 a, lateral (select * from int4_tbl b where f1 = a.unique1) x; + unique2 | f1 +---------+---- + 9998 | 0 +(1 row) + +explain (costs off) + select unique2, x.* + from tenk1 a, lateral (select * from int4_tbl b where f1 = a.unique1) x; + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Seq Scan on int4_tbl b + -> Index Scan using tenk1_unique1 on tenk1 a + Index Cond: (unique1 = b.f1) +(4 rows) + +select unique2, x.* +from int4_tbl x, lateral (select unique2 from tenk1 where f1 = unique1) ss; + unique2 | f1 +---------+---- + 9998 | 0 +(1 row) + +explain (costs off) + select unique2, x.* + from int4_tbl x, lateral (select unique2 from tenk1 where f1 = unique1) ss; + QUERY PLAN +----------------------------------------------- + Nested Loop + -> Seq Scan on int4_tbl x + -> Index Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = x.f1) +(4 rows) + +explain (costs off) + select unique2, x.* + from int4_tbl x cross join lateral (select unique2 from tenk1 where f1 = unique1) ss; + QUERY PLAN +----------------------------------------------- + Nested Loop + -> Seq Scan on int4_tbl x + -> Index Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = x.f1) +(4 rows) + +select unique2, x.* +from int4_tbl x left join 
lateral (select unique1, unique2 from tenk1 where f1 = unique1) ss on true; + unique2 | f1 +---------+------------- + 9998 | 0 + | 123456 + | -123456 + | 2147483647 + | -2147483647 +(5 rows) + +explain (costs off) + select unique2, x.* + from int4_tbl x left join lateral (select unique1, unique2 from tenk1 where f1 = unique1) ss on true; + QUERY PLAN +----------------------------------------------- + Nested Loop Left Join + -> Seq Scan on int4_tbl x + -> Index Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = x.f1) +(4 rows) + +-- check scoping of lateral versus parent references +-- the first of these should return int8_tbl.q2, the second int8_tbl.q1 +select *, (select r from (select q1 as q2) x, (select q2 as r) y) from int8_tbl; + q1 | q2 | r +------------------+-------------------+------------------- + 123 | 456 | 456 + 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | -4567890123456789 +(5 rows) + +select *, (select r from (select q1 as q2) x, lateral (select q2 as r) y) from int8_tbl; + q1 | q2 | r +------------------+-------------------+------------------ + 123 | 456 | 123 + 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 4567890123456789 +(5 rows) + +-- lateral with function in FROM +select count(*) from tenk1 a, lateral generate_series(1,two) g; + count +------- + 5000 +(1 row) + +explain (costs off) + select count(*) from tenk1 a, lateral generate_series(1,two) g; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Nested Loop + -> Seq Scan on tenk1 a + -> Memoize + Cache Key: a.two + Cache Mode: binary + -> Function Scan on generate_series g +(7 rows) + +explain (costs off) + select count(*) from tenk1 a cross join lateral generate_series(1,two) g; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Nested Loop + -> Seq Scan on tenk1 a + -> Memoize + Cache Key: a.two + Cache Mode: binary + -> Function Scan on generate_series g +(7 rows) + +-- don't need the explicit LATERAL keyword for functions +explain (costs off) + select count(*) from tenk1 a, generate_series(1,two) g; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Nested Loop + -> Seq Scan on tenk1 a + -> Memoize + Cache Key: a.two + Cache Mode: binary + -> Function Scan on generate_series g +(7 rows) + +-- lateral with UNION ALL subselect +explain (costs off) + select * from generate_series(100,200) g, + lateral (select * from int8_tbl a where g = q1 union all + select * from int8_tbl b where g = q2) ss; + QUERY PLAN +------------------------------------------ + Nested Loop + -> Function Scan on generate_series g + -> Append + -> Seq Scan on int8_tbl a + Filter: (g.g = q1) + -> Seq Scan on int8_tbl b + Filter: (g.g = q2) +(7 rows) + +select * from generate_series(100,200) g, + lateral (select * from int8_tbl a where g = q1 union all + select * from int8_tbl b where g = q2) ss; + g | q1 | q2 +-----+------------------+------------------ + 123 | 123 | 456 + 123 | 123 | 4567890123456789 + 123 | 4567890123456789 | 123 +(3 rows) + +-- lateral with VALUES +explain (costs off) + select count(*) from tenk1 a, + tenk1 b join lateral (values(a.unique1)) ss(x) on b.unique2 = ss.x; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Merge Join 
+ Merge Cond: (a.unique1 = b.unique2) + -> Index Only Scan using tenk1_unique1 on tenk1 a + -> Index Only Scan using tenk1_unique2 on tenk1 b +(5 rows) + +select count(*) from tenk1 a, + tenk1 b join lateral (values(a.unique1)) ss(x) on b.unique2 = ss.x; + count +------- + 10000 +(1 row) + +-- lateral with VALUES, no flattening possible +explain (costs off) + select count(*) from tenk1 a, + tenk1 b join lateral (values(a.unique1),(-1)) ss(x) on b.unique2 = ss.x; + QUERY PLAN +------------------------------------------------------------------ + Aggregate + -> Nested Loop + -> Nested Loop + -> Index Only Scan using tenk1_unique1 on tenk1 a + -> Values Scan on "*VALUES*" + -> Memoize + Cache Key: "*VALUES*".column1 + Cache Mode: logical + -> Index Only Scan using tenk1_unique2 on tenk1 b + Index Cond: (unique2 = "*VALUES*".column1) +(10 rows) + +select count(*) from tenk1 a, + tenk1 b join lateral (values(a.unique1),(-1)) ss(x) on b.unique2 = ss.x; + count +------- + 10000 +(1 row) + +-- lateral injecting a strange outer join condition +explain (costs off) + select * from int8_tbl a, + int8_tbl x left join lateral (select a.q1 from int4_tbl y) ss(z) + on x.q2 = ss.z + order by a.q1, a.q2, x.q1, x.q2, ss.z; + QUERY PLAN +------------------------------------------------ + Sort + Sort Key: a.q1, a.q2, x.q1, x.q2, (a.q1) + -> Nested Loop + -> Seq Scan on int8_tbl a + -> Hash Left Join + Hash Cond: (x.q2 = (a.q1)) + -> Seq Scan on int8_tbl x + -> Hash + -> Seq Scan on int4_tbl y +(9 rows) + +select * from int8_tbl a, + int8_tbl x left join lateral (select a.q1 from int4_tbl y) ss(z) + on x.q2 = ss.z + order by a.q1, a.q2, x.q1, x.q2, ss.z; + q1 | q2 | q1 | q2 | z +------------------+-------------------+------------------+-------------------+------------------ + 123 | 456 | 123 | 456 | + 123 | 456 | 123 | 4567890123456789 | + 123 | 456 | 4567890123456789 | -4567890123456789 | + 123 | 456 | 4567890123456789 | 123 | 123 + 123 | 456 | 4567890123456789 | 123 | 123 + 123 | 456 | 4567890123456789 | 123 | 123 + 123 | 456 | 4567890123456789 | 123 | 123 + 123 | 456 | 4567890123456789 | 123 | 123 + 123 | 456 | 4567890123456789 | 4567890123456789 | + 123 | 4567890123456789 | 123 | 456 | + 123 | 4567890123456789 | 123 | 4567890123456789 | + 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | + 4567890123456789 | -4567890123456789 | 123 | 456 | + 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 4567890123456789 | -4567890123456789 | + 4567890123456789 | -4567890123456789 | 4567890123456789 | 123 | + 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 
4567890123456789 + 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 456 | + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 4567890123456789 | -4567890123456789 | + 4567890123456789 | 123 | 4567890123456789 | 123 | + 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 123 | 456 | + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 +(57 rows) + +-- lateral reference to a join alias variable +select * from (select f1/2 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1, + lateral (select x) ss2(y); + x | f1 | y +---+----+--- + 0 | 0 | 0 +(1 row) + +select * from (select f1 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1, + lateral (values(x)) ss2(y); + x | f1 | y +-------------+-------------+------------- + 0 | 0 | 0 + 123456 | 123456 | 123456 + -123456 | -123456 | -123456 + 2147483647 | 2147483647 | 2147483647 + -2147483647 | -2147483647 | -2147483647 +(5 rows) + +select * from ((select f1/2 as x from int4_tbl) ss1 join int4_tbl i4 on x = f1) j, + lateral (select x) ss2(y); + x | f1 | y +---+----+--- + 0 | 0 | 0 +(1 row) + +-- lateral references requiring pullup +select * from (values(1)) x(lb), + lateral generate_series(lb,4) x4; + lb | x4 +----+---- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 +(4 rows) + +select * from (select f1/1000000000 from int4_tbl) x(lb), + lateral generate_series(lb,4) x4; + lb | x4 +----+---- + 0 | 0 + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 0 + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 0 | 0 + 0 | 1 + 0 | 2 + 0 | 3 + 0 | 4 + 2 | 2 + 2 | 3 + 2 | 4 + -2 | -2 + -2 | -1 + -2 | 0 + -2 | 1 + -2 | 2 + -2 | 3 + -2 | 4 +(25 rows) + +select * from (values(1)) x(lb), + lateral (values(lb)) y(lbcopy); + lb | lbcopy +----+-------- + 1 | 1 +(1 row) + +select * from 
(values(1)) x(lb), + lateral (select lb from int4_tbl) y(lbcopy); + lb | lbcopy +----+-------- + 1 | 1 + 1 | 1 + 1 | 1 + 1 | 1 + 1 | 1 +(5 rows) + +select * from + int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1, + lateral (values(x.q1,y.q1,y.q2)) v(xq1,yq1,yq2); + q1 | q2 | q1 | q2 | xq1 | yq1 | yq2 +------------------+-------------------+------------------+-------------------+------------------+------------------+------------------- + 123 | 456 | | | 123 | | + 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | -4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 4567890123456789 | 123 | 123 | 456 | 4567890123456789 | 123 | 456 + 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 | | | 4567890123456789 | | +(10 rows) + +select * from + int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1, + lateral (select x.q1,y.q1,y.q2) v(xq1,yq1,yq2); + q1 | q2 | q1 | q2 | xq1 | yq1 | yq2 +------------------+-------------------+------------------+-------------------+------------------+------------------+------------------- + 123 | 456 | | | 123 | | + 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 123 | 4567890123456789 | -4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 4567890123456789 | 123 | 123 | 456 | 4567890123456789 | 123 | 456 + 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 | | | 4567890123456789 | | +(10 rows) + +select x.* from + int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1, + lateral (select x.q1,y.q1,y.q2) v(xq1,yq1,yq2); + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 123 | 4567890123456789 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(10 rows) + +select v.* from + (int8_tbl x left join (select q1,coalesce(q2,0) q2 from int8_tbl) y on x.q2 = y.q1) + left join int4_tbl z on z.f1 = x.q2, + lateral (select x.q1,y.q1 union all select x.q2,y.q2) v(vx,vy); + vx | vy +-------------------+------------------- + 123 | + 456 | + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 123 | 4567890123456789 + 
4567890123456789 | 4567890123456789 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 123 + 123 | 4567890123456789 + 4567890123456789 | 123 + 123 | 456 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | + -4567890123456789 | +(20 rows) + +select v.* from + (int8_tbl x left join (select q1,(select coalesce(q2,0)) q2 from int8_tbl) y on x.q2 = y.q1) + left join int4_tbl z on z.f1 = x.q2, + lateral (select x.q1,y.q1 union all select x.q2,y.q2) v(vx,vy); + vx | vy +-------------------+------------------- + 4567890123456789 | 123 + 123 | 456 + 4567890123456789 | 123 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 123 | + 456 | + 4567890123456789 | + -4567890123456789 | +(20 rows) + +select v.* from + (int8_tbl x left join (select q1,(select coalesce(q2,0)) q2 from int8_tbl) y on x.q2 = y.q1) + left join int4_tbl z on z.f1 = x.q2, + lateral (select x.q1,y.q1 from onerow union all select x.q2,y.q2 from onerow) v(vx,vy); + vx | vy +-------------------+------------------- + 4567890123456789 | 123 + 123 | 456 + 4567890123456789 | 123 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 123 | 4567890123456789 + 4567890123456789 | -4567890123456789 + 123 | + 456 | + 4567890123456789 | + -4567890123456789 | +(20 rows) + +explain (verbose, costs off) +select * from + int8_tbl a left join + lateral (select *, a.q2 as x from int8_tbl b) ss on a.q2 = ss.q1; + QUERY PLAN +------------------------------------------ + Nested Loop Left Join + Output: a.q1, a.q2, b.q1, b.q2, (a.q2) + -> Seq Scan on public.int8_tbl a + Output: a.q1, a.q2 + -> Seq Scan on public.int8_tbl b + Output: b.q1, b.q2, a.q2 + Filter: (a.q2 = b.q1) +(7 rows) + +select * from + int8_tbl a left join + lateral (select *, a.q2 as x from int8_tbl b) ss on a.q2 = ss.q1; + q1 | q2 | q1 | q2 | x +------------------+-------------------+------------------+-------------------+------------------ + 123 | 456 | | | + 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 456 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | | | +(10 rows) + +explain (verbose, costs off) +select * from + int8_tbl a left join + lateral (select *, coalesce(a.q2, 42) as x from 
int8_tbl b) ss on a.q2 = ss.q1; + QUERY PLAN +------------------------------------------------------------------ + Nested Loop Left Join + Output: a.q1, a.q2, b.q1, b.q2, (COALESCE(a.q2, '42'::bigint)) + -> Seq Scan on public.int8_tbl a + Output: a.q1, a.q2 + -> Seq Scan on public.int8_tbl b + Output: b.q1, b.q2, COALESCE(a.q2, '42'::bigint) + Filter: (a.q2 = b.q1) +(7 rows) + +select * from + int8_tbl a left join + lateral (select *, coalesce(a.q2, 42) as x from int8_tbl b) ss on a.q2 = ss.q1; + q1 | q2 | q1 | q2 | x +------------------+-------------------+------------------+-------------------+------------------ + 123 | 456 | | | + 123 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 123 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 + 4567890123456789 | 123 | 123 | 456 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | -4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | | | +(10 rows) + +-- lateral can result in join conditions appearing below their +-- real semantic level +explain (verbose, costs off) +select * from int4_tbl i left join + lateral (select * from int2_tbl j where i.f1 = j.f1) k on true; + QUERY PLAN +------------------------------------------- + Hash Left Join + Output: i.f1, j.f1 + Hash Cond: (i.f1 = j.f1) + -> Seq Scan on public.int4_tbl i + Output: i.f1 + -> Hash + Output: j.f1 + -> Seq Scan on public.int2_tbl j + Output: j.f1 +(9 rows) + +select * from int4_tbl i left join + lateral (select * from int2_tbl j where i.f1 = j.f1) k on true; + f1 | f1 +-------------+---- + 0 | 0 + 123456 | + -123456 | + 2147483647 | + -2147483647 | +(5 rows) + +explain (verbose, costs off) +select * from int4_tbl i left join + lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true; + QUERY PLAN +------------------------------------- + Nested Loop Left Join + Output: i.f1, (COALESCE(i.*)) + -> Seq Scan on public.int4_tbl i + Output: i.f1, i.* + -> Seq Scan on public.int2_tbl j + Output: j.f1, COALESCE(i.*) + Filter: (i.f1 = j.f1) +(7 rows) + +select * from int4_tbl i left join + lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true; + f1 | coalesce +-------------+---------- + 0 | (0) + 123456 | + -123456 | + 2147483647 | + -2147483647 | +(5 rows) + +explain (verbose, costs off) +select * from int4_tbl a, + lateral ( + select * from int4_tbl b left join int8_tbl c on (b.f1 = q1 and a.f1 = q2) + ) ss; + QUERY PLAN +------------------------------------------------- + Nested Loop + Output: a.f1, b.f1, c.q1, c.q2 + -> Seq Scan on public.int4_tbl a + Output: a.f1 + -> Hash Left Join + Output: b.f1, c.q1, c.q2 + Hash Cond: (b.f1 = c.q1) + -> Seq Scan on public.int4_tbl b + Output: b.f1 + -> Hash + Output: c.q1, c.q2 + -> Seq Scan on public.int8_tbl c + Output: c.q1, c.q2 + Filter: (a.f1 = c.q2) +(14 rows) + +select * from int4_tbl a, + lateral ( + select * from int4_tbl b left join int8_tbl c on (b.f1 = q1 and a.f1 = q2) + ) ss; + f1 | f1 | q1 | q2 +-------------+-------------+----+---- + 0 | 0 | | + 0 | 123456 | | + 0 | -123456 | | + 0 | 2147483647 | | + 0 | -2147483647 | | + 123456 | 0 | | + 123456 | 123456 | | + 123456 | -123456 | | + 123456 | 2147483647 | | + 
123456 | -2147483647 | | + -123456 | 0 | | + -123456 | 123456 | | + -123456 | -123456 | | + -123456 | 2147483647 | | + -123456 | -2147483647 | | + 2147483647 | 0 | | + 2147483647 | 123456 | | + 2147483647 | -123456 | | + 2147483647 | 2147483647 | | + 2147483647 | -2147483647 | | + -2147483647 | 0 | | + -2147483647 | 123456 | | + -2147483647 | -123456 | | + -2147483647 | 2147483647 | | + -2147483647 | -2147483647 | | +(25 rows) + +-- lateral reference in a PlaceHolderVar evaluated at join level +explain (verbose, costs off) +select * from + int8_tbl a left join lateral + (select b.q1 as bq1, c.q1 as cq1, least(a.q1,b.q1,c.q1) from + int8_tbl b cross join int8_tbl c) ss + on a.q2 = ss.bq1; + QUERY PLAN +------------------------------------------------------------- + Nested Loop Left Join + Output: a.q1, a.q2, b.q1, c.q1, (LEAST(a.q1, b.q1, c.q1)) + -> Seq Scan on public.int8_tbl a + Output: a.q1, a.q2 + -> Nested Loop + Output: b.q1, c.q1, LEAST(a.q1, b.q1, c.q1) + -> Seq Scan on public.int8_tbl b + Output: b.q1, b.q2 + Filter: (a.q2 = b.q1) + -> Seq Scan on public.int8_tbl c + Output: c.q1, c.q2 +(11 rows) + +select * from + int8_tbl a left join lateral + (select b.q1 as bq1, c.q1 as cq1, least(a.q1,b.q1,c.q1) from + int8_tbl b cross join int8_tbl c) ss + on a.q2 = ss.bq1; + q1 | q2 | bq1 | cq1 | least +------------------+-------------------+------------------+------------------+------------------ + 123 | 456 | | | + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 123 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 123 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 123 | 123 + 4567890123456789 | 123 | 123 | 123 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 123 | 123 + 4567890123456789 | 123 | 123 | 123 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 123 | 123 | 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 
4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 123 | 123 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 | | | +(42 rows) + +-- case requiring nested PlaceHolderVars +explain (verbose, costs off) +select * from + int8_tbl c left join ( + int8_tbl a left join (select q1, coalesce(q2,42) as x from int8_tbl b) ss1 + on a.q2 = ss1.q1 + cross join + lateral (select q1, coalesce(ss1.x,q2) as y from int8_tbl d) ss2 + ) on c.q2 = ss2.q1, + lateral (select ss2.y offset 0) ss3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop + Output: c.q1, c.q2, a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)), ((COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))) + -> Hash Right Join + Output: c.q1, c.q2, a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)) + Hash Cond: (d.q1 = c.q2) + -> Nested Loop + Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)), d.q1, (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)) + -> Hash Left Join + Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, '42'::bigint)) + Hash Cond: (a.q2 = b.q1) + -> Seq Scan on public.int8_tbl a + Output: a.q1, a.q2 + -> Hash + Output: b.q1, (COALESCE(b.q2, '42'::bigint)) + -> Seq Scan on public.int8_tbl b + Output: b.q1, COALESCE(b.q2, '42'::bigint) + -> Seq Scan on public.int8_tbl d + Output: d.q1, COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2) + -> Hash + Output: c.q1, c.q2 + -> Seq Scan on public.int8_tbl c + Output: c.q1, c.q2 + -> Result + Output: (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2)) +(24 rows) + +-- another case requiring nested PlaceHolderVars +explain (verbose, costs off) +select * from + (select 0 as val0) as ss0 + left join (select 1 as val) as ss1 on true + left join lateral (select ss1.val as val_filtered where false) as ss2 on true; + QUERY PLAN +-------------------------------- + Nested Loop Left Join + Output: 0, (1), ((1)) + Join Filter: false + -> Result + Output: 1 + -> Result + Output: (1) + One-Time Filter: false +(8 rows) + +select * from + (select 0 as val0) as ss0 + left join (select 1 as val) as ss1 on true + left join lateral (select ss1.val as val_filtered where false) as ss2 on true; + val0 | val | val_filtered +------+-----+-------------- + 0 | 1 | +(1 row) + +-- case that breaks the old ph_may_need optimization +explain (verbose, costs off) +select c.*,a.*,ss1.q1,ss2.q1,ss3.* from + int8_tbl c left join ( + int8_tbl a left join + (select q1, coalesce(q2,f1) as x from int8_tbl b, int4_tbl b2 + where q1 < f1) ss1 + on a.q2 = ss1.q1 + cross join + lateral (select q1, coalesce(ss1.x,q2) as y from int8_tbl d) ss2 + ) on c.q2 = ss2.q1, + lateral (select * from int4_tbl i where ss2.y > f1) ss3; + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------- + Nested Loop + Output: c.q1, c.q2, a.q1, a.q2, b.q1, d.q1, i.f1 + Join Filter: ((COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2)) > i.f1) + -> Hash Right Join + Output: c.q1, c.q2, a.q1, a.q2, b.q1, d.q1, (COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2)) + Hash Cond: (d.q1 = c.q2) + -> Nested Loop + Output: a.q1, a.q2, b.q1, d.q1, (COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2)) + -> Hash Right Join + Output: a.q1, a.q2, b.q1, (COALESCE(b.q2, (b2.f1)::bigint)) + Hash Cond: (b.q1 = a.q2) + -> Nested Loop + Output: b.q1, COALESCE(b.q2, (b2.f1)::bigint) + Join Filter: (b.q1 < b2.f1) + -> Seq Scan on public.int8_tbl b + Output: b.q1, b.q2 + -> Materialize + Output: b2.f1 + -> Seq Scan on public.int4_tbl b2 + Output: b2.f1 + -> Hash + Output: a.q1, a.q2 + -> Seq Scan on public.int8_tbl a + Output: a.q1, a.q2 + -> Seq Scan on public.int8_tbl d + Output: d.q1, COALESCE((COALESCE(b.q2, (b2.f1)::bigint)), d.q2) + -> Hash + Output: c.q1, c.q2 + -> Seq Scan on public.int8_tbl c + Output: c.q1, c.q2 + -> Materialize + Output: i.f1 + -> Seq Scan on public.int4_tbl i + Output: i.f1 +(34 rows) + +-- check processing of postponed quals (bug #9041) +explain (verbose, costs off) +select * from + (select 1 as x offset 0) x cross join (select 2 as y offset 0) y + left join lateral ( + select * from (select 3 as z offset 0) z where z.z = x.x + ) zz on zz.z = y.y; + QUERY PLAN +---------------------------------------------- + Nested Loop Left Join + Output: (1), (2), (3) + Join Filter: (((3) = (1)) AND ((3) = (2))) + -> Nested Loop + Output: (1), (2) + -> Result + Output: 1 + -> Result + Output: 2 + -> Result + Output: 3 +(11 rows) + +-- a new postponed-quals issue (bug #17768) +explain (costs off) +select * from int4_tbl t1, + lateral (select * from int4_tbl t2 inner join int4_tbl t3 on t1.f1 = 1 + inner join (int4_tbl t4 left join int4_tbl t5 on true) on true) ss; + QUERY PLAN +------------------------------------------------- + Nested Loop Left Join + -> Nested Loop + -> Nested Loop + -> Nested Loop + -> Seq Scan on int4_tbl t1 + Filter: (f1 = 1) + -> Seq Scan on int4_tbl t2 + -> Materialize + -> Seq Scan on int4_tbl t3 + -> Materialize + -> Seq Scan on int4_tbl t4 + -> Materialize + -> Seq Scan on int4_tbl t5 +(13 rows) + +-- check dummy rels with lateral references (bug #15694) +explain (verbose, costs off) +select * from int8_tbl i8 left join lateral + (select *, i8.q2 from int4_tbl where false) ss on true; + QUERY PLAN +-------------------------------------- + Nested Loop Left Join + Output: i8.q1, i8.q2, f1, (i8.q2) + Join Filter: false + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Result + Output: f1, i8.q2 + One-Time Filter: false +(8 rows) + +explain (verbose, costs off) +select * from int8_tbl i8 left join lateral + (select *, i8.q2 from int4_tbl i1, int4_tbl i2 where false) ss on true; + QUERY PLAN +----------------------------------------- + Nested Loop Left Join + Output: i8.q1, i8.q2, f1, f1, (i8.q2) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Result + Output: f1, f1, i8.q2 + One-Time Filter: false +(7 rows) + +-- check handling of nested appendrels inside LATERAL +select * from + ((select 2 as v) union all (select 3 as v)) as q1 + cross join lateral + ((select * from + ((select 4 as v) union all (select 5 as v)) as q3) + union all + (select q1.v) + ) as q2; + v | v +---+--- + 2 | 4 + 2 | 5 + 2 | 2 + 3 | 4 + 3 | 5 + 3 | 3 +(6 rows) + +-- check 
the number of columns specified +SELECT * FROM (int8_tbl i cross join int4_tbl j) ss(a,b,c,d); +ERROR: join expression "ss" has 3 columns available but 4 columns specified +-- check we don't try to do a unique-ified semijoin with LATERAL +explain (verbose, costs off) +select * from + (values (0,9998), (1,1000)) v(id,x), + lateral (select f1 from int4_tbl + where f1 = any (select unique1 from tenk1 + where unique2 = v.x offset 0)) ss; + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop + Output: "*VALUES*".column1, "*VALUES*".column2, int4_tbl.f1 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1, "*VALUES*".column2 + -> Nested Loop Semi Join + Output: int4_tbl.f1 + Join Filter: (int4_tbl.f1 = tenk1.unique1) + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 + -> Materialize + Output: tenk1.unique1 + -> Index Scan using tenk1_unique2 on public.tenk1 + Output: tenk1.unique1 + Index Cond: (tenk1.unique2 = "*VALUES*".column2) +(14 rows) + +select * from + (values (0,9998), (1,1000)) v(id,x), + lateral (select f1 from int4_tbl + where f1 = any (select unique1 from tenk1 + where unique2 = v.x offset 0)) ss; + id | x | f1 +----+------+---- + 0 | 9998 | 0 +(1 row) + +-- check proper extParam/allParam handling (this isn't exactly a LATERAL issue, +-- but we can make the test case much more compact with LATERAL) +explain (verbose, costs off) +select * from (values (0), (1)) v(id), +lateral (select * from int8_tbl t1, + lateral (select * from + (select * from int8_tbl t2 + where q1 = any (select q2 from int8_tbl t3 + where q2 = (select greatest(t1.q1,t2.q2)) + and (select v.id=0)) offset 0) ss2) ss + where t1.q1 = ss.q2) ss0; + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop + Output: "*VALUES*".column1, t1.q1, t1.q2, ss2.q1, ss2.q2 + -> Seq Scan on public.int8_tbl t1 + Output: t1.q1, t1.q2 + -> Nested Loop + Output: "*VALUES*".column1, ss2.q1, ss2.q2 + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + -> Subquery Scan on ss2 + Output: ss2.q1, ss2.q2 + Filter: (t1.q1 = ss2.q2) + -> Seq Scan on public.int8_tbl t2 + Output: t2.q1, t2.q2 + Filter: (SubPlan 3) + SubPlan 3 + -> Result + Output: t3.q2 + One-Time Filter: $4 + InitPlan 1 (returns $2) + -> Result + Output: GREATEST(t1.q1, t2.q2) + InitPlan 2 (returns $4) + -> Result + Output: ("*VALUES*".column1 = 0) + -> Seq Scan on public.int8_tbl t3 + Output: t3.q1, t3.q2 + Filter: (t3.q2 = $2) +(27 rows) + +select * from (values (0), (1)) v(id), +lateral (select * from int8_tbl t1, + lateral (select * from + (select * from int8_tbl t2 + where q1 = any (select q2 from int8_tbl t3 + where q2 = (select greatest(t1.q1,t2.q2)) + and (select v.id=0)) offset 0) ss2) ss + where t1.q1 = ss.q2) ss0; + id | q1 | q2 | q1 | q2 +----+------------------+-------------------+------------------+------------------ + 0 | 4567890123456789 | 123 | 4567890123456789 | 4567890123456789 + 0 | 4567890123456789 | 4567890123456789 | 4567890123456789 | 4567890123456789 + 0 | 4567890123456789 | -4567890123456789 | 4567890123456789 | 4567890123456789 +(3 rows) + +-- test some error cases where LATERAL should have been used but wasn't +select f1,g from int4_tbl a, (select f1 as g) ss; +ERROR: column "f1" does not exist +LINE 1: select f1,g from int4_tbl a, (select f1 as g) ss; + ^ +DETAIL: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query. +HINT: To reference that column, you must mark this subquery with LATERAL. 
+select f1,g from int4_tbl a, (select a.f1 as g) ss; +ERROR: invalid reference to FROM-clause entry for table "a" +LINE 1: select f1,g from int4_tbl a, (select a.f1 as g) ss; + ^ +DETAIL: There is an entry for table "a", but it cannot be referenced from this part of the query. +HINT: To reference that table, you must mark this subquery with LATERAL. +select f1,g from int4_tbl a cross join (select f1 as g) ss; +ERROR: column "f1" does not exist +LINE 1: select f1,g from int4_tbl a cross join (select f1 as g) ss; + ^ +DETAIL: There is a column named "f1" in table "a", but it cannot be referenced from this part of the query. +HINT: To reference that column, you must mark this subquery with LATERAL. +select f1,g from int4_tbl a cross join (select a.f1 as g) ss; +ERROR: invalid reference to FROM-clause entry for table "a" +LINE 1: select f1,g from int4_tbl a cross join (select a.f1 as g) ss... + ^ +DETAIL: There is an entry for table "a", but it cannot be referenced from this part of the query. +HINT: To reference that table, you must mark this subquery with LATERAL. +-- SQL:2008 says the left table is in scope but illegal to access here +select f1,g from int4_tbl a right join lateral generate_series(0, a.f1) g on true; +ERROR: invalid reference to FROM-clause entry for table "a" +LINE 1: ... int4_tbl a right join lateral generate_series(0, a.f1) g on... + ^ +DETAIL: The combining JOIN type must be INNER or LEFT for a LATERAL reference. +select f1,g from int4_tbl a full join lateral generate_series(0, a.f1) g on true; +ERROR: invalid reference to FROM-clause entry for table "a" +LINE 1: ...m int4_tbl a full join lateral generate_series(0, a.f1) g on... + ^ +DETAIL: The combining JOIN type must be INNER or LEFT for a LATERAL reference. +-- check we complain about ambiguous table references +select * from + int8_tbl x cross join (int4_tbl x cross join lateral (select x.f1) ss); +ERROR: table reference "x" is ambiguous +LINE 2: ...cross join (int4_tbl x cross join lateral (select x.f1) ss); + ^ +-- LATERAL can be used to put an aggregate into the FROM clause of its query +select 1 from tenk1 a, lateral (select max(a.unique1) from int4_tbl b) ss; +ERROR: aggregate functions are not allowed in FROM clause of their own query level +LINE 1: select 1 from tenk1 a, lateral (select max(a.unique1) from i... + ^ +-- check behavior of LATERAL in UPDATE/DELETE +create temp table xx1 as select f1 as x1, -f1 as x2 from int4_tbl; +-- error, can't do this: +update xx1 set x2 = f1 from (select * from int4_tbl where f1 = x1) ss; +ERROR: column "x1" does not exist +LINE 1: ... set x2 = f1 from (select * from int4_tbl where f1 = x1) ss; + ^ +DETAIL: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query. +update xx1 set x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss; +ERROR: invalid reference to FROM-clause entry for table "xx1" +LINE 1: ...t x2 = f1 from (select * from int4_tbl where f1 = xx1.x1) ss... + ^ +DETAIL: There is an entry for table "xx1", but it cannot be referenced from this part of the query. +-- can't do it even with LATERAL: +update xx1 set x2 = f1 from lateral (select * from int4_tbl where f1 = x1) ss; +ERROR: invalid reference to FROM-clause entry for table "xx1" +LINE 1: ...= f1 from lateral (select * from int4_tbl where f1 = x1) ss; + ^ +HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query. 
+-- we might in future allow something like this, but for now it's an error: +update xx1 set x2 = f1 from xx1, lateral (select * from int4_tbl where f1 = x1) ss; +ERROR: table name "xx1" specified more than once +-- also errors: +delete from xx1 using (select * from int4_tbl where f1 = x1) ss; +ERROR: column "x1" does not exist +LINE 1: ...te from xx1 using (select * from int4_tbl where f1 = x1) ss; + ^ +DETAIL: There is a column named "x1" in table "xx1", but it cannot be referenced from this part of the query. +delete from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss; +ERROR: invalid reference to FROM-clause entry for table "xx1" +LINE 1: ...from xx1 using (select * from int4_tbl where f1 = xx1.x1) ss... + ^ +DETAIL: There is an entry for table "xx1", but it cannot be referenced from this part of the query. +delete from xx1 using lateral (select * from int4_tbl where f1 = x1) ss; +ERROR: invalid reference to FROM-clause entry for table "xx1" +LINE 1: ...xx1 using lateral (select * from int4_tbl where f1 = x1) ss; + ^ +HINT: There is an entry for table "xx1", but it cannot be referenced from this part of the query. +-- +-- test LATERAL reference propagation down a multi-level inheritance hierarchy +-- produced for a multi-level partitioned table hierarchy. +-- +create table join_pt1 (a int, b int, c varchar) partition by range(a); +create table join_pt1p1 partition of join_pt1 for values from (0) to (100) partition by range(b); +create table join_pt1p2 partition of join_pt1 for values from (100) to (200); +create table join_pt1p1p1 partition of join_pt1p1 for values from (0) to (100); +insert into join_pt1 values (1, 1, 'x'), (101, 101, 'y'); +create table join_ut1 (a int, b int, c varchar); +insert into join_ut1 values (101, 101, 'y'), (2, 2, 'z'); +explain (verbose, costs off) +select t1.b, ss.phv from join_ut1 t1 left join lateral + (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv + from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss + on t1.a = ss.t2a order by t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: t1.b, (LEAST(t1.a, t2.a, t3.a)), t1.a + Sort Key: t1.a + -> Nested Loop Left Join + Output: t1.b, (LEAST(t1.a, t2.a, t3.a)), t1.a + -> Seq Scan on public.join_ut1 t1 + Output: t1.a, t1.b, t1.c + -> Hash Join + Output: t2.a, LEAST(t1.a, t2.a, t3.a) + Hash Cond: (t3.b = t2.a) + -> Seq Scan on public.join_ut1 t3 + Output: t3.a, t3.b, t3.c + -> Hash + Output: t2.a + -> Append + -> Seq Scan on public.join_pt1p1p1 t2_1 + Output: t2_1.a + Filter: (t1.a = t2_1.a) + -> Seq Scan on public.join_pt1p2 t2_2 + Output: t2_2.a + Filter: (t1.a = t2_2.a) +(21 rows) + +select t1.b, ss.phv from join_ut1 t1 left join lateral + (select t2.a as t2a, t3.a t3a, least(t1.a, t2.a, t3.a) phv + from join_pt1 t2 join join_ut1 t3 on t2.a = t3.b) ss + on t1.a = ss.t2a order by t1.a; + b | phv +-----+----- + 2 | + 101 | 101 +(2 rows) + +drop table join_pt1; +drop table join_ut1; +-- +-- test estimation behavior with multi-column foreign key and constant qual +-- +begin; +create table fkest (x integer, x10 integer, x10b integer, x100 integer); +insert into fkest select x, x/10, x/10, x/100 from generate_series(1,1000) x; +create unique index on fkest(x, x10, x100); +analyze fkest; +explain (costs off) +select * from fkest f1 + join fkest f2 on (f1.x = f2.x and f1.x10 = f2.x10b and f1.x100 = f2.x100) + join fkest f3 on f1.x = f3.x + where f1.x100 = 2; + QUERY PLAN +----------------------------------------------------------- + Nested Loop 
+ -> Hash Join + Hash Cond: ((f2.x = f1.x) AND (f2.x10b = f1.x10)) + -> Seq Scan on fkest f2 + Filter: (x100 = 2) + -> Hash + -> Seq Scan on fkest f1 + Filter: (x100 = 2) + -> Index Scan using fkest_x_x10_x100_idx on fkest f3 + Index Cond: (x = f1.x) +(10 rows) + +alter table fkest add constraint fk + foreign key (x, x10b, x100) references fkest (x, x10, x100); +explain (costs off) +select * from fkest f1 + join fkest f2 on (f1.x = f2.x and f1.x10 = f2.x10b and f1.x100 = f2.x100) + join fkest f3 on f1.x = f3.x + where f1.x100 = 2; + QUERY PLAN +----------------------------------------------------- + Hash Join + Hash Cond: ((f2.x = f1.x) AND (f2.x10b = f1.x10)) + -> Hash Join + Hash Cond: (f3.x = f2.x) + -> Seq Scan on fkest f3 + -> Hash + -> Seq Scan on fkest f2 + Filter: (x100 = 2) + -> Hash + -> Seq Scan on fkest f1 + Filter: (x100 = 2) +(11 rows) + +rollback; +-- +-- test that foreign key join estimation performs sanely for outer joins +-- +begin; +create table fkest (a int, b int, c int unique, primary key(a,b)); +create table fkest1 (a int, b int, primary key(a,b)); +insert into fkest select x/10, x%10, x from generate_series(1,1000) x; +insert into fkest1 select x/10, x%10 from generate_series(1,1000) x; +alter table fkest1 + add constraint fkest1_a_b_fkey foreign key (a,b) references fkest; +analyze fkest; +analyze fkest1; +explain (costs off) +select * +from fkest f + left join fkest1 f1 on f.a = f1.a and f.b = f1.b + left join fkest1 f2 on f.a = f2.a and f.b = f2.b + left join fkest1 f3 on f.a = f3.a and f.b = f3.b +where f.c = 1; + QUERY PLAN +------------------------------------------------------------------ + Nested Loop Left Join + -> Nested Loop Left Join + -> Nested Loop Left Join + -> Index Scan using fkest_c_key on fkest f + Index Cond: (c = 1) + -> Index Only Scan using fkest1_pkey on fkest1 f1 + Index Cond: ((a = f.a) AND (b = f.b)) + -> Index Only Scan using fkest1_pkey on fkest1 f2 + Index Cond: ((a = f.a) AND (b = f.b)) + -> Index Only Scan using fkest1_pkey on fkest1 f3 + Index Cond: ((a = f.a) AND (b = f.b)) +(11 rows) + +rollback; +-- +-- test planner's ability to mark joins as unique +-- +create table j1 (id int primary key); +create table j2 (id int primary key); +create table j3 (id int); +insert into j1 values(1),(2),(3); +insert into j2 values(1),(2),(3); +insert into j3 values(1),(1); +analyze j1; +analyze j2; +analyze j3; +-- ensure join is properly marked as unique +explain (verbose, costs off) +select * from j1 inner join j2 on j1.id = j2.id; + QUERY PLAN +----------------------------------- + Hash Join + Output: j1.id, j2.id + Inner Unique: true + Hash Cond: (j1.id = j2.id) + -> Seq Scan on public.j1 + Output: j1.id + -> Hash + Output: j2.id + -> Seq Scan on public.j2 + Output: j2.id +(10 rows) + +-- ensure join is not unique when not an equi-join +explain (verbose, costs off) +select * from j1 inner join j2 on j1.id > j2.id; + QUERY PLAN +----------------------------------- + Nested Loop + Output: j1.id, j2.id + Join Filter: (j1.id > j2.id) + -> Seq Scan on public.j1 + Output: j1.id + -> Materialize + Output: j2.id + -> Seq Scan on public.j2 + Output: j2.id +(9 rows) + +-- ensure non-unique rel is not chosen as inner +explain (verbose, costs off) +select * from j1 inner join j3 on j1.id = j3.id; + QUERY PLAN +----------------------------------- + Hash Join + Output: j1.id, j3.id + Inner Unique: true + Hash Cond: (j3.id = j1.id) + -> Seq Scan on public.j3 + Output: j3.id + -> Hash + Output: j1.id + -> Seq Scan on public.j1 + Output: j1.id +(10 rows) + +-- 
ensure left join is marked as unique +explain (verbose, costs off) +select * from j1 left join j2 on j1.id = j2.id; + QUERY PLAN +----------------------------------- + Hash Left Join + Output: j1.id, j2.id + Inner Unique: true + Hash Cond: (j1.id = j2.id) + -> Seq Scan on public.j1 + Output: j1.id + -> Hash + Output: j2.id + -> Seq Scan on public.j2 + Output: j2.id +(10 rows) + +-- ensure right join is marked as unique +explain (verbose, costs off) +select * from j1 right join j2 on j1.id = j2.id; + QUERY PLAN +----------------------------------- + Hash Left Join + Output: j1.id, j2.id + Inner Unique: true + Hash Cond: (j2.id = j1.id) + -> Seq Scan on public.j2 + Output: j2.id + -> Hash + Output: j1.id + -> Seq Scan on public.j1 + Output: j1.id +(10 rows) + +-- ensure full join is marked as unique +explain (verbose, costs off) +select * from j1 full join j2 on j1.id = j2.id; + QUERY PLAN +----------------------------------- + Hash Full Join + Output: j1.id, j2.id + Inner Unique: true + Hash Cond: (j1.id = j2.id) + -> Seq Scan on public.j1 + Output: j1.id + -> Hash + Output: j2.id + -> Seq Scan on public.j2 + Output: j2.id +(10 rows) + +-- a clauseless (cross) join can't be unique +explain (verbose, costs off) +select * from j1 cross join j2; + QUERY PLAN +----------------------------------- + Nested Loop + Output: j1.id, j2.id + -> Seq Scan on public.j1 + Output: j1.id + -> Materialize + Output: j2.id + -> Seq Scan on public.j2 + Output: j2.id +(8 rows) + +-- ensure a natural join is marked as unique +explain (verbose, costs off) +select * from j1 natural join j2; + QUERY PLAN +----------------------------------- + Hash Join + Output: j1.id + Inner Unique: true + Hash Cond: (j1.id = j2.id) + -> Seq Scan on public.j1 + Output: j1.id + -> Hash + Output: j2.id + -> Seq Scan on public.j2 + Output: j2.id +(10 rows) + +-- ensure a distinct clause allows the inner to become unique +explain (verbose, costs off) +select * from j1 +inner join (select distinct id from j3) j3 on j1.id = j3.id; + QUERY PLAN +----------------------------------------- + Nested Loop + Output: j1.id, j3.id + Inner Unique: true + Join Filter: (j1.id = j3.id) + -> Unique + Output: j3.id + -> Sort + Output: j3.id + Sort Key: j3.id + -> Seq Scan on public.j3 + Output: j3.id + -> Seq Scan on public.j1 + Output: j1.id +(13 rows) + +-- ensure group by clause allows the inner to become unique +explain (verbose, costs off) +select * from j1 +inner join (select id from j3 group by id) j3 on j1.id = j3.id; + QUERY PLAN +----------------------------------------- + Nested Loop + Output: j1.id, j3.id + Inner Unique: true + Join Filter: (j1.id = j3.id) + -> Group + Output: j3.id + Group Key: j3.id + -> Sort + Output: j3.id + Sort Key: j3.id + -> Seq Scan on public.j3 + Output: j3.id + -> Seq Scan on public.j1 + Output: j1.id +(14 rows) + +drop table j1; +drop table j2; +drop table j3; +-- test more complex permutations of unique joins +create table j1 (id1 int, id2 int, primary key(id1,id2)); +create table j2 (id1 int, id2 int, primary key(id1,id2)); +create table j3 (id1 int, id2 int, primary key(id1,id2)); +insert into j1 values(1,1),(1,2); +insert into j2 values(1,1); +insert into j3 values(1,1); +analyze j1; +analyze j2; +analyze j3; +-- ensure there's no unique join when not all columns which are part of the +-- unique index are seen in the join clause +explain (verbose, costs off) +select * from j1 +inner join j2 on j1.id1 = j2.id1; + QUERY PLAN +------------------------------------------ + Nested Loop + Output: j1.id1, j1.id2, 
j2.id1, j2.id2 + Join Filter: (j1.id1 = j2.id1) + -> Seq Scan on public.j2 + Output: j2.id1, j2.id2 + -> Seq Scan on public.j1 + Output: j1.id1, j1.id2 +(7 rows) + +-- ensure proper unique detection with multiple join quals +explain (verbose, costs off) +select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2; + QUERY PLAN +---------------------------------------------------------- + Nested Loop + Output: j1.id1, j1.id2, j2.id1, j2.id2 + Inner Unique: true + Join Filter: ((j1.id1 = j2.id1) AND (j1.id2 = j2.id2)) + -> Seq Scan on public.j2 + Output: j2.id1, j2.id2 + -> Seq Scan on public.j1 + Output: j1.id1, j1.id2 +(8 rows) + +-- ensure we don't detect the join to be unique when quals are not part of the +-- join condition +explain (verbose, costs off) +select * from j1 +inner join j2 on j1.id1 = j2.id1 where j1.id2 = 1; + QUERY PLAN +------------------------------------------ + Nested Loop + Output: j1.id1, j1.id2, j2.id1, j2.id2 + Join Filter: (j1.id1 = j2.id1) + -> Seq Scan on public.j1 + Output: j1.id1, j1.id2 + Filter: (j1.id2 = 1) + -> Seq Scan on public.j2 + Output: j2.id1, j2.id2 +(8 rows) + +-- as above, but for left joins. +explain (verbose, costs off) +select * from j1 +left join j2 on j1.id1 = j2.id1 where j1.id2 = 1; + QUERY PLAN +------------------------------------------ + Nested Loop Left Join + Output: j1.id1, j1.id2, j2.id1, j2.id2 + Join Filter: (j1.id1 = j2.id1) + -> Seq Scan on public.j1 + Output: j1.id1, j1.id2 + Filter: (j1.id2 = 1) + -> Seq Scan on public.j2 + Output: j2.id1, j2.id2 +(8 rows) + +create unique index j1_id2_idx on j1(id2) where id2 is not null; +-- ensure we don't use a partial unique index as unique proofs +explain (verbose, costs off) +select * from j1 +inner join j2 on j1.id2 = j2.id2; + QUERY PLAN +------------------------------------------ + Nested Loop + Output: j1.id1, j1.id2, j2.id1, j2.id2 + Join Filter: (j2.id2 = j1.id2) + -> Seq Scan on public.j2 + Output: j2.id1, j2.id2 + -> Seq Scan on public.j1 + Output: j1.id1, j1.id2 +(7 rows) + +drop index j1_id2_idx; +-- validate logic in merge joins which skips mark and restore. +-- it should only do this if all quals which were used to detect the unique +-- are present as join quals, and not plain quals. 
+set enable_nestloop to 0; +set enable_hashjoin to 0; +set enable_sort to 0; +-- create indexes that will be preferred over the PKs to perform the join +create index j1_id1_idx on j1 (id1) where id1 % 1000 = 1; +create index j2_id1_idx on j2 (id1) where id1 % 1000 = 1; +-- need an additional row in j2, if we want j2_id1_idx to be preferred +insert into j2 values(1,2); +analyze j2; +explain (costs off) select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2 +where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1; + QUERY PLAN +----------------------------------------- + Merge Join + Merge Cond: (j1.id1 = j2.id1) + Join Filter: (j2.id2 = j1.id2) + -> Index Scan using j1_id1_idx on j1 + -> Index Scan using j2_id1_idx on j2 +(5 rows) + +select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2 +where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1; + id1 | id2 | id1 | id2 +-----+-----+-----+----- + 1 | 1 | 1 | 1 + 1 | 2 | 1 | 2 +(2 rows) + +-- Exercise array keys mark/restore B-Tree code +explain (costs off) select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2 +where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 = any (array[1]); + QUERY PLAN +---------------------------------------------------- + Merge Join + Merge Cond: (j1.id1 = j2.id1) + Join Filter: (j2.id2 = j1.id2) + -> Index Scan using j1_id1_idx on j1 + -> Index Scan using j2_id1_idx on j2 + Index Cond: (id1 = ANY ('{1}'::integer[])) +(6 rows) + +select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2 +where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 = any (array[1]); + id1 | id2 | id1 | id2 +-----+-----+-----+----- + 1 | 1 | 1 | 1 + 1 | 2 | 1 | 2 +(2 rows) + +-- Exercise array keys "find extreme element" B-Tree code +explain (costs off) select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2 +where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 >= any (array[1,5]); + QUERY PLAN +------------------------------------------------------- + Merge Join + Merge Cond: (j1.id1 = j2.id1) + Join Filter: (j2.id2 = j1.id2) + -> Index Scan using j1_id1_idx on j1 + -> Index Only Scan using j2_pkey on j2 + Index Cond: (id1 >= ANY ('{1,5}'::integer[])) + Filter: ((id1 % 1000) = 1) +(7 rows) + +select * from j1 +inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2 +where j1.id1 % 1000 = 1 and j2.id1 % 1000 = 1 and j2.id1 >= any (array[1,5]); + id1 | id2 | id1 | id2 +-----+-----+-----+----- + 1 | 1 | 1 | 1 + 1 | 2 | 1 | 2 +(2 rows) + +reset enable_nestloop; +reset enable_hashjoin; +reset enable_sort; +drop table j1; +drop table j2; +drop table j3; +-- check that semijoin inner is not seen as unique for a portion of the outerrel +explain (verbose, costs off) +select t1.unique1, t2.hundred +from onek t1, tenk1 t2 +where exists (select 1 from tenk1 t3 + where t3.thousand = t1.unique1 and t3.tenthous = t2.hundred) + and t1.unique1 < 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Nested Loop + Output: t1.unique1, t2.hundred + -> Hash Join + Output: t1.unique1, t3.tenthous + Hash Cond: (t3.thousand = t1.unique1) + -> HashAggregate + Output: t3.thousand, t3.tenthous + Group Key: t3.thousand, t3.tenthous + -> Index Only Scan using tenk1_thous_tenthous on public.tenk1 t3 + Output: t3.thousand, t3.tenthous + -> Hash + Output: t1.unique1 + -> Index Only Scan using onek_unique1 on public.onek t1 + Output: t1.unique1 + Index Cond: (t1.unique1 < 1) + -> Index Only Scan using tenk1_hundred on public.tenk1 t2 + Output: t2.hundred + Index 
Cond: (t2.hundred = t3.tenthous) +(18 rows) + +-- ... unless it actually is unique +create table j3 as select unique1, tenthous from onek; +vacuum analyze j3; +create unique index on j3(unique1, tenthous); +explain (verbose, costs off) +select t1.unique1, t2.hundred +from onek t1, tenk1 t2 +where exists (select 1 from j3 + where j3.unique1 = t1.unique1 and j3.tenthous = t2.hundred) + and t1.unique1 < 1; + QUERY PLAN +------------------------------------------------------------------------ + Nested Loop + Output: t1.unique1, t2.hundred + -> Nested Loop + Output: t1.unique1, j3.tenthous + -> Index Only Scan using onek_unique1 on public.onek t1 + Output: t1.unique1 + Index Cond: (t1.unique1 < 1) + -> Index Only Scan using j3_unique1_tenthous_idx on public.j3 + Output: j3.unique1, j3.tenthous + Index Cond: (j3.unique1 = t1.unique1) + -> Index Only Scan using tenk1_hundred on public.tenk1 t2 + Output: t2.hundred + Index Cond: (t2.hundred = j3.tenthous) +(13 rows) + +drop table j3; diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out new file mode 100644 index 0000000..262fa71 --- /dev/null +++ b/src/test/regress/expected/join_hash.out @@ -0,0 +1,1166 @@ +-- +-- exercises for the hash join code +-- +begin; +set local min_parallel_table_scan_size = 0; +set local parallel_setup_cost = 0; +set local enable_hashjoin = on; +-- Extract bucket and batch counts from an explain analyze plan. In +-- general we can't make assertions about how many batches (or +-- buckets) will be required because it can vary, but we can in some +-- special cases and we can check for growth. +create or replace function find_hash(node json) +returns json language plpgsql +as +$$ +declare + x json; + child json; +begin + if node->>'Node Type' = 'Hash' then + return node; + else + for child in select json_array_elements(node->'Plans') + loop + x := find_hash(child); + if x is not null then + return x; + end if; + end loop; + return null; + end if; +end; +$$; +create or replace function hash_join_batches(query text) +returns table (original int, final int) language plpgsql +as +$$ +declare + whole_plan json; + hash_node json; +begin + for whole_plan in + execute 'explain (analyze, format ''json'') ' || query + loop + hash_node := find_hash(json_extract_path(whole_plan, '0', 'Plan')); + original := hash_node->>'Original Hash Batches'; + final := hash_node->>'Hash Batches'; + return next; + end loop; +end; +$$; +-- Make a simple relation with well distributed keys and correctly +-- estimated size. +create table simple as + select generate_series(1, 20000) AS id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; +alter table simple set (parallel_workers = 2); +analyze simple; +-- Make a relation whose size we will under-estimate. We want stats +-- to say 1000 rows, but actually there are 20,000 rows. +create table bigger_than_it_looks as + select generate_series(1, 20000) as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'; +alter table bigger_than_it_looks set (autovacuum_enabled = 'false'); +alter table bigger_than_it_looks set (parallel_workers = 2); +analyze bigger_than_it_looks; +update pg_class set reltuples = 1000 where relname = 'bigger_than_it_looks'; +-- Make a relation whose size we underestimate and that also has a +-- kind of skew that breaks our batching scheme. We want stats to say +-- 2 rows, but actually there are 20,000 rows with the same key. 
+create table extremely_skewed (id int, t text); +alter table extremely_skewed set (autovacuum_enabled = 'false'); +alter table extremely_skewed set (parallel_workers = 2); +analyze extremely_skewed; +insert into extremely_skewed + select 42 as id, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' + from generate_series(1, 20000); +update pg_class + set reltuples = 2, relpages = pg_relation_size('extremely_skewed') / 8192 + where relname = 'extremely_skewed'; +-- Make a relation with a couple of enormous tuples. +create table wide as select generate_series(1, 2) as id, rpad('', 320000, 'x') as t; +alter table wide set (parallel_workers = 2); +-- The "optimal" case: the hash table fits in memory; we plan for 1 +-- batch, we stick to that number, and peak memory usage stays within +-- our work_mem budget +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | f +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | f +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | 
f +(1 row) + +rollback to settings; +-- The "good" case: batches required, but we plan the right number; we +-- plan for some number of batches, and we stick to that number, and +-- peak memory usage stays within our work_mem budget +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + t | f +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + t | f +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '192kB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join simple s using (id); + QUERY PLAN +------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on simple s +(9 rows) + +select count(*) from simple r join simple s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + t | f +(1 row) + +-- parallel full multi-batch hash join +select count(*) from simple r full outer join simple s using (id); + count +------- + 20000 +(1 row) + +rollback to settings; +-- The "bad" case: during execution we need to increase number of +-- batches; in this case we plan for 1 batch, and increase at least a +-- couple of times, and peak memory usage stays within our work_mem +-- budget +-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +explain
(costs off) + select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on bigger_than_it_looks s +(6 rows) + +select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | t +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join bigger_than_it_looks s using (id); + QUERY PLAN +------------------------------------------------------------------ + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on bigger_than_it_looks s +(9 rows) + +select count(*) from simple r join bigger_than_it_looks s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join bigger_than_it_looks s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | t +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 1; +set local work_mem = '192kB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join bigger_than_it_looks s using (id); + QUERY PLAN +--------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 1 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on bigger_than_it_looks s +(9 rows) + +select count(*) from simple r join bigger_than_it_looks s using (id); + count +------- + 20000 +(1 row) + +select original > 1 as initially_multibatch, final > original as increased_batches + from hash_join_batches( +$$ + select count(*) from simple r join bigger_than_it_looks s using (id); +$$); + initially_multibatch | increased_batches +----------------------+------------------- + f | t +(1 row) + +rollback to settings; +-- The "ugly" case: increasing the number of batches during execution +-- doesn't help, so stop trying to fit in work_mem and hope for the +-- best; in this case we plan for 1 batch, increase just once and +-- then stop increasing because that didn't help at all, so we blow +-- right through the work_mem budget and hope for the best...
+-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); + QUERY PLAN +-------------------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on extremely_skewed s +(6 rows) + +select count(*) from simple r join extremely_skewed s using (id); + count +------- + 20000 +(1 row) + +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); + original | final +----------+------- + 1 | 2 +(1 row) + +rollback to settings; +-- parallel with parallel-oblivious hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = off; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Gather + Workers Planned: 2 + -> Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Hash + -> Seq Scan on extremely_skewed s +(8 rows) + +select count(*) from simple r join extremely_skewed s using (id); + count +------- + 20000 +(1 row) + +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); + original | final +----------+------- + 1 | 2 +(1 row) + +rollback to settings; +-- parallel with parallel-aware hash join +savepoint settings; +set local max_parallel_workers_per_gather = 1; +set local work_mem = '128kB'; +set local hash_mem_multiplier = 1.0; +set local enable_parallel_hash = on; +explain (costs off) + select count(*) from simple r join extremely_skewed s using (id); + QUERY PLAN +----------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 1 + -> Partial Aggregate + -> Parallel Hash Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on extremely_skewed s +(9 rows) + +select count(*) from simple r join extremely_skewed s using (id); + count +------- + 20000 +(1 row) + +select * from hash_join_batches( +$$ + select count(*) from simple r join extremely_skewed s using (id); +$$); + original | final +----------+------- + 1 | 4 +(1 row) + +rollback to settings; +-- A couple of other hash join tests unrelated to work_mem management. +-- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate +savepoint settings; +set local max_parallel_workers_per_gather = 2; +set local work_mem = '4MB'; +set local hash_mem_multiplier = 1.0; +set local parallel_leader_participation = off; +select * from hash_join_batches( +$$ + select count(*) from simple r join simple s using (id); +$$); + original | final +----------+------- + 1 | 1 +(1 row) + +rollback to settings; +-- Exercise rescans. We'll turn off parallel_leader_participation so +-- that we can check that instrumentation comes back correctly. 
+create table join_foo as select generate_series(1, 3) as id, 'xxxxx'::text as t; +alter table join_foo set (parallel_workers = 0); +create table join_bar as select generate_series(1, 10000) as id, 'xxxxx'::text as t; +alter table join_bar set (parallel_workers = 2); +-- multi-batch with rescan, parallel-oblivious +savepoint settings; +set enable_parallel_hash = off; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '64kB'; +set hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Hash + -> Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + t +(1 row) + +rollback to settings; +-- single-batch with rescan, parallel-oblivious +savepoint settings; +set enable_parallel_hash = off; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '4MB'; +set hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Hash + -> Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + f +(1 row) + +rollback to settings; +-- multi-batch with rescan, parallel-aware +savepoint settings; +set enable_parallel_hash = on; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 
2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '64kB'; +set hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Parallel Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Parallel Hash + -> Parallel Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + t +(1 row) + +rollback to settings; +-- single-batch with rescan, parallel-aware +savepoint settings; +set enable_parallel_hash = on; +set parallel_leader_participation = off; +set min_parallel_table_scan_size = 0; +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set max_parallel_workers_per_gather = 2; +set enable_material = off; +set enable_mergejoin = off; +set work_mem = '4MB'; +set hash_mem_multiplier = 1.0; +explain (costs off) + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: ((join_foo.id < (b1.id + 1)) AND (join_foo.id > (b1.id - 1))) + -> Seq Scan on join_foo + -> Gather + Workers Planned: 2 + -> Parallel Hash Join + Hash Cond: (b1.id = b2.id) + -> Parallel Seq Scan on join_bar b1 + -> Parallel Hash + -> Parallel Seq Scan on join_bar b2 +(11 rows) + +select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; + count +------- + 3 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select count(*) from join_foo + left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss + on join_foo.id < ss.id + 1 and join_foo.id > ss.id - 1; +$$); + multibatch +------------ + f +(1 row) + +rollback to settings; +-- A full outer join where every record is matched. 
+-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r full outer join simple s using (id); + count +------- + 20000 +(1 row) + +rollback to settings; +-- parallelism not possible with parallel-oblivious full hash join +savepoint settings; +set enable_parallel_hash = off; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: (r.id = s.id) + -> Seq Scan on simple r + -> Hash + -> Seq Scan on simple s +(6 rows) + +select count(*) from simple r full outer join simple s using (id); + count +------- + 20000 +(1 row) + +rollback to settings; +-- parallelism is possible with parallel-aware full hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s using (id); + QUERY PLAN +------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Full Join + Hash Cond: (r.id = s.id) + -> Parallel Seq Scan on simple r + -> Parallel Hash + -> Parallel Seq Scan on simple s +(9 rows) + +select count(*) from simple r full outer join simple s using (id); + count +------- + 20000 +(1 row) + +rollback to settings; +-- A full outer join where every record is not matched. 
+-- non-parallel +savepoint settings; +set local max_parallel_workers_per_gather = 0; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: ((0 - s.id) = r.id) + -> Seq Scan on simple s + -> Hash + -> Seq Scan on simple r +(6 rows) + +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + count +------- + 40000 +(1 row) + +rollback to settings; +-- parallelism not possible with parallel-oblivious full hash join +savepoint settings; +set enable_parallel_hash = off; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Full Join + Hash Cond: ((0 - s.id) = r.id) + -> Seq Scan on simple s + -> Hash + -> Seq Scan on simple r +(6 rows) + +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + count +------- + 40000 +(1 row) + +rollback to settings; +-- parallelism is possible with parallel-aware full hash join +savepoint settings; +set local max_parallel_workers_per_gather = 2; +explain (costs off) + select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + QUERY PLAN +------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Full Join + Hash Cond: ((0 - s.id) = r.id) + -> Parallel Seq Scan on simple s + -> Parallel Hash + -> Parallel Seq Scan on simple r +(9 rows) + +select count(*) from simple r full outer join simple s on (r.id = 0 - s.id); + count +------- + 40000 +(1 row) + +rollback to settings; +-- exercise special code paths for huge tuples (note use of non-strict +-- expression and left join required to get the detoasted tuple into +-- the hash table) +-- parallel with parallel-aware hash join (hits ExecParallelHashLoadTuple and +-- sts_puttuple oversized tuple cases because it's multi-batch) +savepoint settings; +set max_parallel_workers_per_gather = 2; +set enable_parallel_hash = on; +set work_mem = '128kB'; +set hash_mem_multiplier = 1.0; +explain (costs off) + select length(max(s.t)) + from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); + QUERY PLAN +---------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Hash Left Join + Hash Cond: (wide.id = wide_1.id) + -> Parallel Seq Scan on wide + -> Parallel Hash + -> Parallel Seq Scan on wide wide_1 +(9 rows) + +select length(max(s.t)) +from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); + length +-------- + 320000 +(1 row) + +select final > 1 as multibatch + from hash_join_batches( +$$ + select length(max(s.t)) + from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id); +$$); + multibatch +------------ + t +(1 row) + +rollback to settings; +-- Hash join reuses the HOT status bit to indicate match status. This can only +-- be guaranteed to produce correct results if all the hash join tuple match +-- bits are reset before reuse. This is done upon loading them into the +-- hashtable. 
+SAVEPOINT settings; +SET enable_parallel_hash = on; +SET min_parallel_table_scan_size = 0; +SET parallel_setup_cost = 0; +SET parallel_tuple_cost = 0; +CREATE TABLE hjtest_matchbits_t1(id int); +CREATE TABLE hjtest_matchbits_t2(id int); +INSERT INTO hjtest_matchbits_t1 VALUES (1); +INSERT INTO hjtest_matchbits_t2 VALUES (2); +-- Update should create a HOT tuple. If this status bit isn't cleared, we won't +-- correctly emit the NULL-extended unmatching tuple in full hash join. +UPDATE hjtest_matchbits_t2 set id = 2; +SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id + ORDER BY t1.id; + id | id +----+---- + 1 | + | 2 +(2 rows) + +-- Test serial full hash join. +-- Resetting parallel_setup_cost should force a serial plan. +-- Just to be safe, however, set enable_parallel_hash to off, as parallel full +-- hash joins are only supported with shared hashtables. +RESET parallel_setup_cost; +SET enable_parallel_hash = off; +SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id = t2.id; + id | id +----+---- + 1 | + | 2 +(2 rows) + +ROLLBACK TO settings; +rollback; +-- Verify that hash key expressions reference the correct +-- nodes. Hashjoin's hashkeys need to reference its outer plan, Hash's +-- need to reference Hash's outer plan (which is below HashJoin's +-- inner plan). It's not trivial to verify that the references are +-- correct (we don't display the hashkeys themselves), but if the +-- hashkeys contain subplan references, those will be displayed. Force +-- subplans to appear just about everywhere. +-- +-- Bug report: +-- https://www.postgresql.org/message-id/CAPpHfdvGVegF_TKKRiBrSmatJL2dR9uwFCuR%2BteQ_8tEXU8mxg%40mail.gmail.com +-- +BEGIN; +SET LOCAL enable_sort = OFF; -- avoid mergejoins +SET LOCAL from_collapse_limit = 1; -- allows easy changing of join order +CREATE TABLE hjtest_1 (a text, b int, id int, c bool); +CREATE TABLE hjtest_2 (a bool, id int, b text, c int); +INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 2, 1, false); -- matches +INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 1, 2, false); -- fails id join condition +INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 20, 1, false); -- fails < 50 +INSERT INTO hjtest_1(a, b, id, c) VALUES ('text', 1, 1, false); -- fails (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) +INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 2); -- matches +INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 3, 'another', 7); -- fails id join condition +INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 90); -- fails < 55 +INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'another', 3); -- fails (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) +INSERT INTO hjtest_2(a, id, b, c) VALUES (true, 1, 'text', 1); -- fails hjtest_1.a <> hjtest_2.b; +EXPLAIN (COSTS OFF, VERBOSE) +SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 +FROM hjtest_1, hjtest_2 +WHERE + hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) + AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) + AND (SELECT hjtest_1.b * 5) < 50 + AND (SELECT hjtest_2.c * 5) < 55 + AND hjtest_1.a <> hjtest_2.b; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Hash Join + Output: hjtest_1.a, hjtest_2.a, (hjtest_1.tableoid)::regclass, (hjtest_2.tableoid)::regclass + Hash Cond: ((hjtest_1.id = (SubPlan 1)) AND ((SubPlan 2) = (SubPlan 3))) + Join Filter: (hjtest_1.a <> hjtest_2.b) + -> Seq Scan on public.hjtest_1 + 
Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b + Filter: ((SubPlan 4) < 50) + SubPlan 4 + -> Result + Output: (hjtest_1.b * 5) + -> Hash + Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b + -> Seq Scan on public.hjtest_2 + Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b + Filter: ((SubPlan 5) < 55) + SubPlan 5 + -> Result + Output: (hjtest_2.c * 5) + SubPlan 1 + -> Result + Output: 1 + One-Time Filter: (hjtest_2.id = 1) + SubPlan 3 + -> Result + Output: (hjtest_2.c * 5) + SubPlan 2 + -> Result + Output: (hjtest_1.b * 5) +(28 rows) + +SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 +FROM hjtest_1, hjtest_2 +WHERE + hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) + AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) + AND (SELECT hjtest_1.b * 5) < 50 + AND (SELECT hjtest_2.c * 5) < 55 + AND hjtest_1.a <> hjtest_2.b; + a1 | a2 | t1 | t2 +------+----+----------+---------- + text | t | hjtest_1 | hjtest_2 +(1 row) + +EXPLAIN (COSTS OFF, VERBOSE) +SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 +FROM hjtest_2, hjtest_1 +WHERE + hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) + AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) + AND (SELECT hjtest_1.b * 5) < 50 + AND (SELECT hjtest_2.c * 5) < 55 + AND hjtest_1.a <> hjtest_2.b; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Hash Join + Output: hjtest_1.a, hjtest_2.a, (hjtest_1.tableoid)::regclass, (hjtest_2.tableoid)::regclass + Hash Cond: (((SubPlan 1) = hjtest_1.id) AND ((SubPlan 3) = (SubPlan 2))) + Join Filter: (hjtest_1.a <> hjtest_2.b) + -> Seq Scan on public.hjtest_2 + Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b + Filter: ((SubPlan 5) < 55) + SubPlan 5 + -> Result + Output: (hjtest_2.c * 5) + -> Hash + Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b + -> Seq Scan on public.hjtest_1 + Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b + Filter: ((SubPlan 4) < 50) + SubPlan 4 + -> Result + Output: (hjtest_1.b * 5) + SubPlan 2 + -> Result + Output: (hjtest_1.b * 5) + SubPlan 1 + -> Result + Output: 1 + One-Time Filter: (hjtest_2.id = 1) + SubPlan 3 + -> Result + Output: (hjtest_2.c * 5) +(28 rows) + +SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2 +FROM hjtest_2, hjtest_1 +WHERE + hjtest_1.id = (SELECT 1 WHERE hjtest_2.id = 1) + AND (SELECT hjtest_1.b * 5) = (SELECT hjtest_2.c*5) + AND (SELECT hjtest_1.b * 5) < 50 + AND (SELECT hjtest_2.c * 5) < 55 + AND hjtest_1.a <> hjtest_2.b; + a1 | a2 | t1 | t2 +------+----+----------+---------- + text | t | hjtest_1 | hjtest_2 +(1 row) + +ROLLBACK; +-- Verify that we behave sanely when the inner hash keys contain parameters +-- (that is, outer or lateral references). This situation has to defeat +-- re-use of the inner hash table across rescans. 
+begin; +set local enable_hashjoin = on; +explain (costs off) +select i8.q2, ss.* from +int8_tbl i8, +lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4 + on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss; + QUERY PLAN +----------------------------------------------------------- + Nested Loop + -> Seq Scan on int8_tbl i8 + -> Sort + Sort Key: t1.fivethous, i4.f1 + -> Hash Join + Hash Cond: (t1.fivethous = (i4.f1 + i8.q2)) + -> Seq Scan on tenk1 t1 + -> Hash + -> Seq Scan on int4_tbl i4 +(9 rows) + +select i8.q2, ss.* from +int8_tbl i8, +lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4 + on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss; + q2 | fivethous | f1 +-----+-----------+---- + 456 | 456 | 0 + 456 | 456 | 0 + 123 | 123 | 0 + 123 | 123 | 0 +(4 rows) + +rollback; diff --git a/src/test/regress/expected/json.out b/src/test/regress/expected/json.out new file mode 100644 index 0000000..aa29bc5 --- /dev/null +++ b/src/test/regress/expected/json.out @@ -0,0 +1,2667 @@ +-- Strings. +SELECT '""'::json; -- OK. + json +------ + "" +(1 row) + +SELECT $$''$$::json; -- ERROR, single quotes are not allowed +ERROR: invalid input syntax for type json +LINE 1: SELECT $$''$$::json; + ^ +DETAIL: Token "'" is invalid. +CONTEXT: JSON data, line 1: '... +SELECT '"abc"'::json; -- OK + json +------- + "abc" +(1 row) + +SELECT '"abc'::json; -- ERROR, quotes not closed +ERROR: invalid input syntax for type json +LINE 1: SELECT '"abc'::json; + ^ +DETAIL: Token ""abc" is invalid. +CONTEXT: JSON data, line 1: "abc +SELECT '"abc +def"'::json; -- ERROR, unescaped newline in string constant +ERROR: invalid input syntax for type json +LINE 1: SELECT '"abc + ^ +DETAIL: Character with value 0x0a must be escaped. +CONTEXT: JSON data, line 1: "abc +SELECT '"\n\"\\"'::json; -- OK, legal escapes + json +---------- + "\n\"\\" +(1 row) + +SELECT '"\v"'::json; -- ERROR, not a valid JSON escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\v"'::json; + ^ +DETAIL: Escape sequence "\v" is invalid. +CONTEXT: JSON data, line 1: "\v... +-- Check fast path for longer strings (at least 16 bytes long) +SELECT ('"'||repeat('.', 12)||'abc"')::json; -- OK + json +------------------- + "............abc" +(1 row) + +SELECT ('"'||repeat('.', 12)||'abc\n"')::json; -- OK, legal escapes + json +--------------------- + "............abc\n" +(1 row) + +-- see json_encoding test for input with unicode escapes +-- Numbers. +SELECT '1'::json; -- OK + json +------ + 1 +(1 row) + +SELECT '0'::json; -- OK + json +------ + 0 +(1 row) + +SELECT '01'::json; -- ERROR, not valid according to JSON spec +ERROR: invalid input syntax for type json +LINE 1: SELECT '01'::json; + ^ +DETAIL: Token "01" is invalid. +CONTEXT: JSON data, line 1: 01 +SELECT '0.1'::json; -- OK + json +------ + 0.1 +(1 row) + +SELECT '9223372036854775808'::json; -- OK, even though it's too large for int8 + json +--------------------- + 9223372036854775808 +(1 row) + +SELECT '1e100'::json; -- OK + json +------- + 1e100 +(1 row) + +SELECT '1.3e100'::json; -- OK + json +--------- + 1.3e100 +(1 row) + +SELECT '1f2'::json; -- ERROR +ERROR: invalid input syntax for type json +LINE 1: SELECT '1f2'::json; + ^ +DETAIL: Token "1f2" is invalid. +CONTEXT: JSON data, line 1: 1f2 +SELECT '0.x1'::json; -- ERROR +ERROR: invalid input syntax for type json +LINE 1: SELECT '0.x1'::json; + ^ +DETAIL: Token "0.x1" is invalid. 
+CONTEXT: JSON data, line 1: 0.x1 +SELECT '1.3ex100'::json; -- ERROR +ERROR: invalid input syntax for type json +LINE 1: SELECT '1.3ex100'::json; + ^ +DETAIL: Token "1.3ex100" is invalid. +CONTEXT: JSON data, line 1: 1.3ex100 +-- Arrays. +SELECT '[]'::json; -- OK + json +------ + [] +(1 row) + +SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::json; -- OK + json +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] +(1 row) + +SELECT '[1,2]'::json; -- OK + json +------- + [1,2] +(1 row) + +SELECT '[1,2,]'::json; -- ERROR, trailing comma +ERROR: invalid input syntax for type json +LINE 1: SELECT '[1,2,]'::json; + ^ +DETAIL: Expected JSON value, but found "]". +CONTEXT: JSON data, line 1: [1,2,] +SELECT '[1,2'::json; -- ERROR, no closing bracket +ERROR: invalid input syntax for type json +LINE 1: SELECT '[1,2'::json; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: [1,2 +SELECT '[1,[2]'::json; -- ERROR, no closing bracket +ERROR: invalid input syntax for type json +LINE 1: SELECT '[1,[2]'::json; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: [1,[2] +-- Objects. +SELECT '{}'::json; -- OK + json +------ + {} +(1 row) + +SELECT '{"abc"}'::json; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc"}'::json; + ^ +DETAIL: Expected ":", but found "}". +CONTEXT: JSON data, line 1: {"abc"} +SELECT '{"abc":1}'::json; -- OK + json +----------- + {"abc":1} +(1 row) + +SELECT '{1:"abc"}'::json; -- ERROR, keys must be strings +ERROR: invalid input syntax for type json +LINE 1: SELECT '{1:"abc"}'::json; + ^ +DETAIL: Expected string or "}", but found "1". +CONTEXT: JSON data, line 1: {1... +SELECT '{"abc",1}'::json; -- ERROR, wrong separator +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc",1}'::json; + ^ +DETAIL: Expected ":", but found ",". +CONTEXT: JSON data, line 1: {"abc",... +SELECT '{"abc"=1}'::json; -- ERROR, totally wrong separator +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc"=1}'::json; + ^ +DETAIL: Token "=" is invalid. +CONTEXT: JSON data, line 1: {"abc"=... +SELECT '{"abc"::1}'::json; -- ERROR, another wrong separator +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc"::1}'::json; + ^ +DETAIL: Expected JSON value, but found ":". +CONTEXT: JSON data, line 1: {"abc"::... +SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::json; -- OK + json +--------------------------------------------------------- + {"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}} +(1 row) + +SELECT '{"abc":1:2}'::json; -- ERROR, colon in wrong spot +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc":1:2}'::json; + ^ +DETAIL: Expected "," or "}", but found ":". +CONTEXT: JSON data, line 1: {"abc":1:... +SELECT '{"abc":1,3}'::json; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc":1,3}'::json; + ^ +DETAIL: Expected string, but found "3". +CONTEXT: JSON data, line 1: {"abc":1,3... +-- Recursion. 
+SET max_stack_depth = '100kB'; +SELECT repeat('[', 10000)::json; +ERROR: stack depth limit exceeded +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. +SELECT repeat('{"a":', 10000)::json; +ERROR: stack depth limit exceeded +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. +RESET max_stack_depth; +-- Miscellaneous stuff. +SELECT 'true'::json; -- OK + json +------ + true +(1 row) + +SELECT 'false'::json; -- OK + json +------- + false +(1 row) + +SELECT 'null'::json; -- OK + json +------ + null +(1 row) + +SELECT ' true '::json; -- OK, even with extra whitespace + json +-------- + true +(1 row) + +SELECT 'true false'::json; -- ERROR, too many values +ERROR: invalid input syntax for type json +LINE 1: SELECT 'true false'::json; + ^ +DETAIL: Expected end of input, but found "false". +CONTEXT: JSON data, line 1: true false +SELECT 'true, false'::json; -- ERROR, too many values +ERROR: invalid input syntax for type json +LINE 1: SELECT 'true, false'::json; + ^ +DETAIL: Expected end of input, but found ",". +CONTEXT: JSON data, line 1: true,... +SELECT 'truf'::json; -- ERROR, not a keyword +ERROR: invalid input syntax for type json +LINE 1: SELECT 'truf'::json; + ^ +DETAIL: Token "truf" is invalid. +CONTEXT: JSON data, line 1: truf +SELECT 'trues'::json; -- ERROR, not a keyword +ERROR: invalid input syntax for type json +LINE 1: SELECT 'trues'::json; + ^ +DETAIL: Token "trues" is invalid. +CONTEXT: JSON data, line 1: trues +SELECT ''::json; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT ''::json; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: +SELECT ' '::json; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT ' '::json; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: +-- Multi-line JSON input to check ERROR reporting +SELECT '{ + "one": 1, + "two":"two", + "three": + true}'::json; -- OK + json +------------------------------ + { + + "one": 1, + + "two":"two",+ + "three": + + true} +(1 row) + +SELECT '{ + "one": 1, + "two":,"two", -- ERROR extraneous comma before field "two" + "three": + true}'::json; +ERROR: invalid input syntax for type json +LINE 1: SELECT '{ + ^ +DETAIL: Expected JSON value, but found ",". +CONTEXT: JSON data, line 3: "two":,... +SELECT '{ + "one": 1, + "two":"two", + "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::json; +ERROR: invalid input syntax for type json +LINE 1: SELECT '{ + ^ +DETAIL: Expected JSON value, but found "}". +CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":} +-- ERROR missing value for last field +-- test non-error-throwing input +select pg_input_is_valid('{"a":true}', 'json'); + pg_input_is_valid +------------------- + t +(1 row) + +select pg_input_is_valid('{"a":true', 'json'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('{"a":true', 'json'); + message | detail | hint | sql_error_code +------------------------------------+--------------------------------------+------+---------------- + invalid input syntax for type json | The input string ended unexpectedly. 
| | 22P02 +(1 row) + +--constructors +-- array_to_json +SELECT array_to_json(array(select 1 as a)); + array_to_json +--------------- + [1] +(1 row) + +SELECT array_to_json(array_agg(q),false) from (select x as b, x * 2 as c from generate_series(1,3) x) q; + array_to_json +--------------------------------------------- + [{"b":1,"c":2},{"b":2,"c":4},{"b":3,"c":6}] +(1 row) + +SELECT array_to_json(array_agg(q),true) from (select x as b, x * 2 as c from generate_series(1,3) x) q; + array_to_json +----------------- + [{"b":1,"c":2},+ + {"b":2,"c":4},+ + {"b":3,"c":6}] +(1 row) + +SELECT array_to_json(array_agg(q),false) + FROM ( SELECT $$a$$ || x AS b, y AS c, + ARRAY[ROW(x.*,ARRAY[1,2,3]), + ROW(y.*,ARRAY[4,5,6])] AS z + FROM generate_series(1,2) x, + generate_series(4,5) y) q; + array_to_json +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]},{"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]},{"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] +(1 row) + +SELECT array_to_json(array_agg(x),false) from generate_series(5,10) x; + array_to_json +---------------- + [5,6,7,8,9,10] +(1 row) + +SELECT array_to_json('{{1,5},{99,100}}'::int[]); + array_to_json +------------------ + [[1,5],[99,100]] +(1 row) + +-- row_to_json +SELECT row_to_json(row(1,'foo')); + row_to_json +--------------------- + {"f1":1,"f2":"foo"} +(1 row) + +SELECT row_to_json(q) +FROM (SELECT $$a$$ || x AS b, + y AS c, + ARRAY[ROW(x.*,ARRAY[1,2,3]), + ROW(y.*,ARRAY[4,5,6])] AS z + FROM generate_series(1,2) x, + generate_series(4,5) y) q; + row_to_json +-------------------------------------------------------------------- + {"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} + {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} + {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} + {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} +(4 rows) + +SELECT row_to_json(q,true) +FROM (SELECT $$a$$ || x AS b, + y AS c, + ARRAY[ROW(x.*,ARRAY[1,2,3]), + ROW(y.*,ARRAY[4,5,6])] AS z + FROM generate_series(1,2) x, + generate_series(4,5) y) q; + row_to_json +----------------------------------------------------- + {"b":"a1", + + "c":4, + + "z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} + {"b":"a1", + + "c":5, + + "z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} + {"b":"a2", + + "c":4, + + "z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]} + {"b":"a2", + + "c":5, + + "z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]} +(4 rows) + +CREATE TEMP TABLE rows AS +SELECT x, 'txt' || x as y +FROM generate_series(1,3) AS x; +SELECT row_to_json(q,true) +FROM rows q; + row_to_json +-------------- + {"x":1, + + "y":"txt1"} + {"x":2, + + "y":"txt2"} + {"x":3, + + "y":"txt3"} +(3 rows) + +SELECT row_to_json(row((select array_agg(x) as d from generate_series(5,10) x)),false); + row_to_json +----------------------- + {"f1":[5,6,7,8,9,10]} +(1 row) + +-- anyarray column +analyze rows; +select attname, to_json(histogram_bounds) histogram_bounds +from pg_stats +where tablename = 'rows' and + schemaname = pg_my_temp_schema()::regnamespace::text +order by 1; + attname | histogram_bounds 
+---------+------------------------ + x | [1,2,3] + y | ["txt1","txt2","txt3"] +(2 rows) + +-- to_json, timestamps +select to_json(timestamp '2014-05-28 12:22:35.614298'); + to_json +------------------------------ + "2014-05-28T12:22:35.614298" +(1 row) + +BEGIN; +SET LOCAL TIME ZONE 10.5; +select to_json(timestamptz '2014-05-28 12:22:35.614298-04'); + to_json +------------------------------------ + "2014-05-29T02:52:35.614298+10:30" +(1 row) + +SET LOCAL TIME ZONE -8; +select to_json(timestamptz '2014-05-28 12:22:35.614298-04'); + to_json +------------------------------------ + "2014-05-28T08:22:35.614298-08:00" +(1 row) + +COMMIT; +select to_json(date '2014-05-28'); + to_json +-------------- + "2014-05-28" +(1 row) + +select to_json(date 'Infinity'); + to_json +------------ + "infinity" +(1 row) + +select to_json(date '-Infinity'); + to_json +------------- + "-infinity" +(1 row) + +select to_json(timestamp 'Infinity'); + to_json +------------ + "infinity" +(1 row) + +select to_json(timestamp '-Infinity'); + to_json +------------- + "-infinity" +(1 row) + +select to_json(timestamptz 'Infinity'); + to_json +------------ + "infinity" +(1 row) + +select to_json(timestamptz '-Infinity'); + to_json +------------- + "-infinity" +(1 row) + +--json_agg +SELECT json_agg(q) + FROM ( SELECT $$a$$ || x AS b, y AS c, + ARRAY[ROW(x.*,ARRAY[1,2,3]), + ROW(y.*,ARRAY[4,5,6])] AS z + FROM generate_series(1,2) x, + generate_series(4,5) y) q; + json_agg +----------------------------------------------------------------------- + [{"b":"a1","c":4,"z":[{"f1":1,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, + + {"b":"a1","c":5,"z":[{"f1":1,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}, + + {"b":"a2","c":4,"z":[{"f1":2,"f2":[1,2,3]},{"f1":4,"f2":[4,5,6]}]}, + + {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] +(1 row) + +SELECT json_agg(q ORDER BY x, y) + FROM rows q; + json_agg +----------------------- + [{"x":1,"y":"txt1"}, + + {"x":2,"y":"txt2"}, + + {"x":3,"y":"txt3"}] +(1 row) + +UPDATE rows SET x = NULL WHERE x = 1; +SELECT json_agg(q ORDER BY x NULLS FIRST, y) + FROM rows q; + json_agg +-------------------------- + [{"x":null,"y":"txt1"}, + + {"x":2,"y":"txt2"}, + + {"x":3,"y":"txt3"}] +(1 row) + +-- non-numeric output +SELECT row_to_json(q) +FROM (SELECT 'NaN'::float8 AS "float8field") q; + row_to_json +----------------------- + {"float8field":"NaN"} +(1 row) + +SELECT row_to_json(q) +FROM (SELECT 'Infinity'::float8 AS "float8field") q; + row_to_json +---------------------------- + {"float8field":"Infinity"} +(1 row) + +SELECT row_to_json(q) +FROM (SELECT '-Infinity'::float8 AS "float8field") q; + row_to_json +----------------------------- + {"float8field":"-Infinity"} +(1 row) + +-- json input +SELECT row_to_json(q) +FROM (SELECT '{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}'::json AS "jsonfield") q; + row_to_json +------------------------------------------------------------------ + {"jsonfield":{"a":1,"b": [2,3,4,"d","e","f"],"c":{"p":1,"q":2}}} +(1 row) + +-- json extraction functions +CREATE TEMP TABLE test_json ( + json_type text, + test_json json +); +INSERT INTO test_json VALUES +('scalar','"a scalar"'), +('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), +('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); +SELECT test_json -> 'x' +FROM test_json +WHERE json_type = 'scalar'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 'x' +FROM test_json +WHERE json_type = 'array'; + ?column? 
+---------- + +(1 row) + +SELECT test_json -> 'x' +FROM test_json +WHERE json_type = 'object'; + ?column? +---------- + +(1 row) + +SELECT test_json->'field2' +FROM test_json +WHERE json_type = 'object'; + ?column? +---------- + "val2" +(1 row) + +SELECT test_json->>'field2' +FROM test_json +WHERE json_type = 'object'; + ?column? +---------- + val2 +(1 row) + +SELECT test_json -> 2 +FROM test_json +WHERE json_type = 'scalar'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 2 +FROM test_json +WHERE json_type = 'array'; + ?column? +---------- + "two" +(1 row) + +SELECT test_json -> -1 +FROM test_json +WHERE json_type = 'array'; + ?column? +---------- + {"f1":9} +(1 row) + +SELECT test_json -> 2 +FROM test_json +WHERE json_type = 'object'; + ?column? +---------- + +(1 row) + +SELECT test_json->>2 +FROM test_json +WHERE json_type = 'array'; + ?column? +---------- + two +(1 row) + +SELECT test_json ->> 6 FROM test_json WHERE json_type = 'array'; + ?column? +---------- + [1,2,3] +(1 row) + +SELECT test_json ->> 7 FROM test_json WHERE json_type = 'array'; + ?column? +---------- + {"f1":9} +(1 row) + +SELECT test_json ->> 'field4' FROM test_json WHERE json_type = 'object'; + ?column? +---------- + 4 +(1 row) + +SELECT test_json ->> 'field5' FROM test_json WHERE json_type = 'object'; + ?column? +---------- + [1,2,3] +(1 row) + +SELECT test_json ->> 'field6' FROM test_json WHERE json_type = 'object'; + ?column? +---------- + {"f1":9} +(1 row) + +SELECT json_object_keys(test_json) +FROM test_json +WHERE json_type = 'scalar'; +ERROR: cannot call json_object_keys on a scalar +SELECT json_object_keys(test_json) +FROM test_json +WHERE json_type = 'array'; +ERROR: cannot call json_object_keys on an array +SELECT json_object_keys(test_json) +FROM test_json +WHERE json_type = 'object'; + json_object_keys +------------------ + field1 + field2 + field3 + field4 + field5 + field6 +(6 rows) + +-- test extending object_keys resultset - initial resultset size is 256 +select count(*) from + (select json_object_keys(json_object(array_agg(g))) + from (select unnest(array['f'||n,n::text])as g + from generate_series(1,300) as n) x ) y; + count +------- + 300 +(1 row) + +-- nulls +select (test_json->'field3') is null as expect_false +from test_json +where json_type = 'object'; + expect_false +-------------- + f +(1 row) + +select (test_json->>'field3') is null as expect_true +from test_json +where json_type = 'object'; + expect_true +------------- + t +(1 row) + +select (test_json->3) is null as expect_false +from test_json +where json_type = 'array'; + expect_false +-------------- + f +(1 row) + +select (test_json->>3) is null as expect_true +from test_json +where json_type = 'array'; + expect_true +------------- + t +(1 row) + +-- corner cases +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> ''; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json -> 1; + ?column? +------------- + {"b": "cc"} +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json -> 3; + ?column? 
+---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json -> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": "c", "b": null}'::json -> 'b'; + ?column? +---------- + null +(1 row) + +select '"foo"'::json -> 1; + ?column? +---------- + +(1 row) + +select '"foo"'::json -> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::text; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> null::int; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 1; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json ->> ''; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json ->> 1; + ?column? +------------- + {"b": "cc"} +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json ->> 3; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json ->> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": "c", "b": null}'::json ->> 'b'; + ?column? +---------- + +(1 row) + +select '"foo"'::json ->> 1; + ?column? +---------- + +(1 row) + +select '"foo"'::json ->> 'z'; + ?column? +---------- + +(1 row) + +-- array length +SELECT json_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]'); + json_array_length +------------------- + 5 +(1 row) + +SELECT json_array_length('[]'); + json_array_length +------------------- + 0 +(1 row) + +SELECT json_array_length('{"f1":1,"f2":[5,6]}'); +ERROR: cannot get array length of a non-array +SELECT json_array_length('4'); +ERROR: cannot get array length of a scalar +-- each +select json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}'); + json_each +------------------- + (f1,"[1,2,3]") + (f2,"{""f3"":1}") + (f4,null) +(3 rows) + +select * from json_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; + key | value +-----+----------- + f1 | [1,2,3] + f2 | {"f3":1} + f4 | null + f5 | 99 + f6 | "stringy" +(5 rows) + +select json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}'); + json_each_text +------------------- + (f1,"[1,2,3]") + (f2,"{""f3"":1}") + (f4,) + (f5,null) +(4 rows) + +select * from json_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; + key | value +-----+---------- + f1 | [1,2,3] + f2 | {"f3":1} + f4 | + f5 | 99 + f6 | stringy +(5 rows) + +-- extract_path, extract_path_as_text +select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); + json_extract_path +------------------- + "stringy" +(1 row) + +select json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); + json_extract_path +------------------- + {"f3":1} +(1 row) + +select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); + json_extract_path +------------------- + "f3" +(1 row) + +select json_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); + json_extract_path +------------------- + 1 +(1 row) + +select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); + json_extract_path_text +------------------------ + stringy +(1 row) + +select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); + json_extract_path_text +------------------------ + {"f3":1} +(1 row) + +select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); + json_extract_path_text +------------------------ + f3 +(1 
row) + +select json_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); + json_extract_path_text +------------------------ + 1 +(1 row) + +-- extract_path nulls +select json_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_false; + expect_false +-------------- + f +(1 row) + +select json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') is null as expect_true; + expect_true +------------- + t +(1 row) + +select json_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_false; + expect_false +-------------- + f +(1 row) + +select json_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') is null as expect_true; + expect_true +------------- + t +(1 row) + +-- extract_path operators +select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f4','f6']; + ?column? +----------- + "stringy" +(1 row) + +select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2']; + ?column? +---------- + {"f3":1} +(1 row) + +select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','0']; + ?column? +---------- + "f3" +(1 row) + +select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>array['f2','1']; + ?column? +---------- + 1 +(1 row) + +select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f4','f6']; + ?column? +---------- + stringy +(1 row) + +select '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2']; + ?column? +---------- + {"f3":1} +(1 row) + +select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','0']; + ?column? +---------- + f3 +(1 row) + +select '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::json#>>array['f2','1']; + ?column? +---------- + 1 +(1 row) + +-- corner cases for same +select '{"a": {"b":{"c": "foo"}}}'::json #> '{}'; + ?column? +--------------------------- + {"a": {"b":{"c": "foo"}}} +(1 row) + +select '[1,2,3]'::json #> '{}'; + ?column? +---------- + [1,2,3] +(1 row) + +select '"foo"'::json #> '{}'; + ?column? +---------- + "foo" +(1 row) + +select '42'::json #> '{}'; + ?column? +---------- + 42 +(1 row) + +select 'null'::json #> '{}'; + ?column? +---------- + null +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a']; + ?column? +-------------------- + {"b":{"c": "foo"}} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', null]; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a', '']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b']; + ?column? +-------------- + {"c": "foo"} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c']; + ?column? +---------- + "foo" +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','b','c','d']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #> array['a','z','c']; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','1','b']; + ?column? +---------- + "cc" +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #> array['a','z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json #> array['1','b']; + ?column? +---------- + "cc" +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json #> array['z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": null}]'::json #> array['1','b']; + ?column? 
+---------- + null +(1 row) + +select '"foo"'::json #> array['z']; + ?column? +---------- + +(1 row) + +select '42'::json #> array['f2']; + ?column? +---------- + +(1 row) + +select '42'::json #> array['0']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> '{}'; + ?column? +--------------------------- + {"a": {"b":{"c": "foo"}}} +(1 row) + +select '[1,2,3]'::json #>> '{}'; + ?column? +---------- + [1,2,3] +(1 row) + +select '"foo"'::json #>> '{}'; + ?column? +---------- + foo +(1 row) + +select '42'::json #>> '{}'; + ?column? +---------- + 42 +(1 row) + +select 'null'::json #>> '{}'; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a']; + ?column? +-------------------- + {"b":{"c": "foo"}} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', null]; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a', '']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b']; + ?column? +-------------- + {"c": "foo"} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c']; + ?column? +---------- + foo +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','b','c','d']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::json #>> array['a','z','c']; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','1','b']; + ?column? +---------- + cc +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json #>> array['a','z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['1','b']; + ?column? +---------- + cc +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::json #>> array['z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": null}]'::json #>> array['1','b']; + ?column? +---------- + +(1 row) + +select '"foo"'::json #>> array['z']; + ?column? +---------- + +(1 row) + +select '42'::json #>> array['f2']; + ?column? +---------- + +(1 row) + +select '42'::json #>> array['0']; + ?column? 
+---------- + +(1 row) + +-- array_elements +select json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); + json_array_elements +----------------------- + 1 + true + [1,[2,3]] + null + {"f1":1,"f2":[7,8,9]} + false + "stringy" +(7 rows) + +select * from json_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; + value +----------------------- + 1 + true + [1,[2,3]] + null + {"f1":1,"f2":[7,8,9]} + false + "stringy" +(7 rows) + +select json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); + json_array_elements_text +-------------------------- + 1 + true + [1,[2,3]] + + {"f1":1,"f2":[7,8,9]} + false + stringy +(7 rows) + +select * from json_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; + value +----------------------- + 1 + true + [1,[2,3]] + + {"f1":1,"f2":[7,8,9]} + false + stringy +(7 rows) + +-- populate_record +create type jpop as (a text, b int, c timestamp); +CREATE DOMAIN js_int_not_null AS int NOT NULL; +CREATE DOMAIN js_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); +CREATE DOMAIN js_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +create type j_unordered_pair as (x int, y int); +create domain j_ordered_pair as j_unordered_pair check((value).x <= (value).y); +CREATE TYPE jsrec AS ( + i int, + ia _int4, + ia1 int[], + ia2 int[][], + ia3 int[][][], + ia1d js_int_array_1d, + ia2d js_int_array_2d, + t text, + ta text[], + c char(10), + ca char(10)[], + ts timestamp, + js json, + jsb jsonb, + jsa json[], + rec jpop, + reca jpop[] +); +CREATE TYPE jsrec_i_not_null AS ( + i js_int_not_null +); +select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+--- + blurfl | | +(1 row) + +select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+-------------------------- + blurfl | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +select * from json_populate_record(null::jpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+--- + blurfl | | +(1 row) + +select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+-------------------------- + blurfl | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +select * from json_populate_record(null::jpop,'{"a":[100,200,false],"x":43.2}') q; + a | b | c +-----------------+---+--- + [100,200,false] | | +(1 row) + +select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"a":[100,200,false],"x":43.2}') q; + a | b | c +-----------------+---+-------------------------- + [100,200,false] | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{"c":[100,200,false],"x":43.2}') q; +ERROR: invalid input syntax for type timestamp: "[100,200,false]" +select * from json_populate_record(row('x',3,'2012-12-31 15:30:56')::jpop,'{}') q; + a | b | c +---+---+-------------------------- + x | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"x": 43.2}') q; +ERROR: domain js_int_not_null does not allow null values +SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": null}') q; +ERROR: domain js_int_not_null does not allow null values +SELECT i FROM json_populate_record(NULL::jsrec_i_not_null, '{"i": 12345}') q; + i +------- + 12345 +(1 row) + +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 
null}') q; + ia +---- + +(1 row) + +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ia". +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [1, "2", null, 4]}') q; + ia +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1, 2], [3, 4]]}') q; + ia +--------------- + {{1,2},{3,4}} +(1 row) + +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], 2]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": [[1], [2, 3]]}') q; +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. +SELECT ia FROM json_populate_record(NULL::jsrec, '{"ia": "{1,2,3}"}') q; + ia +--------- + {1,2,3} +(1 row) + +SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": null}') q; + ia1 +----- + +(1 row) + +SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ia1". +SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [1, "2", null, 4]}') q; + ia1 +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia1 FROM json_populate_record(NULL::jsrec, '{"ia1": [[1, 2, 3]]}') q; + ia1 +----------- + {{1,2,3}} +(1 row) + +SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": null}') q; + ia1d +------ + +(1 row) + +SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ia1d". +SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null, 4]}') q; +ERROR: value for domain js_int_array_1d violates check constraint "js_int_array_1d_check" +SELECT ia1d FROM json_populate_record(NULL::jsrec, '{"ia1d": [1, "2", null]}') q; + ia1d +------------ + {1,2,NULL} +(1 row) + +SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [1, "2", null, 4]}') q; + ia2 +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [null, 4]]}') q; + ia2 +------------------ + {{1,2},{NULL,4}} +(1 row) + +SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[], []]}') q; + ia2 +----- + {} +(1 row) + +SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], [3]]}') q; +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. +SELECT ia2 FROM json_populate_record(NULL::jsrec, '{"ia2": [[1, 2], 3, 4]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ia2". 
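(Illustrative aside, not part of the patch.) The cases above show how json_populate_record() fills integer-array columns: a flat JSON array becomes a one-dimensional int[], nested JSON arrays become a multidimensional array whose sub-arrays must have matching lengths, and a JSON string is simply handed to the array type's input function. A minimal standalone sketch against a hypothetical scratch type demo_arr:

create type demo_arr as (ia int[]);
-- nested JSON arrays map onto a 2-D integer array: {{10,20},{30,40}}
select ia from json_populate_record(null::demo_arr, '{"ia": [[10, 20], [30, 40]]}');
-- a JSON string goes through the int[] input function instead: {1,2,3}
select ia from json_populate_record(null::demo_arr, '{"ia": "{1,2,3}"}');
drop type demo_arr;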
+SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; +ERROR: value for domain js_int_array_2d violates check constraint "js_int_array_2d_check" +SELECT ia2d FROM json_populate_record(NULL::jsrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; + ia2d +---------------------- + {{1,2,3},{NULL,5,6}} +(1 row) + +SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [1, "2", null, 4]}') q; + ia3 +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [[1, 2], [null, 4]]}') q; + ia3 +------------------ + {{1,2},{NULL,4}} +(1 row) + +SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q; + ia3 +----- + {} +(1 row) + +SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q; + ia3 +------------------- + {{{1,2}},{{3,4}}} +(1 row) + +SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q; + ia3 +------------------------------- + {{{1,2},{3,4}},{{5,6},{7,8}}} +(1 row) + +SELECT ia3 FROM json_populate_record(NULL::jsrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. +SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": null}') q; + ta +---- + +(1 row) + +SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ta". +SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [1, "2", null, 4]}') q; + ta +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ta FROM json_populate_record(NULL::jsrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ta". +SELECT c FROM json_populate_record(NULL::jsrec, '{"c": null}') q; + c +--- + +(1 row) + +SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaa"}') q; + c +------------ + aaa +(1 row) + +SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaa"}') q; + c +------------ + aaaaaaaaaa +(1 row) + +SELECT c FROM json_populate_record(NULL::jsrec, '{"c": "aaaaaaaaaaaaa"}') q; +ERROR: value too long for type character(10) +SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": null}') q; + ca +---- + +(1 row) + +SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ca". +SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [1, "2", null, 4]}') q; + ca +----------------------------------------------- + {"1 ","2 ",NULL,"4 "} +(1 row) + +SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; +ERROR: value too long for type character(10) +SELECT ca FROM json_populate_record(NULL::jsrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ca". 
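(Illustrative aside, not part of the patch.) For fixed-length character columns the populated values follow the usual bpchar rules: shorter strings are blank-padded to the declared width, and anything longer fails with "value too long for type character(10)", as exercised just above. A hedged sketch with a hypothetical demo_char type:

create type demo_char as (c char(10), ca char(10)[]);
-- both fields are blank-padded to ten characters; an eleven-character
-- input would instead raise "value too long for type character(10)"
select c, ca from json_populate_record(null::demo_char, '{"c": "abc", "ca": ["x", "y"]}');
drop type demo_char;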
+SELECT js FROM json_populate_record(NULL::jsrec, '{"js": null}') q; + js +---- + +(1 row) + +SELECT js FROM json_populate_record(NULL::jsrec, '{"js": true}') q; + js +------ + true +(1 row) + +SELECT js FROM json_populate_record(NULL::jsrec, '{"js": 123.45}') q; + js +-------- + 123.45 +(1 row) + +SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "123.45"}') q; + js +---------- + "123.45" +(1 row) + +SELECT js FROM json_populate_record(NULL::jsrec, '{"js": "abc"}') q; + js +------- + "abc" +(1 row) + +SELECT js FROM json_populate_record(NULL::jsrec, '{"js": [123, "123", null, {"key": "value"}]}') q; + js +-------------------------------------- + [123, "123", null, {"key": "value"}] +(1 row) + +SELECT js FROM json_populate_record(NULL::jsrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q; + js +-------------------------------------- + {"a": "bbb", "b": null, "c": 123.45} +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": null}') q; + jsb +----- + +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": true}') q; + jsb +------ + true +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": 123.45}') q; + jsb +-------- + 123.45 +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "123.45"}') q; + jsb +---------- + "123.45" +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": "abc"}') q; + jsb +------- + "abc" +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q; + jsb +-------------------------------------- + [123, "123", null, {"key": "value"}] +(1 row) + +SELECT jsb FROM json_populate_record(NULL::jsrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q; + jsb +-------------------------------------- + {"a": "bbb", "b": null, "c": 123.45} +(1 row) + +SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": null}') q; + jsa +----- + +(1 row) + +SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "jsa". +SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": [1, "2", null, 4]}') q; + jsa +-------------------- + {1,"\"2\"",NULL,4} +(1 row) + +SELECT jsa FROM json_populate_record(NULL::jsrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q; + jsa +---------------------------------------------------------- + {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{ \"k\" : \"v\" }"} +(1 row) + +SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": 123}') q; +ERROR: cannot call populate_composite on a scalar +SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": [1, 2]}') q; +ERROR: cannot call populate_composite on an array +SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q; + rec +----------------------------------- + (abc,,"Thu Jan 02 00:00:00 2003") +(1 row) + +SELECT rec FROM json_populate_record(NULL::jsrec, '{"rec": "(abc,42,01.02.2003)"}') q; + rec +------------------------------------- + (abc,42,"Thu Jan 02 00:00:00 2003") +(1 row) + +SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "reca". 
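(Illustrative aside, not part of the patch.) A composite-typed column such as rec can be populated in two ways, both exercised above: from a nested JSON object, matched key by key against the composite's fields, or from a JSON string that is parsed with the composite type's record-literal syntax. A minimal sketch with hypothetical types:

create type demo_inner as (a text, b int);
create type demo_outer as (rec demo_inner);
-- nested object, matched by field name: (abc,7)
select rec from json_populate_record(null::demo_outer, '{"rec": {"a": "abc", "b": 7}}');
-- record-literal string, parsed by the composite's input function: (abc,7)
select rec from json_populate_record(null::demo_outer, '{"rec": "(abc,7)"}');
drop type demo_outer;
drop type demo_inner;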
+SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [1, 2]}') q; +ERROR: cannot call populate_composite on a scalar +SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; + reca +-------------------------------------------------------- + {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} +(1 row) + +SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": ["(abc,42,01.02.2003)"]}') q; + reca +------------------------------------------- + {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} +(1 row) + +SELECT reca FROM json_populate_record(NULL::jsrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q; + reca +------------------------------------------- + {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} +(1 row) + +SELECT rec FROM json_populate_record( + row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, + row('x',3,'2012-12-31 15:30:56')::jpop,NULL)::jsrec, + '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}' +) q; + rec +------------------------------------ + (abc,3,"Thu Jan 02 00:00:00 2003") +(1 row) + +-- anonymous record type +SELECT json_populate_record(null::record, '{"x": 0, "y": 1}'); +ERROR: could not determine row type for result of json_populate_record +HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. +SELECT json_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); + json_populate_record +---------------------- + (0,1) +(1 row) + +SELECT * FROM + json_populate_record(null::record, '{"x": 776}') AS (x int, y int); + x | y +-----+--- + 776 | +(1 row) + +-- composite domain +SELECT json_populate_record(null::j_ordered_pair, '{"x": 0, "y": 1}'); + json_populate_record +---------------------- + (0,1) +(1 row) + +SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 0}'); + json_populate_record +---------------------- + (0,2) +(1 row) + +SELECT json_populate_record(row(1,2)::j_ordered_pair, '{"x": 1, "y": 0}'); +ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check" +-- populate_recordset +select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+---+-------------------------- + blurfl | | + | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+----+-------------------------- + blurfl | 99 | + def | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+---+-------------------------- + blurfl | | + | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+----+-------------------------- + blurfl | 99 | + def | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +---------------+----+-------------------------- + [100,200,300] | 99 | + {"z":true} | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(row('def',99,null)::jpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; +ERROR: invalid input syntax 
for type timestamp: "[100,200,300]" +create type jpop2 as (a int, b json, c int, d int); +select * from json_populate_recordset(null::jpop2, '[{"a":2,"c":3,"b":{"z":4},"d":6}]') q; + a | b | c | d +---+---------+---+--- + 2 | {"z":4} | 3 | 6 +(1 row) + +select * from json_populate_recordset(null::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+---+-------------------------- + blurfl | | + | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+----+-------------------------- + blurfl | 99 | + def | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +select * from json_populate_recordset(row('def',99,null)::jpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +---------------+----+-------------------------- + [100,200,300] | 99 | + {"z":true} | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +-- anonymous record type +SELECT json_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); +ERROR: could not determine row type for result of json_populate_recordset +HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. +SELECT json_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); + json_populate_recordset +------------------------- + (0,1) +(1 row) + +SELECT i, json_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') +FROM (VALUES (1),(2)) v(i); + i | json_populate_recordset +---+------------------------- + 1 | (42,50) + 1 | (1,43) + 2 | (42,50) + 2 | (2,43) +(4 rows) + +SELECT * FROM + json_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int); + x | y +-----+--- + 776 | +(1 row) + +-- empty array is a corner case +SELECT json_populate_recordset(null::record, '[]'); +ERROR: could not determine row type for result of json_populate_recordset +HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. +SELECT json_populate_recordset(row(1,2), '[]'); + json_populate_recordset +------------------------- +(0 rows) + +SELECT * FROM json_populate_recordset(NULL::jpop,'[]') q; + a | b | c +---+---+--- +(0 rows) + +SELECT * FROM + json_populate_recordset(null::record, '[]') AS (x int, y int); + x | y +---+--- +(0 rows) + +-- composite domain +SELECT json_populate_recordset(null::j_ordered_pair, '[{"x": 0, "y": 1}]'); + json_populate_recordset +------------------------- + (0,1) +(1 row) + +SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 0}, {"y": 3}]'); + json_populate_recordset +------------------------- + (0,2) + (1,3) +(2 rows) + +SELECT json_populate_recordset(row(1,2)::j_ordered_pair, '[{"x": 1, "y": 0}]'); +ERROR: value for domain j_ordered_pair violates check constraint "j_ordered_pair_check" +-- negative cases where the wrong record type is supplied +select * from json_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 1 attribute, but query expects 2. +select * from json_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. 
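(Illustrative aside, not part of the patch.) With an anonymous record, json_populate_recordset() needs a column definition list in the FROM clause, as the HINT earlier in this block says; each array element then becomes one output row, and keys missing from an element surface as NULLs. A hedged sketch:

-- two rows out: (1,2) and (3,NULL); the second element has no "y" key
select *
  from json_populate_recordset(null::record, '[{"x": 1, "y": 2}, {"x": 3}]')
       as t(x int, y int);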
+select * from json_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 3 attributes, but query expects 2. +select * from json_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. +-- test type info caching in json_populate_record() +CREATE TEMP TABLE jspoptest (js json); +INSERT INTO jspoptest +SELECT '{ + "jsa": [1, "2", null, 4], + "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}, + "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}] +}'::json +FROM generate_series(1, 3); +SELECT (json_populate_record(NULL::jsrec, js)).* FROM jspoptest; + i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca +---+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+-------------------------------------------------------- + | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} + | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} + | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} +(3 rows) + +DROP TYPE jsrec; +DROP TYPE jsrec_i_not_null; +DROP DOMAIN js_int_not_null; +DROP DOMAIN js_int_array_1d; +DROP DOMAIN js_int_array_2d; +DROP DOMAIN j_ordered_pair; +DROP TYPE j_unordered_pair; +--json_typeof() function +select value, json_typeof(value) + from (values (json '123.4'), + (json '-1'), + (json '"foo"'), + (json 'true'), + (json 'false'), + (json 'null'), + (json '[1, 2, 3]'), + (json '[]'), + (json '{"x":"foo", "y":123}'), + (json '{}'), + (NULL::json)) + as data(value); + value | json_typeof +----------------------+------------- + 123.4 | number + -1 | number + "foo" | string + true | boolean + false | boolean + null | null + [1, 2, 3] | array + [] | array + {"x":"foo", "y":123} | object + {} | object + | +(11 rows) + +-- json_build_array, json_build_object, json_object_agg +SELECT json_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); + json_build_array +----------------------------------------------------------------------- + ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1,2,3]}] +(1 row) + +SELECT json_build_array('a', NULL); -- ok + json_build_array +------------------ + ["a", null] +(1 row) + +SELECT json_build_array(VARIADIC NULL::text[]); -- ok + json_build_array +------------------ + +(1 row) + +SELECT json_build_array(VARIADIC '{}'::text[]); -- ok + json_build_array +------------------ + [] +(1 row) + +SELECT json_build_array(VARIADIC '{a,b,c}'::text[]); -- ok + json_build_array +------------------ + ["a", "b", "c"] +(1 row) + +SELECT json_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok + json_build_array +------------------ + ["a", null] +(1 row) + +SELECT json_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok + json_build_array +---------------------- + ["1", "2", "3", "4"] +(1 row) + +SELECT json_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok + json_build_array +------------------ + [1, 2, 3, 4] +(1 row) + +SELECT 
json_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + json_build_array +-------------------- + [1, 4, 2, 5, 3, 6] +(1 row) + +SELECT json_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); + json_build_object +---------------------------------------------------------------------------- + {"a" : 1, "b" : 1.2, "c" : true, "d" : null, "e" : {"x": 3, "y": [1,2,3]}} +(1 row) + +SELECT json_build_object( + 'a', json_build_object('b',false,'c',99), + 'd', json_build_object('e',array[9,8,7]::int[], + 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); + json_build_object +------------------------------------------------------------------------------------------------- + {"a" : {"b" : false, "c" : 99}, "d" : {"e" : [9,8,7], "f" : {"relkind":"r","name":"pg_class"}}} +(1 row) + +SELECT json_build_object('{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of json_build_object() must consist of alternating keys and values. +SELECT json_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array +ERROR: key value must be scalar, not array, composite, or json +SELECT json_build_object('a', 'b', 'c'); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of json_build_object() must consist of alternating keys and values. +SELECT json_build_object(NULL, 'a'); -- error, key cannot be NULL +ERROR: null value not allowed for object key +SELECT json_build_object('a', NULL); -- ok + json_build_object +------------------- + {"a" : null} +(1 row) + +SELECT json_build_object(VARIADIC NULL::text[]); -- ok + json_build_object +------------------- + +(1 row) + +SELECT json_build_object(VARIADIC '{}'::text[]); -- ok + json_build_object +------------------- + {} +(1 row) + +SELECT json_build_object(VARIADIC '{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of json_build_object() must consist of alternating keys and values. 
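(Illustrative aside, not part of the patch.) When json_build_object() is called with a VARIADIC array, the array is flattened into alternating key/value pairs, which is why an odd number of elements fails with the error just above, and why every key is rendered as a JSON string. A small sketch:

-- a two-element text array builds a single-key object: {"env" : "prod"}
select json_build_object(variadic array['env', 'prod']);
-- numeric keys are still rendered as JSON strings: {"1" : 2}
select json_build_object(variadic array[1, 2]);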
+SELECT json_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok + json_build_object +------------------- + {"a" : null} +(1 row) + +SELECT json_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL +ERROR: null value not allowed for object key +SELECT json_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok + json_build_object +------------------------ + {"1" : "2", "3" : "4"} +(1 row) + +SELECT json_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok + json_build_object +-------------------- + {"1" : 2, "3" : 4} +(1 row) + +SELECT json_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + json_build_object +----------------------------- + {"1" : 4, "2" : 5, "3" : 6} +(1 row) + +-- empty objects/arrays +SELECT json_build_array(); + json_build_array +------------------ + [] +(1 row) + +SELECT json_build_object(); + json_build_object +------------------- + {} +(1 row) + +-- make sure keys are quoted +SELECT json_build_object(1,2); + json_build_object +------------------- + {"1" : 2} +(1 row) + +-- keys must be scalar and not null +SELECT json_build_object(null,2); +ERROR: null value not allowed for object key +SELECT json_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; +ERROR: key value must be scalar, not array, composite, or json +SELECT json_build_object(json '{"a":1,"b":2}', 3); +ERROR: key value must be scalar, not array, composite, or json +SELECT json_build_object('{1,2,3}'::int[], 3); +ERROR: key value must be scalar, not array, composite, or json +CREATE TEMP TABLE foo (serial_num int, name text, type text); +INSERT INTO foo VALUES (847001,'t15','GE1043'); +INSERT INTO foo VALUES (847002,'t16','GE1043'); +INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); +SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type))) +FROM foo; + json_build_object +------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }} +(1 row) + +SELECT json_object_agg(name, type) FROM foo; + json_object_agg +---------------------------------------------------------------- + { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" } +(1 row) + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT json_object_agg(name, type) FROM foo; +ERROR: null value not allowed for object key +-- json_object +-- empty object, one dimension +SELECT json_object('{}'); + json_object +------------- + {} +(1 row) + +-- empty object, two dimensions +SELECT json_object('{}', '{}'); + json_object +------------- + {} +(1 row) + +-- one dimension +SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); + json_object +------------------------------------------------------- + {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} +(1 row) + +-- same but with two dimensions +SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); + json_object +------------------------------------------------------- + {"a" : "1", "b" : "2", "3" : null, "d e f" : "a b c"} +(1 row) + +-- odd number error +SELECT json_object('{a,b,c}'); +ERROR: array must have even number of elements +-- one column error +SELECT json_object('{{a},{b}}'); +ERROR: array must have two columns +-- too many columns error +SELECT json_object('{{a,b,c},{b,c,d}}'); +ERROR: array must have two columns +-- too many 
dimensions error +SELECT json_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); +ERROR: wrong number of array subscripts +--two argument form of json_object +select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); + json_object +------------------------------------------------------ + {"a" : "1", "b" : "2", "c" : "3", "d e f" : "a b c"} +(1 row) + +-- too many dimensions +SELECT json_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); +ERROR: wrong number of array subscripts +-- mismatched dimensions +select json_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); +ERROR: mismatched array dimensions +select json_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); +ERROR: mismatched array dimensions +-- null key error +select json_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); +ERROR: null value not allowed for object key +-- empty key is allowed +select json_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); + json_object +----------------------------------------------------- + {"a" : "1", "b" : "2", "" : "3", "d e f" : "a b c"} +(1 row) + +-- json_to_record and json_to_recordset +select * from json_to_record('{"a":1,"b":"foo","c":"bar"}') + as x(a int, b text, d text); + a | b | d +---+-----+--- + 1 | foo | +(1 row) + +select * from json_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]') + as x(a int, b text, c boolean); + a | b | c +---+-----+--- + 1 | foo | + 2 | bar | t +(2 rows) + +select * from json_to_recordset('[{"a":1,"b":{"d":"foo"},"c":true},{"a":2,"c":false,"b":{"d":"bar"}}]') + as x(a int, b json, c boolean); + a | b | c +---+-------------+--- + 1 | {"d":"foo"} | t + 2 | {"d":"bar"} | f +(2 rows) + +select *, c is null as c_is_null +from json_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::json) + as t(a int, b json, c text, x int, ca char(5)[], ia int[][], r jpop); + a | b | c | x | ca | ia | r | c_is_null +---+-----------------+---+---+-------------------+---------------+------------+----------- + 1 | {"c":16, "d":2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t +(1 row) + +select *, c is null as c_is_null +from json_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::json) + as t(a int, b json, c text, x int); + a | b | c | x | c_is_null +---+-----------------+---+---+----------- + 1 | {"c":16, "d":2} | | 8 | t +(1 row) + +select * from json_to_record('{"ia": null}') as x(ia _int4); + ia +---- + +(1 row) + +select * from json_to_record('{"ia": 123}') as x(ia _int4); +ERROR: expected JSON array +HINT: See the value of key "ia". +select * from json_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); + ia +-------------- + {1,2,NULL,4} +(1 row) + +select * from json_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); + ia +--------------- + {{1,2},{3,4}} +(1 row) + +select * from json_to_record('{"ia": [[1], 2]}') as x(ia _int4); +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". +select * from json_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
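(Illustrative aside, not part of the patch.) json_to_record() always needs an AS column definition list, since the JSON itself carries no type information; columns absent from the input come back NULL, and input keys with no matching output column are silently ignored, as in the a/b/d case earlier in this block. A hedged sketch:

-- "c" is not present in the input, so it is NULL; "ignored" has no
-- matching output column and is dropped
select *
  from json_to_record('{"a": 42, "b": "hello", "ignored": true}')
       as t(a int, b text, c text);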
+select * from json_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); + ia2 +--------- + {1,2,3} +(1 row) + +select * from json_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); + ia2 +--------------- + {{1,2},{3,4}} +(1 row) + +select * from json_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); + ia2 +----------------- + {{{1},{2},{3}}} +(1 row) + +select * from json_to_record('{"out": {"key": 1}}') as x(out json); + out +------------ + {"key": 1} +(1 row) + +select * from json_to_record('{"out": [{"key": 1}]}') as x(out json); + out +-------------- + [{"key": 1}] +(1 row) + +select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out json); + out +---------------- + "{\"key\": 1}" +(1 row) + +select * from json_to_record('{"out": {"key": 1}}') as x(out jsonb); + out +------------ + {"key": 1} +(1 row) + +select * from json_to_record('{"out": [{"key": 1}]}') as x(out jsonb); + out +-------------- + [{"key": 1}] +(1 row) + +select * from json_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb); + out +---------------- + "{\"key\": 1}" +(1 row) + +-- json_strip_nulls +select json_strip_nulls(null); + json_strip_nulls +------------------ + +(1 row) + +select json_strip_nulls('1'); + json_strip_nulls +------------------ + 1 +(1 row) + +select json_strip_nulls('"a string"'); + json_strip_nulls +------------------ + "a string" +(1 row) + +select json_strip_nulls('null'); + json_strip_nulls +------------------ + null +(1 row) + +select json_strip_nulls('[1,2,null,3,4]'); + json_strip_nulls +------------------ + [1,2,null,3,4] +(1 row) + +select json_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}'); + json_strip_nulls +------------------------------------ + {"a":1,"c":[2,null,3],"d":{"e":4}} +(1 row) + +select json_strip_nulls('[1,{"a":1,"b":null,"c":2},3]'); + json_strip_nulls +--------------------- + [1,{"a":1,"c":2},3] +(1 row) + +-- an empty object is not null and should not be stripped +select json_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }'); + json_strip_nulls +------------------ + {"a":{},"d":{}} +(1 row) + +-- json to tsvector +select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json); + to_tsvector +--------------------------------------------------------------------------- + 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 +(1 row) + +-- json to tsvector with config +select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::json); + to_tsvector +--------------------------------------------------------------------------- + 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 +(1 row) + +-- json to tsvector with stop words +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::json); + to_tsvector +---------------------------------------------------------------------------- + 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 +(1 row) + +-- json to tsvector with numeric values +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::json); + to_tsvector +--------------------------------- + 'aaa':1 'bbb':3 'ccc':5 'ddd':4 +(1 row) + +-- json_to_tsvector +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); + json_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); + json_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); + json_to_tsvector +------------------ + 'aaa':1 'bbb':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); + json_to_tsvector +------------------ + '123':1 '456':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); + json_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); + json_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"all"'); + json_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"key"'); + json_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"string"'); + json_to_tsvector +------------------ + 'aaa':1 'bbb':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"numeric"'); + json_to_tsvector +------------------ + '123':1 '456':3 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '"boolean"'); + json_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["string", "numeric"]'); + json_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + +-- to_tsvector corner cases +select to_tsvector('""'::json); + to_tsvector +------------- + +(1 row) + +select to_tsvector('{}'::json); + to_tsvector +------------- + +(1 row) + +select to_tsvector('[]'::json); + to_tsvector +------------- + +(1 row) + +select 
to_tsvector('null'::json); + to_tsvector +------------- + +(1 row) + +-- json_to_tsvector corner cases +select json_to_tsvector('""'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('{}'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('[]'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('null'::json, '"all"'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '""'); +ERROR: wrong flag in flag array: "" +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '{}'); +ERROR: wrong flag type, only arrays and scalars are allowed +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '[]'); + json_to_tsvector +------------------ + +(1 row) + +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, 'null'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +select json_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::json, '["all", null]'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +-- ts_headline for json +select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); + ts_headline +--------------------------------------------------------------------------------------------------------- + {"a":"aaa bbb","b":{"c":"ccc ddd fff","c1":"ccc1 ddd1"},"d":["ggg hhh","iii jjj"]} +(1 row) + +select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh')); + ts_headline +---------------------------------------------------------------------------------------- + {"a":"aaa bbb","b":{"c":"ccc ddd fff"},"d":["ggg hhh","iii jjj"]} +(1 row) + +select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); + ts_headline +------------------------------------------------------------------------------------------ + {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]} +(1 row) + +select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::json, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); + ts_headline +------------------------------------------------------------------------------------------ + {"a":"aaa ","b":{"c":"ccc fff","c1":"ccc1 ddd1"},"d":["ggg ","iii jjj"]} +(1 row) + +-- corner cases for ts_headline with json +select ts_headline('null'::json, tsquery('aaa & bbb')); + ts_headline +------------- + null +(1 row) + +select ts_headline('{}'::json, tsquery('aaa & bbb')); + ts_headline +------------- + {} +(1 row) + +select ts_headline('[]'::json, tsquery('aaa & bbb')); + ts_headline +------------- + [] +(1 row) + diff --git a/src/test/regress/expected/json_encoding.out b/src/test/regress/expected/json_encoding.out new file mode 100644 
index 0000000..fe729db --- /dev/null +++ b/src/test/regress/expected/json_encoding.out @@ -0,0 +1,269 @@ +-- +-- encoding-sensitive tests for json and jsonb +-- +-- We provide expected-results files for UTF8 (json_encoding.out) +-- and for SQL_ASCII (json_encoding_1.out). Skip otherwise. +SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') + AS skip_test \gset +\if :skip_test +\quit +\endif +SELECT getdatabaseencoding(); -- just to label the results files + getdatabaseencoding +--------------------- + UTF8 +(1 row) + +-- first json +-- basic unicode input +SELECT '"\u"'::json; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u"'::json; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u" +SELECT '"\u00"'::json; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u00"'::json; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u00" +SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u000g"'::json; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u000g... +SELECT '"\u0000"'::json; -- OK, legal escape + json +---------- + "\u0000" +(1 row) + +SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK + json +---------- + "\uaBcD" +(1 row) + +-- handling of unicode surrogate pairs +select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8; + correct_in_utf8 +---------------------------- + "\ud83d\ude04\ud83d\udc36" +(1 row) + +select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row +ERROR: invalid input syntax for type json +DETAIL: Unicode high surrogate must not follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d... +select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order +ERROR: invalid input syntax for type json +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate +ERROR: invalid input syntax for type json +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83dX... +select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate +ERROR: invalid input syntax for type json +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... 
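(Illustrative aside, not part of the patch.) The surrogate-pair tests above rely on the standard UTF-16 decoding rule: a high surrogate in the range D800-DBFF must be followed by a low surrogate in DC00-DFFF, and the pair encodes code point 0x10000 + (hi - 0xD800) * 0x400 + (lo - 0xDC00). For the pair \ud83d\ude04 used above that works out to U+1F604, which operators returning text can emit as an actual UTF-8 character on a UTF8 database. A quick arithmetic check, using only integer math and not part of the test itself:

-- reconstruct the code point from the surrogate pair by hand: 1f604
select to_hex(65536 + (x'd83d'::int - x'd800'::int) * 1024
                    + (x'de04'::int - x'dc00'::int)) as code_point;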
+--handling of simple unicode escapes +select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8; + correct_in_utf8 +--------------------------------------- + { "a": "the Copyright \u00a9 sign" } +(1 row) + +select json '{ "a": "dollar \u0024 character" }' as correct_everywhere; + correct_everywhere +------------------------------------- + { "a": "dollar \u0024 character" } +(1 row) + +select json '{ "a": "dollar \\u0024 character" }' as not_an_escape; + not_an_escape +-------------------------------------- + { "a": "dollar \\u0024 character" } +(1 row) + +select json '{ "a": "null \u0000 escape" }' as not_unescaped; + not_unescaped +-------------------------------- + { "a": "null \u0000 escape" } +(1 row) + +select json '{ "a": "null \\u0000 escape" }' as not_an_escape; + not_an_escape +--------------------------------- + { "a": "null \\u0000 escape" } +(1 row) + +select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8; + correct_in_utf8 +---------------------- + the Copyright © sign +(1 row) + +select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere; + correct_everywhere +-------------------- + dollar $ character +(1 row) + +select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape; + not_an_escape +------------------------- + dollar \u0024 character +(1 row) + +select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails; +ERROR: unsupported Unicode escape sequence +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: { "a": "null \u0000... +select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape; + not_an_escape +-------------------- + null \u0000 escape +(1 row) + +-- then jsonb +-- basic unicode input +SELECT '"\u"'::jsonb; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u"'::jsonb; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u" +SELECT '"\u00"'::jsonb; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u00"'::jsonb; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u00" +SELECT '"\u000g"'::jsonb; -- ERROR, g is not a hex digit +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u000g"'::jsonb; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u000g... +SELECT '"\u0045"'::jsonb; -- OK, legal escape + jsonb +------- + "E" +(1 row) + +SELECT '"\u0000"'::jsonb; -- ERROR, we don't support U+0000 +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT '"\u0000"'::jsonb; + ^ +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: "\u0000... +-- use octet_length here so we don't get an odd unicode char in the +-- output +SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK + octet_length +-------------- + 5 +(1 row) + +-- handling of unicode surrogate pairs +SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a')::text) AS correct_in_utf8; + correct_in_utf8 +----------------- + 10 +(1 row) + +SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; + ^ +DETAIL: Unicode high surrogate must not follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d... 
+SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83dX... +SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +-- handling of simple unicode escapes +SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8; + correct_in_utf8 +------------------------------- + {"a": "the Copyright © sign"} +(1 row) + +SELECT jsonb '{ "a": "dollar \u0024 character" }' as correct_everywhere; + correct_everywhere +----------------------------- + {"a": "dollar $ character"} +(1 row) + +SELECT jsonb '{ "a": "dollar \\u0024 character" }' as not_an_escape; + not_an_escape +----------------------------------- + {"a": "dollar \\u0024 character"} +(1 row) + +SELECT jsonb '{ "a": "null \u0000 escape" }' as fails; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' as fails; + ^ +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: { "a": "null \u0000... +SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape; + not_an_escape +------------------------------ + {"a": "null \\u0000 escape"} +(1 row) + +SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8; + correct_in_utf8 +---------------------- + the Copyright © sign +(1 row) + +SELECT jsonb '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere; + correct_everywhere +-------------------- + dollar $ character +(1 row) + +SELECT jsonb '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape; + not_an_escape +------------------------- + dollar \u0024 character +(1 row) + +SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fails; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fai... + ^ +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: { "a": "null \u0000... +SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape; + not_an_escape +-------------------- + null \u0000 escape +(1 row) + +-- soft error for input-time failure +select * from pg_input_error_info('{ "a": "\ud83d\ude04\ud83d\udc36" }', 'jsonb'); + message | detail | hint | sql_error_code +---------+--------+------+---------------- + | | | +(1 row) + diff --git a/src/test/regress/expected/json_encoding_1.out b/src/test/regress/expected/json_encoding_1.out new file mode 100644 index 0000000..5c8d91a --- /dev/null +++ b/src/test/regress/expected/json_encoding_1.out @@ -0,0 +1,265 @@ +-- +-- encoding-sensitive tests for json and jsonb +-- +-- We provide expected-results files for UTF8 (json_encoding.out) +-- and for SQL_ASCII (json_encoding_1.out). Skip otherwise. 
+SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') + AS skip_test \gset +\if :skip_test +\quit +\endif +SELECT getdatabaseencoding(); -- just to label the results files + getdatabaseencoding +--------------------- + SQL_ASCII +(1 row) + +-- first json +-- basic unicode input +SELECT '"\u"'::json; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u"'::json; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u" +SELECT '"\u00"'::json; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u00"'::json; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u00" +SELECT '"\u000g"'::json; -- ERROR, g is not a hex digit +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u000g"'::json; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u000g... +SELECT '"\u0000"'::json; -- OK, legal escape + json +---------- + "\u0000" +(1 row) + +SELECT '"\uaBcD"'::json; -- OK, uppercase and lower case both OK + json +---------- + "\uaBcD" +(1 row) + +-- handling of unicode surrogate pairs +select json '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a' as correct_in_utf8; +ERROR: unsupported Unicode escape sequence +DETAIL: Unicode escape value could not be translated to the server's encoding SQL_ASCII. +CONTEXT: JSON data, line 1: { "a": "\ud83d\ude04... +select json '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row +ERROR: invalid input syntax for type json +DETAIL: Unicode high surrogate must not follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d... +select json '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order +ERROR: invalid input syntax for type json +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +select json '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate +ERROR: invalid input syntax for type json +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83dX... +select json '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate +ERROR: invalid input syntax for type json +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +--handling of simple unicode escapes +select json '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8; + correct_in_utf8 +--------------------------------------- + { "a": "the Copyright \u00a9 sign" } +(1 row) + +select json '{ "a": "dollar \u0024 character" }' as correct_everywhere; + correct_everywhere +------------------------------------- + { "a": "dollar \u0024 character" } +(1 row) + +select json '{ "a": "dollar \\u0024 character" }' as not_an_escape; + not_an_escape +-------------------------------------- + { "a": "dollar \\u0024 character" } +(1 row) + +select json '{ "a": "null \u0000 escape" }' as not_unescaped; + not_unescaped +-------------------------------- + { "a": "null \u0000 escape" } +(1 row) + +select json '{ "a": "null \\u0000 escape" }' as not_an_escape; + not_an_escape +--------------------------------- + { "a": "null \\u0000 escape" } +(1 row) + +select json '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8; +ERROR: unsupported Unicode escape sequence +DETAIL: Unicode escape value could not be translated to the server's encoding SQL_ASCII. +CONTEXT: JSON data, line 1: { "a": "the Copyright \u00a9... 
+select json '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere; + correct_everywhere +-------------------- + dollar $ character +(1 row) + +select json '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape; + not_an_escape +------------------------- + dollar \u0024 character +(1 row) + +select json '{ "a": "null \u0000 escape" }' ->> 'a' as fails; +ERROR: unsupported Unicode escape sequence +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: { "a": "null \u0000... +select json '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape; + not_an_escape +-------------------- + null \u0000 escape +(1 row) + +-- then jsonb +-- basic unicode input +SELECT '"\u"'::jsonb; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u"'::jsonb; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u" +SELECT '"\u00"'::jsonb; -- ERROR, incomplete escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u00"'::jsonb; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u00" +SELECT '"\u000g"'::jsonb; -- ERROR, g is not a hex digit +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\u000g"'::jsonb; + ^ +DETAIL: "\u" must be followed by four hexadecimal digits. +CONTEXT: JSON data, line 1: "\u000g... +SELECT '"\u0045"'::jsonb; -- OK, legal escape + jsonb +------- + "E" +(1 row) + +SELECT '"\u0000"'::jsonb; -- ERROR, we don't support U+0000 +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT '"\u0000"'::jsonb; + ^ +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: "\u0000... +-- use octet_length here so we don't get an odd unicode char in the +-- output +SELECT octet_length('"\uaBcD"'::jsonb::text); -- OK, uppercase and lower case both OK +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT octet_length('"\uaBcD"'::jsonb::text); + ^ +DETAIL: Unicode escape value could not be translated to the server's encoding SQL_ASCII. +CONTEXT: JSON data, line 1: "\uaBcD... +-- handling of unicode surrogate pairs +SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc36" }' -> 'a')::text) AS correct_in_utf8; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT octet_length((jsonb '{ "a": "\ud83d\ude04\ud83d\udc3... + ^ +DETAIL: Unicode escape value could not be translated to the server's encoding SQL_ASCII. +CONTEXT: JSON data, line 1: { "a": "\ud83d\ude04... +SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; -- 2 high surrogates in a row +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ud83d\ud83d" }' -> 'a'; + ^ +DETAIL: Unicode high surrogate must not follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83d\ud83d... +SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; -- surrogates in wrong order +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ude04\ud83d" }' -> 'a'; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; -- orphan high surrogate +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ud83dX" }' -> 'a'; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ud83dX... 
+SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; -- orphan low surrogate +ERROR: invalid input syntax for type json +LINE 1: SELECT jsonb '{ "a": "\ude04X" }' -> 'a'; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +CONTEXT: JSON data, line 1: { "a": "\ude04... +-- handling of simple unicode escapes +SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as correct_in_utf8; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' as corr... + ^ +DETAIL: Unicode escape value could not be translated to the server's encoding SQL_ASCII. +CONTEXT: JSON data, line 1: { "a": "the Copyright \u00a9... +SELECT jsonb '{ "a": "dollar \u0024 character" }' as correct_everywhere; + correct_everywhere +----------------------------- + {"a": "dollar $ character"} +(1 row) + +SELECT jsonb '{ "a": "dollar \\u0024 character" }' as not_an_escape; + not_an_escape +----------------------------------- + {"a": "dollar \\u0024 character"} +(1 row) + +SELECT jsonb '{ "a": "null \u0000 escape" }' as fails; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' as fails; + ^ +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: { "a": "null \u0000... +SELECT jsonb '{ "a": "null \\u0000 escape" }' as not_an_escape; + not_an_escape +------------------------------ + {"a": "null \\u0000 escape"} +(1 row) + +SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a' as correct_in_utf8; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT jsonb '{ "a": "the Copyright \u00a9 sign" }' ->> 'a'... + ^ +DETAIL: Unicode escape value could not be translated to the server's encoding SQL_ASCII. +CONTEXT: JSON data, line 1: { "a": "the Copyright \u00a9... +SELECT jsonb '{ "a": "dollar \u0024 character" }' ->> 'a' as correct_everywhere; + correct_everywhere +-------------------- + dollar $ character +(1 row) + +SELECT jsonb '{ "a": "dollar \\u0024 character" }' ->> 'a' as not_an_escape; + not_an_escape +------------------------- + dollar \u0024 character +(1 row) + +SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fails; +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT jsonb '{ "a": "null \u0000 escape" }' ->> 'a' as fai... + ^ +DETAIL: \u0000 cannot be converted to text. +CONTEXT: JSON data, line 1: { "a": "null \u0000... +SELECT jsonb '{ "a": "null \\u0000 escape" }' ->> 'a' as not_an_escape; + not_an_escape +-------------------- + null \u0000 escape +(1 row) + +-- soft error for input-time failure +select * from pg_input_error_info('{ "a": "\ud83d\ude04\ud83d\udc36" }', 'jsonb'); + message | detail | hint | sql_error_code +-------------------------------------+----------------------------------------------------------------------------------+------+---------------- + unsupported Unicode escape sequence | Unicode escape value could not be translated to the server's encoding SQL_ASCII. | | 22P05 +(1 row) + diff --git a/src/test/regress/expected/json_encoding_2.out b/src/test/regress/expected/json_encoding_2.out new file mode 100644 index 0000000..4fc8f02 --- /dev/null +++ b/src/test/regress/expected/json_encoding_2.out @@ -0,0 +1,9 @@ +-- +-- encoding-sensitive tests for json and jsonb +-- +-- We provide expected-results files for UTF8 (json_encoding.out) +-- and for SQL_ASCII (json_encoding_1.out). Skip otherwise. 
+SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out new file mode 100644 index 0000000..f8a7dac --- /dev/null +++ b/src/test/regress/expected/jsonb.out @@ -0,0 +1,5584 @@ +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +CREATE TABLE testjsonb ( + j jsonb +); +\set filename :abs_srcdir '/data/jsonb.data' +COPY testjsonb FROM :'filename'; +-- Strings. +SELECT '""'::jsonb; -- OK. + jsonb +------- + "" +(1 row) + +SELECT $$''$$::jsonb; -- ERROR, single quotes are not allowed +ERROR: invalid input syntax for type json +LINE 1: SELECT $$''$$::jsonb; + ^ +DETAIL: Token "'" is invalid. +CONTEXT: JSON data, line 1: '... +SELECT '"abc"'::jsonb; -- OK + jsonb +------- + "abc" +(1 row) + +SELECT '"abc'::jsonb; -- ERROR, quotes not closed +ERROR: invalid input syntax for type json +LINE 1: SELECT '"abc'::jsonb; + ^ +DETAIL: Token ""abc" is invalid. +CONTEXT: JSON data, line 1: "abc +SELECT '"abc +def"'::jsonb; -- ERROR, unescaped newline in string constant +ERROR: invalid input syntax for type json +LINE 1: SELECT '"abc + ^ +DETAIL: Character with value 0x0a must be escaped. +CONTEXT: JSON data, line 1: "abc +SELECT '"\n\"\\"'::jsonb; -- OK, legal escapes + jsonb +---------- + "\n\"\\" +(1 row) + +SELECT '"\v"'::jsonb; -- ERROR, not a valid JSON escape +ERROR: invalid input syntax for type json +LINE 1: SELECT '"\v"'::jsonb; + ^ +DETAIL: Escape sequence "\v" is invalid. +CONTEXT: JSON data, line 1: "\v... +-- see json_encoding test for input with unicode escapes +-- Numbers. +SELECT '1'::jsonb; -- OK + jsonb +------- + 1 +(1 row) + +SELECT '0'::jsonb; -- OK + jsonb +------- + 0 +(1 row) + +SELECT '01'::jsonb; -- ERROR, not valid according to JSON spec +ERROR: invalid input syntax for type json +LINE 1: SELECT '01'::jsonb; + ^ +DETAIL: Token "01" is invalid. +CONTEXT: JSON data, line 1: 01 +SELECT '0.1'::jsonb; -- OK + jsonb +------- + 0.1 +(1 row) + +SELECT '9223372036854775808'::jsonb; -- OK, even though it's too large for int8 + jsonb +--------------------- + 9223372036854775808 +(1 row) + +SELECT '1e100'::jsonb; -- OK + jsonb +------------------------------------------------------------------------------------------------------- + 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(1 row) + +SELECT '1.3e100'::jsonb; -- OK + jsonb +------------------------------------------------------------------------------------------------------- + 13000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(1 row) + +SELECT '1f2'::jsonb; -- ERROR +ERROR: invalid input syntax for type json +LINE 1: SELECT '1f2'::jsonb; + ^ +DETAIL: Token "1f2" is invalid. +CONTEXT: JSON data, line 1: 1f2 +SELECT '0.x1'::jsonb; -- ERROR +ERROR: invalid input syntax for type json +LINE 1: SELECT '0.x1'::jsonb; + ^ +DETAIL: Token "0.x1" is invalid. +CONTEXT: JSON data, line 1: 0.x1 +SELECT '1.3ex100'::jsonb; -- ERROR +ERROR: invalid input syntax for type json +LINE 1: SELECT '1.3ex100'::jsonb; + ^ +DETAIL: Token "1.3ex100" is invalid. +CONTEXT: JSON data, line 1: 1.3ex100 +-- Arrays. 
+SELECT '[]'::jsonb; -- OK + jsonb +------- + [] +(1 row) + +SELECT '[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]'::jsonb; -- OK + jsonb +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]] +(1 row) + +SELECT '[1,2]'::jsonb; -- OK + jsonb +-------- + [1, 2] +(1 row) + +SELECT '[1,2,]'::jsonb; -- ERROR, trailing comma +ERROR: invalid input syntax for type json +LINE 1: SELECT '[1,2,]'::jsonb; + ^ +DETAIL: Expected JSON value, but found "]". +CONTEXT: JSON data, line 1: [1,2,] +SELECT '[1,2'::jsonb; -- ERROR, no closing bracket +ERROR: invalid input syntax for type json +LINE 1: SELECT '[1,2'::jsonb; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: [1,2 +SELECT '[1,[2]'::jsonb; -- ERROR, no closing bracket +ERROR: invalid input syntax for type json +LINE 1: SELECT '[1,[2]'::jsonb; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: [1,[2] +-- Objects. +SELECT '{}'::jsonb; -- OK + jsonb +------- + {} +(1 row) + +SELECT '{"abc"}'::jsonb; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc"}'::jsonb; + ^ +DETAIL: Expected ":", but found "}". +CONTEXT: JSON data, line 1: {"abc"} +SELECT '{"abc":1}'::jsonb; -- OK + jsonb +------------ + {"abc": 1} +(1 row) + +SELECT '{1:"abc"}'::jsonb; -- ERROR, keys must be strings +ERROR: invalid input syntax for type json +LINE 1: SELECT '{1:"abc"}'::jsonb; + ^ +DETAIL: Expected string or "}", but found "1". +CONTEXT: JSON data, line 1: {1... +SELECT '{"abc",1}'::jsonb; -- ERROR, wrong separator +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc",1}'::jsonb; + ^ +DETAIL: Expected ":", but found ",". +CONTEXT: JSON data, line 1: {"abc",... +SELECT '{"abc"=1}'::jsonb; -- ERROR, totally wrong separator +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc"=1}'::jsonb; + ^ +DETAIL: Token "=" is invalid. +CONTEXT: JSON data, line 1: {"abc"=... +SELECT '{"abc"::1}'::jsonb; -- ERROR, another wrong separator +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc"::1}'::jsonb; + ^ +DETAIL: Expected JSON value, but found ":". +CONTEXT: JSON data, line 1: {"abc"::... +SELECT '{"abc":1,"def":2,"ghi":[3,4],"hij":{"klm":5,"nop":[6]}}'::jsonb; -- OK + jsonb +-------------------------------------------------------------------- + {"abc": 1, "def": 2, "ghi": [3, 4], "hij": {"klm": 5, "nop": [6]}} +(1 row) + +SELECT '{"abc":1:2}'::jsonb; -- ERROR, colon in wrong spot +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc":1:2}'::jsonb; + ^ +DETAIL: Expected "," or "}", but found ":". +CONTEXT: JSON data, line 1: {"abc":1:... +SELECT '{"abc":1,3}'::jsonb; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT '{"abc":1,3}'::jsonb; + ^ +DETAIL: Expected string, but found "3". +CONTEXT: JSON data, line 1: {"abc":1,3... +-- Recursion. 
+SET max_stack_depth = '100kB'; +SELECT repeat('[', 10000)::jsonb; +ERROR: stack depth limit exceeded +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. +SELECT repeat('{"a":', 10000)::jsonb; +ERROR: stack depth limit exceeded +HINT: Increase the configuration parameter "max_stack_depth" (currently 100kB), after ensuring the platform's stack depth limit is adequate. +RESET max_stack_depth; +-- Miscellaneous stuff. +SELECT 'true'::jsonb; -- OK + jsonb +------- + true +(1 row) + +SELECT 'false'::jsonb; -- OK + jsonb +------- + false +(1 row) + +SELECT 'null'::jsonb; -- OK + jsonb +------- + null +(1 row) + +SELECT ' true '::jsonb; -- OK, even with extra whitespace + jsonb +------- + true +(1 row) + +SELECT 'true false'::jsonb; -- ERROR, too many values +ERROR: invalid input syntax for type json +LINE 1: SELECT 'true false'::jsonb; + ^ +DETAIL: Expected end of input, but found "false". +CONTEXT: JSON data, line 1: true false +SELECT 'true, false'::jsonb; -- ERROR, too many values +ERROR: invalid input syntax for type json +LINE 1: SELECT 'true, false'::jsonb; + ^ +DETAIL: Expected end of input, but found ",". +CONTEXT: JSON data, line 1: true,... +SELECT 'truf'::jsonb; -- ERROR, not a keyword +ERROR: invalid input syntax for type json +LINE 1: SELECT 'truf'::jsonb; + ^ +DETAIL: Token "truf" is invalid. +CONTEXT: JSON data, line 1: truf +SELECT 'trues'::jsonb; -- ERROR, not a keyword +ERROR: invalid input syntax for type json +LINE 1: SELECT 'trues'::jsonb; + ^ +DETAIL: Token "trues" is invalid. +CONTEXT: JSON data, line 1: trues +SELECT ''::jsonb; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT ''::jsonb; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: +SELECT ' '::jsonb; -- ERROR, no value +ERROR: invalid input syntax for type json +LINE 1: SELECT ' '::jsonb; + ^ +DETAIL: The input string ended unexpectedly. +CONTEXT: JSON data, line 1: +-- Multi-line JSON input to check ERROR reporting +SELECT '{ + "one": 1, + "two":"two", + "three": + true}'::jsonb; -- OK + jsonb +----------------------------------------- + {"one": 1, "two": "two", "three": true} +(1 row) + +SELECT '{ + "one": 1, + "two":,"two", -- ERROR extraneous comma before field "two" + "three": + true}'::jsonb; +ERROR: invalid input syntax for type json +LINE 1: SELECT '{ + ^ +DETAIL: Expected JSON value, but found ",". +CONTEXT: JSON data, line 3: "two":,... +SELECT '{ + "one": 1, + "two":"two", + "averyveryveryveryveryveryveryveryveryverylongfieldname":}'::jsonb; +ERROR: invalid input syntax for type json +LINE 1: SELECT '{ + ^ +DETAIL: Expected JSON value, but found "}". +CONTEXT: JSON data, line 4: ...yveryveryveryveryveryveryveryverylongfieldname":} +-- ERROR missing value for last field +-- test non-error-throwing input +select pg_input_is_valid('{"a":true}', 'jsonb'); + pg_input_is_valid +------------------- + t +(1 row) + +select pg_input_is_valid('{"a":true', 'jsonb'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('{"a":true', 'jsonb'); + message | detail | hint | sql_error_code +------------------------------------+--------------------------------------+------+---------------- + invalid input syntax for type json | The input string ended unexpectedly. 
| | 22P02 +(1 row) + +select * from pg_input_error_info('{"a":1e1000000}', 'jsonb'); + message | detail | hint | sql_error_code +--------------------------------+--------+------+---------------- + value overflows numeric format | | | 22003 +(1 row) + +-- make sure jsonb is passed through json generators without being escaped +SELECT array_to_json(ARRAY [jsonb '{"a":1}', jsonb '{"b":[2,3]}']); + array_to_json +-------------------------- + [{"a": 1},{"b": [2, 3]}] +(1 row) + +-- anyarray column +CREATE TEMP TABLE rows AS +SELECT x, 'txt' || x as y +FROM generate_series(1,3) AS x; +analyze rows; +select attname, to_jsonb(histogram_bounds) histogram_bounds +from pg_stats +where tablename = 'rows' and + schemaname = pg_my_temp_schema()::regnamespace::text +order by 1; + attname | histogram_bounds +---------+-------------------------- + x | [1, 2, 3] + y | ["txt1", "txt2", "txt3"] +(2 rows) + +-- to_jsonb, timestamps +select to_jsonb(timestamp '2014-05-28 12:22:35.614298'); + to_jsonb +------------------------------ + "2014-05-28T12:22:35.614298" +(1 row) + +BEGIN; +SET LOCAL TIME ZONE 10.5; +select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04'); + to_jsonb +------------------------------------ + "2014-05-29T02:52:35.614298+10:30" +(1 row) + +SET LOCAL TIME ZONE -8; +select to_jsonb(timestamptz '2014-05-28 12:22:35.614298-04'); + to_jsonb +------------------------------------ + "2014-05-28T08:22:35.614298-08:00" +(1 row) + +COMMIT; +select to_jsonb(date '2014-05-28'); + to_jsonb +-------------- + "2014-05-28" +(1 row) + +select to_jsonb(date 'Infinity'); + to_jsonb +------------ + "infinity" +(1 row) + +select to_jsonb(date '-Infinity'); + to_jsonb +------------- + "-infinity" +(1 row) + +select to_jsonb(timestamp 'Infinity'); + to_jsonb +------------ + "infinity" +(1 row) + +select to_jsonb(timestamp '-Infinity'); + to_jsonb +------------- + "-infinity" +(1 row) + +select to_jsonb(timestamptz 'Infinity'); + to_jsonb +------------ + "infinity" +(1 row) + +select to_jsonb(timestamptz '-Infinity'); + to_jsonb +------------- + "-infinity" +(1 row) + +--jsonb_agg +SELECT jsonb_agg(q) + FROM ( SELECT $$a$$ || x AS b, y AS c, + ARRAY[ROW(x.*,ARRAY[1,2,3]), + ROW(y.*,ARRAY[4,5,6])] AS z + FROM generate_series(1,2) x, + generate_series(4,5) y) q; + jsonb_agg +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}] +(1 row) + +SELECT jsonb_agg(q ORDER BY x, y) + FROM rows q; + jsonb_agg +----------------------------------------------------------------------- + [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] +(1 row) + +UPDATE rows SET x = NULL WHERE x = 1; +SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y) + FROM rows q; + jsonb_agg +-------------------------------------------------------------------------- + [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] +(1 row) + +-- jsonb extraction functions +CREATE TEMP TABLE test_jsonb ( + json_type 
text, + test_json jsonb +); +INSERT INTO test_jsonb VALUES +('scalar','"a scalar"'), +('array','["zero", "one","two",null,"four","five", [1,2,3],{"f1":9}]'), +('object','{"field1":"val1","field2":"val2","field3":null, "field4": 4, "field5": [1,2,3], "field6": {"f1":9}}'); +SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'scalar'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'array'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 'x' FROM test_jsonb WHERE json_type = 'object'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 'field2' FROM test_jsonb WHERE json_type = 'object'; + ?column? +---------- + "val2" +(1 row) + +SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'scalar'; + ?column? +---------- + +(1 row) + +SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'array'; + ?column? +---------- + +(1 row) + +SELECT test_json ->> 'field2' FROM test_jsonb WHERE json_type = 'object'; + ?column? +---------- + val2 +(1 row) + +SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'scalar'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'array'; + ?column? +---------- + "two" +(1 row) + +SELECT test_json -> 9 FROM test_jsonb WHERE json_type = 'array'; + ?column? +---------- + +(1 row) + +SELECT test_json -> 2 FROM test_jsonb WHERE json_type = 'object'; + ?column? +---------- + +(1 row) + +SELECT test_json ->> 6 FROM test_jsonb WHERE json_type = 'array'; + ?column? +----------- + [1, 2, 3] +(1 row) + +SELECT test_json ->> 7 FROM test_jsonb WHERE json_type = 'array'; + ?column? +----------- + {"f1": 9} +(1 row) + +SELECT test_json ->> 'field4' FROM test_jsonb WHERE json_type = 'object'; + ?column? +---------- + 4 +(1 row) + +SELECT test_json ->> 'field5' FROM test_jsonb WHERE json_type = 'object'; + ?column? +----------- + [1, 2, 3] +(1 row) + +SELECT test_json ->> 'field6' FROM test_jsonb WHERE json_type = 'object'; + ?column? +----------- + {"f1": 9} +(1 row) + +SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'scalar'; + ?column? +---------- + +(1 row) + +SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'array'; + ?column? +---------- + two +(1 row) + +SELECT test_json ->> 2 FROM test_jsonb WHERE json_type = 'object'; + ?column? +---------- + +(1 row) + +SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'scalar'; +ERROR: cannot call jsonb_object_keys on a scalar +SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'array'; +ERROR: cannot call jsonb_object_keys on an array +SELECT jsonb_object_keys(test_json) FROM test_jsonb WHERE json_type = 'object'; + jsonb_object_keys +------------------- + field1 + field2 + field3 + field4 + field5 + field6 +(6 rows) + +-- nulls +SELECT (test_json->'field3') IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'object'; + expect_false +-------------- + f +(1 row) + +SELECT (test_json->>'field3') IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'object'; + expect_true +------------- + t +(1 row) + +SELECT (test_json->3) IS NULL AS expect_false FROM test_jsonb WHERE json_type = 'array'; + expect_false +-------------- + f +(1 row) + +SELECT (test_json->>3) IS NULL AS expect_true FROM test_jsonb WHERE json_type = 'array'; + expect_true +------------- + t +(1 row) + +-- corner cases +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::text; + ?column? 
+---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> null::int; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 1; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb -> ''; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 1; + ?column? +------------- + {"b": "cc"} +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 3; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb -> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": "c", "b": null}'::jsonb -> 'b'; + ?column? +---------- + null +(1 row) + +select '"foo"'::jsonb -> 1; + ?column? +---------- + +(1 row) + +select '"foo"'::jsonb -> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::text; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> null::int; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 1; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb ->> ''; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 1; + ?column? +------------- + {"b": "cc"} +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 3; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb ->> 'z'; + ?column? +---------- + +(1 row) + +select '{"a": "c", "b": null}'::jsonb ->> 'b'; + ?column? +---------- + +(1 row) + +select '"foo"'::jsonb ->> 1; + ?column? +---------- + +(1 row) + +select '"foo"'::jsonb ->> 'z'; + ?column? +---------- + +(1 row) + +-- equality and inequality +SELECT '{"x":"y"}'::jsonb = '{"x":"y"}'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '{"x":"y"}'::jsonb = '{"x":"z"}'::jsonb; + ?column? +---------- + f +(1 row) + +SELECT '{"x":"y"}'::jsonb <> '{"x":"y"}'::jsonb; + ?column? +---------- + f +(1 row) + +SELECT '{"x":"y"}'::jsonb <> '{"x":"z"}'::jsonb; + ?column? +---------- + t +(1 row) + +-- containment +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}'); + jsonb_contains +---------------- + t +(1 row) + +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":null}'); + jsonb_contains +---------------- + t +(1 row) + +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "g":null}'); + jsonb_contains +---------------- + f +(1 row) + +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"g":null}'); + jsonb_contains +---------------- + f +(1 row) + +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"c"}'); + jsonb_contains +---------------- + f +(1 row) + +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b"}'); + jsonb_contains +---------------- + t +(1 row) + +SELECT jsonb_contains('{"a":"b", "b":1, "c":null}', '{"a":"b", "c":"q"}'); + jsonb_contains +---------------- + f +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":null}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "g":null}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"g":null}'; + ?column? 
+---------- + f +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"c"}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b"}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":"b", "b":1, "c":null}'::jsonb @> '{"a":"b", "c":"q"}'; + ?column? +---------- + f +(1 row) + +SELECT '[1,2]'::jsonb @> '[1,2,2]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '[1,1,2]'::jsonb @> '[1,2,2]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '[[1,2]]'::jsonb @> '[[1,2,2]]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '[1,2,2]'::jsonb <@ '[1,2]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '[1,2,2]'::jsonb <@ '[1,1,2]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '[[1,2,2]]'::jsonb <@ '[[1,2]]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + t +(1 row) + +SELECT jsonb_contained('{"a":"b", "c":null}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + t +(1 row) + +SELECT jsonb_contained('{"a":"b", "g":null}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + f +(1 row) + +SELECT jsonb_contained('{"g":null}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + f +(1 row) + +SELECT jsonb_contained('{"a":"c"}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + f +(1 row) + +SELECT jsonb_contained('{"a":"b"}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + t +(1 row) + +SELECT jsonb_contained('{"a":"b", "c":"q"}', '{"a":"b", "b":1, "c":null}'); + jsonb_contained +----------------- + f +(1 row) + +SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":"b", "c":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":"b", "g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + f +(1 row) + +SELECT '{"g":null}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":"c"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":"b"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":"b", "c":"q"}'::jsonb <@ '{"a":"b", "b":1, "c":null}'; + ?column? +---------- + f +(1 row) + +-- Raw scalar may contain another raw scalar, array may contain a raw scalar +SELECT '[5]'::jsonb @> '[5]'; + ?column? +---------- + t +(1 row) + +SELECT '5'::jsonb @> '5'; + ?column? +---------- + t +(1 row) + +SELECT '[5]'::jsonb @> '5'; + ?column? +---------- + t +(1 row) + +-- But a raw scalar cannot contain an array +SELECT '5'::jsonb @> '[5]'; + ?column? +---------- + f +(1 row) + +-- In general, one thing should always contain itself. Test array containment: +SELECT '["9", ["7", "3"], 1]'::jsonb @> '["9", ["7", "3"], 1]'::jsonb; + ?column? +---------- + t +(1 row) + +SELECT '["9", ["7", "3"], ["1"]]'::jsonb @> '["9", ["7", "3"], ["1"]]'::jsonb; + ?column? +---------- + t +(1 row) + +-- array containment string matching confusion bug +SELECT '{ "name": "Bob", "tags": [ "enim", "qui"]}'::jsonb @> '{"tags":["qu"]}'; + ?column? 
+---------- + f +(1 row) + +-- array length +SELECT jsonb_array_length('[1,2,3,{"f1":1,"f2":[5,6]},4]'); + jsonb_array_length +-------------------- + 5 +(1 row) + +SELECT jsonb_array_length('[]'); + jsonb_array_length +-------------------- + 0 +(1 row) + +SELECT jsonb_array_length('{"f1":1,"f2":[5,6]}'); +ERROR: cannot get array length of a non-array +SELECT jsonb_array_length('4'); +ERROR: cannot get array length of a scalar +-- each +SELECT jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null}'); + jsonb_each +-------------------- + (f1,"[1, 2, 3]") + (f2,"{""f3"": 1}") + (f4,null) +(3 rows) + +SELECT jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; + q +------------------------------------------------------ + (1,"""first""") + (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}") + (b,"[1, 2]") + (c,"""cc""") + (n,null) +(5 rows) + +SELECT * FROM jsonb_each('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; + key | value +-----+----------- + f1 | [1, 2, 3] + f2 | {"f3": 1} + f4 | null + f5 | 99 + f6 | "stringy" +(5 rows) + +SELECT * FROM jsonb_each('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; + key | value +-----+------------------------------------ + 1 | "first" + a | {"1": "first", "b": "c", "c": "b"} + b | [1, 2] + c | "cc" + n | null +(5 rows) + +SELECT jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":"null"}'); + jsonb_each_text +-------------------- + (f1,"[1, 2, 3]") + (f2,"{""f3"": 1}") + (f4,) + (f5,null) +(4 rows) + +SELECT jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; + q +------------------------------------------------------ + (1,first) + (a,"{""1"": ""first"", ""b"": ""c"", ""c"": ""b""}") + (b,"[1, 2]") + (c,cc) + (n,) +(5 rows) + +SELECT * FROM jsonb_each_text('{"f1":[1,2,3],"f2":{"f3":1},"f4":null,"f5":99,"f6":"stringy"}') q; + key | value +-----+----------- + f1 | [1, 2, 3] + f2 | {"f3": 1} + f4 | + f5 | 99 + f6 | stringy +(5 rows) + +SELECT * FROM jsonb_each_text('{"a":{"b":"c","c":"b","1":"first"},"b":[1,2],"c":"cc","1":"first","n":null}'::jsonb) AS q; + key | value +-----+------------------------------------ + 1 | first + a | {"1": "first", "b": "c", "c": "b"} + b | [1, 2] + c | cc + n | +(5 rows) + +-- exists +SELECT jsonb_exists('{"a":null, "b":"qq"}', 'a'); + jsonb_exists +-------------- + t +(1 row) + +SELECT jsonb_exists('{"a":null, "b":"qq"}', 'b'); + jsonb_exists +-------------- + t +(1 row) + +SELECT jsonb_exists('{"a":null, "b":"qq"}', 'c'); + jsonb_exists +-------------- + f +(1 row) + +SELECT jsonb_exists('{"a":"null", "b":"qq"}', 'a'); + jsonb_exists +-------------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ? 'a'; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ? 'b'; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ? 'c'; + ?column? +---------- + f +(1 row) + +SELECT jsonb '{"a":"null", "b":"qq"}' ? 'a'; + ?column? +---------- + t +(1 row) + +-- array exists - array elements should behave as keys +SELECT count(*) from testjsonb WHERE j->'array' ? 'bar'; + count +------- + 3 +(1 row) + +-- type sensitive array exists - should return no rows (since "exists" only +-- matches strings that are either object keys or array elements) +SELECT count(*) from testjsonb WHERE j->'array' ? 
'5'::text; + count +------- + 0 +(1 row) + +-- However, a raw scalar is *contained* within the array +SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb; + count +------- + 1 +(1 row) + +SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['a','b']); + jsonb_exists_any +------------------ + t +(1 row) + +SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['b','a']); + jsonb_exists_any +------------------ + t +(1 row) + +SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','a']); + jsonb_exists_any +------------------ + t +(1 row) + +SELECT jsonb_exists_any('{"a":null, "b":"qq"}', ARRAY['c','d']); + jsonb_exists_any +------------------ + f +(1 row) + +SELECT jsonb_exists_any('{"a":null, "b":"qq"}', '{}'::text[]); + jsonb_exists_any +------------------ + f +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['a','b']; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['b','a']; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','a']; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?| ARRAY['c','d']; + ?column? +---------- + f +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?| '{}'::text[]; + ?column? +---------- + f +(1 row) + +SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['a','b']); + jsonb_exists_all +------------------ + t +(1 row) + +SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['b','a']); + jsonb_exists_all +------------------ + t +(1 row) + +SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','a']); + jsonb_exists_all +------------------ + f +(1 row) + +SELECT jsonb_exists_all('{"a":null, "b":"qq"}', ARRAY['c','d']); + jsonb_exists_all +------------------ + f +(1 row) + +SELECT jsonb_exists_all('{"a":null, "b":"qq"}', '{}'::text[]); + jsonb_exists_all +------------------ + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','b']; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['b','a']; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','a']; + ?column? +---------- + f +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['c','d']; + ?column? +---------- + f +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?& ARRAY['a','a', 'b', 'b', 'b']; + ?column? +---------- + t +(1 row) + +SELECT jsonb '{"a":null, "b":"qq"}' ?& '{}'::text[]; + ?column? 
+---------- + t +(1 row) + +-- typeof +SELECT jsonb_typeof('{}') AS object; + object +-------- + object +(1 row) + +SELECT jsonb_typeof('{"c":3,"p":"o"}') AS object; + object +-------- + object +(1 row) + +SELECT jsonb_typeof('[]') AS array; + array +------- + array +(1 row) + +SELECT jsonb_typeof('["a", 1]') AS array; + array +------- + array +(1 row) + +SELECT jsonb_typeof('null') AS "null"; + null +------ + null +(1 row) + +SELECT jsonb_typeof('1') AS number; + number +-------- + number +(1 row) + +SELECT jsonb_typeof('-1') AS number; + number +-------- + number +(1 row) + +SELECT jsonb_typeof('1.0') AS number; + number +-------- + number +(1 row) + +SELECT jsonb_typeof('1e2') AS number; + number +-------- + number +(1 row) + +SELECT jsonb_typeof('-1.0') AS number; + number +-------- + number +(1 row) + +SELECT jsonb_typeof('true') AS boolean; + boolean +--------- + boolean +(1 row) + +SELECT jsonb_typeof('false') AS boolean; + boolean +--------- + boolean +(1 row) + +SELECT jsonb_typeof('"hello"') AS string; + string +-------- + string +(1 row) + +SELECT jsonb_typeof('"true"') AS string; + string +-------- + string +(1 row) + +SELECT jsonb_typeof('"1.0"') AS string; + string +-------- + string +(1 row) + +-- jsonb_build_array, jsonb_build_object, jsonb_object_agg +SELECT jsonb_build_array('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); + jsonb_build_array +------------------------------------------------------------------------- + ["a", 1, "b", 1.2, "c", true, "d", null, "e", {"x": 3, "y": [1, 2, 3]}] +(1 row) + +SELECT jsonb_build_array('a', NULL); -- ok + jsonb_build_array +------------------- + ["a", null] +(1 row) + +SELECT jsonb_build_array(VARIADIC NULL::text[]); -- ok + jsonb_build_array +------------------- + +(1 row) + +SELECT jsonb_build_array(VARIADIC '{}'::text[]); -- ok + jsonb_build_array +------------------- + [] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{a,b,c}'::text[]); -- ok + jsonb_build_array +------------------- + ["a", "b", "c"] +(1 row) + +SELECT jsonb_build_array(VARIADIC ARRAY['a', NULL]::text[]); -- ok + jsonb_build_array +------------------- + ["a", null] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::text[]); -- ok + jsonb_build_array +---------------------- + ["1", "2", "3", "4"] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{1,2,3,4}'::int[]); -- ok + jsonb_build_array +------------------- + [1, 2, 3, 4] +(1 row) + +SELECT jsonb_build_array(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + jsonb_build_array +-------------------- + [1, 4, 2, 5, 3, 6] +(1 row) + +SELECT jsonb_build_object('a',1,'b',1.2,'c',true,'d',null,'e',json '{"x": 3, "y": [1,2,3]}'); + jsonb_build_object +------------------------------------------------------------------------- + {"a": 1, "b": 1.2, "c": true, "d": null, "e": {"x": 3, "y": [1, 2, 3]}} +(1 row) + +SELECT jsonb_build_object( + 'a', jsonb_build_object('b',false,'c',99), + 'd', jsonb_build_object('e',array[9,8,7]::int[], + 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); + jsonb_build_object +------------------------------------------------------------------------------------------------ + {"a": {"b": false, "c": 99}, "d": {"e": [9, 8, 7], "f": {"name": "pg_class", "relkind": "r"}}} +(1 row) + +SELECT jsonb_build_object('{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. 
+SELECT jsonb_build_object('{a,b,c}'::text[], '{d,e,f}'::text[]); -- error, key cannot be array +ERROR: key value must be scalar, not array, composite, or json +SELECT jsonb_build_object('a', 'b', 'c'); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. +SELECT jsonb_build_object(NULL, 'a'); -- error, key cannot be NULL +ERROR: argument 1: key must not be null +SELECT jsonb_build_object('a', NULL); -- ok + jsonb_build_object +-------------------- + {"a": null} +(1 row) + +SELECT jsonb_build_object(VARIADIC NULL::text[]); -- ok + jsonb_build_object +-------------------- + +(1 row) + +SELECT jsonb_build_object(VARIADIC '{}'::text[]); -- ok + jsonb_build_object +-------------------- + {} +(1 row) + +SELECT jsonb_build_object(VARIADIC '{a,b,c}'::text[]); -- error +ERROR: argument list must have even number of elements +HINT: The arguments of jsonb_build_object() must consist of alternating keys and values. +SELECT jsonb_build_object(VARIADIC ARRAY['a', NULL]::text[]); -- ok + jsonb_build_object +-------------------- + {"a": null} +(1 row) + +SELECT jsonb_build_object(VARIADIC ARRAY[NULL, 'a']::text[]); -- error, key cannot be NULL +ERROR: argument 1: key must not be null +SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::text[]); -- ok + jsonb_build_object +---------------------- + {"1": "2", "3": "4"} +(1 row) + +SELECT jsonb_build_object(VARIADIC '{1,2,3,4}'::int[]); -- ok + jsonb_build_object +-------------------- + {"1": 2, "3": 4} +(1 row) + +SELECT jsonb_build_object(VARIADIC '{{1,4},{2,5},{3,6}}'::int[][]); -- ok + jsonb_build_object +-------------------------- + {"1": 4, "2": 5, "3": 6} +(1 row) + +-- empty objects/arrays +SELECT jsonb_build_array(); + jsonb_build_array +------------------- + [] +(1 row) + +SELECT jsonb_build_object(); + jsonb_build_object +-------------------- + {} +(1 row) + +-- make sure keys are quoted +SELECT jsonb_build_object(1,2); + jsonb_build_object +-------------------- + {"1": 2} +(1 row) + +-- keys must be scalar and not null +SELECT jsonb_build_object(null,2); +ERROR: argument 1: key must not be null +SELECT jsonb_build_object(r,2) FROM (SELECT 1 AS a, 2 AS b) r; +ERROR: key value must be scalar, not array, composite, or json +SELECT jsonb_build_object(json '{"a":1,"b":2}', 3); +ERROR: key value must be scalar, not array, composite, or json +SELECT jsonb_build_object('{1,2,3}'::int[], 3); +ERROR: key value must be scalar, not array, composite, or json +-- handling of NULL values +SELECT jsonb_object_agg(1, NULL::jsonb); + jsonb_object_agg +------------------ + {"1": null} +(1 row) + +SELECT jsonb_object_agg(NULL, '{"a":1}'); +ERROR: field name must not be null +CREATE TEMP TABLE foo (serial_num int, name text, type text); +INSERT INTO foo VALUES (847001,'t15','GE1043'); +INSERT INTO foo VALUES (847002,'t16','GE1043'); +INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); +SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type))) +FROM foo; + jsonb_build_object +------------------------------------------------------------------------------------------------------------------------------------------------------------- + {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}} +(1 row) + +SELECT jsonb_object_agg(name, type) FROM foo; + jsonb_object_agg 
+----------------------------------------------------------- + {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"} +(1 row) + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT jsonb_object_agg(name, type) FROM foo; +ERROR: field name must not be null +-- edge case for parser +SELECT jsonb_object_agg(DISTINCT 'a', 'abc'); + jsonb_object_agg +------------------ + {"a": "abc"} +(1 row) + +-- jsonb_object +-- empty object, one dimension +SELECT jsonb_object('{}'); + jsonb_object +-------------- + {} +(1 row) + +-- empty object, two dimensions +SELECT jsonb_object('{}', '{}'); + jsonb_object +-------------- + {} +(1 row) + +-- one dimension +SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); + jsonb_object +--------------------------------------------------- + {"3": null, "a": "1", "b": "2", "d e f": "a b c"} +(1 row) + +-- same but with two dimensions +SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); + jsonb_object +--------------------------------------------------- + {"3": null, "a": "1", "b": "2", "d e f": "a b c"} +(1 row) + +-- odd number error +SELECT jsonb_object('{a,b,c}'); +ERROR: array must have even number of elements +-- one column error +SELECT jsonb_object('{{a},{b}}'); +ERROR: array must have two columns +-- too many columns error +SELECT jsonb_object('{{a,b,c},{b,c,d}}'); +ERROR: array must have two columns +-- too many dimensions error +SELECT jsonb_object('{{{a,b},{c,d}},{{b,c},{d,e}}}'); +ERROR: wrong number of array subscripts +--two argument form of jsonb_object +select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c"}'); + jsonb_object +-------------------------------------------------- + {"a": "1", "b": "2", "c": "3", "d e f": "a b c"} +(1 row) + +-- too many dimensions +SELECT jsonb_object('{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}', '{{a,1},{b,2},{3,NULL},{"d e f","a b c"}}'); +ERROR: wrong number of array subscripts +-- mismatched dimensions +select jsonb_object('{a,b,c,"d e f",g}','{1,2,3,"a b c"}'); +ERROR: mismatched array dimensions +select jsonb_object('{a,b,c,"d e f"}','{1,2,3,"a b c",g}'); +ERROR: mismatched array dimensions +-- null key error +select jsonb_object('{a,b,NULL,"d e f"}','{1,2,3,"a b c"}'); +ERROR: null value not allowed for object key +-- empty key is allowed +select jsonb_object('{a,b,"","d e f"}','{1,2,3,"a b c"}'); + jsonb_object +------------------------------------------------- + {"": "3", "a": "1", "b": "2", "d e f": "a b c"} +(1 row) + +-- extract_path, extract_path_as_text +SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); + jsonb_extract_path +-------------------- + "stringy" +(1 row) + +SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); + jsonb_extract_path +-------------------- + {"f3": 1} +(1 row) + +SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); + jsonb_extract_path +-------------------- + "f3" +(1 row) + +SELECT jsonb_extract_path('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); + jsonb_extract_path +-------------------- + 1 +(1 row) + +SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f4','f6'); + jsonb_extract_path_text +------------------------- + stringy +(1 row) + +SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}','f2'); + jsonb_extract_path_text +------------------------- + {"f3": 1} +(1 row) + +SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',0::text); + jsonb_extract_path_text 
+------------------------- + f3 +(1 row) + +SELECT jsonb_extract_path_text('{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}','f2',1::text); + jsonb_extract_path_text +------------------------- + 1 +(1 row) + +-- extract_path nulls +SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_false; + expect_false +-------------- + f +(1 row) + +SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":{"f5":null,"f6":"stringy"}}','f4','f5') IS NULL AS expect_true; + expect_true +------------- + t +(1 row) + +SELECT jsonb_extract_path('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_false; + expect_false +-------------- + f +(1 row) + +SELECT jsonb_extract_path_text('{"f2":{"f3":1},"f4":[0,1,2,null]}','f4','3') IS NULL AS expect_true; + expect_true +------------- + t +(1 row) + +-- extract_path operators +SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f4','f6']; + ?column? +----------- + "stringy" +(1 row) + +SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2']; + ?column? +----------- + {"f3": 1} +(1 row) + +SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','0']; + ?column? +---------- + "f3" +(1 row) + +SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>array['f2','1']; + ?column? +---------- + 1 +(1 row) + +SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f4','f6']; + ?column? +---------- + stringy +(1 row) + +SELECT '{"f2":{"f3":1},"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2']; + ?column? +----------- + {"f3": 1} +(1 row) + +SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','0']; + ?column? +---------- + f3 +(1 row) + +SELECT '{"f2":["f3",1],"f4":{"f5":99,"f6":"stringy"}}'::jsonb#>>array['f2','1']; + ?column? +---------- + 1 +(1 row) + +-- corner cases for same +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> '{}'; + ?column? +---------------------------- + {"a": {"b": {"c": "foo"}}} +(1 row) + +select '[1,2,3]'::jsonb #> '{}'; + ?column? +----------- + [1, 2, 3] +(1 row) + +select '"foo"'::jsonb #> '{}'; + ?column? +---------- + "foo" +(1 row) + +select '42'::jsonb #> '{}'; + ?column? +---------- + 42 +(1 row) + +select 'null'::jsonb #> '{}'; + ?column? +---------- + null +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a']; + ?column? +--------------------- + {"b": {"c": "foo"}} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', null]; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a', '']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b']; + ?column? +-------------- + {"c": "foo"} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c']; + ?column? +---------- + "foo" +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','b','c','d']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #> array['a','z','c']; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','1','b']; + ?column? +---------- + "cc" +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #> array['a','z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['1','b']; + ?column? +---------- + "cc" +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb #> array['z','b']; + ?column? 
+---------- + +(1 row) + +select '[{"b": "c"}, {"b": null}]'::jsonb #> array['1','b']; + ?column? +---------- + null +(1 row) + +select '"foo"'::jsonb #> array['z']; + ?column? +---------- + +(1 row) + +select '42'::jsonb #> array['f2']; + ?column? +---------- + +(1 row) + +select '42'::jsonb #> array['0']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> '{}'; + ?column? +---------------------------- + {"a": {"b": {"c": "foo"}}} +(1 row) + +select '[1,2,3]'::jsonb #>> '{}'; + ?column? +----------- + [1, 2, 3] +(1 row) + +select '"foo"'::jsonb #>> '{}'; + ?column? +---------- + foo +(1 row) + +select '42'::jsonb #>> '{}'; + ?column? +---------- + 42 +(1 row) + +select 'null'::jsonb #>> '{}'; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a']; + ?column? +--------------------- + {"b": {"c": "foo"}} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', null]; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a', '']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b']; + ?column? +-------------- + {"c": "foo"} +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c']; + ?column? +---------- + foo +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','b','c','d']; + ?column? +---------- + +(1 row) + +select '{"a": {"b":{"c": "foo"}}}'::jsonb #>> array['a','z','c']; + ?column? +---------- + +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','1','b']; + ?column? +---------- + cc +(1 row) + +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::jsonb #>> array['a','z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['1','b']; + ?column? +---------- + cc +(1 row) + +select '[{"b": "c"}, {"b": "cc"}]'::jsonb #>> array['z','b']; + ?column? +---------- + +(1 row) + +select '[{"b": "c"}, {"b": null}]'::jsonb #>> array['1','b']; + ?column? +---------- + +(1 row) + +select '"foo"'::jsonb #>> array['z']; + ?column? +---------- + +(1 row) + +select '42'::jsonb #>> array['f2']; + ?column? +---------- + +(1 row) + +select '42'::jsonb #>> array['0']; + ?column? 
+---------- + +(1 row) + +-- array_elements +SELECT jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]'); + jsonb_array_elements +---------------------------- + 1 + true + [1, [2, 3]] + null + {"f1": 1, "f2": [7, 8, 9]} + false +(6 rows) + +SELECT * FROM jsonb_array_elements('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false]') q; + value +---------------------------- + 1 + true + [1, [2, 3]] + null + {"f1": 1, "f2": [7, 8, 9]} + false +(6 rows) + +SELECT jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]'); + jsonb_array_elements_text +---------------------------- + 1 + true + [1, [2, 3]] + + {"f1": 1, "f2": [7, 8, 9]} + false + stringy +(7 rows) + +SELECT * FROM jsonb_array_elements_text('[1,true,[1,[2,3]],null,{"f1":1,"f2":[7,8,9]},false,"stringy"]') q; + value +---------------------------- + 1 + true + [1, [2, 3]] + + {"f1": 1, "f2": [7, 8, 9]} + false + stringy +(7 rows) + +-- populate_record +CREATE TYPE jbpop AS (a text, b int, c timestamp); +CREATE DOMAIN jsb_int_not_null AS int NOT NULL; +CREATE DOMAIN jsb_int_array_1d AS int[] CHECK(array_length(VALUE, 1) = 3); +CREATE DOMAIN jsb_int_array_2d AS int[][] CHECK(array_length(VALUE, 2) = 3); +create type jb_unordered_pair as (x int, y int); +create domain jb_ordered_pair as jb_unordered_pair check((value).x <= (value).y); +CREATE TYPE jsbrec AS ( + i int, + ia _int4, + ia1 int[], + ia2 int[][], + ia3 int[][][], + ia1d jsb_int_array_1d, + ia2d jsb_int_array_2d, + t text, + ta text[], + c char(10), + ca char(10)[], + ts timestamp, + js json, + jsb jsonb, + jsa json[], + rec jbpop, + reca jbpop[] +); +CREATE TYPE jsbrec_i_not_null AS ( + i jsb_int_not_null +); +SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+--- + blurfl | | +(1 row) + +SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+-------------------------- + blurfl | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+--- + blurfl | | +(1 row) + +SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":"blurfl","x":43.2}') q; + a | b | c +--------+---+-------------------------- + blurfl | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +SELECT * FROM jsonb_populate_record(NULL::jbpop,'{"a":[100,200,false],"x":43.2}') q; + a | b | c +-------------------+---+--- + [100, 200, false] | | +(1 row) + +SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"a":[100,200,false],"x":43.2}') q; + a | b | c +-------------------+---+-------------------------- + [100, 200, false] | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop,'{"c":[100,200,false],"x":43.2}') q; +ERROR: invalid input syntax for type timestamp: "[100, 200, false]" +SELECT * FROM jsonb_populate_record(row('x',3,'2012-12-31 15:30:56')::jbpop, '{}') q; + a | b | c +---+---+-------------------------- + x | 3 | Mon Dec 31 15:30:56 2012 +(1 row) + +SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"x": 43.2}') q; +ERROR: domain jsb_int_not_null does not allow null values +SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": null}') q; +ERROR: domain jsb_int_not_null does not allow null values +SELECT i FROM jsonb_populate_record(NULL::jsbrec_i_not_null, '{"i": 12345}') q; + i +------- + 12345 +(1 row) + 
+SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": null}') q; + ia +---- + +(1 row) + +SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ia". +SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [1, "2", null, 4]}') q; + ia +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1, 2], [3, 4]]}') q; + ia +--------------- + {{1,2},{3,4}} +(1 row) + +SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], 2]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". +SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": [[1], [2, 3]]}') q; +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. +SELECT ia FROM jsonb_populate_record(NULL::jsbrec, '{"ia": "{1,2,3}"}') q; + ia +--------- + {1,2,3} +(1 row) + +SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": null}') q; + ia1 +----- + +(1 row) + +SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ia1". +SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [1, "2", null, 4]}') q; + ia1 +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia1 FROM jsonb_populate_record(NULL::jsbrec, '{"ia1": [[1, 2, 3]]}') q; + ia1 +----------- + {{1,2,3}} +(1 row) + +SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": null}') q; + ia1d +------ + +(1 row) + +SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ia1d". +SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null, 4]}') q; +ERROR: value for domain jsb_int_array_1d violates check constraint "jsb_int_array_1d_check" +SELECT ia1d FROM jsonb_populate_record(NULL::jsbrec, '{"ia1d": [1, "2", null]}') q; + ia1d +------------ + {1,2,NULL} +(1 row) + +SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [1, "2", null, 4]}') q; + ia2 +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [null, 4]]}') q; + ia2 +------------------ + {{1,2},{NULL,4}} +(1 row) + +SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[], []]}') q; + ia2 +----- + {} +(1 row) + +SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], [3]]}') q; +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. +SELECT ia2 FROM jsonb_populate_record(NULL::jsbrec, '{"ia2": [[1, 2], 3, 4]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ia2". 
+SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2"], [null, 4]]}') q; +ERROR: value for domain jsb_int_array_2d violates check constraint "jsb_int_array_2d_check" +SELECT ia2d FROM jsonb_populate_record(NULL::jsbrec, '{"ia2d": [[1, "2", 3], [null, 5, 6]]}') q; + ia2d +---------------------- + {{1,2,3},{NULL,5,6}} +(1 row) + +SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [1, "2", null, 4]}') q; + ia3 +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [[1, 2], [null, 4]]}') q; + ia3 +------------------ + {{1,2},{NULL,4}} +(1 row) + +SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[], []], [[], []], [[], []] ]}') q; + ia3 +----- + {} +(1 row) + +SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2]], [[3, 4]] ]}') q; + ia3 +------------------- + {{{1,2}},{{3,4}}} +(1 row) + +SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]] ]}') q; + ia3 +------------------------------- + {{{1,2},{3,4}},{{5,6},{7,8}}} +(1 row) + +SELECT ia3 FROM jsonb_populate_record(NULL::jsbrec, '{"ia3": [ [[1, 2], [3, 4]], [[5, 6], [7, 8], [9, 10]] ]}') q; +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. +SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": null}') q; + ta +---- + +(1 row) + +SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ta". +SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [1, "2", null, 4]}') q; + ta +-------------- + {1,2,NULL,4} +(1 row) + +SELECT ta FROM jsonb_populate_record(NULL::jsbrec, '{"ta": [[1, 2, 3], {"k": "v"}]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ta". +SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": null}') q; + c +--- + +(1 row) + +SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaa"}') q; + c +------------ + aaa +(1 row) + +SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaa"}') q; + c +------------ + aaaaaaaaaa +(1 row) + +SELECT c FROM jsonb_populate_record(NULL::jsbrec, '{"c": "aaaaaaaaaaaaa"}') q; +ERROR: value too long for type character(10) +SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": null}') q; + ca +---- + +(1 row) + +SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "ca". +SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [1, "2", null, 4]}') q; + ca +----------------------------------------------- + {"1 ","2 ",NULL,"4 "} +(1 row) + +SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": ["aaaaaaaaaaaaaaaa"]}') q; +ERROR: value too long for type character(10) +SELECT ca FROM jsonb_populate_record(NULL::jsbrec, '{"ca": [[1, 2, 3], {"k": "v"}]}') q; +ERROR: expected JSON array +HINT: See the array element [1] of key "ca". 
+SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": null}') q; + js +---- + +(1 row) + +SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": true}') q; + js +------ + true +(1 row) + +SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": 123.45}') q; + js +-------- + 123.45 +(1 row) + +SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "123.45"}') q; + js +---------- + "123.45" +(1 row) + +SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": "abc"}') q; + js +------- + "abc" +(1 row) + +SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": [123, "123", null, {"key": "value"}]}') q; + js +-------------------------------------- + [123, "123", null, {"key": "value"}] +(1 row) + +SELECT js FROM jsonb_populate_record(NULL::jsbrec, '{"js": {"a": "bbb", "b": null, "c": 123.45}}') q; + js +-------------------------------------- + {"a": "bbb", "b": null, "c": 123.45} +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": null}') q; + jsb +----- + +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": true}') q; + jsb +------ + true +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": 123.45}') q; + jsb +-------- + 123.45 +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "123.45"}') q; + jsb +---------- + "123.45" +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": "abc"}') q; + jsb +------- + "abc" +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": [123, "123", null, {"key": "value"}]}') q; + jsb +-------------------------------------- + [123, "123", null, {"key": "value"}] +(1 row) + +SELECT jsb FROM jsonb_populate_record(NULL::jsbrec, '{"jsb": {"a": "bbb", "b": null, "c": 123.45}}') q; + jsb +-------------------------------------- + {"a": "bbb", "b": null, "c": 123.45} +(1 row) + +SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": null}') q; + jsa +----- + +(1 row) + +SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "jsa". +SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": [1, "2", null, 4]}') q; + jsa +-------------------- + {1,"\"2\"",NULL,4} +(1 row) + +SELECT jsa FROM jsonb_populate_record(NULL::jsbrec, '{"jsa": ["aaa", null, [1, 2, "3", {}], { "k" : "v" }]}') q; + jsa +------------------------------------------------------- + {"\"aaa\"",NULL,"[1, 2, \"3\", {}]","{\"k\": \"v\"}"} +(1 row) + +SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": 123}') q; +ERROR: cannot call populate_composite on a scalar +SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": [1, 2]}') q; +ERROR: cannot call populate_composite on an array +SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}') q; + rec +----------------------------------- + (abc,,"Thu Jan 02 00:00:00 2003") +(1 row) + +SELECT rec FROM jsonb_populate_record(NULL::jsbrec, '{"rec": "(abc,42,01.02.2003)"}') q; + rec +------------------------------------- + (abc,42,"Thu Jan 02 00:00:00 2003") +(1 row) + +SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": 123}') q; +ERROR: expected JSON array +HINT: See the value of key "reca". 
+SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [1, 2]}') q; +ERROR: cannot call populate_composite on a scalar +SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}]}') q; + reca +-------------------------------------------------------- + {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} +(1 row) + +SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": ["(abc,42,01.02.2003)"]}') q; + reca +------------------------------------------- + {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} +(1 row) + +SELECT reca FROM jsonb_populate_record(NULL::jsbrec, '{"reca": "{\"(abc,42,01.02.2003)\"}"}') q; + reca +------------------------------------------- + {"(abc,42,\"Thu Jan 02 00:00:00 2003\")"} +(1 row) + +SELECT rec FROM jsonb_populate_record( + row(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, + row('x',3,'2012-12-31 15:30:56')::jbpop,NULL)::jsbrec, + '{"rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}}' +) q; + rec +------------------------------------ + (abc,3,"Thu Jan 02 00:00:00 2003") +(1 row) + +-- anonymous record type +SELECT jsonb_populate_record(null::record, '{"x": 0, "y": 1}'); +ERROR: could not determine row type for result of jsonb_populate_record +HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. +SELECT jsonb_populate_record(row(1,2), '{"f1": 0, "f2": 1}'); + jsonb_populate_record +----------------------- + (0,1) +(1 row) + +SELECT * FROM + jsonb_populate_record(null::record, '{"x": 776}') AS (x int, y int); + x | y +-----+--- + 776 | +(1 row) + +-- composite domain +SELECT jsonb_populate_record(null::jb_ordered_pair, '{"x": 0, "y": 1}'); + jsonb_populate_record +----------------------- + (0,1) +(1 row) + +SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 0}'); + jsonb_populate_record +----------------------- + (0,2) +(1 row) + +SELECT jsonb_populate_record(row(1,2)::jb_ordered_pair, '{"x": 1, "y": 0}'); +ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" +-- populate_recordset +SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+---+-------------------------- + blurfl | | + | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+----+-------------------------- + blurfl | 99 | + def | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+---+-------------------------- + blurfl | | + | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+----+-------------------------- + blurfl | 99 | + def | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +-----------------+----+-------------------------- + [100, 200, 300] | 99 | + {"z": true} | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"c":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 
10:42:53"}]') q; +ERROR: invalid input syntax for type timestamp: "[100, 200, 300]" +SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+---+-------------------------- + blurfl | | + | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":"blurfl","x":43.2},{"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +--------+----+-------------------------- + blurfl | 99 | + def | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +SELECT * FROM jsonb_populate_recordset(row('def',99,NULL)::jbpop,'[{"a":[100,200,300],"x":43.2},{"a":{"z":true},"b":3,"c":"2012-01-20 10:42:53"}]') q; + a | b | c +-----------------+----+-------------------------- + [100, 200, 300] | 99 | + {"z": true} | 3 | Fri Jan 20 10:42:53 2012 +(2 rows) + +-- anonymous record type +SELECT jsonb_populate_recordset(null::record, '[{"x": 0, "y": 1}]'); +ERROR: could not determine row type for result of jsonb_populate_recordset +HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. +SELECT jsonb_populate_recordset(row(1,2), '[{"f1": 0, "f2": 1}]'); + jsonb_populate_recordset +-------------------------- + (0,1) +(1 row) + +SELECT i, jsonb_populate_recordset(row(i,50), '[{"f1":"42"},{"f2":"43"}]') +FROM (VALUES (1),(2)) v(i); + i | jsonb_populate_recordset +---+-------------------------- + 1 | (42,50) + 1 | (1,43) + 2 | (42,50) + 2 | (2,43) +(4 rows) + +SELECT * FROM + jsonb_populate_recordset(null::record, '[{"x": 776}]') AS (x int, y int); + x | y +-----+--- + 776 | +(1 row) + +-- empty array is a corner case +SELECT jsonb_populate_recordset(null::record, '[]'); +ERROR: could not determine row type for result of jsonb_populate_recordset +HINT: Provide a non-null record argument, or call the function in the FROM clause using a column definition list. +SELECT jsonb_populate_recordset(row(1,2), '[]'); + jsonb_populate_recordset +-------------------------- +(0 rows) + +SELECT * FROM jsonb_populate_recordset(NULL::jbpop,'[]') q; + a | b | c +---+---+--- +(0 rows) + +SELECT * FROM + jsonb_populate_recordset(null::record, '[]') AS (x int, y int); + x | y +---+--- +(0 rows) + +-- composite domain +SELECT jsonb_populate_recordset(null::jb_ordered_pair, '[{"x": 0, "y": 1}]'); + jsonb_populate_recordset +-------------------------- + (0,1) +(1 row) + +SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 0}, {"y": 3}]'); + jsonb_populate_recordset +-------------------------- + (0,2) + (1,3) +(2 rows) + +SELECT jsonb_populate_recordset(row(1,2)::jb_ordered_pair, '[{"x": 1, "y": 0}]'); +ERROR: value for domain jb_ordered_pair violates check constraint "jb_ordered_pair_check" +-- negative cases where the wrong record type is supplied +select * from jsonb_populate_recordset(row(0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 1 attribute, but query expects 2. +select * from jsonb_populate_recordset(row(0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. 
+select * from jsonb_populate_recordset(row(0::int,0::int,0::int),'[{"a":"1","b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 3 attributes, but query expects 2. +select * from jsonb_populate_recordset(row(1000000000::int,50::int),'[{"b":"2"},{"a":"3"}]') q (a text, b text); +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 1, but query expects text. +-- jsonb_to_record and jsonb_to_recordset +select * from jsonb_to_record('{"a":1,"b":"foo","c":"bar"}') + as x(a int, b text, d text); + a | b | d +---+-----+--- + 1 | foo | +(1 row) + +select * from jsonb_to_recordset('[{"a":1,"b":"foo","d":false},{"a":2,"b":"bar","c":true}]') + as x(a int, b text, c boolean); + a | b | c +---+-----+--- + 1 | foo | + 2 | bar | t +(2 rows) + +select *, c is null as c_is_null +from jsonb_to_record('{"a":1, "b":{"c":16, "d":2}, "x":8, "ca": ["1 2", 3], "ia": [[1,2],[3,4]], "r": {"a": "aaa", "b": 123}}'::jsonb) + as t(a int, b jsonb, c text, x int, ca char(5)[], ia int[][], r jbpop); + a | b | c | x | ca | ia | r | c_is_null +---+-------------------+---+---+-------------------+---------------+------------+----------- + 1 | {"c": 16, "d": 2} | | 8 | {"1 2 ","3 "} | {{1,2},{3,4}} | (aaa,123,) | t +(1 row) + +select *, c is null as c_is_null +from jsonb_to_recordset('[{"a":1, "b":{"c":16, "d":2}, "x":8}]'::jsonb) + as t(a int, b jsonb, c text, x int); + a | b | c | x | c_is_null +---+-------------------+---+---+----------- + 1 | {"c": 16, "d": 2} | | 8 | t +(1 row) + +select * from jsonb_to_record('{"ia": null}') as x(ia _int4); + ia +---- + +(1 row) + +select * from jsonb_to_record('{"ia": 123}') as x(ia _int4); +ERROR: expected JSON array +HINT: See the value of key "ia". +select * from jsonb_to_record('{"ia": [1, "2", null, 4]}') as x(ia _int4); + ia +-------------- + {1,2,NULL,4} +(1 row) + +select * from jsonb_to_record('{"ia": [[1, 2], [3, 4]]}') as x(ia _int4); + ia +--------------- + {{1,2},{3,4}} +(1 row) + +select * from jsonb_to_record('{"ia": [[1], 2]}') as x(ia _int4); +ERROR: expected JSON array +HINT: See the array element [1] of key "ia". +select * from jsonb_to_record('{"ia": [[1], [2, 3]]}') as x(ia _int4); +ERROR: malformed JSON array +DETAIL: Multidimensional arrays must have sub-arrays with matching dimensions. 
+select * from jsonb_to_record('{"ia2": [1, 2, 3]}') as x(ia2 int[][]); + ia2 +--------- + {1,2,3} +(1 row) + +select * from jsonb_to_record('{"ia2": [[1, 2], [3, 4]]}') as x(ia2 int4[][]); + ia2 +--------------- + {{1,2},{3,4}} +(1 row) + +select * from jsonb_to_record('{"ia2": [[[1], [2], [3]]]}') as x(ia2 int4[][]); + ia2 +----------------- + {{{1},{2},{3}}} +(1 row) + +select * from jsonb_to_record('{"out": {"key": 1}}') as x(out json); + out +------------ + {"key": 1} +(1 row) + +select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out json); + out +-------------- + [{"key": 1}] +(1 row) + +select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out json); + out +---------------- + "{\"key\": 1}" +(1 row) + +select * from jsonb_to_record('{"out": {"key": 1}}') as x(out jsonb); + out +------------ + {"key": 1} +(1 row) + +select * from jsonb_to_record('{"out": [{"key": 1}]}') as x(out jsonb); + out +-------------- + [{"key": 1}] +(1 row) + +select * from jsonb_to_record('{"out": "{\"key\": 1}"}') as x(out jsonb); + out +---------------- + "{\"key\": 1}" +(1 row) + +-- test type info caching in jsonb_populate_record() +CREATE TEMP TABLE jsbpoptest (js jsonb); +INSERT INTO jsbpoptest +SELECT '{ + "jsa": [1, "2", null, 4], + "rec": {"a": "abc", "c": "01.02.2003", "x": 43.2}, + "reca": [{"a": "abc", "b": 456}, null, {"c": "01.02.2003", "x": 43.2}] +}'::jsonb +FROM generate_series(1, 3); +SELECT (jsonb_populate_record(NULL::jsbrec, js)).* FROM jsbpoptest; + i | ia | ia1 | ia2 | ia3 | ia1d | ia2d | t | ta | c | ca | ts | js | jsb | jsa | rec | reca +---+----+-----+-----+-----+------+------+---+----+---+----+----+----+-----+--------------------+-----------------------------------+-------------------------------------------------------- + | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} + | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} + | | | | | | | | | | | | | | {1,"\"2\"",NULL,4} | (abc,,"Thu Jan 02 00:00:00 2003") | {"(abc,456,)",NULL,"(,,\"Thu Jan 02 00:00:00 2003\")"} +(3 rows) + +DROP TYPE jsbrec; +DROP TYPE jsbrec_i_not_null; +DROP DOMAIN jsb_int_not_null; +DROP DOMAIN jsb_int_array_1d; +DROP DOMAIN jsb_int_array_2d; +DROP DOMAIN jb_ordered_pair; +DROP TYPE jb_unordered_pair; +-- indexing +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ? 'public'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ? 
'bar'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled']; + count +------- + 337 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled']; + count +------- + 42 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)'; + count +------- + 337 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)'; + count +------- + 42 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.public'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.bar'; + count +------- + 0 +(1 row) + +CREATE INDEX jidx ON testjsonb USING gin (j); +SET enable_seqscan = off; +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}'; + count +------- + 3 +(1 row) + +-- exercise GIN_SEARCH_MODE_ALL +SELECT count(*) FROM testjsonb WHERE j @> '{}'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ? 'public'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ? 
'bar'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ?| ARRAY['public','disabled']; + count +------- + 337 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j ?& ARRAY['public','disabled']; + count +------- + 42 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; + QUERY PLAN +----------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on testjsonb + Recheck Cond: (j @@ '($."wait" == null)'::jsonpath) + -> Bitmap Index Scan on jidx + Index Cond: (j @@ '($."wait" == null)'::jsonpath) +(5 rows) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? (@ == null))'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public)'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.bar)'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) || exists($.disabled)'; + count +------- + 337 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.public) && exists($.disabled)'; + count +------- + 42 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; + QUERY PLAN +------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on testjsonb + Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath) + -> Bitmap Index Scan on jidx + Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath) +(5 rows) + +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? 
(@ == "bar")'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.public'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.bar'; + count +------- + 0 +(1 row) + +-- array exists - array elements should behave as keys (for GIN index scans too) +CREATE INDEX jidx_array ON testjsonb USING gin((j->'array')); +SELECT count(*) from testjsonb WHERE j->'array' ? 'bar'; + count +------- + 3 +(1 row) + +-- type sensitive array exists - should return no rows (since "exists" only +-- matches strings that are either object keys or array elements) +SELECT count(*) from testjsonb WHERE j->'array' ? '5'::text; + count +------- + 0 +(1 row) + +-- However, a raw scalar is *contained* within the array +SELECT count(*) from testjsonb WHERE j->'array' @> '5'::jsonb; + count +------- + 1 +(1 row) + +RESET enable_seqscan; +SELECT count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow; + count +------- + 4791 +(1 row) + +SELECT key, count(*) FROM (SELECT (jsonb_each(j)).key FROM testjsonb) AS wow GROUP BY key ORDER BY count DESC, key; + key | count +-----------+------- + line | 884 + query | 207 + pos | 203 + node | 202 + space | 197 + status | 195 + public | 194 + title | 190 + wait | 190 + org | 189 + user | 189 + coauthors | 188 + disabled | 185 + indexed | 184 + cleaned | 180 + bad | 179 + date | 179 + world | 176 + state | 172 + subtitle | 169 + auth | 168 + abstract | 161 + array | 5 + age | 2 + foo | 2 + fool | 1 +(26 rows) + +-- sort/hash +SELECT count(distinct j) FROM testjsonb; + count +------- + 894 +(1 row) + +SET enable_hashagg = off; +SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2; + count +------- + 894 +(1 row) + +SET enable_hashagg = on; +SET enable_sort = off; +SELECT count(*) FROM (SELECT j FROM (SELECT * FROM testjsonb UNION ALL SELECT * FROM testjsonb) js GROUP BY j) js2; + count +------- + 894 +(1 row) + +SELECT distinct * FROM (values (jsonb '{}' || ''::text),('{}')) v(j); + j +---- + {} +(1 row) + +SET enable_sort = on; +RESET enable_hashagg; +RESET enable_sort; +DROP INDEX jidx; +DROP INDEX jidx_array; +-- btree +CREATE INDEX jidx ON testjsonb USING btree (j); +SET enable_seqscan = off; +SELECT count(*) FROM testjsonb WHERE j > '{"p":1}'; + count +------- + 884 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j = '{"pos":98, "line":371, "node":"CBA", "indexed":true}'; + count +------- + 1 +(1 row) + +--gin path opclass +DROP INDEX jidx; +CREATE INDEX jidx ON testjsonb USING gin (j jsonb_path_ops); +SET enable_seqscan = off; +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":null}'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; + count +------- + 2 +(1 row) + +-- exercise GIN_SEARCH_MODE_ALL +SELECT count(*) FROM testjsonb WHERE j @> '{}'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == null'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.wait == null))'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.wait ? 
(@ == null))'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '"CC" == $.wait'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.wait == "CC" && true == $.public'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.age == 25.0'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "foo"'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ '$.array[*] == "bar"'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($ ? (@.array[*] == "bar"))'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array ? (@[*] == "bar"))'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($.array[*] ? (@ == "bar"))'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @@ 'exists($)'; + count +------- + 1012 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; + QUERY PLAN +------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on testjsonb + Recheck Cond: (j @? '$."wait"?(@ == null)'::jsonpath) + -> Bitmap Index Scan on jidx + Index Cond: (j @? '$."wait"?(@ == null)'::jsonpath) +(5 rows) + +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? (@ == null)'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.wait ? ("CC" == @)'; + count +------- + 15 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.wait == "CC" && true == @.public)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.age ? (@ == 25)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.age == 25.0)'; + count +------- + 2 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$ ? (@.array[*] == "bar")'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.array ? (@[*] == "bar")'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.array[*] ? (@ == "bar")'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$'; + count +------- + 1012 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? '$.public'; + count +------- + 194 +(1 row) + +SELECT count(*) FROM testjsonb WHERE j @? 
'$.bar'; + count +------- + 0 +(1 row) + +RESET enable_seqscan; +DROP INDEX jidx; +-- nested tests +SELECT '{"ff":{"a":12,"b":16}}'::jsonb; + jsonb +---------------------------- + {"ff": {"a": 12, "b": 16}} +(1 row) + +SELECT '{"ff":{"a":12,"b":16},"qq":123}'::jsonb; + jsonb +--------------------------------------- + {"ff": {"a": 12, "b": 16}, "qq": 123} +(1 row) + +SELECT '{"aa":["a","aaa"],"qq":{"a":12,"b":16,"c":["c1","c2"],"d":{"d1":"d1","d2":"d2","d1":"d3"}}}'::jsonb; + jsonb +-------------------------------------------------------------------------------------------------- + {"aa": ["a", "aaa"], "qq": {"a": 12, "b": 16, "c": ["c1", "c2"], "d": {"d1": "d3", "d2": "d2"}}} +(1 row) + +SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2"],"d":{"d1":"d1","d2":"d2"}}}'::jsonb; + jsonb +------------------------------------------------------------------------------------------------------ + {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2"], "d": {"d1": "d1", "d2": "d2"}}} +(1 row) + +SELECT '{"aa":["a","aaa"],"qq":{"a":"12","b":"16","c":["c1","c2",["c3"],{"c4":4}],"d":{"d1":"d1","d2":"d2"}}}'::jsonb; + jsonb +------------------------------------------------------------------------------------------------------------------------- + {"aa": ["a", "aaa"], "qq": {"a": "12", "b": "16", "c": ["c1", "c2", ["c3"], {"c4": 4}], "d": {"d1": "d1", "d2": "d2"}}} +(1 row) + +SELECT '{"ff":["a","aaa"]}'::jsonb; + jsonb +---------------------- + {"ff": ["a", "aaa"]} +(1 row) + +SELECT + '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'ff', + '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'qq', + ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'Y') IS NULL AS f, + ('{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb ->> 'Y') IS NULL AS t, + '{"ff":{"a":12,"b":16},"qq":123,"x":[1,2],"Y":null}'::jsonb -> 'x'; + ?column? | ?column? | f | t | ?column? +--------------------+----------+---+---+---------- + {"a": 12, "b": 16} | 123 | f | t | [1, 2] +(1 row) + +-- nested containment +SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1,2]}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":[2,1],"c":"b"}'::jsonb @> '{"a":[1,2]}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":[1,2]}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":[1,2]}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":{"1":2},"c":"b"}'::jsonb @> '{"a":{"1":2}}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":{"2":1},"c":"b"}'::jsonb @> '{"a":{"1":2}}'; + ?column? +---------- + f +(1 row) + +SELECT '["a","b"]'::jsonb @> '["a","b","c","b"]'; + ?column? +---------- + f +(1 row) + +SELECT '["a","b","c","b"]'::jsonb @> '["a","b"]'; + ?column? +---------- + t +(1 row) + +SELECT '["a","b","c",[1,2]]'::jsonb @> '["a",[1,2]]'; + ?column? +---------- + t +(1 row) + +SELECT '["a","b","c",[1,2]]'::jsonb @> '["b",[1,2]]'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[1]}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[2]}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":[1,2],"c":"b"}'::jsonb @> '{"a":[3]}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"c":3}]}'; + ?column? +---------- + t +(1 row) + +SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4}]}'; + ?column? 
+---------- + t +(1 row) + +SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},3]}'; + ?column? +---------- + f +(1 row) + +SELECT '{"a":[1,2,{"c":3,"x":4}],"c":"b"}'::jsonb @> '{"a":[{"x":4},1]}'; + ?column? +---------- + t +(1 row) + +-- check some corner cases for indexed nested containment (bug #13756) +create temp table nestjsonb (j jsonb); +insert into nestjsonb (j) values ('{"a":[["b",{"x":1}],["b",{"x":2}]],"c":3}'); +insert into nestjsonb (j) values ('[[14,2,3]]'); +insert into nestjsonb (j) values ('[1,[14,2,3]]'); +create index on nestjsonb using gin(j jsonb_path_ops); +set enable_seqscan = on; +set enable_bitmapscan = off; +select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb; + j +--------------------------------------------------- + {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} +(1 row) + +select * from nestjsonb where j @> '{"c":3}'; + j +--------------------------------------------------- + {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} +(1 row) + +select * from nestjsonb where j @> '[[14]]'; + j +----------------- + [[14, 2, 3]] + [1, [14, 2, 3]] +(2 rows) + +set enable_seqscan = off; +set enable_bitmapscan = on; +select * from nestjsonb where j @> '{"a":[[{"x":2}]]}'::jsonb; + j +--------------------------------------------------- + {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} +(1 row) + +select * from nestjsonb where j @> '{"c":3}'; + j +--------------------------------------------------- + {"a": [["b", {"x": 1}], ["b", {"x": 2}]], "c": 3} +(1 row) + +select * from nestjsonb where j @> '[[14]]'; + j +----------------- + [[14, 2, 3]] + [1, [14, 2, 3]] +(2 rows) + +reset enable_seqscan; +reset enable_bitmapscan; +-- nested object field / array index lookup +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'n'; + ?column? +---------- + null +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'a'; + ?column? +---------- + 1 +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'b'; + ?column? +---------- + [1, 2] +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'c'; + ?column? +---------- + {"1": 2} +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd'; + ?column? +--------------- + {"1": [2, 3]} +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'd' -> '1'; + ?column? +---------- + [2, 3] +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 'e'; + ?column? +---------- + +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb -> 0; --expecting error + ?column? +---------- + +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 0; + ?column? +---------- + "a" +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 1; + ?column? +---------- + "b" +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 2; + ?column? +---------- + "c" +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 3; + ?column? +---------- + [1, 2] +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1; + ?column? +---------- + 2 +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 4; + ?column? +---------- + null +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> 5; + ?column? +---------- + +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -1; + ?column? +---------- + null +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -5; + ?column? 
+---------- + "a" +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -6; + ?column? +---------- + +(1 row) + +--nested path extraction +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}'; + ?column? +---------- + +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{a}'; + ?column? +---------- + "b" +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c}'; + ?column? +----------- + [1, 2, 3] +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,0}'; + ?column? +---------- + 1 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}'; + ?column? +---------- + 2 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}'; + ?column? +---------- + 3 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}'; + ?column? +---------- + +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}'; + ?column? +---------- + 3 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}'; + ?column? +---------- + 1 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}'; + ?column? +---------- + +(1 row) + +SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}'; + ?column? +---------- + 0 +(1 row) + +SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}'; + ?column? +---------- + [3, 4] +(1 row) + +SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4}'; + ?column? +--------------- + {"5": "five"} +(1 row) + +SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{4,5}'; + ?column? +---------- + "five" +(1 row) + +--nested exists +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'n'; + ?column? +---------- + t +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'a'; + ?column? +---------- + t +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'b'; + ?column? +---------- + t +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'c'; + ?column? +---------- + t +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'd'; + ?column? +---------- + t +(1 row) + +SELECT '{"n":null,"a":1,"b":[1,2],"c":{"1":2},"d":{"1":[2,3]}}'::jsonb ? 'e'; + ?column? 
+---------- + f +(1 row) + +-- jsonb_strip_nulls +select jsonb_strip_nulls(null); + jsonb_strip_nulls +------------------- + +(1 row) + +select jsonb_strip_nulls('1'); + jsonb_strip_nulls +------------------- + 1 +(1 row) + +select jsonb_strip_nulls('"a string"'); + jsonb_strip_nulls +------------------- + "a string" +(1 row) + +select jsonb_strip_nulls('null'); + jsonb_strip_nulls +------------------- + null +(1 row) + +select jsonb_strip_nulls('[1,2,null,3,4]'); + jsonb_strip_nulls +-------------------- + [1, 2, null, 3, 4] +(1 row) + +select jsonb_strip_nulls('{"a":1,"b":null,"c":[2,null,3],"d":{"e":4,"f":null}}'); + jsonb_strip_nulls +-------------------------------------------- + {"a": 1, "c": [2, null, 3], "d": {"e": 4}} +(1 row) + +select jsonb_strip_nulls('[1,{"a":1,"b":null,"c":2},3]'); + jsonb_strip_nulls +-------------------------- + [1, {"a": 1, "c": 2}, 3] +(1 row) + +-- an empty object is not null and should not be stripped +select jsonb_strip_nulls('{"a": {"b": null, "c": null}, "d": {} }'); + jsonb_strip_nulls +-------------------- + {"a": {}, "d": {}} +(1 row) + +select jsonb_pretty('{"a": "test", "b": [1, 2, 3], "c": "test3", "d":{"dd": "test4", "dd2":{"ddd": "test5"}}}'); + jsonb_pretty +---------------------------- + { + + "a": "test", + + "b": [ + + 1, + + 2, + + 3 + + ], + + "c": "test3", + + "d": { + + "dd": "test4", + + "dd2": { + + "ddd": "test5"+ + } + + } + + } +(1 row) + +select jsonb_pretty('[{"f1":1,"f2":null},2,null,[[{"x":true},6,7],8],3]'); + jsonb_pretty +--------------------------- + [ + + { + + "f1": 1, + + "f2": null + + }, + + 2, + + null, + + [ + + [ + + { + + "x": true+ + }, + + 6, + + 7 + + ], + + 8 + + ], + + 3 + + ] +(1 row) + +select jsonb_pretty('{"a":["b", "c"], "d": {"e":"f"}}'); + jsonb_pretty +------------------ + { + + "a": [ + + "b", + + "c" + + ], + + "d": { + + "e": "f"+ + } + + } +(1 row) + +select jsonb_concat('{"d": "test", "a": [1, 2]}', '{"g": "test2", "c": {"c1":1, "c2":2}}'); + jsonb_concat +------------------------------------------------------------------- + {"a": [1, 2], "c": {"c1": 1, "c2": 2}, "d": "test", "g": "test2"} +(1 row) + +select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"cq":"l", "b":"g", "fg":false}'; + ?column? +--------------------------------------------- + {"b": "g", "aa": 1, "cq": "l", "fg": false} +(1 row) + +select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aq":"l"}'; + ?column? +--------------------------------------- + {"b": 2, "aa": 1, "aq": "l", "cq": 3} +(1 row) + +select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{"aa":"l"}'; + ?column? +------------------------------ + {"b": 2, "aa": "l", "cq": 3} +(1 row) + +select '{"aa":1 , "b":2, "cq":3}'::jsonb || '{}'; + ?column? +---------------------------- + {"b": 2, "aa": 1, "cq": 3} +(1 row) + +select '["a", "b"]'::jsonb || '["c"]'; + ?column? +----------------- + ["a", "b", "c"] +(1 row) + +select '["a", "b"]'::jsonb || '["c", "d"]'; + ?column? +---------------------- + ["a", "b", "c", "d"] +(1 row) + +select '["c"]' || '["a", "b"]'::jsonb; + ?column? +----------------- + ["c", "a", "b"] +(1 row) + +select '["a", "b"]'::jsonb || '"c"'; + ?column? +----------------- + ["a", "b", "c"] +(1 row) + +select '"c"' || '["a", "b"]'::jsonb; + ?column? +----------------- + ["c", "a", "b"] +(1 row) + +select '[]'::jsonb || '["a"]'::jsonb; + ?column? +---------- + ["a"] +(1 row) + +select '[]'::jsonb || '"a"'::jsonb; + ?column? +---------- + ["a"] +(1 row) + +select '"b"'::jsonb || '"a"'::jsonb; + ?column? 
+------------ + ["b", "a"] +(1 row) + +select '{}'::jsonb || '{"a":"b"}'::jsonb; + ?column? +------------ + {"a": "b"} +(1 row) + +select '[]'::jsonb || '{"a":"b"}'::jsonb; + ?column? +-------------- + [{"a": "b"}] +(1 row) + +select '{"a":"b"}'::jsonb || '[]'::jsonb; + ?column? +-------------- + [{"a": "b"}] +(1 row) + +select '"a"'::jsonb || '{"a":1}'; + ?column? +----------------- + ["a", {"a": 1}] +(1 row) + +select '{"a":1}' || '"a"'::jsonb; + ?column? +----------------- + [{"a": 1}, "a"] +(1 row) + +select '[3]'::jsonb || '{}'::jsonb; + ?column? +---------- + [3, {}] +(1 row) + +select '3'::jsonb || '[]'::jsonb; + ?column? +---------- + [3] +(1 row) + +select '3'::jsonb || '4'::jsonb; + ?column? +---------- + [3, 4] +(1 row) + +select '3'::jsonb || '{}'::jsonb; + ?column? +---------- + [3, {}] +(1 row) + +select '["a", "b"]'::jsonb || '{"c":1}'; + ?column? +---------------------- + ["a", "b", {"c": 1}] +(1 row) + +select '{"c": 1}'::jsonb || '["a", "b"]'; + ?column? +---------------------- + [{"c": 1}, "a", "b"] +(1 row) + +select '{}'::jsonb || '{"cq":"l", "b":"g", "fg":false}'; + ?column? +------------------------------------ + {"b": "g", "cq": "l", "fg": false} +(1 row) + +select pg_column_size('{}'::jsonb || '{}'::jsonb) = pg_column_size('{}'::jsonb); + ?column? +---------- + t +(1 row) + +select pg_column_size('{"aa":1}'::jsonb || '{"b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); + ?column? +---------- + t +(1 row) + +select pg_column_size('{"aa":1, "b":2}'::jsonb || '{}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); + ?column? +---------- + t +(1 row) + +select pg_column_size('{}'::jsonb || '{"aa":1, "b":2}'::jsonb) = pg_column_size('{"aa":1, "b":2}'::jsonb); + ?column? +---------- + t +(1 row) + +select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'a'); + jsonb_delete +------------------ + {"b": 2, "c": 3} +(1 row) + +select jsonb_delete('{"a":null , "b":2, "c":3}'::jsonb, 'a'); + jsonb_delete +------------------ + {"b": 2, "c": 3} +(1 row) + +select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'b'); + jsonb_delete +------------------ + {"a": 1, "c": 3} +(1 row) + +select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'c'); + jsonb_delete +------------------ + {"a": 1, "b": 2} +(1 row) + +select jsonb_delete('{"a":1 , "b":2, "c":3}'::jsonb, 'd'); + jsonb_delete +-------------------------- + {"a": 1, "b": 2, "c": 3} +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - 'a'; + ?column? +------------------ + {"b": 2, "c": 3} +(1 row) + +select '{"a":null , "b":2, "c":3}'::jsonb - 'a'; + ?column? +------------------ + {"b": 2, "c": 3} +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - 'b'; + ?column? +------------------ + {"a": 1, "c": 3} +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - 'c'; + ?column? +------------------ + {"a": 1, "b": 2} +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - 'd'; + ?column? +-------------------------- + {"a": 1, "b": 2, "c": 3} +(1 row) + +select pg_column_size('{"a":1 , "b":2, "c":3}'::jsonb - 'b') = pg_column_size('{"a":1, "b":2}'::jsonb); + ?column? +---------- + t +(1 row) + +select '["a","b","c"]'::jsonb - 3; + ?column? +----------------- + ["a", "b", "c"] +(1 row) + +select '["a","b","c"]'::jsonb - 2; + ?column? +------------ + ["a", "b"] +(1 row) + +select '["a","b","c"]'::jsonb - 1; + ?column? +------------ + ["a", "c"] +(1 row) + +select '["a","b","c"]'::jsonb - 0; + ?column? +------------ + ["b", "c"] +(1 row) + +select '["a","b","c"]'::jsonb - -1; + ?column? 
+------------ + ["a", "b"] +(1 row) + +select '["a","b","c"]'::jsonb - -2; + ?column? +------------ + ["a", "c"] +(1 row) + +select '["a","b","c"]'::jsonb - -3; + ?column? +------------ + ["b", "c"] +(1 row) + +select '["a","b","c"]'::jsonb - -4; + ?column? +----------------- + ["a", "b", "c"] +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - '{b}'::text[]; + ?column? +------------------ + {"a": 1, "c": 3} +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - '{c,b}'::text[]; + ?column? +---------- + {"a": 1} +(1 row) + +select '{"a":1 , "b":2, "c":3}'::jsonb - '{}'::text[]; + ?column? +-------------------------- + {"a": 1, "b": 2, "c": 3} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '[1,2,3]'); + jsonb_set +-------------------------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": [1, 2, 3]} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '[1,2,3]'); + jsonb_set +----------------------------------------------------------------------------- + {"a": 1, "b": [1, [1, 2, 3]], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '[1,2,3]'); + jsonb_set +----------------------------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [[1, 2, 3], 3]}, "n": null} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '[1,2,3]'); +ERROR: path element at position 2 is null +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{n}', '{"1": 2}'); + jsonb_set +------------------------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": {"1": 2}} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"1": 2}'); + jsonb_set +---------------------------------------------------------------------------- + {"a": 1, "b": [1, {"1": 2}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,1,0}', '{"1": 2}'); + jsonb_set +---------------------------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [{"1": 2}, 3]}, "n": null} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{d,NULL,0}', '{"1": 2}'); +ERROR: path element at position 2 is null +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '"test"'); + jsonb_set +-------------------------------------------------------------------------- + {"a": 1, "b": [1, "test"], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + +select jsonb_set('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb, '{b,-1}', '{"f": "test"}'); + jsonb_set +--------------------------------------------------------------------------------- + {"a": 1, "b": [1, {"f": "test"}], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + +select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{n}'); + jsonb_delete_path +---------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}} +(1 row) + +select 
jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{b,-1}'); + jsonb_delete_path +------------------------------------------------------------------ + {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + +select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}', '{d,1,0}'); + jsonb_delete_path +------------------------------------------------------------------ + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null} +(1 row) + +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}'; + ?column? +---------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}} +(1 row) + +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}'; + ?column? +------------------------------------------------------------------ + {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript +ERROR: path element at position 2 is not an integer: "-1e" +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}'; + ?column? +------------------------------------------------------------------ + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [3]}, "n": null} +(1 row) + +-- empty structure and error conditions for delete and replace +select '"a"'::jsonb - 'a'; -- error +ERROR: cannot delete from scalar +select '{}'::jsonb - 'a'; + ?column? +---------- + {} +(1 row) + +select '[]'::jsonb - 'a'; + ?column? +---------- + [] +(1 row) + +select '"a"'::jsonb - 1; -- error +ERROR: cannot delete from scalar +select '{}'::jsonb - 1; -- error +ERROR: cannot delete from object using integer index +select '[]'::jsonb - 1; + ?column? +---------- + [] +(1 row) + +select '"a"'::jsonb #- '{a}'; -- error +ERROR: cannot delete path in scalar +select '{}'::jsonb #- '{a}'; + ?column? +---------- + {} +(1 row) + +select '[]'::jsonb #- '{a}'; + ?column? 
+---------- + [] +(1 row) + +select jsonb_set('"a"','{a}','"b"'); --error +ERROR: cannot set path in scalar +select jsonb_set('{}','{a}','"b"', false); + jsonb_set +----------- + {} +(1 row) + +select jsonb_set('[]','{1}','"b"', false); + jsonb_set +----------- + [] +(1 row) + +select jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0}','[2,3,4]', false); + jsonb_set +------------------------- + [[2, 3, 4], 2, null, 3] +(1 row) + +-- jsonb_set adding instead of replacing +-- prepend to array +select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,-33}','{"foo":123}'); + jsonb_set +------------------------------------------------------- + {"a": 1, "b": [{"foo": 123}, 0, 1, 2], "c": {"d": 4}} +(1 row) + +-- append to array +select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{b,33}','{"foo":123}'); + jsonb_set +------------------------------------------------------- + {"a": 1, "b": [0, 1, 2, {"foo": 123}], "c": {"d": 4}} +(1 row) + +-- check nesting levels addition +select jsonb_set('{"a":1,"b":[4,5,[0,1,2],6,7],"c":{"d":4}}','{b,2,33}','{"foo":123}'); + jsonb_set +--------------------------------------------------------------------- + {"a": 1, "b": [4, 5, [0, 1, 2, {"foo": 123}], 6, 7], "c": {"d": 4}} +(1 row) + +-- add new key +select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{c,e}','{"foo":123}'); + jsonb_set +------------------------------------------------------------ + {"a": 1, "b": [0, 1, 2], "c": {"d": 4, "e": {"foo": 123}}} +(1 row) + +-- adding doesn't do anything if elements before last aren't present +select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,-33}','{"foo":123}'); + jsonb_set +----------------------------------------- + {"a": 1, "b": [0, 1, 2], "c": {"d": 4}} +(1 row) + +select jsonb_set('{"a":1,"b":[0,1,2],"c":{"d":4}}','{x,y}','{"foo":123}'); + jsonb_set +----------------------------------------- + {"a": 1, "b": [0, 1, 2], "c": {"d": 4}} +(1 row) + +-- add to empty object +select jsonb_set('{}','{x}','{"foo":123}'); + jsonb_set +--------------------- + {"x": {"foo": 123}} +(1 row) + +--add to empty array +select jsonb_set('[]','{0}','{"foo":123}'); + jsonb_set +---------------- + [{"foo": 123}] +(1 row) + +select jsonb_set('[]','{99}','{"foo":123}'); + jsonb_set +---------------- + [{"foo": 123}] +(1 row) + +select jsonb_set('[]','{-99}','{"foo":123}'); + jsonb_set +---------------- + [{"foo": 123}] +(1 row) + +select jsonb_set('{"a": [1, 2, 3]}', '{a, non_integer}', '"new_value"'); +ERROR: path element at position 2 is not an integer: "non_integer" +select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, non_integer}', '"new_value"'); +ERROR: path element at position 3 is not an integer: "non_integer" +select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"'); +ERROR: path element at position 3 is null +-- jsonb_set_lax +\pset null NULL +-- pass though non nulls to jsonb_set +select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ; + jsonb_set_lax +------------------ + {"a": 1, "b": 5} +(1 row) + +select jsonb_set_lax('{"a":1,"b":2}','{d}','6', true) ; + jsonb_set_lax +-------------------------- + {"a": 1, "b": 2, "d": 6} +(1 row) + +-- using the default treatment +select jsonb_set_lax('{"a":1,"b":2}','{b}',null); + jsonb_set_lax +--------------------- + {"a": 1, "b": null} +(1 row) + +select jsonb_set_lax('{"a":1,"b":2}','{d}',null,true); + jsonb_set_lax +----------------------------- + {"a": 1, "b": 2, "d": null} +(1 row) + +-- errors +select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, null); +ERROR: null_value_treatment must be "delete_key", 
"return_target", "use_json_null", or "raise_exception" +select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, true, 'no_such_treatment'); +ERROR: null_value_treatment must be "delete_key", "return_target", "use_json_null", or "raise_exception" +-- explicit treatments +select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'raise_exception') as raise_exception; +ERROR: JSON value must not be null +DETAIL: Exception was raised because null_value_treatment is "raise_exception". +HINT: To avoid, either change the null_value_treatment argument or ensure that an SQL NULL is not passed. +select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'return_target') as return_target; + return_target +------------------ + {"a": 1, "b": 2} +(1 row) + +select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key; + delete_key +------------ + {"a": 1} +(1 row) + +select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null; + use_json_null +--------------------- + {"a": 1, "b": null} +(1 row) + +\pset null '' +-- jsonb_insert +select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"'); + jsonb_insert +------------------------------- + {"a": [0, "new_value", 1, 2]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"', true); + jsonb_insert +------------------------------- + {"a": [0, 1, "new_value", 2]} +(1 row) + +select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"'); + jsonb_insert +------------------------------------------------------------ + {"a": {"b": {"c": [0, 1, "new_value", "test1", "test2"]}}} +(1 row) + +select jsonb_insert('{"a": {"b": {"c": [0, 1, "test1", "test2"]}}}', '{a, b, c, 2}', '"new_value"', true); + jsonb_insert +------------------------------------------------------------ + {"a": {"b": {"c": [0, 1, "test1", "new_value", "test2"]}}} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '{"b": "value"}'); + jsonb_insert +---------------------------------- + {"a": [0, {"b": "value"}, 1, 2]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '["value1", "value2"]'); + jsonb_insert +---------------------------------------- + {"a": [0, ["value1", "value2"], 1, 2]} +(1 row) + +-- edge cases +select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"'); + jsonb_insert +------------------------------- + {"a": ["new_value", 0, 1, 2]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 0}', '"new_value"', true); + jsonb_insert +------------------------------- + {"a": [0, "new_value", 1, 2]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"'); + jsonb_insert +------------------------------- + {"a": [0, 1, "new_value", 2]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 2}', '"new_value"', true); + jsonb_insert +------------------------------- + {"a": [0, 1, 2, "new_value"]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"'); + jsonb_insert +------------------------------- + {"a": [0, 1, "new_value", 2]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, -1}', '"new_value"', true); + jsonb_insert +------------------------------- + {"a": [0, 1, 2, "new_value"]} +(1 row) + +select jsonb_insert('[]', '{1}', '"new_value"'); + jsonb_insert +--------------- + ["new_value"] +(1 row) + +select jsonb_insert('[]', '{1}', '"new_value"', true); + jsonb_insert +--------------- + ["new_value"] +(1 row) + +select jsonb_insert('{"a": []}', 
'{a, 1}', '"new_value"'); + jsonb_insert +---------------------- + {"a": ["new_value"]} +(1 row) + +select jsonb_insert('{"a": []}', '{a, 1}', '"new_value"', true); + jsonb_insert +---------------------- + {"a": ["new_value"]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, 10}', '"new_value"'); + jsonb_insert +------------------------------- + {"a": [0, 1, 2, "new_value"]} +(1 row) + +select jsonb_insert('{"a": [0,1,2]}', '{a, -10}', '"new_value"'); + jsonb_insert +------------------------------- + {"a": ["new_value", 0, 1, 2]} +(1 row) + +-- jsonb_insert should be able to insert new value for objects, but not to replace +select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"'); + jsonb_insert +----------------------------------------- + {"a": {"b": "value", "c": "new_value"}} +(1 row) + +select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true); + jsonb_insert +----------------------------------------- + {"a": {"b": "value", "c": "new_value"}} +(1 row) + +select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"'); +ERROR: cannot replace existing key +HINT: Try using the function jsonb_set to replace key value. +select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true); +ERROR: cannot replace existing key +HINT: Try using the function jsonb_set to replace key value. +-- jsonb subscript +select ('123'::jsonb)['a']; + jsonb +------- + +(1 row) + +select ('123'::jsonb)[0]; + jsonb +------- + +(1 row) + +select ('123'::jsonb)[NULL]; + jsonb +------- + +(1 row) + +select ('{"a": 1}'::jsonb)['a']; + jsonb +------- + 1 +(1 row) + +select ('{"a": 1}'::jsonb)[0]; + jsonb +------- + +(1 row) + +select ('{"a": 1}'::jsonb)['not_exist']; + jsonb +------- + +(1 row) + +select ('{"a": 1}'::jsonb)[NULL]; + jsonb +------- + +(1 row) + +select ('[1, "2", null]'::jsonb)['a']; + jsonb +------- + +(1 row) + +select ('[1, "2", null]'::jsonb)[0]; + jsonb +------- + 1 +(1 row) + +select ('[1, "2", null]'::jsonb)['1']; + jsonb +------- + "2" +(1 row) + +select ('[1, "2", null]'::jsonb)[1.0]; +ERROR: subscript type numeric is not supported +LINE 1: select ('[1, "2", null]'::jsonb)[1.0]; + ^ +HINT: jsonb subscript must be coercible to either integer or text. 
+select ('[1, "2", null]'::jsonb)[2]; + jsonb +------- + null +(1 row) + +select ('[1, "2", null]'::jsonb)[3]; + jsonb +------- + +(1 row) + +select ('[1, "2", null]'::jsonb)[-2]; + jsonb +------- + "2" +(1 row) + +select ('[1, "2", null]'::jsonb)[1]['a']; + jsonb +------- + +(1 row) + +select ('[1, "2", null]'::jsonb)[1][0]; + jsonb +------- + +(1 row) + +select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b']; + jsonb +------- + "c" +(1 row) + +select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']; + jsonb +----------- + [1, 2, 3] +(1 row) + +select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1]; + jsonb +------- + 2 +(1 row) + +select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a']; + jsonb +------- + +(1 row) + +select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']; + jsonb +--------------- + {"a2": "aaa"} +(1 row) + +select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']; + jsonb +------- + "aaa" +(1 row) + +select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3']; + jsonb +------- + +(1 row) + +select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1']; + jsonb +----------------------- + ["aaa", "bbb", "ccc"] +(1 row) + +select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2]; + jsonb +------- + "ccc" +(1 row) + +-- slices are not supported +select ('{"a": 1}'::jsonb)['a':'b']; +ERROR: jsonb subscript does not support slices +LINE 1: select ('{"a": 1}'::jsonb)['a':'b']; + ^ +select ('[1, "2", null]'::jsonb)[1:2]; +ERROR: jsonb subscript does not support slices +LINE 1: select ('[1, "2", null]'::jsonb)[1:2]; + ^ +select ('[1, "2", null]'::jsonb)[:2]; +ERROR: jsonb subscript does not support slices +LINE 1: select ('[1, "2", null]'::jsonb)[:2]; + ^ +select ('[1, "2", null]'::jsonb)[1:]; +ERROR: jsonb subscript does not support slices +LINE 1: select ('[1, "2", null]'::jsonb)[1:]; + ^ +select ('[1, "2", null]'::jsonb)[:]; +ERROR: jsonb subscript does not support slices +create TEMP TABLE test_jsonb_subscript ( + id int, + test_json jsonb +); +insert into test_jsonb_subscript values +(1, '{}'), -- empty jsonb +(2, '{"key": "value"}'); -- jsonb with data +-- update empty jsonb +update test_jsonb_subscript set test_json['a'] = '1' where id = 1; +select * from test_jsonb_subscript; + id | test_json +----+------------------ + 2 | {"key": "value"} + 1 | {"a": 1} +(2 rows) + +-- update jsonb with some data +update test_jsonb_subscript set test_json['a'] = '1' where id = 2; +select * from test_jsonb_subscript; + id | test_json +----+-------------------------- + 1 | {"a": 1} + 2 | {"a": 1, "key": "value"} +(2 rows) + +-- replace jsonb +update test_jsonb_subscript set test_json['a'] = '"test"'; +select * from test_jsonb_subscript; + id | test_json +----+------------------------------- + 1 | {"a": "test"} + 2 | {"a": "test", "key": "value"} +(2 rows) + +-- replace by object +update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb; +select * from test_jsonb_subscript; + id | test_json +----+--------------------------------- + 1 | {"a": {"b": 1}} + 2 | {"a": {"b": 1}, "key": "value"} +(2 rows) + +-- replace by array +update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb; +select * from test_jsonb_subscript; + id | test_json +----+---------------------------------- + 1 | {"a": [1, 2, 3]} + 2 | {"a": [1, 2, 3], "key": "value"} +(2 rows) + +-- use jsonb subscription in where clause +select * from 
test_jsonb_subscript where test_json['key'] = '"value"'; + id | test_json +----+---------------------------------- + 2 | {"a": [1, 2, 3], "key": "value"} +(1 row) + +select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"'; + id | test_json +----+----------- +(0 rows) + +select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"'; + id | test_json +----+----------- +(0 rows) + +-- NULL +update test_jsonb_subscript set test_json[NULL] = '1'; +ERROR: jsonb subscript in assignment must not be null +update test_jsonb_subscript set test_json['another_key'] = NULL; +select * from test_jsonb_subscript; + id | test_json +----+------------------------------------------------------- + 1 | {"a": [1, 2, 3], "another_key": null} + 2 | {"a": [1, 2, 3], "key": "value", "another_key": null} +(2 rows) + +-- NULL as jsonb source +insert into test_jsonb_subscript values (3, NULL); +update test_jsonb_subscript set test_json['a'] = '1' where id = 3; +select * from test_jsonb_subscript; + id | test_json +----+------------------------------------------------------- + 1 | {"a": [1, 2, 3], "another_key": null} + 2 | {"a": [1, 2, 3], "key": "value", "another_key": null} + 3 | {"a": 1} +(3 rows) + +update test_jsonb_subscript set test_json = NULL where id = 3; +update test_jsonb_subscript set test_json[0] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+--------------------------------------------------------------- + 1 | {"0": 1, "a": [1, 2, 3], "another_key": null} + 2 | {"0": 1, "a": [1, 2, 3], "key": "value", "another_key": null} + 3 | [1] +(3 rows) + +-- Fill the gaps logic +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '[0]'); +update test_jsonb_subscript set test_json[5] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+-------------------------------- + 1 | [0, null, null, null, null, 1] +(1 row) + +update test_jsonb_subscript set test_json[-4] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+----------------------------- + 1 | [0, null, 1, null, null, 1] +(1 row) + +update test_jsonb_subscript set test_json[-8] = '1'; +ERROR: path element at position 1 is out of range: -8 +select * from test_jsonb_subscript; + id | test_json +----+----------------------------- + 1 | [0, null, 1, null, null, 1] +(1 row) + +-- keep consistent values position +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '[]'); +update test_jsonb_subscript set test_json[5] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+----------------------------------- + 1 | [null, null, null, null, null, 1] +(1 row) + +-- create the whole path +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{}'); +update test_jsonb_subscript set test_json['a'][0]['b'][0]['c'] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+---------------------------- + 1 | {"a": [{"b": [{"c": 1}]}]} +(1 row) + +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{}'); +update test_jsonb_subscript set test_json['a'][2]['b'][2]['c'][2] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+------------------------------------------------------------------ + 1 | {"a": [null, null, {"b": [null, null, {"c": [null, null, 1]}]}]} +(1 row) + +-- create the whole path with already existing keys +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{"b": 1}'); +update test_jsonb_subscript set 
test_json['a'][0] = '2'; +select * from test_jsonb_subscript; + id | test_json +----+-------------------- + 1 | {"a": [2], "b": 1} +(1 row) + +-- the start jsonb is an object, first subscript is treated as a key +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{}'); +update test_jsonb_subscript set test_json[0]['a'] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+----------------- + 1 | {"0": {"a": 1}} +(1 row) + +-- the start jsonb is an array +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '[]'); +update test_jsonb_subscript set test_json[0]['a'] = '1'; +update test_jsonb_subscript set test_json[2]['b'] = '2'; +select * from test_jsonb_subscript; + id | test_json +----+---------------------------- + 1 | [{"a": 1}, null, {"b": 2}] +(1 row) + +-- overwriting an existing path +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{}'); +update test_jsonb_subscript set test_json['a']['b'][1] = '1'; +update test_jsonb_subscript set test_json['a']['b'][10] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+---------------------------------------------------------------------------- + 1 | {"a": {"b": [null, 1, null, null, null, null, null, null, null, null, 1]}} +(1 row) + +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '[]'); +update test_jsonb_subscript set test_json[0][0][0] = '1'; +update test_jsonb_subscript set test_json[0][0][1] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+------------ + 1 | [[[1, 1]]] +(1 row) + +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{}'); +update test_jsonb_subscript set test_json['a']['b'][10] = '1'; +update test_jsonb_subscript set test_json['a'][10][10] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+------------------------------------------------------------------------------------------------------------------------------------------------------ + 1 | {"a": {"b": [null, null, null, null, null, null, null, null, null, null, 1], "10": [null, null, null, null, null, null, null, null, null, null, 1]}} +(1 row) + +-- an empty sub element +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{"a": {}}'); +update test_jsonb_subscript set test_json['a']['b']['c'][2] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+-------------------------------------- + 1 | {"a": {"b": {"c": [null, null, 1]}}} +(1 row) + +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{"a": []}'); +update test_jsonb_subscript set test_json['a'][1]['c'][2] = '1'; +select * from test_jsonb_subscript; + id | test_json +----+--------------------------------------- + 1 | {"a": [null, {"c": [null, null, 1]}]} +(1 row) + +-- trying replace assuming a composite object, but it's an element or a value +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, '{"a": 1}'); +update test_jsonb_subscript set test_json['a']['b'] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. +update test_jsonb_subscript set test_json['a']['b']['c'] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. 
+update test_jsonb_subscript set test_json['a'][0] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. +update test_jsonb_subscript set test_json['a'][0]['c'] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. +update test_jsonb_subscript set test_json['a'][0][0] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. +-- trying replace assuming a composite object, but it's a raw scalar +delete from test_jsonb_subscript; +insert into test_jsonb_subscript values (1, 'null'); +update test_jsonb_subscript set test_json[0] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. +update test_jsonb_subscript set test_json[0][0] = '1'; +ERROR: cannot replace existing key +DETAIL: The path assumes key is a composite object, but it is a scalar value. +-- try some things with short-header and toasted subscript values +drop table test_jsonb_subscript; +create temp table test_jsonb_subscript ( + id text, + test_json jsonb +); +insert into test_jsonb_subscript values('foo', '{"foo": "bar"}'); +insert into test_jsonb_subscript + select s, ('{"' || s || '": "bar"}')::jsonb from repeat('xyzzy', 500) s; +select length(id), test_json[id] from test_jsonb_subscript; + length | test_json +--------+----------- + 3 | "bar" + 2500 | "bar" +(2 rows) + +update test_jsonb_subscript set test_json[id] = '"baz"'; +select length(id), test_json[id] from test_jsonb_subscript; + length | test_json +--------+----------- + 3 | "baz" + 2500 | "baz" +(2 rows) + +\x +table test_jsonb_subscript; +-[ RECORD 1 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +id | foo +test_json | {"foo": "baz"} +-[ RECORD 2 ]-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +id | 
xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy +test_json | 
{"xyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzyxyzzy": "baz"} + +\x +-- jsonb to tsvector +select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); + to_tsvector +--------------------------------------------------------------------------- + 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 +(1 row) + +-- jsonb to tsvector with config +select to_tsvector('simple', '{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); + to_tsvector +--------------------------------------------------------------------------- + 'aaa':1 'bbb':2 'ccc':4 'ddd':3 'eee':6 'fff':7 'ggg':8 'hhh':10 'iii':11 +(1 row) + +-- jsonb to tsvector with stop words +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": ["the eee fff ggg"], "c": {"d": "hhh. 
iii"}}'::jsonb); + to_tsvector +---------------------------------------------------------------------------- + 'aaa':1 'bbb':3 'ccc':5 'ddd':4 'eee':8 'fff':9 'ggg':10 'hhh':12 'iii':13 +(1 row) + +-- jsonb to tsvector with numeric values +select to_tsvector('english', '{"a": "aaa in bbb ddd ccc", "b": 123, "c": 456}'::jsonb); + to_tsvector +--------------------------------- + 'aaa':1 'bbb':3 'ccc':5 'ddd':4 +(1 row) + +-- jsonb_to_tsvector +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); + jsonb_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); + jsonb_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); + jsonb_to_tsvector +------------------- + 'aaa':1 'bbb':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); + jsonb_to_tsvector +------------------- + '123':1 '456':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); + jsonb_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); + jsonb_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"all"'); + jsonb_to_tsvector +---------------------------------------------------------------------------------------- + '123':8 '456':12 'aaa':2 'b':6 'bbb':4 'c':10 'd':14 'f':18 'fals':20 'g':22 'true':16 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"key"'); + jsonb_to_tsvector +-------------------------------- + 'b':2 'c':4 'd':6 'f':8 'g':10 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"string"'); + jsonb_to_tsvector +------------------- + 'aaa':1 'bbb':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"numeric"'); + jsonb_to_tsvector +------------------- + '123':1 '456':3 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '"boolean"'); + jsonb_to_tsvector +------------------- + 'fals':3 'true':1 +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["string", "numeric"]'); + jsonb_to_tsvector +--------------------------------- + '123':5 '456':7 'aaa':1 'bbb':3 +(1 row) + +-- to_tsvector corner cases +select to_tsvector('""'::jsonb); + to_tsvector +------------- + +(1 row) + +select to_tsvector('{}'::jsonb); + to_tsvector +------------- + +(1 row) + +select to_tsvector('[]'::jsonb); + to_tsvector +------------- 
+ +(1 row) + +select to_tsvector('null'::jsonb); + to_tsvector +------------- + +(1 row) + +-- jsonb_to_tsvector corner cases +select jsonb_to_tsvector('""'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('{}'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('[]'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('null'::jsonb, '"all"'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '""'); +ERROR: wrong flag in flag array: "" +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '{}'); +ERROR: wrong flag type, only arrays and scalars are allowed +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '[]'); + jsonb_to_tsvector +------------------- + +(1 row) + +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, 'null'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +select jsonb_to_tsvector('english', '{"a": "aaa in bbb", "b": 123, "c": 456, "d": true, "f": false, "g": null}'::jsonb, '["all", null]'); +ERROR: flag array element is not a string +HINT: Possible values are: "string", "numeric", "boolean", "key", and "all". +-- ts_headline for jsonb +select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); + ts_headline +------------------------------------------------------------------------------------------------------------------ + {"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]} +(1 row) + +select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh')); + ts_headline +----------------------------------------------------------------------------------------------- + {"a": "aaa bbb", "b": {"c": "ccc ddd fff"}, "d": ["ggg hhh", "iii jjj"]} +(1 row) + +select ts_headline('{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); + ts_headline +--------------------------------------------------------------------------------------------------- + {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]} +(1 row) + +select ts_headline('english', '{"a": "aaa bbb", "b": {"c": "ccc ddd fff", "c1": "ccc1 ddd1"}, "d": ["ggg hhh", "iii jjj"]}'::jsonb, tsquery('bbb & ddd & hhh'), 'StartSel = <, StopSel = >'); + ts_headline +--------------------------------------------------------------------------------------------------- + {"a": "aaa ", "b": {"c": "ccc fff", "c1": "ccc1 ddd1"}, "d": ["ggg ", "iii jjj"]} +(1 row) + +-- corner cases for ts_headline with jsonb +select ts_headline('null'::jsonb, tsquery('aaa & bbb')); + ts_headline +------------- + null +(1 row) + +select ts_headline('{}'::jsonb, tsquery('aaa & bbb')); + ts_headline +------------- + {} +(1 row) + +select ts_headline('[]'::jsonb, tsquery('aaa & bbb')); + ts_headline +------------- + [] +(1 
row) + +-- casts +select 'true'::jsonb::bool; + bool +------ + t +(1 row) + +select '[]'::jsonb::bool; +ERROR: cannot cast jsonb array to type boolean +select '1.0'::jsonb::float; + float8 +-------- + 1 +(1 row) + +select '[1.0]'::jsonb::float; +ERROR: cannot cast jsonb array to type double precision +select '12345'::jsonb::int4; + int4 +------- + 12345 +(1 row) + +select '"hello"'::jsonb::int4; +ERROR: cannot cast jsonb string to type integer +select '12345'::jsonb::numeric; + numeric +--------- + 12345 +(1 row) + +select '{}'::jsonb::numeric; +ERROR: cannot cast jsonb object to type numeric +select '12345.05'::jsonb::numeric; + numeric +---------- + 12345.05 +(1 row) + +select '12345.05'::jsonb::float4; + float4 +---------- + 12345.05 +(1 row) + +select '12345.05'::jsonb::float8; + float8 +---------- + 12345.05 +(1 row) + +select '12345.05'::jsonb::int2; + int2 +------- + 12345 +(1 row) + +select '12345.05'::jsonb::int4; + int4 +------- + 12345 +(1 row) + +select '12345.05'::jsonb::int8; + int8 +------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::numeric; + numeric +------------------------------------------------------ + 12345.0000000000000000000000000000000000000000000005 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::float4; + float4 +-------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::float8; + float8 +-------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int2; + int2 +------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int4; + int4 +------- + 12345 +(1 row) + +select '12345.0000000000000000000000000000000000000000000005'::jsonb::int8; + int8 +------- + 12345 +(1 row) + diff --git a/src/test/regress/expected/jsonb_jsonpath.out b/src/test/regress/expected/jsonb_jsonpath.out new file mode 100644 index 0000000..6659bc9 --- /dev/null +++ b/src/test/regress/expected/jsonb_jsonpath.out @@ -0,0 +1,2586 @@ +select jsonb '{"a": 12}' @? '$'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": 12}' @? '1'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": 12}' @? '$.a.b'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": 12}' @? '$.b'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": 12}' @? '$.a + 2'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": 12}' @? '$.b + 2'; + ?column? +---------- + +(1 row) + +select jsonb '{"a": {"a": 12}}' @? '$.a.a'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"a": 12}}' @? '$.*.a'; + ?column? +---------- + t +(1 row) + +select jsonb '{"b": {"a": 12}}' @? '$.*.a'; + ?column? +---------- + t +(1 row) + +select jsonb '{"b": {"a": 12}}' @? '$.*.b'; + ?column? +---------- + f +(1 row) + +select jsonb '{"b": {"a": 12}}' @? 'strict $.*.b'; + ?column? +---------- + +(1 row) + +select jsonb '{}' @? '$.*'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": 1}' @? '$.*'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? 'lax $.**{1}'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? 'lax $.**{2}'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? 'lax $.**{3}'; + ?column? +---------- + f +(1 row) + +select jsonb '[]' @? '$[*]'; + ?column? +---------- + f +(1 row) + +select jsonb '[1]' @? '$[*]'; + ?column? +---------- + t +(1 row) + +select jsonb '[1]' @? '$[1]'; + ?column? 
+---------- + f +(1 row) + +select jsonb '[1]' @? 'strict $[1]'; + ?column? +---------- + +(1 row) + +select jsonb_path_query('[1]', 'strict $[1]'); +ERROR: jsonpath array subscript is out of bounds +select jsonb_path_query('[1]', 'strict $[1]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb '[1]' @? 'lax $[10000000000000000]'; + ?column? +---------- + +(1 row) + +select jsonb '[1]' @? 'strict $[10000000000000000]'; + ?column? +---------- + +(1 row) + +select jsonb_path_query('[1]', 'lax $[10000000000000000]'); +ERROR: jsonpath array subscript is out of integer range +select jsonb_path_query('[1]', 'strict $[10000000000000000]'); +ERROR: jsonpath array subscript is out of integer range +select jsonb '[1]' @? '$[0]'; + ?column? +---------- + t +(1 row) + +select jsonb '[1]' @? '$[0.3]'; + ?column? +---------- + t +(1 row) + +select jsonb '[1]' @? '$[0.5]'; + ?column? +---------- + t +(1 row) + +select jsonb '[1]' @? '$[0.9]'; + ?column? +---------- + t +(1 row) + +select jsonb '[1]' @? '$[1.2]'; + ?column? +---------- + f +(1 row) + +select jsonb '[1]' @? 'strict $[1.2]'; + ?column? +---------- + +(1 row) + +select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] > @.b[*])'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": [1,2,3], "b": [3,4,5]}' @? '$ ? (@.a[*] >= @.b[*])'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? '$ ? (@.a[*] >= @.b[*])'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": [1,2,3], "b": [3,4,"5"]}' @? 'strict $ ? (@.a[*] >= @.b[*])'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": [1,2,3], "b": [3,4,null]}' @? '$ ? (@.a[*] >= @.b[*])'; + ?column? +---------- + t +(1 row) + +select jsonb '1' @? '$ ? ((@ == "1") is unknown)'; + ?column? +---------- + t +(1 row) + +select jsonb '1' @? '$ ? ((@ == 1) is unknown)'; + ?column? +---------- + f +(1 row) + +select jsonb '[{"a": 1}, {"a": 2}]' @? '$[0 to 1] ? (@.a > 1)'; + ?column? 
+---------- + t +(1 row) + +select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => false); + jsonb_path_exists +------------------- + t +(1 row) + +select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'lax $[*].a', silent => true); + jsonb_path_exists +------------------- + t +(1 row) + +select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => false); +ERROR: jsonpath member accessor can only be applied to an object +select jsonb_path_exists('[{"a": 1}, {"a": 2}, 3]', 'strict $[*].a', silent => true); + jsonb_path_exists +------------------- + +(1 row) + +select jsonb_path_query('1', 'lax $.a'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('1', 'strict $.a'); +ERROR: jsonpath member accessor can only be applied to an object +select jsonb_path_query('1', 'strict $.*'); +ERROR: jsonpath wildcard member accessor can only be applied to an object +select jsonb_path_query('1', 'strict $.a', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('1', 'strict $.*', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'lax $.a'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'strict $.a'); +ERROR: jsonpath member accessor can only be applied to an object +select jsonb_path_query('[]', 'strict $.a', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{}', 'lax $.a'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{}', 'strict $.a'); +ERROR: JSON object does not contain key "a" +select jsonb_path_query('{}', 'strict $.a', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('1', 'strict $[1]'); +ERROR: jsonpath array accessor can only be applied to an array +select jsonb_path_query('1', 'strict $[*]'); +ERROR: jsonpath wildcard array accessor can only be applied to an array +select jsonb_path_query('[]', 'strict $[1]'); +ERROR: jsonpath array subscript is out of bounds +select jsonb_path_query('[]', 'strict $["a"]'); +ERROR: jsonpath array subscript is not a single numeric value +select jsonb_path_query('1', 'strict $[1]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('1', 'strict $[*]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'strict $[1]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'strict $["a"]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.a'); + jsonb_path_query +------------------ + 12 +(1 row) + +select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.b'); + jsonb_path_query +------------------ + {"a": 13} +(1 row) + +select jsonb_path_query('{"a": 12, "b": {"a": 13}}', '$.*'); + jsonb_path_query +------------------ + 12 + {"a": 13} +(2 rows) + +select jsonb_path_query('{"a": 12, "b": {"a": 13}}', 'lax $.*.a'); + jsonb_path_query +------------------ + 13 +(1 row) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].a'); + jsonb_path_query +------------------ + 13 +(1 row) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[*].*'); + jsonb_path_query +------------------ + 13 + 14 +(2 rows) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0].a'); + jsonb_path_query +------------------ +(0 rows) + +select 
jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[1].a'); + jsonb_path_query +------------------ + 13 +(1 row) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[2].a'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0,1].a'); + jsonb_path_query +------------------ + 13 +(1 row) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10].a'); + jsonb_path_query +------------------ + 13 +(1 row) + +select jsonb_path_query('[12, {"a": 13}, {"b": 14}]', 'lax $[0 to 10 / 0].a'); +ERROR: division by zero +select jsonb_path_query('[12, {"a": 13}, {"b": 14}, "ccc", true]', '$[2.5 - 1 to $.size() - 2]'); + jsonb_path_query +------------------ + {"a": 13} + {"b": 14} + "ccc" +(3 rows) + +select jsonb_path_query('1', 'lax $[0]'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('1', 'lax $[*]'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('[1]', 'lax $[0]'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('[1]', 'lax $[*]'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('[1,2,3]', 'lax $[*]'); + jsonb_path_query +------------------ + 1 + 2 + 3 +(3 rows) + +select jsonb_path_query('[1,2,3]', 'strict $[*].a'); +ERROR: jsonpath member accessor can only be applied to an object +select jsonb_path_query('[1,2,3]', 'strict $[*].a', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', '$[last]'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', '$[last ? (exists(last))]'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'strict $[last]'); +ERROR: jsonpath array subscript is out of bounds +select jsonb_path_query('[]', 'strict $[last]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[1]', '$[last]'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('[1,2,3]', '$[last]'); + jsonb_path_query +------------------ + 3 +(1 row) + +select jsonb_path_query('[1,2,3]', '$[last - 1]'); + jsonb_path_query +------------------ + 2 +(1 row) + +select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "number")]'); + jsonb_path_query +------------------ + 3 +(1 row) + +select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]'); +ERROR: jsonpath array subscript is not a single numeric value +select jsonb_path_query('[1,2,3]', '$[last ? (@.type() == "string")]', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select * from jsonb_path_query('{"a": 10}', '$'); + jsonb_path_query +------------------ + {"a": 10} +(1 row) + +select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)'); +ERROR: could not find jsonpath variable "value" +select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '1'); +ERROR: "vars" argument is not an object +DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. +select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '[{"value" : 13}]'); +ERROR: "vars" argument is not an object +DETAIL: Jsonpath parameters should be encoded as key-value pairs of "vars" object. +select * from jsonb_path_query('{"a": 10}', '$ ? (@.a < $value)', '{"value" : 13}'); + jsonb_path_query +------------------ + {"a": 10} +(1 row) + +select * from jsonb_path_query('{"a": 10}', '$ ? 
(@.a < $value)', '{"value" : 8}'); + jsonb_path_query +------------------ +(0 rows) + +select * from jsonb_path_query('{"a": 10}', '$.a ? (@ < $value)', '{"value" : 13}'); + jsonb_path_query +------------------ + 10 +(1 row) + +select * from jsonb_path_query('[10,11,12,13,14,15]', '$[*] ? (@ < $value)', '{"value" : 13}'); + jsonb_path_query +------------------ + 10 + 11 + 12 +(3 rows) + +select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0,1] ? (@ < $x.value)', '{"x": {"value" : 13}}'); + jsonb_path_query +------------------ + 10 + 11 +(2 rows) + +select * from jsonb_path_query('[10,11,12,13,14,15]', '$[0 to 2] ? (@ < $value)', '{"value" : 15}'); + jsonb_path_query +------------------ + 10 + 11 + 12 +(3 rows) + +select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == "1")'); + jsonb_path_query +------------------ + "1" +(1 row) + +select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : "1"}'); + jsonb_path_query +------------------ + "1" +(1 row) + +select * from jsonb_path_query('[1,"1",2,"2",null]', '$[*] ? (@ == $value)', '{"value" : null}'); + jsonb_path_query +------------------ + null +(1 row) + +select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ != null)'); + jsonb_path_query +------------------ + 1 + "2" +(2 rows) + +select * from jsonb_path_query('[1, "2", null]', '$[*] ? (@ == null)'); + jsonb_path_query +------------------ + null +(1 row) + +select * from jsonb_path_query('{}', '$ ? (@ == @)'); + jsonb_path_query +------------------ +(0 rows) + +select * from jsonb_path_query('[]', 'strict $ ? (@ == @)'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**'); + jsonb_path_query +------------------ + {"a": {"b": 1}} + {"b": 1} + 1 +(3 rows) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}'); + jsonb_path_query +------------------ + {"a": {"b": 1}} +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}'); + jsonb_path_query +------------------ + {"a": {"b": 1}} + {"b": 1} + 1 +(3 rows) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}'); + jsonb_path_query +------------------ + {"b": 1} +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}'); + jsonb_path_query +------------------ + {"b": 1} + 1 +(2 rows) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2}'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{2 to last}'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{3 to last}'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{last}'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0}.b ? (@ > 0)'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{0 to last}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to last}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"b": 1}}', 'lax $.**{1 to 2}.b ? 
(@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0}.b ? (@ > 0)'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1}.b ? (@ > 0)'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{0 to last}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to last}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{1 to 2}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb_path_query('{"a": {"c": {"b": 1}}}', 'lax $.**{2 to 3}.b ? (@ > 0)'); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb '{"a": {"b": 1}}' @? '$.**.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? '$.**{0}.b ? ( @ > 0)'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": {"b": 1}}' @? '$.**{1}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? '$.**{0 to last}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? '$.**{1 to last}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"b": 1}}' @? '$.**{1 to 2}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0}.b ? ( @ > 0)'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1}.b ? ( @ > 0)'; + ?column? +---------- + f +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{0 to last}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to last}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{1 to 2}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": {"c": {"b": 1}}}' @? '$.**{2 to 3}.b ? ( @ > 0)'; + ?column? +---------- + t +(1 row) + +select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x))'); + jsonb_path_query +------------------ + {"x": 2} +(1 row) + +select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.y))'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"g": {"x": 2}}', '$.g ? (exists (@.x ? (@ >= 2) ))'); + jsonb_path_query +------------------ + {"x": 2} +(1 row) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x))'); + jsonb_path_query +------------------ + {"x": 2} +(1 row) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? (exists (@.x + "3"))'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'lax $.g ? ((exists (@.x + "3")) is unknown)'); + jsonb_path_query +------------------ + {"x": 2} + {"y": 3} +(2 rows) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? (exists (@.x))'); + jsonb_path_query +------------------ + {"x": 2} +(1 row) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g[*] ? 
((exists (@.x)) is unknown)'); + jsonb_path_query +------------------ + {"y": 3} +(1 row) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? (exists (@[*].x))'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"g": [{"x": 2}, {"y": 3}]}', 'strict $.g ? ((exists (@[*].x)) is unknown)'); + jsonb_path_query +---------------------- + [{"x": 2}, {"y": 3}] +(1 row) + +--test ternary logic +select + x, y, + jsonb_path_query( + '[true, false, null]', + '$[*] ? (@ == true && ($x == true && $y == true) || + @ == false && !($x == true && $y == true) || + @ == null && ($x == true && $y == true) is unknown)', + jsonb_build_object('x', x, 'y', y) + ) as "x && y" +from + (values (jsonb 'true'), ('false'), ('"null"')) x(x), + (values (jsonb 'true'), ('false'), ('"null"')) y(y); + x | y | x && y +--------+--------+-------- + true | true | true + true | false | false + true | "null" | null + false | true | false + false | false | false + false | "null" | false + "null" | true | null + "null" | false | false + "null" | "null" | null +(9 rows) + +select + x, y, + jsonb_path_query( + '[true, false, null]', + '$[*] ? (@ == true && ($x == true || $y == true) || + @ == false && !($x == true || $y == true) || + @ == null && ($x == true || $y == true) is unknown)', + jsonb_build_object('x', x, 'y', y) + ) as "x || y" +from + (values (jsonb 'true'), ('false'), ('"null"')) x(x), + (values (jsonb 'true'), ('false'), ('"null"')) y(y); + x | y | x || y +--------+--------+-------- + true | true | true + true | false | true + true | "null" | true + false | true | true + false | false | false + false | "null" | null + "null" | true | true + "null" | false | null + "null" | "null" | null +(9 rows) + +select jsonb '{"a": 1, "b":1}' @? '$ ? (@.a == @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": 1, "b":1}}' @? '$ ? (@.a == @.b)'; + ?column? +---------- + f +(1 row) + +select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? (@.a == @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": 1, "b":1}}' @? '$.c ? ($.c.a == @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": 1, "b":1}}' @? '$.* ? (@.a == @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": 1, "b":1}' @? '$.** ? (@.a == @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": 1, "b":1}}' @? '$.** ? (@.a == @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == 1 + 1)'); + jsonb_path_query +------------------ + {"a": 2, "b": 1} +(1 row) + +select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (1 + 1))'); + jsonb_path_query +------------------ + {"a": 2, "b": 1} +(1 row) + +select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == @.b + 1)'); + jsonb_path_query +------------------ + {"a": 2, "b": 1} +(1 row) + +select jsonb_path_query('{"c": {"a": 2, "b":1}}', '$.** ? (@.a == (@.b + 1))'); + jsonb_path_query +------------------ + {"a": 2, "b": 1} +(1 row) + +select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - 1)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -1)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == -@.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": -1, "b":1}}' @? '$.** ? (@.a == - @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - @.b)'; + ?column? 
+---------- + t +(1 row) + +select jsonb '{"c": {"a": 2, "b":1}}' @? '$.** ? (@.a == 1 - - @.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '{"c": {"a": 0, "b":1}}' @? '$.** ? (@.a == 1 - +@.b)'; + ?column? +---------- + t +(1 row) + +select jsonb '[1,2,3]' @? '$ ? (+@[*] > +2)'; + ?column? +---------- + t +(1 row) + +select jsonb '[1,2,3]' @? '$ ? (+@[*] > +3)'; + ?column? +---------- + f +(1 row) + +select jsonb '[1,2,3]' @? '$ ? (-@[*] < -2)'; + ?column? +---------- + t +(1 row) + +select jsonb '[1,2,3]' @? '$ ? (-@[*] < -3)'; + ?column? +---------- + f +(1 row) + +select jsonb '1' @? '$ ? ($ > 0)'; + ?column? +---------- + t +(1 row) + +-- arithmetic errors +select jsonb_path_query('[1,2,0,3]', '$[*] ? (2 / @ > 0)'); + jsonb_path_query +------------------ + 1 + 2 + 3 +(3 rows) + +select jsonb_path_query('[1,2,0,3]', '$[*] ? ((2 / @ > 0) is unknown)'); + jsonb_path_query +------------------ + 0 +(1 row) + +select jsonb_path_query('0', '1 / $'); +ERROR: division by zero +select jsonb_path_query('0', '1 / $ + 2'); +ERROR: division by zero +select jsonb_path_query('0', '-(3 + 1 % $)'); +ERROR: division by zero +select jsonb_path_query('1', '$ + "2"'); +ERROR: right operand of jsonpath operator + is not a single numeric value +select jsonb_path_query('[1, 2]', '3 * $'); +ERROR: right operand of jsonpath operator * is not a single numeric value +select jsonb_path_query('"a"', '-$'); +ERROR: operand of unary jsonpath operator - is not a numeric value +select jsonb_path_query('[1,"2",3]', '+$'); +ERROR: operand of unary jsonpath operator + is not a numeric value +select jsonb_path_query('1', '$ + "2"', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[1, 2]', '3 * $', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('"a"', '-$', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[1,"2",3]', '+$', silent => true); + jsonb_path_query +------------------ + 1 +(1 row) + +select jsonb '["1",2,0,3]' @? '-$[*]'; + ?column? +---------- + t +(1 row) + +select jsonb '[1,"2",0,3]' @? '-$[*]'; + ?column? +---------- + t +(1 row) + +select jsonb '["1",2,0,3]' @? 'strict -$[*]'; + ?column? +---------- + +(1 row) + +select jsonb '[1,"2",0,3]' @? 'strict -$[*]'; + ?column? +---------- + +(1 row) + +-- unwrapping of operator arguments in lax mode +select jsonb_path_query('{"a": [2]}', 'lax $.a * 3'); + jsonb_path_query +------------------ + 6 +(1 row) + +select jsonb_path_query('{"a": [2]}', 'lax $.a + 3'); + jsonb_path_query +------------------ + 5 +(1 row) + +select jsonb_path_query('{"a": [2, 3, 4]}', 'lax -$.a'); + jsonb_path_query +------------------ + -2 + -3 + -4 +(3 rows) + +-- should fail +select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3'); +ERROR: left operand of jsonpath operator * is not a single numeric value +select jsonb_path_query('{"a": [1, 2]}', 'lax $.a * 3', silent => true); + jsonb_path_query +------------------ +(0 rows) + +-- extension: boolean expressions +select jsonb_path_query('2', '$ > 1'); + jsonb_path_query +------------------ + true +(1 row) + +select jsonb_path_query('2', '$ <= 1'); + jsonb_path_query +------------------ + false +(1 row) + +select jsonb_path_query('2', '$ == "2"'); + jsonb_path_query +------------------ + null +(1 row) + +select jsonb '2' @? '$ == "2"'; + ?column? +---------- + t +(1 row) + +select jsonb '2' @@ '$ > 1'; + ?column? +---------- + t +(1 row) + +select jsonb '2' @@ '$ <= 1'; + ?column? 
+---------- + f +(1 row) + +select jsonb '2' @@ '$ == "2"'; + ?column? +---------- + +(1 row) + +select jsonb '2' @@ '1'; + ?column? +---------- + +(1 row) + +select jsonb '{}' @@ '$'; + ?column? +---------- + +(1 row) + +select jsonb '[]' @@ '$'; + ?column? +---------- + +(1 row) + +select jsonb '[1,2,3]' @@ '$[*]'; + ?column? +---------- + +(1 row) + +select jsonb '[]' @@ '$[*]'; + ?column? +---------- + +(1 row) + +select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] > $x) [1]', '{"x": 1}'); + jsonb_path_match +------------------ + f +(1 row) + +select jsonb_path_match('[[1, true], [2, false]]', 'strict $[*] ? (@[0] < $x) [1]', '{"x": 2}'); + jsonb_path_match +------------------ + t +(1 row) + +select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => false); + jsonb_path_match +------------------ + t +(1 row) + +select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'lax exists($[*].a)', silent => true); + jsonb_path_match +------------------ + t +(1 row) + +select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => false); + jsonb_path_match +------------------ + +(1 row) + +select jsonb_path_match('[{"a": 1}, {"a": 2}, 3]', 'strict exists($[*].a)', silent => true); + jsonb_path_match +------------------ + +(1 row) + +select jsonb_path_query('[null,1,true,"a",[],{}]', '$.type()'); + jsonb_path_query +------------------ + "array" +(1 row) + +select jsonb_path_query('[null,1,true,"a",[],{}]', 'lax $.type()'); + jsonb_path_query +------------------ + "array" +(1 row) + +select jsonb_path_query('[null,1,true,"a",[],{}]', '$[*].type()'); + jsonb_path_query +------------------ + "null" + "number" + "boolean" + "string" + "array" + "object" +(6 rows) + +select jsonb_path_query('null', 'null.type()'); + jsonb_path_query +------------------ + "null" +(1 row) + +select jsonb_path_query('null', 'true.type()'); + jsonb_path_query +------------------ + "boolean" +(1 row) + +select jsonb_path_query('null', '(123).type()'); + jsonb_path_query +------------------ + "number" +(1 row) + +select jsonb_path_query('null', '"123".type()'); + jsonb_path_query +------------------ + "string" +(1 row) + +select jsonb_path_query('{"a": 2}', '($.a - 5).abs() + 10'); + jsonb_path_query +------------------ + 13 +(1 row) + +select jsonb_path_query('{"a": 2.5}', '-($.a * $.a).floor() % 4.3'); + jsonb_path_query +------------------ + -1.7 +(1 row) + +select jsonb_path_query('[1, 2, 3]', '($[*] > 2) ? 
(@ == true)'); + jsonb_path_query +------------------ + true +(1 row) + +select jsonb_path_query('[1, 2, 3]', '($[*] > 3).type()'); + jsonb_path_query +------------------ + "boolean" +(1 row) + +select jsonb_path_query('[1, 2, 3]', '($[*].a > 3).type()'); + jsonb_path_query +------------------ + "boolean" +(1 row) + +select jsonb_path_query('[1, 2, 3]', 'strict ($[*].a > 3).type()'); + jsonb_path_query +------------------ + "null" +(1 row) + +select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()'); +ERROR: jsonpath item method .size() can only be applied to an array +select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'strict $[*].size()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[1,null,true,"11",[],[1],[1,2,3],{},{"a":1,"b":2}]', 'lax $[*].size()'); + jsonb_path_query +------------------ + 1 + 1 + 1 + 1 + 0 + 1 + 3 + 1 + 1 +(9 rows) + +select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].abs()'); + jsonb_path_query +------------------ + 0 + 1 + 2 + 3.4 + 5.6 +(5 rows) + +select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].floor()'); + jsonb_path_query +------------------ + 0 + 1 + -2 + -4 + 5 +(5 rows) + +select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling()'); + jsonb_path_query +------------------ + 0 + 1 + -2 + -3 + 6 +(5 rows) + +select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs()'); + jsonb_path_query +------------------ + 0 + 1 + 2 + 3 + 6 +(5 rows) + +select jsonb_path_query('[0, 1, -2, -3.4, 5.6]', '$[*].ceiling().abs().type()'); + jsonb_path_query +------------------ + "number" + "number" + "number" + "number" + "number" +(5 rows) + +select jsonb_path_query('[{},1]', '$[*].keyvalue()'); +ERROR: jsonpath item method .keyvalue() can only be applied to an object +select jsonb_path_query('[{},1]', '$[*].keyvalue()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{}', '$.keyvalue()'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{"a": 1, "b": [1, 2], "c": {"a": "bbb"}}', '$.keyvalue()'); + jsonb_path_query +---------------------------------------------- + {"id": 0, "key": "a", "value": 1} + {"id": 0, "key": "b", "value": [1, 2]} + {"id": 0, "key": "c", "value": {"a": "bbb"}} +(3 rows) + +select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', '$[*].keyvalue()'); + jsonb_path_query +----------------------------------------------- + {"id": 12, "key": "a", "value": 1} + {"id": 12, "key": "b", "value": [1, 2]} + {"id": 72, "key": "c", "value": {"a": "bbb"}} +(3 rows) + +select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue()'); +ERROR: jsonpath item method .keyvalue() can only be applied to an object +select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'lax $.keyvalue()'); + jsonb_path_query +----------------------------------------------- + {"id": 12, "key": "a", "value": 1} + {"id": 12, "key": "b", "value": [1, 2]} + {"id": 72, "key": "c", "value": {"a": "bbb"}} +(3 rows) + +select jsonb_path_query('[{"a": 1, "b": [1, 2]}, {"c": {"a": "bbb"}}]', 'strict $.keyvalue().a'); +ERROR: jsonpath item method .keyvalue() can only be applied to an object +select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue()'; + ?column? +---------- + t +(1 row) + +select jsonb '{"a": 1, "b": [1, 2]}' @? 'lax $.keyvalue().key'; + ?column? 
+---------- + t +(1 row) + +select jsonb_path_query('null', '$.double()'); +ERROR: jsonpath item method .double() can only be applied to a string or numeric value +select jsonb_path_query('true', '$.double()'); +ERROR: jsonpath item method .double() can only be applied to a string or numeric value +select jsonb_path_query('null', '$.double()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('true', '$.double()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', '$.double()'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'strict $.double()'); +ERROR: jsonpath item method .double() can only be applied to a string or numeric value +select jsonb_path_query('{}', '$.double()'); +ERROR: jsonpath item method .double() can only be applied to a string or numeric value +select jsonb_path_query('[]', 'strict $.double()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{}', '$.double()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('1.23', '$.double()'); + jsonb_path_query +------------------ + 1.23 +(1 row) + +select jsonb_path_query('"1.23"', '$.double()'); + jsonb_path_query +------------------ + 1.23 +(1 row) + +select jsonb_path_query('"1.23aaa"', '$.double()'); +ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +select jsonb_path_query('1e1000', '$.double()'); +ERROR: numeric argument of jsonpath item method .double() is out of range for type double precision +select jsonb_path_query('"nan"', '$.double()'); +ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +select jsonb_path_query('"NaN"', '$.double()'); +ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +select jsonb_path_query('"inf"', '$.double()'); +ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +select jsonb_path_query('"-inf"', '$.double()'); +ERROR: string argument of jsonpath item method .double() is not a valid representation of a double precision number +select jsonb_path_query('"inf"', '$.double()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('"-inf"', '$.double()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('{}', '$.abs()'); +ERROR: jsonpath item method .abs() can only be applied to a numeric value +select jsonb_path_query('true', '$.floor()'); +ERROR: jsonpath item method .floor() can only be applied to a numeric value +select jsonb_path_query('"1.2"', '$.ceiling()'); +ERROR: jsonpath item method .ceiling() can only be applied to a numeric value +select jsonb_path_query('{}', '$.abs()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('true', '$.floor()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('"1.2"', '$.ceiling()', silent => true); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('["", "a", "abc", "abcabc"]', '$[*] ? (@ starts with "abc")'); + jsonb_path_query +------------------ + "abc" + "abcabc" +(2 rows) + +select jsonb_path_query('["", "a", "abc", "abcabc"]', 'strict $ ? 
(@[*] starts with "abc")'); + jsonb_path_query +---------------------------- + ["", "a", "abc", "abcabc"] +(1 row) + +select jsonb_path_query('["", "a", "abd", "abdabc"]', 'strict $ ? (@[*] starts with "abc")'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? (@[*] starts with "abc")'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('["abc", "abcabc", null, 1]', 'strict $ ? ((@[*] starts with "abc") is unknown)'); + jsonb_path_query +---------------------------- + ["abc", "abcabc", null, 1] +(1 row) + +select jsonb_path_query('[[null, 1, "abc", "abcabc"]]', 'lax $ ? (@[*] starts with "abc")'); + jsonb_path_query +---------------------------- + [null, 1, "abc", "abcabc"] +(1 row) + +select jsonb_path_query('[[null, 1, "abd", "abdabc"]]', 'lax $ ? ((@[*] starts with "abc") is unknown)'); + jsonb_path_query +---------------------------- + [null, 1, "abd", "abdabc"] +(1 row) + +select jsonb_path_query('[null, 1, "abd", "abdabc"]', 'lax $[*] ? ((@ starts with "abc") is unknown)'); + jsonb_path_query +------------------ + null + 1 +(2 rows) + +select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c")'); + jsonb_path_query +------------------ + "abc" + "abdacb" +(2 rows) + +select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "i")'); + jsonb_path_query +------------------ + "abc" + "aBdC" + "abdacb" +(3 rows) + +select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "m")'); + jsonb_path_query +------------------ + "abc" + "abdacb" + "adc\nabc" +(3 rows) + +select jsonb_path_query('[null, 1, "abc", "abd", "aBdC", "abdacb", "babc", "adc\nabc", "ab\nadc"]', 'lax $[*] ? (@ like_regex "^ab.*c" flag "s")'); + jsonb_path_query +------------------ + "abc" + "abdacb" + "ab\nadc" +(3 rows) + +select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "q")'); + jsonb_path_query +------------------ + "a\\b" + "^a\\b$" +(2 rows) + +select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "a\\b" flag "")'); + jsonb_path_query +------------------ + "a\b" +(1 row) + +select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\b$" flag "q")'); + jsonb_path_query +------------------ + "^a\\b$" +(1 row) + +select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "q")'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? (@ like_regex "^a\\B$" flag "iq")'); + jsonb_path_query +------------------ + "^a\\b$" +(1 row) + +select jsonb_path_query('[null, 1, "a\b", "a\\b", "^a\\b$"]', 'lax $[*] ? 
(@ like_regex "^a\\b$" flag "")'); + jsonb_path_query +------------------ + "a\b" +(1 row) + +select jsonb_path_query('null', '$.datetime()'); +ERROR: jsonpath item method .datetime() can only be applied to a string +select jsonb_path_query('true', '$.datetime()'); +ERROR: jsonpath item method .datetime() can only be applied to a string +select jsonb_path_query('1', '$.datetime()'); +ERROR: jsonpath item method .datetime() can only be applied to a string +select jsonb_path_query('[]', '$.datetime()'); + jsonb_path_query +------------------ +(0 rows) + +select jsonb_path_query('[]', 'strict $.datetime()'); +ERROR: jsonpath item method .datetime() can only be applied to a string +select jsonb_path_query('{}', '$.datetime()'); +ERROR: jsonpath item method .datetime() can only be applied to a string +select jsonb_path_query('"bogus"', '$.datetime()'); +ERROR: datetime format is not recognized: "bogus" +HINT: Use a datetime template argument to specify the input data format. +select jsonb_path_query('"12:34"', '$.datetime("aaa")'); +ERROR: invalid datetime format separator: "a" +select jsonb_path_query('"aaaa"', '$.datetime("HH24")'); +ERROR: invalid value "aa" for "HH24" +DETAIL: Value must be an integer. +select jsonb '"10-03-2017"' @? '$.datetime("dd-mm-yyyy")'; + ?column? +---------- + t +(1 row) + +select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy")'); + jsonb_path_query +------------------ + "2017-03-10" +(1 row) + +select jsonb_path_query('"10-03-2017"', '$.datetime("dd-mm-yyyy").type()'); + jsonb_path_query +------------------ + "date" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy")'); +ERROR: trailing characters remain in input string after datetime format +select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy").type()'); +ERROR: trailing characters remain in input string after datetime format +select jsonb_path_query('"10-03-2017 12:34"', ' $.datetime("dd-mm-yyyy HH24:MI").type()'); + jsonb_path_query +------------------------------- + "timestamp without time zone" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM").type()'); + jsonb_path_query +---------------------------- + "timestamp with time zone" +(1 row) + +select jsonb_path_query('"12:34:56"', '$.datetime("HH24:MI:SS").type()'); + jsonb_path_query +-------------------------- + "time without time zone" +(1 row) + +select jsonb_path_query('"12:34:56 +05:20"', '$.datetime("HH24:MI:SS TZH:TZM").type()'); + jsonb_path_query +----------------------- + "time with time zone" +(1 row) + +select jsonb_path_query('"10-03-2017T12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); + jsonb_path_query +----------------------- + "2017-03-10T12:34:56" +(1 row) + +select jsonb_path_query('"10-03-2017t12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); +ERROR: unmatched format character "T" +select jsonb_path_query('"10-03-2017 12:34:56"', '$.datetime("dd-mm-yyyy\"T\"HH24:MI:SS")'); +ERROR: unmatched format character "T" +set time zone '+00'; +select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); + jsonb_path_query +----------------------- + "2017-03-10T12:34:00" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); +ERROR: input string is too short for datetime format +select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00+05:00" +(1 row) + 
+select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00-05:00" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00+05:20" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00-05:20" +(1 row) + +select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); + jsonb_path_query +------------------ + "12:34:00" +(1 row) + +select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); +ERROR: input string is too short for datetime format +select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); + jsonb_path_query +------------------ + "12:34:00+05:00" +(1 row) + +select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); + jsonb_path_query +------------------ + "12:34:00-05:00" +(1 row) + +select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); + jsonb_path_query +------------------ + "12:34:00+05:20" +(1 row) + +select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); + jsonb_path_query +------------------ + "12:34:00-05:20" +(1 row) + +set time zone '+10'; +select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI")'); + jsonb_path_query +----------------------- + "2017-03-10T12:34:00" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); +ERROR: input string is too short for datetime format +select jsonb_path_query('"10-03-2017 12:34 +05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00+05:00" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34 -05"', '$.datetime("dd-mm-yyyy HH24:MI TZH")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00-05:00" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34 +05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00+05:20" +(1 row) + +select jsonb_path_query('"10-03-2017 12:34 -05:20"', '$.datetime("dd-mm-yyyy HH24:MI TZH:TZM")'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:00-05:20" +(1 row) + +select jsonb_path_query('"12:34"', '$.datetime("HH24:MI")'); + jsonb_path_query +------------------ + "12:34:00" +(1 row) + +select jsonb_path_query('"12:34"', '$.datetime("HH24:MI TZH")'); +ERROR: input string is too short for datetime format +select jsonb_path_query('"12:34 +05"', '$.datetime("HH24:MI TZH")'); + jsonb_path_query +------------------ + "12:34:00+05:00" +(1 row) + +select jsonb_path_query('"12:34 -05"', '$.datetime("HH24:MI TZH")'); + jsonb_path_query +------------------ + "12:34:00-05:00" +(1 row) + +select jsonb_path_query('"12:34 +05:20"', '$.datetime("HH24:MI TZH:TZM")'); + jsonb_path_query +------------------ + "12:34:00+05:20" +(1 row) + +select jsonb_path_query('"12:34 -05:20"', '$.datetime("HH24:MI TZH:TZM")'); + jsonb_path_query +------------------ + "12:34:00-05:20" +(1 row) + +set time zone default; +select jsonb_path_query('"2017-03-10"', '$.datetime().type()'); + jsonb_path_query +------------------ + "date" +(1 row) + +select jsonb_path_query('"2017-03-10"', '$.datetime()'); + jsonb_path_query +------------------ + "2017-03-10" +(1 row) + +select 
jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime().type()'); + jsonb_path_query +------------------------------- + "timestamp without time zone" +(1 row) + +select jsonb_path_query('"2017-03-10 12:34:56"', '$.datetime()'); + jsonb_path_query +----------------------- + "2017-03-10T12:34:56" +(1 row) + +select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime().type()'); + jsonb_path_query +---------------------------- + "timestamp with time zone" +(1 row) + +select jsonb_path_query('"2017-03-10 12:34:56+3"', '$.datetime()'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:56+03:00" +(1 row) + +select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime().type()'); + jsonb_path_query +---------------------------- + "timestamp with time zone" +(1 row) + +select jsonb_path_query('"2017-03-10 12:34:56+3:10"', '$.datetime()'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:56+03:10" +(1 row) + +select jsonb_path_query('"2017-03-10T12:34:56+3:10"', '$.datetime()'); + jsonb_path_query +----------------------------- + "2017-03-10T12:34:56+03:10" +(1 row) + +select jsonb_path_query('"2017-03-10t12:34:56+3:10"', '$.datetime()'); +ERROR: datetime format is not recognized: "2017-03-10t12:34:56+3:10" +HINT: Use a datetime template argument to specify the input data format. +select jsonb_path_query('"2017-03-10 12:34:56.789+3:10"', '$.datetime()'); + jsonb_path_query +--------------------------------- + "2017-03-10T12:34:56.789+03:10" +(1 row) + +select jsonb_path_query('"2017-03-10T12:34:56.789+3:10"', '$.datetime()'); + jsonb_path_query +--------------------------------- + "2017-03-10T12:34:56.789+03:10" +(1 row) + +select jsonb_path_query('"2017-03-10t12:34:56.789+3:10"', '$.datetime()'); +ERROR: datetime format is not recognized: "2017-03-10t12:34:56.789+3:10" +HINT: Use a datetime template argument to specify the input data format. +select jsonb_path_query('"12:34:56"', '$.datetime().type()'); + jsonb_path_query +-------------------------- + "time without time zone" +(1 row) + +select jsonb_path_query('"12:34:56"', '$.datetime()'); + jsonb_path_query +------------------ + "12:34:56" +(1 row) + +select jsonb_path_query('"12:34:56+3"', '$.datetime().type()'); + jsonb_path_query +----------------------- + "time with time zone" +(1 row) + +select jsonb_path_query('"12:34:56+3"', '$.datetime()'); + jsonb_path_query +------------------ + "12:34:56+03:00" +(1 row) + +select jsonb_path_query('"12:34:56+3:10"', '$.datetime().type()'); + jsonb_path_query +----------------------- + "time with time zone" +(1 row) + +select jsonb_path_query('"12:34:56+3:10"', '$.datetime()'); + jsonb_path_query +------------------ + "12:34:56+03:10" +(1 row) + +set time zone '+00'; +-- date comparison +select jsonb_path_query( + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', + '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); +ERROR: cannot convert value from date to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', + '$[*].datetime() ? 
(@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); +ERROR: cannot convert value from date to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', + '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); +ERROR: cannot convert value from date to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query_tz( + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', + '$[*].datetime() ? (@ == "10.03.2017".datetime("dd.mm.yyyy"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10" + "2017-03-10T00:00:00" + "2017-03-10T03:00:00+03:00" +(3 rows) + +select jsonb_path_query_tz( + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', + '$[*].datetime() ? (@ >= "10.03.2017".datetime("dd.mm.yyyy"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10" + "2017-03-11" + "2017-03-10T00:00:00" + "2017-03-10T12:34:56" + "2017-03-10T03:00:00+03:00" +(5 rows) + +select jsonb_path_query_tz( + '["2017-03-10", "2017-03-11", "2017-03-09", "12:34:56", "01:02:03+04", "2017-03-10 00:00:00", "2017-03-10 12:34:56", "2017-03-10 01:02:03+04", "2017-03-10 03:00:00+03"]', + '$[*].datetime() ? (@ < "10.03.2017".datetime("dd.mm.yyyy"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-09" + "2017-03-10T01:02:03+04:00" +(2 rows) + +-- time comparison +select jsonb_path_query( + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', + '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', + '$[*].datetime() ? (@ >= "12:35".datetime("HH24:MI"))'); +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', + '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query_tz( + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', + '$[*].datetime() ? (@ == "12:35".datetime("HH24:MI"))'); + jsonb_path_query_tz +--------------------- + "12:35:00" + "12:35:00+00:00" +(2 rows) + +select jsonb_path_query_tz( + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', + '$[*].datetime() ? 
(@ >= "12:35".datetime("HH24:MI"))'); + jsonb_path_query_tz +--------------------- + "12:35:00" + "12:36:00" + "12:35:00+00:00" +(3 rows) + +select jsonb_path_query_tz( + '["12:34:00", "12:35:00", "12:36:00", "12:35:00+00", "12:35:00+01", "13:35:00+01", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00+01"]', + '$[*].datetime() ? (@ < "12:35".datetime("HH24:MI"))'); + jsonb_path_query_tz +--------------------- + "12:34:00" + "12:35:00+01:00" + "13:35:00+01:00" +(3 rows) + +-- timetz comparison +select jsonb_path_query( + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); +ERROR: cannot convert value from time to timetz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query_tz( + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '$[*].datetime() ? (@ == "12:35 +1".datetime("HH24:MI TZH"))'); + jsonb_path_query_tz +--------------------- + "12:35:00+01:00" +(1 row) + +select jsonb_path_query_tz( + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '$[*].datetime() ? (@ >= "12:35 +1".datetime("HH24:MI TZH"))'); + jsonb_path_query_tz +--------------------- + "12:35:00+01:00" + "12:36:00+01:00" + "12:35:00-02:00" + "11:35:00" + "12:35:00" +(5 rows) + +select jsonb_path_query_tz( + '["12:34:00+01", "12:35:00+01", "12:36:00+01", "12:35:00+02", "12:35:00-02", "10:35:00", "11:35:00", "12:35:00", "2017-03-10", "2017-03-10 12:35:00", "2017-03-10 12:35:00 +1"]', + '$[*].datetime() ? (@ < "12:35 +1".datetime("HH24:MI TZH"))'); + jsonb_path_query_tz +--------------------- + "12:34:00+01:00" + "12:35:00+02:00" + "10:35:00" +(3 rows) + +-- timestamp comparison +select jsonb_path_query( + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. 
+select jsonb_path_query( + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query_tz( + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ == "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10T12:35:00" + "2017-03-10T13:35:00+01:00" +(2 rows) + +select jsonb_path_query_tz( + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ >= "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10T12:35:00" + "2017-03-10T12:36:00" + "2017-03-10T13:35:00+01:00" + "2017-03-10T12:35:00-01:00" + "2017-03-11" +(5 rows) + +select jsonb_path_query_tz( + '["2017-03-10 12:34:00", "2017-03-10 12:35:00", "2017-03-10 12:36:00", "2017-03-10 12:35:00+01", "2017-03-10 13:35:00+01", "2017-03-10 12:35:00-01", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ < "10.03.2017 12:35".datetime("dd.mm.yyyy HH24:MI"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10T12:34:00" + "2017-03-10T12:35:00+01:00" + "2017-03-10" +(3 rows) + +-- timestamptz comparison +select jsonb_path_query( + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query( + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. 
+select jsonb_path_query( + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); +ERROR: cannot convert value from timestamp to timestamptz without time zone usage +HINT: Use *_tz() function for time zone support. +select jsonb_path_query_tz( + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ == "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10T12:35:00+01:00" + "2017-03-10T11:35:00" +(2 rows) + +select jsonb_path_query_tz( + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ >= "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10T12:35:00+01:00" + "2017-03-10T12:36:00+01:00" + "2017-03-10T12:35:00-02:00" + "2017-03-10T11:35:00" + "2017-03-10T12:35:00" + "2017-03-11" +(6 rows) + +select jsonb_path_query_tz( + '["2017-03-10 12:34:00+01", "2017-03-10 12:35:00+01", "2017-03-10 12:36:00+01", "2017-03-10 12:35:00+02", "2017-03-10 12:35:00-02", "2017-03-10 10:35:00", "2017-03-10 11:35:00", "2017-03-10 12:35:00", "2017-03-10", "2017-03-11", "12:34:56", "12:34:56+01"]', + '$[*].datetime() ? (@ < "10.03.2017 12:35 +1".datetime("dd.mm.yyyy HH24:MI TZH"))'); + jsonb_path_query_tz +----------------------------- + "2017-03-10T12:34:00+01:00" + "2017-03-10T12:35:00+02:00" + "2017-03-10T10:35:00" + "2017-03-10" +(4 rows) + +-- overflow during comparison +select jsonb_path_query('"1000000-01-01"', '$.datetime() > "2020-01-01 12:00:00".datetime()'::jsonpath); + jsonb_path_query +------------------ + true +(1 row) + +set time zone default; +-- jsonpath operators +SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*]'); + jsonb_path_query +------------------ + {"a": 1} + {"a": 2} +(2 rows) + +SELECT jsonb_path_query('[{"a": 1}, {"a": 2}]', '$[*] ? (@.a > 10)'); + jsonb_path_query +------------------ +(0 rows) + +SELECT jsonb_path_query('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_query('[{"a": 1}]', 'false'); + jsonb_path_query +------------------ + false +(1 row) + +SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); +ERROR: JSON object does not contain key "a" +SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a'); + jsonb_path_query_array +------------------------ + [1, 2] +(1 row) + +SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); + jsonb_path_query_array +------------------------ + [1] +(1 row) + +SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); + jsonb_path_query_array +------------------------ + [] +(1 row) + +SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? 
(@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); + jsonb_path_query_array +------------------------ + [2, 3] +(1 row) + +SELECT jsonb_path_query_array('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); + jsonb_path_query_array +------------------------ + [] +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a'); +ERROR: JSON object does not contain key "a" +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {}]', 'strict $[*].a', silent => true); + jsonb_path_query_first +------------------------ + 1 +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a'); + jsonb_path_query_first +------------------------ + 1 +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ == 1)'); + jsonb_path_query_first +------------------------ + 1 +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 10)'); + jsonb_path_query_first +------------------------ + +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 1, "max": 4}'); + jsonb_path_query_first +------------------------ + 2 +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*].a ? (@ > $min && @ < $max)', vars => '{"min": 3, "max": 4}'); + jsonb_path_query_first +------------------------ + +(1 row) + +SELECT jsonb_path_query_first('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_query_first('[{"a": 1}]', 'false'); + jsonb_path_query_first +------------------------ + false +(1 row) + +SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*].a ? (@ > 1)'; + ?column? +---------- + t +(1 row) + +SELECT jsonb '[{"a": 1}, {"a": 2}]' @? '$[*] ? (@.a > 2)'; + ?column? +---------- + f +(1 row) + +SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}]', '$[*].a ? (@ > 1)'); + jsonb_path_exists +------------------- + t +(1 row) + +SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? (@.a > $min && @.a < $max)', vars => '{"min": 1, "max": 4}'); + jsonb_path_exists +------------------- + t +(1 row) + +SELECT jsonb_path_exists('[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 5}]', '$[*] ? 
(@.a > $min && @.a < $max)', vars => '{"min": 3, "max": 4}'); + jsonb_path_exists +------------------- + f +(1 row) + +SELECT jsonb_path_exists('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_exists('[{"a": 1}]', 'false'); + jsonb_path_exists +------------------- + t +(1 row) + +SELECT jsonb_path_match('true', '$', silent => false); + jsonb_path_match +------------------ + t +(1 row) + +SELECT jsonb_path_match('false', '$', silent => false); + jsonb_path_match +------------------ + f +(1 row) + +SELECT jsonb_path_match('null', '$', silent => false); + jsonb_path_match +------------------ + +(1 row) + +SELECT jsonb_path_match('1', '$', silent => true); + jsonb_path_match +------------------ + +(1 row) + +SELECT jsonb_path_match('1', '$', silent => false); +ERROR: single boolean result is expected +SELECT jsonb_path_match('"a"', '$', silent => false); +ERROR: single boolean result is expected +SELECT jsonb_path_match('{}', '$', silent => false); +ERROR: single boolean result is expected +SELECT jsonb_path_match('[true]', '$', silent => false); +ERROR: single boolean result is expected +SELECT jsonb_path_match('{}', 'lax $.a', silent => false); +ERROR: single boolean result is expected +SELECT jsonb_path_match('{}', 'strict $.a', silent => false); +ERROR: JSON object does not contain key "a" +SELECT jsonb_path_match('{}', 'strict $.a', silent => true); + jsonb_path_match +------------------ + +(1 row) + +SELECT jsonb_path_match('[true, true]', '$[*]', silent => false); +ERROR: single boolean result is expected +SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 1'; + ?column? +---------- + t +(1 row) + +SELECT jsonb '[{"a": 1}, {"a": 2}]' @@ '$[*].a > 2'; + ?column? +---------- + f +(1 row) + +SELECT jsonb_path_match('[{"a": 1}, {"a": 2}]', '$[*].a > 1'); + jsonb_path_match +------------------ + t +(1 row) + +SELECT jsonb_path_match('[{"a": 1}]', '$undefined_var'); +ERROR: could not find jsonpath variable "undefined_var" +SELECT jsonb_path_match('[{"a": 1}]', 'false'); + jsonb_path_match +------------------ + f +(1 row) + +-- test string comparison (Unicode codepoint collation) +WITH str(j, num) AS +( + SELECT jsonb_build_object('s', s), num + FROM unnest('{"", "a", "ab", "abc", "abcd", "b", "A", "AB", "ABC", "ABc", "ABcD", "B"}'::text[]) WITH ORDINALITY AS a(s, num) +) +SELECT + s1.j, s2.j, + jsonb_path_query_first(s1.j, '$.s < $s', vars => s2.j) lt, + jsonb_path_query_first(s1.j, '$.s <= $s', vars => s2.j) le, + jsonb_path_query_first(s1.j, '$.s == $s', vars => s2.j) eq, + jsonb_path_query_first(s1.j, '$.s >= $s', vars => s2.j) ge, + jsonb_path_query_first(s1.j, '$.s > $s', vars => s2.j) gt +FROM str s1, str s2 +ORDER BY s1.num, s2.num; + j | j | lt | le | eq | ge | gt +---------------+---------------+-------+-------+-------+-------+------- + {"s": ""} | {"s": ""} | false | true | true | true | false + {"s": ""} | {"s": "a"} | true | true | false | false | false + {"s": ""} | {"s": "ab"} | true | true | false | false | false + {"s": ""} | {"s": "abc"} | true | true | false | false | false + {"s": ""} | {"s": "abcd"} | true | true | false | false | false + {"s": ""} | {"s": "b"} | true | true | false | false | false + {"s": ""} | {"s": "A"} | true | true | false | false | false + {"s": ""} | {"s": "AB"} | true | true | false | false | false + {"s": ""} | {"s": "ABC"} | true | true | false | false | false + {"s": ""} | {"s": "ABc"} | true | true | false | false | false + {"s": ""} | {"s": "ABcD"} | true | true | false | false | false 
+ {"s": ""} | {"s": "B"} | true | true | false | false | false + {"s": "a"} | {"s": ""} | false | false | false | true | true + {"s": "a"} | {"s": "a"} | false | true | true | true | false + {"s": "a"} | {"s": "ab"} | true | true | false | false | false + {"s": "a"} | {"s": "abc"} | true | true | false | false | false + {"s": "a"} | {"s": "abcd"} | true | true | false | false | false + {"s": "a"} | {"s": "b"} | true | true | false | false | false + {"s": "a"} | {"s": "A"} | false | false | false | true | true + {"s": "a"} | {"s": "AB"} | false | false | false | true | true + {"s": "a"} | {"s": "ABC"} | false | false | false | true | true + {"s": "a"} | {"s": "ABc"} | false | false | false | true | true + {"s": "a"} | {"s": "ABcD"} | false | false | false | true | true + {"s": "a"} | {"s": "B"} | false | false | false | true | true + {"s": "ab"} | {"s": ""} | false | false | false | true | true + {"s": "ab"} | {"s": "a"} | false | false | false | true | true + {"s": "ab"} | {"s": "ab"} | false | true | true | true | false + {"s": "ab"} | {"s": "abc"} | true | true | false | false | false + {"s": "ab"} | {"s": "abcd"} | true | true | false | false | false + {"s": "ab"} | {"s": "b"} | true | true | false | false | false + {"s": "ab"} | {"s": "A"} | false | false | false | true | true + {"s": "ab"} | {"s": "AB"} | false | false | false | true | true + {"s": "ab"} | {"s": "ABC"} | false | false | false | true | true + {"s": "ab"} | {"s": "ABc"} | false | false | false | true | true + {"s": "ab"} | {"s": "ABcD"} | false | false | false | true | true + {"s": "ab"} | {"s": "B"} | false | false | false | true | true + {"s": "abc"} | {"s": ""} | false | false | false | true | true + {"s": "abc"} | {"s": "a"} | false | false | false | true | true + {"s": "abc"} | {"s": "ab"} | false | false | false | true | true + {"s": "abc"} | {"s": "abc"} | false | true | true | true | false + {"s": "abc"} | {"s": "abcd"} | true | true | false | false | false + {"s": "abc"} | {"s": "b"} | true | true | false | false | false + {"s": "abc"} | {"s": "A"} | false | false | false | true | true + {"s": "abc"} | {"s": "AB"} | false | false | false | true | true + {"s": "abc"} | {"s": "ABC"} | false | false | false | true | true + {"s": "abc"} | {"s": "ABc"} | false | false | false | true | true + {"s": "abc"} | {"s": "ABcD"} | false | false | false | true | true + {"s": "abc"} | {"s": "B"} | false | false | false | true | true + {"s": "abcd"} | {"s": ""} | false | false | false | true | true + {"s": "abcd"} | {"s": "a"} | false | false | false | true | true + {"s": "abcd"} | {"s": "ab"} | false | false | false | true | true + {"s": "abcd"} | {"s": "abc"} | false | false | false | true | true + {"s": "abcd"} | {"s": "abcd"} | false | true | true | true | false + {"s": "abcd"} | {"s": "b"} | true | true | false | false | false + {"s": "abcd"} | {"s": "A"} | false | false | false | true | true + {"s": "abcd"} | {"s": "AB"} | false | false | false | true | true + {"s": "abcd"} | {"s": "ABC"} | false | false | false | true | true + {"s": "abcd"} | {"s": "ABc"} | false | false | false | true | true + {"s": "abcd"} | {"s": "ABcD"} | false | false | false | true | true + {"s": "abcd"} | {"s": "B"} | false | false | false | true | true + {"s": "b"} | {"s": ""} | false | false | false | true | true + {"s": "b"} | {"s": "a"} | false | false | false | true | true + {"s": "b"} | {"s": "ab"} | false | false | false | true | true + {"s": "b"} | {"s": "abc"} | false | false | false | true | true + {"s": "b"} | {"s": "abcd"} | false | 
false | false | true | true + {"s": "b"} | {"s": "b"} | false | true | true | true | false + {"s": "b"} | {"s": "A"} | false | false | false | true | true + {"s": "b"} | {"s": "AB"} | false | false | false | true | true + {"s": "b"} | {"s": "ABC"} | false | false | false | true | true + {"s": "b"} | {"s": "ABc"} | false | false | false | true | true + {"s": "b"} | {"s": "ABcD"} | false | false | false | true | true + {"s": "b"} | {"s": "B"} | false | false | false | true | true + {"s": "A"} | {"s": ""} | false | false | false | true | true + {"s": "A"} | {"s": "a"} | true | true | false | false | false + {"s": "A"} | {"s": "ab"} | true | true | false | false | false + {"s": "A"} | {"s": "abc"} | true | true | false | false | false + {"s": "A"} | {"s": "abcd"} | true | true | false | false | false + {"s": "A"} | {"s": "b"} | true | true | false | false | false + {"s": "A"} | {"s": "A"} | false | true | true | true | false + {"s": "A"} | {"s": "AB"} | true | true | false | false | false + {"s": "A"} | {"s": "ABC"} | true | true | false | false | false + {"s": "A"} | {"s": "ABc"} | true | true | false | false | false + {"s": "A"} | {"s": "ABcD"} | true | true | false | false | false + {"s": "A"} | {"s": "B"} | true | true | false | false | false + {"s": "AB"} | {"s": ""} | false | false | false | true | true + {"s": "AB"} | {"s": "a"} | true | true | false | false | false + {"s": "AB"} | {"s": "ab"} | true | true | false | false | false + {"s": "AB"} | {"s": "abc"} | true | true | false | false | false + {"s": "AB"} | {"s": "abcd"} | true | true | false | false | false + {"s": "AB"} | {"s": "b"} | true | true | false | false | false + {"s": "AB"} | {"s": "A"} | false | false | false | true | true + {"s": "AB"} | {"s": "AB"} | false | true | true | true | false + {"s": "AB"} | {"s": "ABC"} | true | true | false | false | false + {"s": "AB"} | {"s": "ABc"} | true | true | false | false | false + {"s": "AB"} | {"s": "ABcD"} | true | true | false | false | false + {"s": "AB"} | {"s": "B"} | true | true | false | false | false + {"s": "ABC"} | {"s": ""} | false | false | false | true | true + {"s": "ABC"} | {"s": "a"} | true | true | false | false | false + {"s": "ABC"} | {"s": "ab"} | true | true | false | false | false + {"s": "ABC"} | {"s": "abc"} | true | true | false | false | false + {"s": "ABC"} | {"s": "abcd"} | true | true | false | false | false + {"s": "ABC"} | {"s": "b"} | true | true | false | false | false + {"s": "ABC"} | {"s": "A"} | false | false | false | true | true + {"s": "ABC"} | {"s": "AB"} | false | false | false | true | true + {"s": "ABC"} | {"s": "ABC"} | false | true | true | true | false + {"s": "ABC"} | {"s": "ABc"} | true | true | false | false | false + {"s": "ABC"} | {"s": "ABcD"} | true | true | false | false | false + {"s": "ABC"} | {"s": "B"} | true | true | false | false | false + {"s": "ABc"} | {"s": ""} | false | false | false | true | true + {"s": "ABc"} | {"s": "a"} | true | true | false | false | false + {"s": "ABc"} | {"s": "ab"} | true | true | false | false | false + {"s": "ABc"} | {"s": "abc"} | true | true | false | false | false + {"s": "ABc"} | {"s": "abcd"} | true | true | false | false | false + {"s": "ABc"} | {"s": "b"} | true | true | false | false | false + {"s": "ABc"} | {"s": "A"} | false | false | false | true | true + {"s": "ABc"} | {"s": "AB"} | false | false | false | true | true + {"s": "ABc"} | {"s": "ABC"} | false | false | false | true | true + {"s": "ABc"} | {"s": "ABc"} | false | true | true | true | false + {"s": "ABc"} | {"s": 
"ABcD"} | true | true | false | false | false + {"s": "ABc"} | {"s": "B"} | true | true | false | false | false + {"s": "ABcD"} | {"s": ""} | false | false | false | true | true + {"s": "ABcD"} | {"s": "a"} | true | true | false | false | false + {"s": "ABcD"} | {"s": "ab"} | true | true | false | false | false + {"s": "ABcD"} | {"s": "abc"} | true | true | false | false | false + {"s": "ABcD"} | {"s": "abcd"} | true | true | false | false | false + {"s": "ABcD"} | {"s": "b"} | true | true | false | false | false + {"s": "ABcD"} | {"s": "A"} | false | false | false | true | true + {"s": "ABcD"} | {"s": "AB"} | false | false | false | true | true + {"s": "ABcD"} | {"s": "ABC"} | false | false | false | true | true + {"s": "ABcD"} | {"s": "ABc"} | false | false | false | true | true + {"s": "ABcD"} | {"s": "ABcD"} | false | true | true | true | false + {"s": "ABcD"} | {"s": "B"} | true | true | false | false | false + {"s": "B"} | {"s": ""} | false | false | false | true | true + {"s": "B"} | {"s": "a"} | true | true | false | false | false + {"s": "B"} | {"s": "ab"} | true | true | false | false | false + {"s": "B"} | {"s": "abc"} | true | true | false | false | false + {"s": "B"} | {"s": "abcd"} | true | true | false | false | false + {"s": "B"} | {"s": "b"} | true | true | false | false | false + {"s": "B"} | {"s": "A"} | false | false | false | true | true + {"s": "B"} | {"s": "AB"} | false | false | false | true | true + {"s": "B"} | {"s": "ABC"} | false | false | false | true | true + {"s": "B"} | {"s": "ABc"} | false | false | false | true | true + {"s": "B"} | {"s": "ABcD"} | false | false | false | true | true + {"s": "B"} | {"s": "B"} | false | true | true | true | false +(144 rows) + diff --git a/src/test/regress/expected/jsonpath.out b/src/test/regress/expected/jsonpath.out new file mode 100644 index 0000000..eeffb38 --- /dev/null +++ b/src/test/regress/expected/jsonpath.out @@ -0,0 +1,1218 @@ +--jsonpath io +select ''::jsonpath; +ERROR: invalid input syntax for type jsonpath: "" +LINE 1: select ''::jsonpath; + ^ +select '$'::jsonpath; + jsonpath +---------- + $ +(1 row) + +select 'strict $'::jsonpath; + jsonpath +---------- + strict $ +(1 row) + +select 'lax $'::jsonpath; + jsonpath +---------- + $ +(1 row) + +select '$.a'::jsonpath; + jsonpath +---------- + $."a" +(1 row) + +select '$.a.v'::jsonpath; + jsonpath +----------- + $."a"."v" +(1 row) + +select '$.a.*'::jsonpath; + jsonpath +---------- + $."a".* +(1 row) + +select '$.*[*]'::jsonpath; + jsonpath +---------- + $.*[*] +(1 row) + +select '$.a[*]'::jsonpath; + jsonpath +---------- + $."a"[*] +(1 row) + +select '$.a[*][*]'::jsonpath; + jsonpath +------------- + $."a"[*][*] +(1 row) + +select '$[*]'::jsonpath; + jsonpath +---------- + $[*] +(1 row) + +select '$[0]'::jsonpath; + jsonpath +---------- + $[0] +(1 row) + +select '$[*][0]'::jsonpath; + jsonpath +---------- + $[*][0] +(1 row) + +select '$[*].a'::jsonpath; + jsonpath +---------- + $[*]."a" +(1 row) + +select '$[*][0].a.b'::jsonpath; + jsonpath +----------------- + $[*][0]."a"."b" +(1 row) + +select '$.a.**.b'::jsonpath; + jsonpath +-------------- + $."a".**."b" +(1 row) + +select '$.a.**{2}.b'::jsonpath; + jsonpath +----------------- + $."a".**{2}."b" +(1 row) + +select '$.a.**{2 to 2}.b'::jsonpath; + jsonpath +----------------- + $."a".**{2}."b" +(1 row) + +select '$.a.**{2 to 5}.b'::jsonpath; + jsonpath +---------------------- + $."a".**{2 to 5}."b" +(1 row) + +select '$.a.**{0 to 5}.b'::jsonpath; + jsonpath +---------------------- + $."a".**{0 to 5}."b" +(1 row) 
+ +select '$.a.**{5 to last}.b'::jsonpath; + jsonpath +------------------------- + $."a".**{5 to last}."b" +(1 row) + +select '$.a.**{last}.b'::jsonpath; + jsonpath +-------------------- + $."a".**{last}."b" +(1 row) + +select '$.a.**{last to 5}.b'::jsonpath; + jsonpath +------------------------- + $."a".**{last to 5}."b" +(1 row) + +select '$+1'::jsonpath; + jsonpath +---------- + ($ + 1) +(1 row) + +select '$-1'::jsonpath; + jsonpath +---------- + ($ - 1) +(1 row) + +select '$--+1'::jsonpath; + jsonpath +---------- + ($ - -1) +(1 row) + +select '$.a/+-1'::jsonpath; + jsonpath +-------------- + ($."a" / -1) +(1 row) + +select '1 * 2 + 4 % -3 != false'::jsonpath; + jsonpath +--------------------------- + (1 * 2 + 4 % -3 != false) +(1 row) + +select '"\b\f\r\n\t\v\"\''\\"'::jsonpath; + jsonpath +------------------------- + "\b\f\r\n\t\u000b\"'\\" +(1 row) + +select '"\x50\u0067\u{53}\u{051}\u{00004C}"'::jsonpath; + jsonpath +---------- + "PgSQL" +(1 row) + +select '$.foo\x50\u0067\u{53}\u{051}\u{00004C}\t\"bar'::jsonpath; + jsonpath +--------------------- + $."fooPgSQL\t\"bar" +(1 row) + +select '"\z"'::jsonpath; -- unrecognized escape is just the literal char + jsonpath +---------- + "z" +(1 row) + +select '$.g ? ($.a == 1)'::jsonpath; + jsonpath +-------------------- + $."g"?($."a" == 1) +(1 row) + +select '$.g ? (@ == 1)'::jsonpath; + jsonpath +---------------- + $."g"?(@ == 1) +(1 row) + +select '$.g ? (@.a == 1)'::jsonpath; + jsonpath +-------------------- + $."g"?(@."a" == 1) +(1 row) + +select '$.g ? (@.a == 1 || @.a == 4)'::jsonpath; + jsonpath +---------------------------------- + $."g"?(@."a" == 1 || @."a" == 4) +(1 row) + +select '$.g ? (@.a == 1 && @.a == 4)'::jsonpath; + jsonpath +---------------------------------- + $."g"?(@."a" == 1 && @."a" == 4) +(1 row) + +select '$.g ? (@.a == 1 || @.a == 4 && @.b == 7)'::jsonpath; + jsonpath +------------------------------------------------ + $."g"?(@."a" == 1 || @."a" == 4 && @."b" == 7) +(1 row) + +select '$.g ? (@.a == 1 || !(@.a == 4) && @.b == 7)'::jsonpath; + jsonpath +--------------------------------------------------- + $."g"?(@."a" == 1 || !(@."a" == 4) && @."b" == 7) +(1 row) + +select '$.g ? (@.a == 1 || !(@.x >= 123 || @.a == 4) && @.b == 7)'::jsonpath; + jsonpath +------------------------------------------------------------------- + $."g"?(@."a" == 1 || !(@."x" >= 123 || @."a" == 4) && @."b" == 7) +(1 row) + +select '$.g ? (@.x >= @[*]?(@.a > "abc"))'::jsonpath; + jsonpath +--------------------------------------- + $."g"?(@."x" >= @[*]?(@."a" > "abc")) +(1 row) + +select '$.g ? ((@.x >= 123 || @.a == 4) is unknown)'::jsonpath; + jsonpath +------------------------------------------------- + $."g"?((@."x" >= 123 || @."a" == 4) is unknown) +(1 row) + +select '$.g ? (exists (@.x))'::jsonpath; + jsonpath +------------------------ + $."g"?(exists (@."x")) +(1 row) + +select '$.g ? (exists (@.x ? (@ == 14)))'::jsonpath; + jsonpath +---------------------------------- + $."g"?(exists (@."x"?(@ == 14))) +(1 row) + +select '$.g ? ((@.x >= 123 || @.a == 4) && exists (@.x ? (@ == 14)))'::jsonpath; + jsonpath +------------------------------------------------------------------ + $."g"?((@."x" >= 123 || @."a" == 4) && exists (@."x"?(@ == 14))) +(1 row) + +select '$.g ? 
(+@.x >= +-(+@.a + 2))'::jsonpath; + jsonpath +------------------------------------ + $."g"?(+@."x" >= +(-(+@."a" + 2))) +(1 row) + +select '$a'::jsonpath; + jsonpath +---------- + $"a" +(1 row) + +select '$a.b'::jsonpath; + jsonpath +---------- + $"a"."b" +(1 row) + +select '$a[*]'::jsonpath; + jsonpath +---------- + $"a"[*] +(1 row) + +select '$.g ? (@.zip == $zip)'::jsonpath; + jsonpath +--------------------------- + $."g"?(@."zip" == $"zip") +(1 row) + +select '$.a[1,2, 3 to 16]'::jsonpath; + jsonpath +-------------------- + $."a"[1,2,3 to 16] +(1 row) + +select '$.a[$a + 1, ($b[*]) to -($[0] * 2)]'::jsonpath; + jsonpath +---------------------------------------- + $."a"[$"a" + 1,$"b"[*] to -($[0] * 2)] +(1 row) + +select '$.a[$.a.size() - 3]'::jsonpath; + jsonpath +------------------------- + $."a"[$."a".size() - 3] +(1 row) + +select 'last'::jsonpath; +ERROR: LAST is allowed only in array subscripts +LINE 1: select 'last'::jsonpath; + ^ +select '"last"'::jsonpath; + jsonpath +---------- + "last" +(1 row) + +select '$.last'::jsonpath; + jsonpath +---------- + $."last" +(1 row) + +select '$ ? (last > 0)'::jsonpath; +ERROR: LAST is allowed only in array subscripts +LINE 1: select '$ ? (last > 0)'::jsonpath; + ^ +select '$[last]'::jsonpath; + jsonpath +---------- + $[last] +(1 row) + +select '$[$[0] ? (last > 0)]'::jsonpath; + jsonpath +-------------------- + $[$[0]?(last > 0)] +(1 row) + +select 'null.type()'::jsonpath; + jsonpath +------------- + null.type() +(1 row) + +select '1.type()'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1.t" of jsonpath input +LINE 1: select '1.type()'::jsonpath; + ^ +select '(1).type()'::jsonpath; + jsonpath +------------ + (1).type() +(1 row) + +select '1.2.type()'::jsonpath; + jsonpath +-------------- + (1.2).type() +(1 row) + +select '"aaa".type()'::jsonpath; + jsonpath +-------------- + "aaa".type() +(1 row) + +select 'true.type()'::jsonpath; + jsonpath +------------- + true.type() +(1 row) + +select '$.double().floor().ceiling().abs()'::jsonpath; + jsonpath +------------------------------------ + $.double().floor().ceiling().abs() +(1 row) + +select '$.keyvalue().key'::jsonpath; + jsonpath +-------------------- + $.keyvalue()."key" +(1 row) + +select '$.datetime()'::jsonpath; + jsonpath +-------------- + $.datetime() +(1 row) + +select '$.datetime("datetime template")'::jsonpath; + jsonpath +--------------------------------- + $.datetime("datetime template") +(1 row) + +select '$ ? (@ starts with "abc")'::jsonpath; + jsonpath +------------------------- + $?(@ starts with "abc") +(1 row) + +select '$ ? (@ starts with $var)'::jsonpath; + jsonpath +-------------------------- + $?(@ starts with $"var") +(1 row) + +select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; +ERROR: invalid regular expression: parentheses () not balanced +LINE 1: select '$ ? (@ like_regex "(invalid pattern")'::jsonpath; + ^ +select '$ ? (@ like_regex "pattern")'::jsonpath; + jsonpath +---------------------------- + $?(@ like_regex "pattern") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "")'::jsonpath; + jsonpath +---------------------------- + $?(@ like_regex "pattern") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "i")'::jsonpath; + jsonpath +------------------------------------- + $?(@ like_regex "pattern" flag "i") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "is")'::jsonpath; + jsonpath +-------------------------------------- + $?(@ like_regex "pattern" flag "is") +(1 row) + +select '$ ? 
(@ like_regex "pattern" flag "isim")'::jsonpath; + jsonpath +--------------------------------------- + $?(@ like_regex "pattern" flag "ism") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; +ERROR: XQuery "x" flag (expanded regular expressions) is not implemented +LINE 1: select '$ ? (@ like_regex "pattern" flag "xsms")'::jsonpath; + ^ +select '$ ? (@ like_regex "pattern" flag "q")'::jsonpath; + jsonpath +------------------------------------- + $?(@ like_regex "pattern" flag "q") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "iq")'::jsonpath; + jsonpath +-------------------------------------- + $?(@ like_regex "pattern" flag "iq") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "smixq")'::jsonpath; + jsonpath +----------------------------------------- + $?(@ like_regex "pattern" flag "ismxq") +(1 row) + +select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$ ? (@ like_regex "pattern" flag "a")'::jsonpath; + ^ +DETAIL: Unrecognized flag character "a" in LIKE_REGEX predicate. +select '$ < 1'::jsonpath; + jsonpath +---------- + ($ < 1) +(1 row) + +select '($ < 1) || $.a.b <= $x'::jsonpath; + jsonpath +------------------------------ + ($ < 1 || $."a"."b" <= $"x") +(1 row) + +select '@ + 1'::jsonpath; +ERROR: @ is not allowed in root expressions +LINE 1: select '@ + 1'::jsonpath; + ^ +select '($).a.b'::jsonpath; + jsonpath +----------- + $."a"."b" +(1 row) + +select '($.a.b).c.d'::jsonpath; + jsonpath +------------------- + $."a"."b"."c"."d" +(1 row) + +select '($.a.b + -$.x.y).c.d'::jsonpath; + jsonpath +---------------------------------- + ($."a"."b" + -$."x"."y")."c"."d" +(1 row) + +select '(-+$.a.b).c.d'::jsonpath; + jsonpath +------------------------- + (-(+$."a"."b"))."c"."d" +(1 row) + +select '1 + ($.a.b + 2).c.d'::jsonpath; + jsonpath +------------------------------- + (1 + ($."a"."b" + 2)."c"."d") +(1 row) + +select '1 + ($.a.b > 2).c.d'::jsonpath; + jsonpath +------------------------------- + (1 + ($."a"."b" > 2)."c"."d") +(1 row) + +select '($)'::jsonpath; + jsonpath +---------- + $ +(1 row) + +select '(($))'::jsonpath; + jsonpath +---------- + $ +(1 row) + +select '((($ + 1)).a + ((2)).b ? ((((@ > 1)) || (exists(@.c)))))'::jsonpath; + jsonpath +--------------------------------------------------- + (($ + 1)."a" + (2)."b"?(@ > 1 || exists (@."c"))) +(1 row) + +select '$ ? (@.a < 1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < -1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < -1) +(1 row) + +select '$ ? (@.a < +1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < .1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 0.1) +(1 row) + +select '$ ? (@.a < -.1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < -0.1) +(1 row) + +select '$ ? (@.a < +.1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 0.1) +(1 row) + +select '$ ? (@.a < 0.1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 0.1) +(1 row) + +select '$ ? (@.a < -0.1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < -0.1) +(1 row) + +select '$ ? (@.a < +0.1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 0.1) +(1 row) + +select '$ ? (@.a < 10.1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 10.1) +(1 row) + +select '$ ? (@.a < -10.1)'::jsonpath; + jsonpath +------------------- + $?(@."a" < -10.1) +(1 row) + +select '$ ? 
(@.a < +10.1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 10.1) +(1 row) + +select '$ ? (@.a < 1e1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < 10) +(1 row) + +select '$ ? (@.a < -1e1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < -10) +(1 row) + +select '$ ? (@.a < +1e1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < 10) +(1 row) + +select '$ ? (@.a < .1e1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < -.1e1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < -1) +(1 row) + +select '$ ? (@.a < +.1e1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < 0.1e1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < -0.1e1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < -1) +(1 row) + +select '$ ? (@.a < +0.1e1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < 10.1e1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 101) +(1 row) + +select '$ ? (@.a < -10.1e1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < -101) +(1 row) + +select '$ ? (@.a < +10.1e1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 101) +(1 row) + +select '$ ? (@.a < 1e-1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 0.1) +(1 row) + +select '$ ? (@.a < -1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < -0.1) +(1 row) + +select '$ ? (@.a < +1e-1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 0.1) +(1 row) + +select '$ ? (@.a < .1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 0.01) +(1 row) + +select '$ ? (@.a < -.1e-1)'::jsonpath; + jsonpath +------------------- + $?(@."a" < -0.01) +(1 row) + +select '$ ? (@.a < +.1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 0.01) +(1 row) + +select '$ ? (@.a < 0.1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 0.01) +(1 row) + +select '$ ? (@.a < -0.1e-1)'::jsonpath; + jsonpath +------------------- + $?(@."a" < -0.01) +(1 row) + +select '$ ? (@.a < +0.1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 0.01) +(1 row) + +select '$ ? (@.a < 10.1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 1.01) +(1 row) + +select '$ ? (@.a < -10.1e-1)'::jsonpath; + jsonpath +------------------- + $?(@."a" < -1.01) +(1 row) + +select '$ ? (@.a < +10.1e-1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < 1.01) +(1 row) + +select '$ ? (@.a < 1e+1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < 10) +(1 row) + +select '$ ? (@.a < -1e+1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < -10) +(1 row) + +select '$ ? (@.a < +1e+1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < 10) +(1 row) + +select '$ ? (@.a < .1e+1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < -.1e+1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < -1) +(1 row) + +select '$ ? (@.a < +.1e+1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < 0.1e+1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < -0.1e+1)'::jsonpath; + jsonpath +---------------- + $?(@."a" < -1) +(1 row) + +select '$ ? (@.a < +0.1e+1)'::jsonpath; + jsonpath +--------------- + $?(@."a" < 1) +(1 row) + +select '$ ? (@.a < 10.1e+1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 101) +(1 row) + +select '$ ? (@.a < -10.1e+1)'::jsonpath; + jsonpath +------------------ + $?(@."a" < -101) +(1 row) + +select '$ ? 
(@.a < +10.1e+1)'::jsonpath; + jsonpath +----------------- + $?(@."a" < 101) +(1 row) + +-- numeric literals +select '0'::jsonpath; + jsonpath +---------- + 0 +(1 row) + +select '00'::jsonpath; +ERROR: trailing junk after numeric literal at or near "00" of jsonpath input +LINE 1: select '00'::jsonpath; + ^ +select '0755'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0755'::jsonpath; + ^ +select '0.0'::jsonpath; + jsonpath +---------- + 0.0 +(1 row) + +select '0.000'::jsonpath; + jsonpath +---------- + 0.000 +(1 row) + +select '0.000e1'::jsonpath; + jsonpath +---------- + 0.00 +(1 row) + +select '0.000e2'::jsonpath; + jsonpath +---------- + 0.0 +(1 row) + +select '0.000e3'::jsonpath; + jsonpath +---------- + 0 +(1 row) + +select '0.0010'::jsonpath; + jsonpath +---------- + 0.0010 +(1 row) + +select '0.0010e-1'::jsonpath; + jsonpath +---------- + 0.00010 +(1 row) + +select '0.0010e+1'::jsonpath; + jsonpath +---------- + 0.010 +(1 row) + +select '0.0010e+2'::jsonpath; + jsonpath +---------- + 0.10 +(1 row) + +select '.001'::jsonpath; + jsonpath +---------- + 0.001 +(1 row) + +select '.001e1'::jsonpath; + jsonpath +---------- + 0.01 +(1 row) + +select '1.'::jsonpath; + jsonpath +---------- + 1 +(1 row) + +select '1.e1'::jsonpath; + jsonpath +---------- + 10 +(1 row) + +select '1a'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1a" of jsonpath input +LINE 1: select '1a'::jsonpath; + ^ +select '1e'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1e" of jsonpath input +LINE 1: select '1e'::jsonpath; + ^ +select '1.e'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1.e" of jsonpath input +LINE 1: select '1.e'::jsonpath; + ^ +select '1.2a'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1.2a" of jsonpath input +LINE 1: select '1.2a'::jsonpath; + ^ +select '1.2e'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1.2e" of jsonpath input +LINE 1: select '1.2e'::jsonpath; + ^ +select '1.2.e'::jsonpath; + jsonpath +----------- + (1.2)."e" +(1 row) + +select '(1.2).e'::jsonpath; + jsonpath +----------- + (1.2)."e" +(1 row) + +select '1e3'::jsonpath; + jsonpath +---------- + 1000 +(1 row) + +select '1.e3'::jsonpath; + jsonpath +---------- + 1000 +(1 row) + +select '1.e3.e'::jsonpath; + jsonpath +------------ + (1000)."e" +(1 row) + +select '1.e3.e4'::jsonpath; + jsonpath +------------- + (1000)."e4" +(1 row) + +select '1.2e3'::jsonpath; + jsonpath +---------- + 1200 +(1 row) + +select '1.2e3a'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1.2e3a" of jsonpath input +LINE 1: select '1.2e3a'::jsonpath; + ^ +select '1.2.e3'::jsonpath; + jsonpath +------------ + (1.2)."e3" +(1 row) + +select '(1.2).e3'::jsonpath; + jsonpath +------------ + (1.2)."e3" +(1 row) + +select '1..e'::jsonpath; + jsonpath +---------- + (1)."e" +(1 row) + +select '1..e3'::jsonpath; + jsonpath +---------- + (1)."e3" +(1 row) + +select '(1.).e'::jsonpath; + jsonpath +---------- + (1)."e" +(1 row) + +select '(1.).e3'::jsonpath; + jsonpath +---------- + (1)."e3" +(1 row) + +select '1?(2>3)'::jsonpath; + jsonpath +------------- + (1)?(2 > 3) +(1 row) + +-- nondecimal +select '0b100101'::jsonpath; + jsonpath +---------- + 37 +(1 row) + +select '0o273'::jsonpath; + jsonpath +---------- + 187 +(1 row) + +select '0x42F'::jsonpath; + jsonpath +---------- + 1071 +(1 row) + +-- error cases +select '0b'::jsonpath; +ERROR: trailing junk after numeric literal at or near "0b" of jsonpath input +LINE 1: select 
'0b'::jsonpath; + ^ +select '1b'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1b" of jsonpath input +LINE 1: select '1b'::jsonpath; + ^ +select '0b0x'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0b0x'::jsonpath; + ^ +select '0o'::jsonpath; +ERROR: trailing junk after numeric literal at or near "0o" of jsonpath input +LINE 1: select '0o'::jsonpath; + ^ +select '1o'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1o" of jsonpath input +LINE 1: select '1o'::jsonpath; + ^ +select '0o0x'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0o0x'::jsonpath; + ^ +select '0x'::jsonpath; +ERROR: trailing junk after numeric literal at or near "0x" of jsonpath input +LINE 1: select '0x'::jsonpath; + ^ +select '1x'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1x" of jsonpath input +LINE 1: select '1x'::jsonpath; + ^ +select '0x0y'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0x0y'::jsonpath; + ^ +-- underscores +select '1_000_000'::jsonpath; + jsonpath +---------- + 1000000 +(1 row) + +select '1_2_3'::jsonpath; + jsonpath +---------- + 123 +(1 row) + +select '0x1EEE_FFFF'::jsonpath; + jsonpath +----------- + 518979583 +(1 row) + +select '0o2_73'::jsonpath; + jsonpath +---------- + 187 +(1 row) + +select '0b10_0101'::jsonpath; + jsonpath +---------- + 37 +(1 row) + +select '1_000.000_005'::jsonpath; + jsonpath +------------- + 1000.000005 +(1 row) + +select '1_000.'::jsonpath; + jsonpath +---------- + 1000 +(1 row) + +select '.000_005'::jsonpath; + jsonpath +---------- + 0.000005 +(1 row) + +select '1_000.5e0_1'::jsonpath; + jsonpath +---------- + 10005 +(1 row) + +-- error cases +select '_100'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '_100'::jsonpath; + ^ +select '100_'::jsonpath; +ERROR: trailing junk after numeric literal at or near "100_" of jsonpath input +LINE 1: select '100_'::jsonpath; + ^ +select '100__000'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '100__000'::jsonpath; + ^ +select '_1_000.5'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '_1_000.5'::jsonpath; + ^ +select '1_000_.5'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1_000_" of jsonpath input +LINE 1: select '1_000_.5'::jsonpath; + ^ +select '1_000._5'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1_000._" of jsonpath input +LINE 1: select '1_000._5'::jsonpath; + ^ +select '1_000.5_'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1_000.5_" of jsonpath input +LINE 1: select '1_000.5_'::jsonpath; + ^ +select '1_000.5e_1'::jsonpath; +ERROR: trailing junk after numeric literal at or near "1_000.5e" of jsonpath input +LINE 1: select '1_000.5e_1'::jsonpath; + ^ +-- underscore after prefix not allowed in JavaScript (but allowed in SQL) +select '0b_10_0101'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0b_10_0101'::jsonpath; + ^ +select '0o_273'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0o_273'::jsonpath; + ^ +select '0x_42F'::jsonpath; +ERROR: syntax error at end of jsonpath input +LINE 1: select '0x_42F'::jsonpath; + ^ +-- test non-error-throwing API +SELECT str as jsonpath, + pg_input_is_valid(str,'jsonpath') as ok, + errinfo.sql_error_code, + errinfo.message, + errinfo.detail, + errinfo.hint +FROM unnest(ARRAY['$ ? (@ like_regex "pattern" flag "smixq")'::text, + '$ ? 
(@ like_regex "pattern" flag "a")', + '@ + 1', + '00', + '1a']) str, + LATERAL pg_input_error_info(str, 'jsonpath') as errinfo; + jsonpath | ok | sql_error_code | message | detail | hint +-------------------------------------------+----+----------------+-----------------------------------------------------------------------+----------------------------------------------------------+------ + $ ? (@ like_regex "pattern" flag "smixq") | t | | | | + $ ? (@ like_regex "pattern" flag "a") | f | 42601 | invalid input syntax for type jsonpath | Unrecognized flag character "a" in LIKE_REGEX predicate. | + @ + 1 | f | 42601 | @ is not allowed in root expressions | | + 00 | f | 42601 | trailing junk after numeric literal at or near "00" of jsonpath input | | + 1a | f | 42601 | trailing junk after numeric literal at or near "1a" of jsonpath input | | +(5 rows) + diff --git a/src/test/regress/expected/jsonpath_encoding.out b/src/test/regress/expected/jsonpath_encoding.out new file mode 100644 index 0000000..d53affe --- /dev/null +++ b/src/test/regress/expected/jsonpath_encoding.out @@ -0,0 +1,180 @@ +-- +-- encoding-sensitive tests for jsonpath +-- +-- We provide expected-results files for UTF8 (jsonpath_encoding.out) +-- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise. +SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') + AS skip_test \gset +\if :skip_test +\quit +\endif +SELECT getdatabaseencoding(); -- just to label the results files + getdatabaseencoding +--------------------- + UTF8 +(1 row) + +-- checks for double-quoted values +-- basic unicode input +SELECT '"\u"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u" of jsonpath input +LINE 1: SELECT '"\u"'::jsonpath; + ^ +SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u00" of jsonpath input +LINE 1: SELECT '"\u00"'::jsonpath; + ^ +SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit +ERROR: invalid Unicode escape sequence at or near "\u000" of jsonpath input +LINE 1: SELECT '"\u000g"'::jsonpath; + ^ +SELECT '"\u0000"'::jsonpath; -- OK, legal escape +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT '"\u0000"'::jsonpath; + ^ +DETAIL: \u0000 cannot be converted to text. +SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK + jsonpath +---------- + "ê¯" +(1 row) + +-- handling of unicode surrogate pairs +select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8; + correct_in_utf8 +----------------- + "😄ðŸ¶" +(1 row) + +select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ud83d\ud83d"'::jsonpath; + ^ +DETAIL: Unicode high surrogate must not follow a high surrogate. +select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ude04\ud83d"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '"\ud83dX"'::jsonpath; -- orphan high surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ud83dX"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '"\ude04X"'::jsonpath; -- orphan low surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ude04X"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. 
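+-- Aside on the surrogate-pair tests above: a pair decodes to code point
+-- 0x10000 + (high - 0xD800) * 0x400 + (low - 0xDC00), so \ud83d\ude04 is
+-- U+1F604 and \ud83d\udc36 is U+1F436.  A rough check of that arithmetic
+-- (an illustrative sketch, not one of the queries under test):
+--   SELECT x'10000'::int + (x'D83D'::int - x'D800'::int) * 1024
+--                        + (x'DE04'::int - x'DC00'::int);  -- 128516 = 0x1F604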
+--handling of simple unicode escapes +select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8; + correct_in_utf8 +------------------------ + "the Copyright © sign" +(1 row) + +select '"dollar \u0024 character"'::jsonpath as correct_everywhere; + correct_everywhere +---------------------- + "dollar $ character" +(1 row) + +select '"dollar \\u0024 character"'::jsonpath as not_an_escape; + not_an_escape +---------------------------- + "dollar \\u0024 character" +(1 row) + +select '"null \u0000 escape"'::jsonpath as not_unescaped; +ERROR: unsupported Unicode escape sequence +LINE 1: select '"null \u0000 escape"'::jsonpath as not_unescaped; + ^ +DETAIL: \u0000 cannot be converted to text. +select '"null \\u0000 escape"'::jsonpath as not_an_escape; + not_an_escape +----------------------- + "null \\u0000 escape" +(1 row) + +-- checks for quoted key names +-- basic unicode input +SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u" of jsonpath input +LINE 1: SELECT '$."\u"'::jsonpath; + ^ +SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u00" of jsonpath input +LINE 1: SELECT '$."\u00"'::jsonpath; + ^ +SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit +ERROR: invalid Unicode escape sequence at or near "\u000" of jsonpath input +LINE 1: SELECT '$."\u000g"'::jsonpath; + ^ +SELECT '$."\u0000"'::jsonpath; -- OK, legal escape +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT '$."\u0000"'::jsonpath; + ^ +DETAIL: \u0000 cannot be converted to text. +SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK + jsonpath +---------- + $."ê¯" +(1 row) + +-- handling of unicode surrogate pairs +select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8; + correct_in_utf8 +----------------- + $."😄ðŸ¶" +(1 row) + +select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ud83d\ud83d"'::jsonpath; + ^ +DETAIL: Unicode high surrogate must not follow a high surrogate. +select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ude04\ud83d"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '$."\ud83dX"'::jsonpath; -- orphan high surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ud83dX"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '$."\ude04X"'::jsonpath; -- orphan low surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ude04X"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +--handling of simple unicode escapes +select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8; + correct_in_utf8 +-------------------------- + $."the Copyright © sign" +(1 row) + +select '$."dollar \u0024 character"'::jsonpath as correct_everywhere; + correct_everywhere +------------------------ + $."dollar $ character" +(1 row) + +select '$."dollar \\u0024 character"'::jsonpath as not_an_escape; + not_an_escape +------------------------------ + $."dollar \\u0024 character" +(1 row) + +select '$."null \u0000 escape"'::jsonpath as not_unescaped; +ERROR: unsupported Unicode escape sequence +LINE 1: select '$."null \u0000 escape"'::jsonpath as not_unescaped; + ^ +DETAIL: \u0000 cannot be converted to text. 
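+-- (With the backslash doubled, \\u0000 in the next query is the literal text
+-- backslash-u-0-0-0-0 rather than a Unicode escape, so it parses fine.)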
+select '$."null \\u0000 escape"'::jsonpath as not_an_escape; + not_an_escape +------------------------- + $."null \\u0000 escape" +(1 row) + diff --git a/src/test/regress/expected/jsonpath_encoding_1.out b/src/test/regress/expected/jsonpath_encoding_1.out new file mode 100644 index 0000000..287324f --- /dev/null +++ b/src/test/regress/expected/jsonpath_encoding_1.out @@ -0,0 +1,168 @@ +-- +-- encoding-sensitive tests for jsonpath +-- +-- We provide expected-results files for UTF8 (jsonpath_encoding.out) +-- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise. +SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') + AS skip_test \gset +\if :skip_test +\quit +\endif +SELECT getdatabaseencoding(); -- just to label the results files + getdatabaseencoding +--------------------- + SQL_ASCII +(1 row) + +-- checks for double-quoted values +-- basic unicode input +SELECT '"\u"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u" of jsonpath input +LINE 1: SELECT '"\u"'::jsonpath; + ^ +SELECT '"\u00"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u00" of jsonpath input +LINE 1: SELECT '"\u00"'::jsonpath; + ^ +SELECT '"\u000g"'::jsonpath; -- ERROR, g is not a hex digit +ERROR: invalid Unicode escape sequence at or near "\u000" of jsonpath input +LINE 1: SELECT '"\u000g"'::jsonpath; + ^ +SELECT '"\u0000"'::jsonpath; -- OK, legal escape +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT '"\u0000"'::jsonpath; + ^ +DETAIL: \u0000 cannot be converted to text. +SELECT '"\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK +ERROR: conversion between UTF8 and SQL_ASCII is not supported +LINE 1: SELECT '"\uaBcD"'::jsonpath; + ^ +-- handling of unicode surrogate pairs +select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8; +ERROR: conversion between UTF8 and SQL_ASCII is not supported +LINE 1: select '"\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_... + ^ +select '"\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ud83d\ud83d"'::jsonpath; + ^ +DETAIL: Unicode high surrogate must not follow a high surrogate. +select '"\ude04\ud83d"'::jsonpath; -- surrogates in wrong order +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ude04\ud83d"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '"\ud83dX"'::jsonpath; -- orphan high surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ud83dX"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '"\ude04X"'::jsonpath; -- orphan low surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '"\ude04X"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +--handling of simple unicode escapes +select '"the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8; +ERROR: conversion between UTF8 and SQL_ASCII is not supported +LINE 1: select '"the Copyright \u00a9 sign"'::jsonpath as correct_in... 
+ ^ +select '"dollar \u0024 character"'::jsonpath as correct_everywhere; + correct_everywhere +---------------------- + "dollar $ character" +(1 row) + +select '"dollar \\u0024 character"'::jsonpath as not_an_escape; + not_an_escape +---------------------------- + "dollar \\u0024 character" +(1 row) + +select '"null \u0000 escape"'::jsonpath as not_unescaped; +ERROR: unsupported Unicode escape sequence +LINE 1: select '"null \u0000 escape"'::jsonpath as not_unescaped; + ^ +DETAIL: \u0000 cannot be converted to text. +select '"null \\u0000 escape"'::jsonpath as not_an_escape; + not_an_escape +----------------------- + "null \\u0000 escape" +(1 row) + +-- checks for quoted key names +-- basic unicode input +SELECT '$."\u"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u" of jsonpath input +LINE 1: SELECT '$."\u"'::jsonpath; + ^ +SELECT '$."\u00"'::jsonpath; -- ERROR, incomplete escape +ERROR: invalid Unicode escape sequence at or near "\u00" of jsonpath input +LINE 1: SELECT '$."\u00"'::jsonpath; + ^ +SELECT '$."\u000g"'::jsonpath; -- ERROR, g is not a hex digit +ERROR: invalid Unicode escape sequence at or near "\u000" of jsonpath input +LINE 1: SELECT '$."\u000g"'::jsonpath; + ^ +SELECT '$."\u0000"'::jsonpath; -- OK, legal escape +ERROR: unsupported Unicode escape sequence +LINE 1: SELECT '$."\u0000"'::jsonpath; + ^ +DETAIL: \u0000 cannot be converted to text. +SELECT '$."\uaBcD"'::jsonpath; -- OK, uppercase and lower case both OK +ERROR: conversion between UTF8 and SQL_ASCII is not supported +LINE 1: SELECT '$."\uaBcD"'::jsonpath; + ^ +-- handling of unicode surrogate pairs +select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_in_utf8; +ERROR: conversion between UTF8 and SQL_ASCII is not supported +LINE 1: select '$."\ud83d\ude04\ud83d\udc36"'::jsonpath as correct_i... + ^ +select '$."\ud83d\ud83d"'::jsonpath; -- 2 high surrogates in a row +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ud83d\ud83d"'::jsonpath; + ^ +DETAIL: Unicode high surrogate must not follow a high surrogate. +select '$."\ude04\ud83d"'::jsonpath; -- surrogates in wrong order +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ude04\ud83d"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '$."\ud83dX"'::jsonpath; -- orphan high surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ud83dX"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +select '$."\ude04X"'::jsonpath; -- orphan low surrogate +ERROR: invalid input syntax for type jsonpath +LINE 1: select '$."\ude04X"'::jsonpath; + ^ +DETAIL: Unicode low surrogate must follow a high surrogate. +--handling of simple unicode escapes +select '$."the Copyright \u00a9 sign"'::jsonpath as correct_in_utf8; +ERROR: conversion between UTF8 and SQL_ASCII is not supported +LINE 1: select '$."the Copyright \u00a9 sign"'::jsonpath as correct_... + ^ +select '$."dollar \u0024 character"'::jsonpath as correct_everywhere; + correct_everywhere +------------------------ + $."dollar $ character" +(1 row) + +select '$."dollar \\u0024 character"'::jsonpath as not_an_escape; + not_an_escape +------------------------------ + $."dollar \\u0024 character" +(1 row) + +select '$."null \u0000 escape"'::jsonpath as not_unescaped; +ERROR: unsupported Unicode escape sequence +LINE 1: select '$."null \u0000 escape"'::jsonpath as not_unescaped; + ^ +DETAIL: \u0000 cannot be converted to text. 
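+-- (In a SQL_ASCII database only ASCII code points can come out of \u escapes:
+-- \u0024 worked above, while \u00a9 and the surrogate pairs failed with the
+-- "conversion between UTF8 and SQL_ASCII is not supported" error.)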
+select '$."null \\u0000 escape"'::jsonpath as not_an_escape; + not_an_escape +------------------------- + $."null \\u0000 escape" +(1 row) + diff --git a/src/test/regress/expected/jsonpath_encoding_2.out b/src/test/regress/expected/jsonpath_encoding_2.out new file mode 100644 index 0000000..bb71bfe --- /dev/null +++ b/src/test/regress/expected/jsonpath_encoding_2.out @@ -0,0 +1,9 @@ +-- +-- encoding-sensitive tests for jsonpath +-- +-- We provide expected-results files for UTF8 (jsonpath_encoding.out) +-- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise. +SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') + AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/largeobject.out b/src/test/regress/expected/largeobject.out new file mode 100644 index 0000000..4921dd7 --- /dev/null +++ b/src/test/regress/expected/largeobject.out @@ -0,0 +1,563 @@ +-- +-- Test large object support +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- ensure consistent test output regardless of the default bytea format +SET bytea_output TO escape; +-- Test ALTER LARGE OBJECT OWNER +CREATE ROLE regress_lo_user; +SELECT lo_create(42); + lo_create +----------- + 42 +(1 row) + +ALTER LARGE OBJECT 42 OWNER TO regress_lo_user; +-- Test GRANT, COMMENT as non-superuser +SET SESSION AUTHORIZATION regress_lo_user; +GRANT SELECT ON LARGE OBJECT 42 TO public; +COMMENT ON LARGE OBJECT 42 IS 'the ultimate answer'; +RESET SESSION AUTHORIZATION; +-- Test psql's \lo_list et al (we assume no other LOs exist yet) +\lo_list + Large objects + ID | Owner | Description +----+-----------------+--------------------- + 42 | regress_lo_user | the ultimate answer +(1 row) + +\lo_list+ + Large objects + ID | Owner | Access privileges | Description +----+-----------------+------------------------------------+--------------------- + 42 | regress_lo_user | regress_lo_user=rw/regress_lo_user+| the ultimate answer + | | =r/regress_lo_user | +(1 row) + +\lo_unlink 42 +\dl + Large objects + ID | Owner | Description +----+-------+------------- +(0 rows) + +-- Load a file +CREATE TABLE lotest_stash_values (loid oid, fd integer); +-- lo_creat(mode integer) returns oid +-- The mode arg to lo_creat is unused, some vestigal holdover from ancient times +-- returns the large object id +INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); +-- NOTE: large objects require transactions +BEGIN; +-- lo_open(lobjId oid, mode integer) returns integer +-- The mode parameter to lo_open uses two constants: +-- INV_WRITE = 0x20000 +-- INV_READ = 0x40000 +-- The return value is a file descriptor-like value which remains valid for the +-- transaction. +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- lowrite(fd integer, data bytea) returns integer +-- the integer is the number of bytes written +SELECT lowrite(fd, ' +I wandered lonely as a cloud +That floats on high o''er vales and hills, +When all at once I saw a crowd, +A host, of golden daffodils; +Beside the lake, beneath the trees, +Fluttering and dancing in the breeze. + +Continuous as the stars that shine +And twinkle on the milky way, +They stretched in never-ending line +Along the margin of a bay: +Ten thousand saw I at a glance, +Tossing their heads in sprightly dance. 
+ +The waves beside them danced; but they +Out-did the sparkling waves in glee: +A poet could not but be gay, +In such a jocund company: +I gazed--and gazed--but little thought +What wealth the show to me had brought: + +For oft, when on my couch I lie +In vacant or in pensive mood, +They flash upon that inward eye +Which is the bliss of solitude; +And then my heart with pleasure fills, +And dances with the daffodils. + + -- William Wordsworth +') FROM lotest_stash_values; + lowrite +--------- + 848 +(1 row) + +-- lo_close(fd integer) returns integer +-- return value is 0 for success, or <0 for error (actually only -1, but...) +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Copy to another large object. +-- Note: we intentionally don't remove the object created here; +-- it's left behind to help test pg_dump. +SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values +\gset +-- Add a comment to it, as well, for pg_dump/pg_upgrade testing. +COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; +-- Read out a portion +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- lo_lseek(fd integer, offset integer, whence integer) returns integer +-- offset is in bytes, whence is one of three values: +-- SEEK_SET (= 0) meaning relative to beginning +-- SEEK_CUR (= 1) meaning relative to current position +-- SEEK_END (= 2) meaning relative to end (offset better be negative) +-- returns current position in file +SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- loread(fd integer, len integer) returns bytea +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, of golden daffodils; +(1 row) + +SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; + lo_lseek +---------- + 113 +(1 row) + +SELECT lowrite(fd, 'n') FROM lotest_stash_values; + lowrite +--------- + 1 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 114 +(1 row) + +SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, on golden daffodils; +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test resource management +BEGIN; +SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; + lo_open +--------- + 0 +(1 row) + +ABORT; +\set filename :abs_builddir '/results/invalid/path' +\set dobody 'DECLARE loid oid; BEGIN ' +\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' +\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' +\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' +\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' +DO :'dobody'; +NOTICE: could not open file, as expected +-- Test truncation. 
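+-- lo_truncate(fd integer, len integer) returns integer (0 on success here).
+-- Truncating below the current size discards the tail; truncating above it
+-- (the second lo_truncate in this block) pads the new space with zero bytes,
+-- which is why the following loread returns a run of \000.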
+BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_truncate(fd, 11) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 15) FROM lotest_stash_values; + loread +---------------- + \012I wandered +(1 row) + +SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------------------------------------ + \000\000\000\000\000\000\000\000\000\000 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 10000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 10000 +(1 row) + +SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 5000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 5000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test 64-bit large object functions. +BEGIN; +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; + lowrite +--------- + 10 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967306 +(1 row) + +SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967296 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------ + offset:4GB +(1 row) + +SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 5000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 5000000000 +(1 row) + +SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 3000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 3000000000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- lo_unlink(lobjId oid) returns integer +-- return value appears to always be 1 +SELECT lo_unlink(loid) from lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\set filename :abs_srcdir '/data/tenk.data' +INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- verify length of large object +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 670800 +(1 row) + +-- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- edge case +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +-- this should get half of the value from page 0 and half from page 1 of the +-- large object +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +----------------------------------------------------------------- + 
AAA\011FBAAAA\011VVVVxx\0122513\01132\0111\0111\0113\01113\0111 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 2066 +(1 row) + +SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; + lo_lseek +---------- + 2040 +(1 row) + +SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; + lowrite +--------- + 16 +(1 row) + +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +----------------------------------------------------- + AAA\011FBAAAAabcdefghijklmnop1\0111\0113\01113\0111 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +\set filename :abs_builddir '/results/lotest.txt' +SELECT lo_export(loid, :'filename') FROM lotest_stash_values; + lo_export +----------- + 1 +(1 row) + +\lo_import :filename +\set newloid :LASTOID +-- just make sure \lo_export does not barf +\set filename :abs_builddir '/results/lotest2.txt' +\lo_export :newloid :filename +-- This is a hack to test that export/import are reversible +-- This uses knowledge about the inner workings of large object mechanism +-- which should not be used outside it. This makes it a HACK +SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) +EXCEPT +SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; + pageno | data +--------+------ +(0 rows) + +SELECT lo_unlink(loid) FROM lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\lo_unlink :newloid +\set filename :abs_builddir '/results/lotest.txt' +\lo_import :filename +\set newloid_1 :LASTOID +SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 +\gset +SELECT fipshash(lo_get(:newloid_1)) = fipshash(lo_get(:newloid_2)); + ?column? +---------- + t +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------- + 8800\0110\0110\0110\0110\0110\0110\011800 +(1 row) + +SELECT lo_get(:newloid_1, 10, 20); + lo_get +------------------------------------------- + \0110\0110\0110\011800\011800\0113800\011 +(1 row) + +SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------------- + 8800\011\257\257\257\2570\0110\0110\0110\011800 +(1 row) + +SELECT lo_put(:newloid_1, 4294967310, 'foo'); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1); +ERROR: large object read request is too large +SELECT lo_get(:newloid_1, 4294967294, 100); + lo_get +--------------------------------------------------------------------- + \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo +(1 row) + +\lo_unlink :newloid_1 +\lo_unlink :newloid_2 +-- This object is left in the database for pg_dump test purposes +SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid +\gset +SET bytea_output TO hex; +SELECT lo_get(:newloid); + lo_get +------------ + \xdeadbeef +(1 row) + +-- Create one more object that we leave behind for testing pg_dump/pg_upgrade; +-- this one intentionally has an OID in the system range +SELECT lo_create(2121); + lo_create +----------- + 2121 +(1 row) + +COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; +-- Test writes on large objects in read-only transactions +START TRANSACTION READ ONLY; +-- INV_READ ... ok +SELECT lo_open(2121, x'40000'::int); + lo_open +--------- + 0 +(1 row) + +-- INV_WRITE ... 
error +SELECT lo_open(2121, x'20000'::int); +ERROR: cannot execute lo_open(INV_WRITE) in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_create(42); +ERROR: cannot execute lo_create() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_creat(42); +ERROR: cannot execute lo_creat() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_unlink(42); +ERROR: cannot execute lo_unlink() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lowrite(42, 'x'); +ERROR: cannot execute lowrite() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_import(:'filename'); +ERROR: cannot execute lo_import() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_truncate(42, 0); +ERROR: cannot execute lo_truncate() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_truncate64(42, 0); +ERROR: cannot execute lo_truncate64() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_from_bytea(0, 'x'); +ERROR: cannot execute lo_from_bytea() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_put(42, 0, 'x'); +ERROR: cannot execute lo_put() in a read-only transaction +ROLLBACK; +-- Clean up +DROP TABLE lotest_stash_values; +DROP ROLE regress_lo_user; diff --git a/src/test/regress/expected/largeobject_1.out b/src/test/regress/expected/largeobject_1.out new file mode 100644 index 0000000..7172ddb --- /dev/null +++ b/src/test/regress/expected/largeobject_1.out @@ -0,0 +1,563 @@ +-- +-- Test large object support +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- ensure consistent test output regardless of the default bytea format +SET bytea_output TO escape; +-- Test ALTER LARGE OBJECT OWNER +CREATE ROLE regress_lo_user; +SELECT lo_create(42); + lo_create +----------- + 42 +(1 row) + +ALTER LARGE OBJECT 42 OWNER TO regress_lo_user; +-- Test GRANT, COMMENT as non-superuser +SET SESSION AUTHORIZATION regress_lo_user; +GRANT SELECT ON LARGE OBJECT 42 TO public; +COMMENT ON LARGE OBJECT 42 IS 'the ultimate answer'; +RESET SESSION AUTHORIZATION; +-- Test psql's \lo_list et al (we assume no other LOs exist yet) +\lo_list + Large objects + ID | Owner | Description +----+-----------------+--------------------- + 42 | regress_lo_user | the ultimate answer +(1 row) + +\lo_list+ + Large objects + ID | Owner | Access privileges | Description +----+-----------------+------------------------------------+--------------------- + 42 | regress_lo_user | regress_lo_user=rw/regress_lo_user+| the ultimate answer + | | =r/regress_lo_user | +(1 row) + +\lo_unlink 42 +\dl + Large objects + ID | Owner | Description +----+-------+------------- +(0 rows) + +-- Load a file +CREATE TABLE lotest_stash_values (loid oid, fd integer); +-- lo_creat(mode integer) returns oid +-- The mode arg to lo_creat is unused, some vestigal holdover from ancient times +-- returns the large object id +INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); +-- NOTE: large objects require transactions +BEGIN; +-- lo_open(lobjId oid, mode integer) returns integer +-- The mode parameter to lo_open uses two constants: +-- INV_WRITE = 0x20000 +-- INV_READ = 0x40000 +-- The return value is a file descriptor-like value which remains valid for the +-- transaction. 
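+-- (For reference, INV_READ | INV_WRITE = x'60000' = 393216, which is the
+-- value the CAST in the next statement produces.  A standalone check, shown
+-- only as an illustration:  SELECT CAST(x'20000' | x'40000' AS integer);)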
+UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- lowrite(fd integer, data bytea) returns integer +-- the integer is the number of bytes written +SELECT lowrite(fd, ' +I wandered lonely as a cloud +That floats on high o''er vales and hills, +When all at once I saw a crowd, +A host, of golden daffodils; +Beside the lake, beneath the trees, +Fluttering and dancing in the breeze. + +Continuous as the stars that shine +And twinkle on the milky way, +They stretched in never-ending line +Along the margin of a bay: +Ten thousand saw I at a glance, +Tossing their heads in sprightly dance. + +The waves beside them danced; but they +Out-did the sparkling waves in glee: +A poet could not but be gay, +In such a jocund company: +I gazed--and gazed--but little thought +What wealth the show to me had brought: + +For oft, when on my couch I lie +In vacant or in pensive mood, +They flash upon that inward eye +Which is the bliss of solitude; +And then my heart with pleasure fills, +And dances with the daffodils. + + -- William Wordsworth +') FROM lotest_stash_values; + lowrite +--------- + 848 +(1 row) + +-- lo_close(fd integer) returns integer +-- return value is 0 for success, or <0 for error (actually only -1, but...) +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Copy to another large object. +-- Note: we intentionally don't remove the object created here; +-- it's left behind to help test pg_dump. +SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values +\gset +-- Add a comment to it, as well, for pg_dump/pg_upgrade testing. +COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; +-- Read out a portion +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- lo_lseek(fd integer, offset integer, whence integer) returns integer +-- offset is in bytes, whence is one of three values: +-- SEEK_SET (= 0) meaning relative to beginning +-- SEEK_CUR (= 1) meaning relative to current position +-- SEEK_END (= 2) meaning relative to end (offset better be negative) +-- returns current position in file +SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- loread(fd integer, len integer) returns bytea +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, of golden daffodils; +(1 row) + +SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; + lo_lseek +---------- + 113 +(1 row) + +SELECT lowrite(fd, 'n') FROM lotest_stash_values; + lowrite +--------- + 1 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 114 +(1 row) + +SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, on golden daffodils; +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test resource management +BEGIN; +SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; + lo_open +--------- + 0 +(1 row) + +ABORT; +\set filename :abs_builddir '/results/invalid/path' +\set dobody 'DECLARE loid oid; BEGIN ' +\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' +\set dobody :dobody 'PERFORM 
lo_export(loid, ' :'filename' '); ' +\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' +\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' +DO :'dobody'; +NOTICE: could not open file, as expected +-- Test truncation. +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_truncate(fd, 11) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 15) FROM lotest_stash_values; + loread +---------------- + \012I wandered +(1 row) + +SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------------------------------------ + \000\000\000\000\000\000\000\000\000\000 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 10000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 10000 +(1 row) + +SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 5000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 5000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test 64-bit large object functions. +BEGIN; +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; + lowrite +--------- + 10 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967306 +(1 row) + +SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967296 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------ + offset:4GB +(1 row) + +SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 5000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 5000000000 +(1 row) + +SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 3000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 3000000000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- lo_unlink(lobjId oid) returns integer +-- return value appears to always be 1 +SELECT lo_unlink(loid) from lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\set filename :abs_srcdir '/data/tenk.data' +INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- verify length of large object +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 680800 +(1 row) + +-- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- edge case +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 
+(1 row) + +-- this should get half of the value from page 0 and half from page 1 of the +-- large object +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +-------------------------------------------------------------- + 44\011144\0111144\0114144\0119144\01188\01189\011SNAAAA\011F +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 2066 +(1 row) + +SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; + lo_lseek +---------- + 2040 +(1 row) + +SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; + lowrite +--------- + 16 +(1 row) + +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +-------------------------------------------------- + 44\011144\011114abcdefghijklmnop9\011SNAAAA\011F +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +\set filename :abs_builddir '/results/lotest.txt' +SELECT lo_export(loid, :'filename') FROM lotest_stash_values; + lo_export +----------- + 1 +(1 row) + +\lo_import :filename +\set newloid :LASTOID +-- just make sure \lo_export does not barf +\set filename :abs_builddir '/results/lotest2.txt' +\lo_export :newloid :filename +-- This is a hack to test that export/import are reversible +-- This uses knowledge about the inner workings of large object mechanism +-- which should not be used outside it. This makes it a HACK +SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) +EXCEPT +SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; + pageno | data +--------+------ +(0 rows) + +SELECT lo_unlink(loid) FROM lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\lo_unlink :newloid +\set filename :abs_builddir '/results/lotest.txt' +\lo_import :filename +\set newloid_1 :LASTOID +SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 +\gset +SELECT fipshash(lo_get(:newloid_1)) = fipshash(lo_get(:newloid_2)); + ?column? 
+---------- + t +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------- + 8800\0110\0110\0110\0110\0110\0110\011800 +(1 row) + +SELECT lo_get(:newloid_1, 10, 20); + lo_get +------------------------------------------- + \0110\0110\0110\011800\011800\0113800\011 +(1 row) + +SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------------- + 8800\011\257\257\257\2570\0110\0110\0110\011800 +(1 row) + +SELECT lo_put(:newloid_1, 4294967310, 'foo'); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1); +ERROR: large object read request is too large +SELECT lo_get(:newloid_1, 4294967294, 100); + lo_get +--------------------------------------------------------------------- + \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo +(1 row) + +\lo_unlink :newloid_1 +\lo_unlink :newloid_2 +-- This object is left in the database for pg_dump test purposes +SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid +\gset +SET bytea_output TO hex; +SELECT lo_get(:newloid); + lo_get +------------ + \xdeadbeef +(1 row) + +-- Create one more object that we leave behind for testing pg_dump/pg_upgrade; +-- this one intentionally has an OID in the system range +SELECT lo_create(2121); + lo_create +----------- + 2121 +(1 row) + +COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; +-- Test writes on large objects in read-only transactions +START TRANSACTION READ ONLY; +-- INV_READ ... ok +SELECT lo_open(2121, x'40000'::int); + lo_open +--------- + 0 +(1 row) + +-- INV_WRITE ... error +SELECT lo_open(2121, x'20000'::int); +ERROR: cannot execute lo_open(INV_WRITE) in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_create(42); +ERROR: cannot execute lo_create() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_creat(42); +ERROR: cannot execute lo_creat() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_unlink(42); +ERROR: cannot execute lo_unlink() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lowrite(42, 'x'); +ERROR: cannot execute lowrite() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_import(:'filename'); +ERROR: cannot execute lo_import() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_truncate(42, 0); +ERROR: cannot execute lo_truncate() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_truncate64(42, 0); +ERROR: cannot execute lo_truncate64() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_from_bytea(0, 'x'); +ERROR: cannot execute lo_from_bytea() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT lo_put(42, 0, 'x'); +ERROR: cannot execute lo_put() in a read-only transaction +ROLLBACK; +-- Clean up +DROP TABLE lotest_stash_values; +DROP ROLE regress_lo_user; diff --git a/src/test/regress/expected/limit.out b/src/test/regress/expected/limit.out new file mode 100644 index 0000000..a2cd0f9 --- /dev/null +++ b/src/test/regress/expected/limit.out @@ -0,0 +1,694 @@ +-- +-- LIMIT +-- Check the LIMIT/OFFSET feature of SELECT +-- +SELECT ''::text AS two, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 50 + ORDER BY unique1 LIMIT 2; + two | unique1 | unique2 | stringu1 +-----+---------+---------+---------- + | 51 | 76 | ZBAAAA + | 52 | 985 | ACAAAA +(2 rows) + +SELECT ''::text AS 
five, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 60 + ORDER BY unique1 LIMIT 5; + five | unique1 | unique2 | stringu1 +------+---------+---------+---------- + | 61 | 560 | JCAAAA + | 62 | 633 | KCAAAA + | 63 | 296 | LCAAAA + | 64 | 479 | MCAAAA + | 65 | 64 | NCAAAA +(5 rows) + +SELECT ''::text AS two, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 60 AND unique1 < 63 + ORDER BY unique1 LIMIT 5; + two | unique1 | unique2 | stringu1 +-----+---------+---------+---------- + | 61 | 560 | JCAAAA + | 62 | 633 | KCAAAA +(2 rows) + +SELECT ''::text AS three, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 100 + ORDER BY unique1 LIMIT 3 OFFSET 20; + three | unique1 | unique2 | stringu1 +-------+---------+---------+---------- + | 121 | 700 | REAAAA + | 122 | 519 | SEAAAA + | 123 | 777 | TEAAAA +(3 rows) + +SELECT ''::text AS zero, unique1, unique2, stringu1 + FROM onek WHERE unique1 < 50 + ORDER BY unique1 DESC LIMIT 8 OFFSET 99; + zero | unique1 | unique2 | stringu1 +------+---------+---------+---------- +(0 rows) + +SELECT ''::text AS eleven, unique1, unique2, stringu1 + FROM onek WHERE unique1 < 50 + ORDER BY unique1 DESC LIMIT 20 OFFSET 39; + eleven | unique1 | unique2 | stringu1 +--------+---------+---------+---------- + | 10 | 520 | KAAAAA + | 9 | 49 | JAAAAA + | 8 | 653 | IAAAAA + | 7 | 647 | HAAAAA + | 6 | 978 | GAAAAA + | 5 | 541 | FAAAAA + | 4 | 833 | EAAAAA + | 3 | 431 | DAAAAA + | 2 | 326 | CAAAAA + | 1 | 214 | BAAAAA + | 0 | 998 | AAAAAA +(11 rows) + +SELECT ''::text AS ten, unique1, unique2, stringu1 + FROM onek + ORDER BY unique1 OFFSET 990; + ten | unique1 | unique2 | stringu1 +-----+---------+---------+---------- + | 990 | 369 | CMAAAA + | 991 | 426 | DMAAAA + | 992 | 363 | EMAAAA + | 993 | 661 | FMAAAA + | 994 | 695 | GMAAAA + | 995 | 144 | HMAAAA + | 996 | 258 | IMAAAA + | 997 | 21 | JMAAAA + | 998 | 549 | KMAAAA + | 999 | 152 | LMAAAA +(10 rows) + +SELECT ''::text AS five, unique1, unique2, stringu1 + FROM onek + ORDER BY unique1 OFFSET 990 LIMIT 5; + five | unique1 | unique2 | stringu1 +------+---------+---------+---------- + | 990 | 369 | CMAAAA + | 991 | 426 | DMAAAA + | 992 | 363 | EMAAAA + | 993 | 661 | FMAAAA + | 994 | 695 | GMAAAA +(5 rows) + +SELECT ''::text AS five, unique1, unique2, stringu1 + FROM onek + ORDER BY unique1 LIMIT 5 OFFSET 900; + five | unique1 | unique2 | stringu1 +------+---------+---------+---------- + | 900 | 913 | QIAAAA + | 901 | 931 | RIAAAA + | 902 | 702 | SIAAAA + | 903 | 641 | TIAAAA + | 904 | 793 | UIAAAA +(5 rows) + +-- Test null limit and offset. 
The planner would discard a simple null +-- constant, so to ensure executor is exercised, do this: +select * from int8_tbl limit (case when random() < 0.5 then null::bigint end); + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +select * from int8_tbl offset (case when random() < 0.5 then null::bigint end); + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +-- Test assorted cases involving backwards fetch from a LIMIT plan node +begin; +declare c1 cursor for select * from int8_tbl limit 10; +fetch all in c1; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +fetch 1 in c1; + q1 | q2 +----+---- +(0 rows) + +fetch backward 1 in c1; + q1 | q2 +------------------+------------------- + 4567890123456789 | -4567890123456789 +(1 row) + +fetch backward all in c1; + q1 | q2 +------------------+------------------ + 4567890123456789 | 4567890123456789 + 4567890123456789 | 123 + 123 | 4567890123456789 + 123 | 456 +(4 rows) + +fetch backward 1 in c1; + q1 | q2 +----+---- +(0 rows) + +fetch all in c1; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +declare c2 cursor for select * from int8_tbl limit 3; +fetch all in c2; + q1 | q2 +------------------+------------------ + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 +(3 rows) + +fetch 1 in c2; + q1 | q2 +----+---- +(0 rows) + +fetch backward 1 in c2; + q1 | q2 +------------------+----- + 4567890123456789 | 123 +(1 row) + +fetch backward all in c2; + q1 | q2 +-----+------------------ + 123 | 4567890123456789 + 123 | 456 +(2 rows) + +fetch backward 1 in c2; + q1 | q2 +----+---- +(0 rows) + +fetch all in c2; + q1 | q2 +------------------+------------------ + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 +(3 rows) + +declare c3 cursor for select * from int8_tbl offset 3; +fetch all in c3; + q1 | q2 +------------------+------------------- + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(2 rows) + +fetch 1 in c3; + q1 | q2 +----+---- +(0 rows) + +fetch backward 1 in c3; + q1 | q2 +------------------+------------------- + 4567890123456789 | -4567890123456789 +(1 row) + +fetch backward all in c3; + q1 | q2 +------------------+------------------ + 4567890123456789 | 4567890123456789 +(1 row) + +fetch backward 1 in c3; + q1 | q2 +----+---- +(0 rows) + +fetch all in c3; + q1 | q2 +------------------+------------------- + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(2 rows) + +declare c4 cursor for select * from int8_tbl offset 10; +fetch all in c4; + q1 | q2 +----+---- +(0 rows) + +fetch 1 in c4; + q1 | q2 +----+---- +(0 rows) + +fetch backward 1 in c4; + q1 | q2 +----+---- +(0 rows) + +fetch backward all in c4; + q1 | q2 +----+---- +(0 rows) + +fetch backward 1 in c4; + q1 | q2 +----+---- +(0 rows) + +fetch all in c4; + q1 | q2 +----+---- +(0 rows) + +declare c5 cursor for select * from int8_tbl order by q1 fetch first 2 rows with ties; +fetch all in c5; + q1 | q2 
+-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +fetch 1 in c5; + q1 | q2 +----+---- +(0 rows) + +fetch backward 1 in c5; + q1 | q2 +-----+------------------ + 123 | 4567890123456789 +(1 row) + +fetch backward 1 in c5; + q1 | q2 +-----+----- + 123 | 456 +(1 row) + +fetch all in c5; + q1 | q2 +-----+------------------ + 123 | 4567890123456789 +(1 row) + +fetch backward all in c5; + q1 | q2 +-----+------------------ + 123 | 4567890123456789 + 123 | 456 +(2 rows) + +fetch all in c5; + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +fetch backward all in c5; + q1 | q2 +-----+------------------ + 123 | 4567890123456789 + 123 | 456 +(2 rows) + +rollback; +-- Stress test for variable LIMIT in conjunction with bounded-heap sorting +SELECT + (SELECT n + FROM (VALUES (1)) AS x, + (SELECT n FROM generate_series(1,10) AS n + ORDER BY n LIMIT 1 OFFSET s-1) AS y) AS z + FROM generate_series(1,10) AS s; + z +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +-- +-- Test behavior of volatile and set-returning functions in conjunction +-- with ORDER BY and LIMIT. +-- +create temp sequence testseq; +explain (verbose, costs off) +select unique1, unique2, nextval('testseq') + from tenk1 order by unique2 limit 10; + QUERY PLAN +---------------------------------------------------------------- + Limit + Output: unique1, unique2, (nextval('testseq'::regclass)) + -> Index Scan using tenk1_unique2 on public.tenk1 + Output: unique1, unique2, nextval('testseq'::regclass) +(4 rows) + +select unique1, unique2, nextval('testseq') + from tenk1 order by unique2 limit 10; + unique1 | unique2 | nextval +---------+---------+--------- + 8800 | 0 | 1 + 1891 | 1 | 2 + 3420 | 2 | 3 + 9850 | 3 | 4 + 7164 | 4 | 5 + 8009 | 5 | 6 + 5057 | 6 | 7 + 6701 | 7 | 8 + 4321 | 8 | 9 + 3043 | 9 | 10 +(10 rows) + +select currval('testseq'); + currval +--------- + 10 +(1 row) + +explain (verbose, costs off) +select unique1, unique2, nextval('testseq') + from tenk1 order by tenthous limit 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + Output: unique1, unique2, (nextval('testseq'::regclass)), tenthous + -> Result + Output: unique1, unique2, nextval('testseq'::regclass), tenthous + -> Sort + Output: unique1, unique2, tenthous + Sort Key: tenk1.tenthous + -> Seq Scan on public.tenk1 + Output: unique1, unique2, tenthous +(9 rows) + +select unique1, unique2, nextval('testseq') + from tenk1 order by tenthous limit 10; + unique1 | unique2 | nextval +---------+---------+--------- + 0 | 9998 | 11 + 1 | 2838 | 12 + 2 | 2716 | 13 + 3 | 5679 | 14 + 4 | 1621 | 15 + 5 | 5557 | 16 + 6 | 2855 | 17 + 7 | 8518 | 18 + 8 | 5435 | 19 + 9 | 4463 | 20 +(10 rows) + +select currval('testseq'); + currval +--------- + 20 +(1 row) + +explain (verbose, costs off) +select unique1, unique2, generate_series(1,10) + from tenk1 order by unique2 limit 7; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + Output: unique1, unique2, (generate_series(1, 10)) + -> ProjectSet + Output: unique1, unique2, generate_series(1, 10) + -> Index Scan using tenk1_unique2 on public.tenk1 + Output: unique1, unique2, two, four, ten, twenty, hundred, thousand, twothousand, fivethous, tenthous, odd, even, stringu1, stringu2, string4 +(6 rows) + +select unique1, unique2, generate_series(1,10) + from tenk1 order by unique2 limit 7; + unique1 | 
unique2 | generate_series +---------+---------+----------------- + 8800 | 0 | 1 + 8800 | 0 | 2 + 8800 | 0 | 3 + 8800 | 0 | 4 + 8800 | 0 | 5 + 8800 | 0 | 6 + 8800 | 0 | 7 +(7 rows) + +explain (verbose, costs off) +select unique1, unique2, generate_series(1,10) + from tenk1 order by tenthous limit 7; + QUERY PLAN +-------------------------------------------------------------------- + Limit + Output: unique1, unique2, (generate_series(1, 10)), tenthous + -> ProjectSet + Output: unique1, unique2, generate_series(1, 10), tenthous + -> Sort + Output: unique1, unique2, tenthous + Sort Key: tenk1.tenthous + -> Seq Scan on public.tenk1 + Output: unique1, unique2, tenthous +(9 rows) + +select unique1, unique2, generate_series(1,10) + from tenk1 order by tenthous limit 7; + unique1 | unique2 | generate_series +---------+---------+----------------- + 0 | 9998 | 1 + 0 | 9998 | 2 + 0 | 9998 | 3 + 0 | 9998 | 4 + 0 | 9998 | 5 + 0 | 9998 | 6 + 0 | 9998 | 7 +(7 rows) + +-- use of random() is to keep planner from folding the expressions together +explain (verbose, costs off) +select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + ProjectSet + Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2) + -> Result +(3 rows) + +select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2; + s1 | s2 +----+---- + 0 | 0 + 1 | 1 + 2 | 2 +(3 rows) + +explain (verbose, costs off) +select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2 +order by s2 desc; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Sort + Output: (generate_series(0, 2)), (generate_series(((random() * '0.1'::double precision))::integer, 2)) + Sort Key: (generate_series(((random() * '0.1'::double precision))::integer, 2)) DESC + -> ProjectSet + Output: generate_series(0, 2), generate_series(((random() * '0.1'::double precision))::integer, 2) + -> Result +(6 rows) + +select generate_series(0,2) as s1, generate_series((random()*.1)::int,2) as s2 +order by s2 desc; + s1 | s2 +----+---- + 2 | 2 + 1 | 1 + 0 | 0 +(3 rows) + +-- test for failure to set all aggregates' aggtranstype +explain (verbose, costs off) +select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2 + from tenk1 group by thousand order by thousand limit 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Limit + Output: (sum(tenthous)), (((sum(tenthous))::double precision + (random() * '0'::double precision))), thousand + -> GroupAggregate + Output: sum(tenthous), ((sum(tenthous))::double precision + (random() * '0'::double precision)), thousand + Group Key: tenk1.thousand + -> Index Only Scan using tenk1_thous_tenthous on public.tenk1 + Output: thousand, tenthous +(7 rows) + +select sum(tenthous) as s1, sum(tenthous) + random()*0 as s2 + from tenk1 group by thousand order by thousand limit 3; + s1 | s2 +-------+------- + 45000 | 45000 + 45010 | 45010 + 45020 | 45020 +(3 rows) + +-- +-- FETCH FIRST +-- Check the WITH TIES clause +-- +SELECT thousand + FROM onek WHERE thousand < 5 + ORDER BY thousand FETCH FIRST 2 ROW WITH TIES; + thousand +---------- + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 +(10 rows) + +SELECT thousand + FROM onek WHERE thousand < 5 + ORDER BY thousand FETCH FIRST ROWS WITH TIES; + thousand 
+---------- + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 +(10 rows) + +SELECT thousand + FROM onek WHERE thousand < 5 + ORDER BY thousand FETCH FIRST 1 ROW WITH TIES; + thousand +---------- + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 +(10 rows) + +SELECT thousand + FROM onek WHERE thousand < 5 + ORDER BY thousand FETCH FIRST 2 ROW ONLY; + thousand +---------- + 0 + 0 +(2 rows) + +-- SKIP LOCKED and WITH TIES are incompatible +SELECT thousand + FROM onek WHERE thousand < 5 + ORDER BY thousand FETCH FIRST 1 ROW WITH TIES FOR UPDATE SKIP LOCKED; +ERROR: SKIP LOCKED and WITH TIES options cannot be used together +-- should fail +SELECT ''::text AS two, unique1, unique2, stringu1 + FROM onek WHERE unique1 > 50 + FETCH FIRST 2 ROW WITH TIES; +ERROR: WITH TIES cannot be specified without ORDER BY clause +-- test ruleutils +CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995 + ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10; +\d+ limit_thousand_v_1 + View "public.limit_thousand_v_1" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+---------+-----------+----------+---------+---------+------------- + thousand | integer | | | | plain | +View definition: + SELECT thousand + FROM onek + WHERE thousand < 995 + ORDER BY thousand + OFFSET 10 + FETCH FIRST 5 ROWS WITH TIES; + +CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995 + ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY; +\d+ limit_thousand_v_2 + View "public.limit_thousand_v_2" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+---------+-----------+----------+---------+---------+------------- + thousand | integer | | | | plain | +View definition: + SELECT thousand + FROM onek + WHERE thousand < 995 + ORDER BY thousand + OFFSET 10 + LIMIT 5; + +CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 + ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails +ERROR: row count cannot be null in FETCH FIRST ... 
WITH TIES clause +CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 + ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES; +\d+ limit_thousand_v_3 + View "public.limit_thousand_v_3" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+---------+-----------+----------+---------+---------+------------- + thousand | integer | | | | plain | +View definition: + SELECT thousand + FROM onek + WHERE thousand < 995 + ORDER BY thousand + FETCH FIRST (NULL::integer + 1) ROWS WITH TIES; + +CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995 + ORDER BY thousand FETCH FIRST NULL ROWS ONLY; +\d+ limit_thousand_v_4 + View "public.limit_thousand_v_4" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+---------+-----------+----------+---------+---------+------------- + thousand | integer | | | | plain | +View definition: + SELECT thousand + FROM onek + WHERE thousand < 995 + ORDER BY thousand + LIMIT ALL; + +-- leave these views diff --git a/src/test/regress/expected/line.out b/src/test/regress/expected/line.out new file mode 100644 index 0000000..e7d4332 --- /dev/null +++ b/src/test/regress/expected/line.out @@ -0,0 +1,148 @@ +-- +-- LINE +-- Infinite lines +-- +--DROP TABLE LINE_TBL; +CREATE TABLE LINE_TBL (s line); +INSERT INTO LINE_TBL VALUES ('{0,-1,5}'); -- A == 0 +INSERT INTO LINE_TBL VALUES ('{1,0,5}'); -- B == 0 +INSERT INTO LINE_TBL VALUES ('{0,3,0}'); -- A == C == 0 +INSERT INTO LINE_TBL VALUES (' (0,0), (6,6)'); +INSERT INTO LINE_TBL VALUES ('10,-10 ,-5,-4'); +INSERT INTO LINE_TBL VALUES ('[-1e6,2e2,3e5, -4e1]'); +INSERT INTO LINE_TBL VALUES ('{3,NaN,5}'); +INSERT INTO LINE_TBL VALUES ('{NaN,NaN,NaN}'); +-- horizontal +INSERT INTO LINE_TBL VALUES ('[(1,3),(2,3)]'); +-- vertical +INSERT INTO LINE_TBL VALUES (line(point '(3,1)', point '(3,2)')); +-- bad values for parser testing +INSERT INTO LINE_TBL VALUES ('{}'); +ERROR: invalid input syntax for type line: "{}" +LINE 1: INSERT INTO LINE_TBL VALUES ('{}'); + ^ +INSERT INTO LINE_TBL VALUES ('{0'); +ERROR: invalid input syntax for type line: "{0" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0'); + ^ +INSERT INTO LINE_TBL VALUES ('{0,0}'); +ERROR: invalid input syntax for type line: "{0,0}" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0}'); + ^ +INSERT INTO LINE_TBL VALUES ('{0,0,1'); +ERROR: invalid input syntax for type line: "{0,0,1" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0,1'); + ^ +INSERT INTO LINE_TBL VALUES ('{0,0,1}'); +ERROR: invalid line specification: A and B cannot both be zero +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0,1}'); + ^ +INSERT INTO LINE_TBL VALUES ('{0,0,1} x'); +ERROR: invalid input syntax for type line: "{0,0,1} x" +LINE 1: INSERT INTO LINE_TBL VALUES ('{0,0,1} x'); + ^ +INSERT INTO LINE_TBL VALUES ('(3asdf,2 ,3,4r2)'); +ERROR: invalid input syntax for type line: "(3asdf,2 ,3,4r2)" +LINE 1: INSERT INTO LINE_TBL VALUES ('(3asdf,2 ,3,4r2)'); + ^ +INSERT INTO LINE_TBL VALUES ('[1,2,3, 4'); +ERROR: invalid input syntax for type line: "[1,2,3, 4" +LINE 1: INSERT INTO LINE_TBL VALUES ('[1,2,3, 4'); + ^ +INSERT INTO LINE_TBL VALUES ('[(,2),(3,4)]'); +ERROR: invalid input syntax for type line: "[(,2),(3,4)]" +LINE 1: INSERT INTO LINE_TBL VALUES ('[(,2),(3,4)]'); + ^ +INSERT INTO LINE_TBL VALUES ('[(1,2),(3,4)'); +ERROR: invalid input syntax for type line: "[(1,2),(3,4)" +LINE 1: INSERT INTO LINE_TBL VALUES ('[(1,2),(3,4)'); + ^ +INSERT INTO LINE_TBL VALUES ('[(1,2),(1,2)]'); +ERROR: invalid line specification: 
must be two distinct points +LINE 1: INSERT INTO LINE_TBL VALUES ('[(1,2),(1,2)]'); + ^ +INSERT INTO LINE_TBL VALUES (line(point '(1,0)', point '(1,0)')); +ERROR: invalid line specification: must be two distinct points +select * from LINE_TBL; + s +------------------------------------------------ + {0,-1,5} + {1,0,5} + {0,3,0} + {1,-1,0} + {-0.4,-1,-6} + {-0.0001846153846153846,-1,15.384615384615387} + {3,NaN,5} + {NaN,NaN,NaN} + {0,-1,3} + {-1,0,3} +(10 rows) + +select '{nan, 1, nan}'::line = '{nan, 1, nan}'::line as true, + '{nan, 1, nan}'::line = '{nan, 2, nan}'::line as false; + true | false +------+------- + t | f +(1 row) + +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('{1, 1}', 'line'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('{1, 1}', 'line'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + invalid input syntax for type line: "{1, 1}" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('{0, 0, 0}', 'line'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('{0, 0, 0}', 'line'); + message | detail | hint | sql_error_code +---------------------------------------------------------+--------+------+---------------- + invalid line specification: A and B cannot both be zero | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('{1, 1, a}', 'line'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('{1, 1, a}', 'line'); + message | detail | hint | sql_error_code +-------------------------------------------------+--------+------+---------------- + invalid input syntax for type line: "{1, 1, a}" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('{1, 1, 1e400}', 'line'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('{1, 1, 1e400}', 'line'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + "1e400" is out of range for type double precision | | | 22003 +(1 row) + +SELECT pg_input_is_valid('(1, 1), (1, 1e400)', 'line'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(1, 1), (1, 1e400)', 'line'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + "1e400" is out of range for type double precision | | | 22003 +(1 row) + diff --git a/src/test/regress/expected/lock.out b/src/test/regress/expected/lock.out new file mode 100644 index 0000000..ad137d3 --- /dev/null +++ b/src/test/regress/expected/lock.out @@ -0,0 +1,252 @@ +-- +-- Test the LOCK statement +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +-- Setup +CREATE SCHEMA lock_schema1; +SET search_path = lock_schema1; +CREATE TABLE lock_tbl1 (a BIGINT); +CREATE TABLE lock_tbl1a (a BIGINT); +CREATE VIEW lock_view1 AS SELECT * FROM lock_tbl1; +CREATE VIEW lock_view2(a,b) AS SELECT * FROM lock_tbl1, lock_tbl1a; +CREATE VIEW lock_view3 AS SELECT * from lock_view2; +CREATE VIEW lock_view4 AS SELECT (select a from lock_tbl1a limit 1) from lock_tbl1; +CREATE VIEW lock_view5 AS SELECT * from lock_tbl1 where a in (select * from lock_tbl1a); +CREATE VIEW lock_view6 AS SELECT * from (select * from lock_tbl1) sub; +CREATE ROLE regress_rol_lock1; +ALTER 
ROLE regress_rol_lock1 SET search_path = lock_schema1; +GRANT USAGE ON SCHEMA lock_schema1 TO regress_rol_lock1; +-- Try all valid lock options; also try omitting the optional TABLE keyword. +BEGIN TRANSACTION; +LOCK TABLE lock_tbl1 IN ACCESS SHARE MODE; +LOCK lock_tbl1 IN ROW SHARE MODE; +LOCK TABLE lock_tbl1 IN ROW EXCLUSIVE MODE; +LOCK TABLE lock_tbl1 IN SHARE UPDATE EXCLUSIVE MODE; +LOCK TABLE lock_tbl1 IN SHARE MODE; +LOCK lock_tbl1 IN SHARE ROW EXCLUSIVE MODE; +LOCK TABLE lock_tbl1 IN EXCLUSIVE MODE; +LOCK TABLE lock_tbl1 IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +-- Try using NOWAIT along with valid options. +BEGIN TRANSACTION; +LOCK TABLE lock_tbl1 IN ACCESS SHARE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN ROW SHARE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN ROW EXCLUSIVE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN SHARE UPDATE EXCLUSIVE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN SHARE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN SHARE ROW EXCLUSIVE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN EXCLUSIVE MODE NOWAIT; +LOCK TABLE lock_tbl1 IN ACCESS EXCLUSIVE MODE NOWAIT; +ROLLBACK; +-- Verify that we can lock views. +BEGIN TRANSACTION; +LOCK TABLE lock_view1 IN EXCLUSIVE MODE; +-- lock_view1 and lock_tbl1 are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_view1 +(2 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view2 IN EXCLUSIVE MODE; +-- lock_view1, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view2 +(3 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view3 IN EXCLUSIVE MODE; +-- lock_view3, lock_view2, lock_tbl1, and lock_tbl1a are locked recursively. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view2 + lock_view3 +(4 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view4 IN EXCLUSIVE MODE; +-- lock_view4, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view4 +(3 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view5 IN EXCLUSIVE MODE; +-- lock_view5, lock_tbl1, and lock_tbl1a are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl1a + lock_view5 +(3 rows) + +ROLLBACK; +BEGIN TRANSACTION; +LOCK TABLE lock_view6 IN EXCLUSIVE MODE; +-- lock_view6 an lock_tbl1 are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'ExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_view6 +(2 rows) + +ROLLBACK; +-- Verify that we cope with infinite recursion in view definitions. 
+CREATE OR REPLACE VIEW lock_view2 AS SELECT * from lock_view3; +BEGIN TRANSACTION; +LOCK TABLE lock_view2 IN EXCLUSIVE MODE; +ROLLBACK; +CREATE VIEW lock_view7 AS SELECT * from lock_view2; +BEGIN TRANSACTION; +LOCK TABLE lock_view7 IN EXCLUSIVE MODE; +ROLLBACK; +-- Verify that we can lock a table with inheritance children. +CREATE TABLE lock_tbl2 (b BIGINT) INHERITS (lock_tbl1); +CREATE TABLE lock_tbl3 () INHERITS (lock_tbl2); +BEGIN TRANSACTION; +LOCK TABLE lock_tbl1 * IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +-- Child tables are locked without granting explicit permission to do so as +-- long as we have permission to lock the parent. +GRANT UPDATE ON TABLE lock_tbl1 TO regress_rol_lock1; +SET ROLE regress_rol_lock1; +-- fail when child locked directly +BEGIN; +LOCK TABLE lock_tbl2; +ERROR: permission denied for table lock_tbl2 +ROLLBACK; +BEGIN; +LOCK TABLE lock_tbl1 * IN ACCESS EXCLUSIVE MODE; +ROLLBACK; +BEGIN; +LOCK TABLE ONLY lock_tbl1; +ROLLBACK; +RESET ROLE; +REVOKE UPDATE ON TABLE lock_tbl1 FROM regress_rol_lock1; +-- Tables referred to by views are locked without explicit permission to do so +-- as long as we have permission to lock the view itself. +SET ROLE regress_rol_lock1; +-- fail without permissions on the view +BEGIN; +LOCK TABLE lock_view1; +ERROR: permission denied for view lock_view1 +ROLLBACK; +RESET ROLE; +GRANT UPDATE ON TABLE lock_view1 TO regress_rol_lock1; +SET ROLE regress_rol_lock1; +BEGIN; +LOCK TABLE lock_view1 IN ACCESS EXCLUSIVE MODE; +-- lock_view1 and lock_tbl1 (plus children lock_tbl2 and lock_tbl3) are locked. +select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'AccessExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl2 + lock_tbl3 + lock_view1 +(4 rows) + +ROLLBACK; +RESET ROLE; +REVOKE UPDATE ON TABLE lock_view1 FROM regress_rol_lock1; +-- Tables referred to by security invoker views require explicit permission to +-- be locked. +CREATE VIEW lock_view8 WITH (security_invoker) AS SELECT * FROM lock_tbl1; +SET ROLE regress_rol_lock1; +-- fail without permissions on the view +BEGIN; +LOCK TABLE lock_view8; +ERROR: permission denied for view lock_view8 +ROLLBACK; +RESET ROLE; +GRANT UPDATE ON TABLE lock_view8 TO regress_rol_lock1; +SET ROLE regress_rol_lock1; +-- fail without permissions on the table referenced by the view +BEGIN; +LOCK TABLE lock_view8; +ERROR: permission denied for table lock_tbl1 +ROLLBACK; +RESET ROLE; +GRANT UPDATE ON TABLE lock_tbl1 TO regress_rol_lock1; +BEGIN; +LOCK TABLE lock_view8 IN ACCESS EXCLUSIVE MODE; +-- lock_view8 and lock_tbl1 (plus children lock_tbl2 and lock_tbl3) are locked. 
+select relname from pg_locks l, pg_class c + where l.relation = c.oid and relname like '%lock_%' and mode = 'AccessExclusiveLock' + order by relname; + relname +------------ + lock_tbl1 + lock_tbl2 + lock_tbl3 + lock_view8 +(4 rows) + +ROLLBACK; +RESET ROLE; +REVOKE UPDATE ON TABLE lock_view8 FROM regress_rol_lock1; +-- +-- Clean up +-- +DROP VIEW lock_view8; +DROP VIEW lock_view7; +DROP VIEW lock_view6; +DROP VIEW lock_view5; +DROP VIEW lock_view4; +DROP VIEW lock_view3 CASCADE; +NOTICE: drop cascades to view lock_view2 +DROP VIEW lock_view1; +DROP TABLE lock_tbl3; +DROP TABLE lock_tbl2; +DROP TABLE lock_tbl1; +DROP TABLE lock_tbl1a; +DROP SCHEMA lock_schema1 CASCADE; +DROP ROLE regress_rol_lock1; +-- atomic ops tests +RESET search_path; +CREATE FUNCTION test_atomic_ops() + RETURNS bool + AS :'regresslib' + LANGUAGE C; +SELECT test_atomic_ops(); + test_atomic_ops +----------------- + t +(1 row) + diff --git a/src/test/regress/expected/lseg.out b/src/test/regress/expected/lseg.out new file mode 100644 index 0000000..c0375ac --- /dev/null +++ b/src/test/regress/expected/lseg.out @@ -0,0 +1,57 @@ +-- +-- LSEG +-- Line segments +-- +--DROP TABLE LSEG_TBL; +CREATE TABLE LSEG_TBL (s lseg); +INSERT INTO LSEG_TBL VALUES ('[(1,2),(3,4)]'); +INSERT INTO LSEG_TBL VALUES ('(0,0),(6,6)'); +INSERT INTO LSEG_TBL VALUES ('10,-10 ,-3,-4'); +INSERT INTO LSEG_TBL VALUES ('[-1e6,2e2,3e5, -4e1]'); +INSERT INTO LSEG_TBL VALUES (lseg(point(11, 22), point(33,44))); +INSERT INTO LSEG_TBL VALUES ('[(-10,2),(-10,3)]'); -- vertical +INSERT INTO LSEG_TBL VALUES ('[(0,-20),(30,-20)]'); -- horizontal +INSERT INTO LSEG_TBL VALUES ('[(NaN,1),(NaN,90)]'); -- NaN +-- bad values for parser testing +INSERT INTO LSEG_TBL VALUES ('(3asdf,2 ,3,4r2)'); +ERROR: invalid input syntax for type lseg: "(3asdf,2 ,3,4r2)" +LINE 1: INSERT INTO LSEG_TBL VALUES ('(3asdf,2 ,3,4r2)'); + ^ +INSERT INTO LSEG_TBL VALUES ('[1,2,3, 4'); +ERROR: invalid input syntax for type lseg: "[1,2,3, 4" +LINE 1: INSERT INTO LSEG_TBL VALUES ('[1,2,3, 4'); + ^ +INSERT INTO LSEG_TBL VALUES ('[(,2),(3,4)]'); +ERROR: invalid input syntax for type lseg: "[(,2),(3,4)]" +LINE 1: INSERT INTO LSEG_TBL VALUES ('[(,2),(3,4)]'); + ^ +INSERT INTO LSEG_TBL VALUES ('[(1,2),(3,4)'); +ERROR: invalid input syntax for type lseg: "[(1,2),(3,4)" +LINE 1: INSERT INTO LSEG_TBL VALUES ('[(1,2),(3,4)'); + ^ +select * from LSEG_TBL; + s +------------------------------- + [(1,2),(3,4)] + [(0,0),(6,6)] + [(10,-10),(-3,-4)] + [(-1000000,200),(300000,-40)] + [(11,22),(33,44)] + [(-10,2),(-10,3)] + [(0,-20),(30,-20)] + [(NaN,1),(NaN,90)] +(8 rows) + +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('[(1,2),(3)]', 'lseg'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('[(1,2),(3)]', 'lseg'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + invalid input syntax for type lseg: "[(1,2),(3)]" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/macaddr.out b/src/test/regress/expected/macaddr.out new file mode 100644 index 0000000..8d5b221 --- /dev/null +++ b/src/test/regress/expected/macaddr.out @@ -0,0 +1,185 @@ +-- +-- macaddr +-- +CREATE TABLE macaddr_data (a int, b macaddr); +INSERT INTO macaddr_data VALUES (1, '08:00:2b:01:02:03'); +INSERT INTO macaddr_data VALUES (2, '08-00-2b-01-02-03'); +INSERT INTO macaddr_data VALUES (3, '08002b:010203'); +INSERT INTO macaddr_data VALUES (4, '08002b-010203'); +INSERT INTO macaddr_data 
VALUES (5, '0800.2b01.0203'); +INSERT INTO macaddr_data VALUES (6, '0800-2b01-0203'); +INSERT INTO macaddr_data VALUES (7, '08002b010203'); +INSERT INTO macaddr_data VALUES (8, '0800:2b01:0203'); -- invalid +ERROR: invalid input syntax for type macaddr: "0800:2b01:0203" +LINE 1: INSERT INTO macaddr_data VALUES (8, '0800:2b01:0203'); + ^ +INSERT INTO macaddr_data VALUES (9, 'not even close'); -- invalid +ERROR: invalid input syntax for type macaddr: "not even close" +LINE 1: INSERT INTO macaddr_data VALUES (9, 'not even close'); + ^ +INSERT INTO macaddr_data VALUES (10, '08:00:2b:01:02:04'); +INSERT INTO macaddr_data VALUES (11, '08:00:2b:01:02:02'); +INSERT INTO macaddr_data VALUES (12, '08:00:2a:01:02:03'); +INSERT INTO macaddr_data VALUES (13, '08:00:2c:01:02:03'); +INSERT INTO macaddr_data VALUES (14, '08:00:2a:01:02:04'); +SELECT * FROM macaddr_data; + a | b +----+------------------- + 1 | 08:00:2b:01:02:03 + 2 | 08:00:2b:01:02:03 + 3 | 08:00:2b:01:02:03 + 4 | 08:00:2b:01:02:03 + 5 | 08:00:2b:01:02:03 + 6 | 08:00:2b:01:02:03 + 7 | 08:00:2b:01:02:03 + 10 | 08:00:2b:01:02:04 + 11 | 08:00:2b:01:02:02 + 12 | 08:00:2a:01:02:03 + 13 | 08:00:2c:01:02:03 + 14 | 08:00:2a:01:02:04 +(12 rows) + +CREATE INDEX macaddr_data_btree ON macaddr_data USING btree (b); +CREATE INDEX macaddr_data_hash ON macaddr_data USING hash (b); +SELECT a, b, trunc(b) FROM macaddr_data ORDER BY 2, 1; + a | b | trunc +----+-------------------+------------------- + 12 | 08:00:2a:01:02:03 | 08:00:2a:00:00:00 + 14 | 08:00:2a:01:02:04 | 08:00:2a:00:00:00 + 11 | 08:00:2b:01:02:02 | 08:00:2b:00:00:00 + 1 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 2 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 3 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 4 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 5 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 6 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 7 | 08:00:2b:01:02:03 | 08:00:2b:00:00:00 + 10 | 08:00:2b:01:02:04 | 08:00:2b:00:00:00 + 13 | 08:00:2c:01:02:03 | 08:00:2c:00:00:00 +(12 rows) + +SELECT b < '08:00:2b:01:02:04' FROM macaddr_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b > '08:00:2b:01:02:04' FROM macaddr_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b > '08:00:2b:01:02:03' FROM macaddr_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b <= '08:00:2b:01:02:04' FROM macaddr_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b >= '08:00:2b:01:02:04' FROM macaddr_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b = '08:00:2b:01:02:03' FROM macaddr_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b <> '08:00:2b:01:02:04' FROM macaddr_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b <> '08:00:2b:01:02:03' FROM macaddr_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT ~b FROM macaddr_data; + ?column? +------------------- + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fc + f7:ff:d4:fe:fd:fb + f7:ff:d4:fe:fd:fd + f7:ff:d5:fe:fd:fc + f7:ff:d3:fe:fd:fc + f7:ff:d5:fe:fd:fb +(12 rows) + +SELECT b & '00:00:00:ff:ff:ff' FROM macaddr_data; + ?column? 
+------------------- + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:04 + 00:00:00:01:02:02 + 00:00:00:01:02:03 + 00:00:00:01:02:03 + 00:00:00:01:02:04 +(12 rows) + +SELECT b | '01:02:03:04:05:06' FROM macaddr_data; + ?column? +------------------- + 09:02:2b:05:07:07 + 09:02:2b:05:07:07 + 09:02:2b:05:07:07 + 09:02:2b:05:07:07 + 09:02:2b:05:07:07 + 09:02:2b:05:07:07 + 09:02:2b:05:07:07 + 09:02:2b:05:07:06 + 09:02:2b:05:07:06 + 09:02:2b:05:07:07 + 09:02:2f:05:07:07 + 09:02:2b:05:07:06 +(12 rows) + +DROP TABLE macaddr_data; +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('08:00:2b:01:02:ZZ', 'macaddr'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('08:00:2b:01:02:ZZ', 'macaddr'); + message | detail | hint | sql_error_code +------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type macaddr: "08:00:2b:01:02:ZZ" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('08:00:2b:01:02:', 'macaddr'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('08:00:2b:01:02:', 'macaddr'); + message | detail | hint | sql_error_code +----------------------------------------------------------+--------+------+---------------- + invalid input syntax for type macaddr: "08:00:2b:01:02:" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/macaddr8.out b/src/test/regress/expected/macaddr8.out new file mode 100644 index 0000000..460e850 --- /dev/null +++ b/src/test/regress/expected/macaddr8.out @@ -0,0 +1,379 @@ +-- +-- macaddr8 +-- +-- test various cases of valid and invalid input +-- valid +SELECT '08:00:2b:01:02:03 '::macaddr8; + macaddr8 +------------------------- + 08:00:2b:ff:fe:01:02:03 +(1 row) + +SELECT ' 08:00:2b:01:02:03 '::macaddr8; + macaddr8 +------------------------- + 08:00:2b:ff:fe:01:02:03 +(1 row) + +SELECT ' 08:00:2b:01:02:03'::macaddr8; + macaddr8 +------------------------- + 08:00:2b:ff:fe:01:02:03 +(1 row) + +SELECT '08:00:2b:01:02:03:04:05 '::macaddr8; + macaddr8 +------------------------- + 08:00:2b:01:02:03:04:05 +(1 row) + +SELECT ' 08:00:2b:01:02:03:04:05 '::macaddr8; + macaddr8 +------------------------- + 08:00:2b:01:02:03:04:05 +(1 row) + +SELECT ' 08:00:2b:01:02:03:04:05'::macaddr8; + macaddr8 +------------------------- + 08:00:2b:01:02:03:04:05 +(1 row) + +SELECT '123 08:00:2b:01:02:03'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "123 08:00:2b:01:02:03" +LINE 1: SELECT '123 08:00:2b:01:02:03'::macaddr8; + ^ +SELECT '08:00:2b:01:02:03 123'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00:2b:01:02:03 123" +LINE 1: SELECT '08:00:2b:01:02:03 123'::macaddr8; + ^ +SELECT '123 08:00:2b:01:02:03:04:05'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "123 08:00:2b:01:02:03:04:05" +LINE 1: SELECT '123 08:00:2b:01:02:03:04:05'::macaddr8; + ^ +SELECT '08:00:2b:01:02:03:04:05 123'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00:2b:01:02:03:04:05 123" +LINE 1: SELECT '08:00:2b:01:02:03:04:05 123'::macaddr8; + ^ +SELECT '08:00:2b:01:02:03:04:05:06:07'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00:2b:01:02:03:04:05:06:07" +LINE 1: SELECT '08:00:2b:01:02:03:04:05:06:07'::macaddr8; + ^ +SELECT '08-00-2b-01-02-03-04-05-06-07'::macaddr8; -- invalid +ERROR: invalid input syntax for 
type macaddr8: "08-00-2b-01-02-03-04-05-06-07" +LINE 1: SELECT '08-00-2b-01-02-03-04-05-06-07'::macaddr8; + ^ +SELECT '08002b:01020304050607'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08002b:01020304050607" +LINE 1: SELECT '08002b:01020304050607'::macaddr8; + ^ +SELECT '08002b01020304050607'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08002b01020304050607" +LINE 1: SELECT '08002b01020304050607'::macaddr8; + ^ +SELECT '0z002b0102030405'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "0z002b0102030405" +LINE 1: SELECT '0z002b0102030405'::macaddr8; + ^ +SELECT '08002b010203xyza'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08002b010203xyza" +LINE 1: SELECT '08002b010203xyza'::macaddr8; + ^ +SELECT '08:00-2b:01:02:03:04:05'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00-2b:01:02:03:04:05" +LINE 1: SELECT '08:00-2b:01:02:03:04:05'::macaddr8; + ^ +SELECT '08:00-2b:01:02:03:04:05'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00-2b:01:02:03:04:05" +LINE 1: SELECT '08:00-2b:01:02:03:04:05'::macaddr8; + ^ +SELECT '08:00:2b:01.02:03:04:05'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00:2b:01.02:03:04:05" +LINE 1: SELECT '08:00:2b:01.02:03:04:05'::macaddr8; + ^ +SELECT '08:00:2b:01.02:03:04:05'::macaddr8; -- invalid +ERROR: invalid input syntax for type macaddr8: "08:00:2b:01.02:03:04:05" +LINE 1: SELECT '08:00:2b:01.02:03:04:05'::macaddr8; + ^ +-- test converting a MAC address to modified EUI-64 for inclusion +-- in an ipv6 address +SELECT macaddr8_set7bit('00:08:2b:01:02:03'::macaddr8); + macaddr8_set7bit +------------------------- + 02:08:2b:ff:fe:01:02:03 +(1 row) + +CREATE TABLE macaddr8_data (a int, b macaddr8); +INSERT INTO macaddr8_data VALUES (1, '08:00:2b:01:02:03'); +INSERT INTO macaddr8_data VALUES (2, '08-00-2b-01-02-03'); +INSERT INTO macaddr8_data VALUES (3, '08002b:010203'); +INSERT INTO macaddr8_data VALUES (4, '08002b-010203'); +INSERT INTO macaddr8_data VALUES (5, '0800.2b01.0203'); +INSERT INTO macaddr8_data VALUES (6, '0800-2b01-0203'); +INSERT INTO macaddr8_data VALUES (7, '08002b010203'); +INSERT INTO macaddr8_data VALUES (8, '0800:2b01:0203'); +INSERT INTO macaddr8_data VALUES (9, 'not even close'); -- invalid +ERROR: invalid input syntax for type macaddr8: "not even close" +LINE 1: INSERT INTO macaddr8_data VALUES (9, 'not even close'); + ^ +INSERT INTO macaddr8_data VALUES (10, '08:00:2b:01:02:04'); +INSERT INTO macaddr8_data VALUES (11, '08:00:2b:01:02:02'); +INSERT INTO macaddr8_data VALUES (12, '08:00:2a:01:02:03'); +INSERT INTO macaddr8_data VALUES (13, '08:00:2c:01:02:03'); +INSERT INTO macaddr8_data VALUES (14, '08:00:2a:01:02:04'); +INSERT INTO macaddr8_data VALUES (15, '08:00:2b:01:02:03:04:05'); +INSERT INTO macaddr8_data VALUES (16, '08-00-2b-01-02-03-04-05'); +INSERT INTO macaddr8_data VALUES (17, '08002b:0102030405'); +INSERT INTO macaddr8_data VALUES (18, '08002b-0102030405'); +INSERT INTO macaddr8_data VALUES (19, '0800.2b01.0203.0405'); +INSERT INTO macaddr8_data VALUES (20, '08002b01:02030405'); +INSERT INTO macaddr8_data VALUES (21, '08002b0102030405'); +SELECT * FROM macaddr8_data ORDER BY 1; + a | b +----+------------------------- + 1 | 08:00:2b:ff:fe:01:02:03 + 2 | 08:00:2b:ff:fe:01:02:03 + 3 | 08:00:2b:ff:fe:01:02:03 + 4 | 08:00:2b:ff:fe:01:02:03 + 5 | 08:00:2b:ff:fe:01:02:03 + 6 | 08:00:2b:ff:fe:01:02:03 + 7 | 08:00:2b:ff:fe:01:02:03 + 8 | 08:00:2b:ff:fe:01:02:03 
+ 10 | 08:00:2b:ff:fe:01:02:04 + 11 | 08:00:2b:ff:fe:01:02:02 + 12 | 08:00:2a:ff:fe:01:02:03 + 13 | 08:00:2c:ff:fe:01:02:03 + 14 | 08:00:2a:ff:fe:01:02:04 + 15 | 08:00:2b:01:02:03:04:05 + 16 | 08:00:2b:01:02:03:04:05 + 17 | 08:00:2b:01:02:03:04:05 + 18 | 08:00:2b:01:02:03:04:05 + 19 | 08:00:2b:01:02:03:04:05 + 20 | 08:00:2b:01:02:03:04:05 + 21 | 08:00:2b:01:02:03:04:05 +(20 rows) + +CREATE INDEX macaddr8_data_btree ON macaddr8_data USING btree (b); +CREATE INDEX macaddr8_data_hash ON macaddr8_data USING hash (b); +SELECT a, b, trunc(b) FROM macaddr8_data ORDER BY 2, 1; + a | b | trunc +----+-------------------------+------------------------- + 12 | 08:00:2a:ff:fe:01:02:03 | 08:00:2a:00:00:00:00:00 + 14 | 08:00:2a:ff:fe:01:02:04 | 08:00:2a:00:00:00:00:00 + 15 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 16 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 17 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 18 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 19 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 20 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 21 | 08:00:2b:01:02:03:04:05 | 08:00:2b:00:00:00:00:00 + 11 | 08:00:2b:ff:fe:01:02:02 | 08:00:2b:00:00:00:00:00 + 1 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 2 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 3 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 4 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 5 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 6 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 7 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 8 | 08:00:2b:ff:fe:01:02:03 | 08:00:2b:00:00:00:00:00 + 10 | 08:00:2b:ff:fe:01:02:04 | 08:00:2b:00:00:00:00:00 + 13 | 08:00:2c:ff:fe:01:02:03 | 08:00:2c:00:00:00:00:00 +(20 rows) + +SELECT b < '08:00:2b:01:02:04' FROM macaddr8_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b > '08:00:2b:ff:fe:01:02:04' FROM macaddr8_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b > '08:00:2b:ff:fe:01:02:03' FROM macaddr8_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b::macaddr <= '08:00:2b:01:02:04' FROM macaddr8_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b::macaddr >= '08:00:2b:01:02:04' FROM macaddr8_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b = '08:00:2b:ff:fe:01:02:03' FROM macaddr8_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b::macaddr <> '08:00:2b:01:02:04'::macaddr FROM macaddr8_data WHERE a = 1; -- true + ?column? +---------- + t +(1 row) + +SELECT b::macaddr <> '08:00:2b:01:02:03'::macaddr FROM macaddr8_data WHERE a = 1; -- false + ?column? +---------- + f +(1 row) + +SELECT b < '08:00:2b:01:02:03:04:06' FROM macaddr8_data WHERE a = 15; -- true + ?column? +---------- + t +(1 row) + +SELECT b > '08:00:2b:01:02:03:04:06' FROM macaddr8_data WHERE a = 15; -- false + ?column? +---------- + f +(1 row) + +SELECT b > '08:00:2b:01:02:03:04:05' FROM macaddr8_data WHERE a = 15; -- false + ?column? +---------- + f +(1 row) + +SELECT b <= '08:00:2b:01:02:03:04:06' FROM macaddr8_data WHERE a = 15; -- true + ?column? +---------- + t +(1 row) + +SELECT b >= '08:00:2b:01:02:03:04:06' FROM macaddr8_data WHERE a = 15; -- false + ?column? +---------- + f +(1 row) + +SELECT b = '08:00:2b:01:02:03:04:05' FROM macaddr8_data WHERE a = 15; -- true + ?column? +---------- + t +(1 row) + +SELECT b <> '08:00:2b:01:02:03:04:06' FROM macaddr8_data WHERE a = 15; -- true + ?column? 
+---------- + t +(1 row) + +SELECT b <> '08:00:2b:01:02:03:04:05' FROM macaddr8_data WHERE a = 15; -- false + ?column? +---------- + f +(1 row) + +SELECT ~b FROM macaddr8_data; + ?column? +------------------------- + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fc + f7:ff:d4:00:01:fe:fd:fb + f7:ff:d4:00:01:fe:fd:fd + f7:ff:d5:00:01:fe:fd:fc + f7:ff:d3:00:01:fe:fd:fc + f7:ff:d5:00:01:fe:fd:fb + f7:ff:d4:fe:fd:fc:fb:fa + f7:ff:d4:fe:fd:fc:fb:fa + f7:ff:d4:fe:fd:fc:fb:fa + f7:ff:d4:fe:fd:fc:fb:fa + f7:ff:d4:fe:fd:fc:fb:fa + f7:ff:d4:fe:fd:fc:fb:fa + f7:ff:d4:fe:fd:fc:fb:fa +(20 rows) + +SELECT b & '00:00:00:ff:ff:ff' FROM macaddr8_data; + ?column? +------------------------- + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:04 + 00:00:00:ff:fe:01:02:02 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:03 + 00:00:00:ff:fe:01:02:04 + 00:00:00:01:02:03:04:05 + 00:00:00:01:02:03:04:05 + 00:00:00:01:02:03:04:05 + 00:00:00:01:02:03:04:05 + 00:00:00:01:02:03:04:05 + 00:00:00:01:02:03:04:05 + 00:00:00:01:02:03:04:05 +(20 rows) + +SELECT b | '01:02:03:04:05:06' FROM macaddr8_data; + ?column? +------------------------- + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:06 + 09:02:2b:ff:fe:05:07:06 + 09:02:2b:ff:fe:05:07:07 + 09:02:2f:ff:fe:05:07:07 + 09:02:2b:ff:fe:05:07:06 + 09:02:2b:ff:fe:07:05:07 + 09:02:2b:ff:fe:07:05:07 + 09:02:2b:ff:fe:07:05:07 + 09:02:2b:ff:fe:07:05:07 + 09:02:2b:ff:fe:07:05:07 + 09:02:2b:ff:fe:07:05:07 + 09:02:2b:ff:fe:07:05:07 +(20 rows) + +DROP TABLE macaddr8_data; +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('08:00:2b:01:02:03:04:ZZ', 'macaddr8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('08:00:2b:01:02:03:04:ZZ', 'macaddr8'); + message | detail | hint | sql_error_code +-------------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type macaddr8: "08:00:2b:01:02:03:04:ZZ" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('08:00:2b:01:02:03:04:', 'macaddr8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('08:00:2b:01:02:03:04:', 'macaddr8'); + message | detail | hint | sql_error_code +-----------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type macaddr8: "08:00:2b:01:02:03:04:" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out new file mode 100644 index 0000000..67a50bd --- /dev/null +++ b/src/test/regress/expected/matview.out @@ -0,0 +1,694 @@ +-- create a table to use as a basis for views and materialized views in various combinations +CREATE TABLE mvtest_t (id int NOT NULL PRIMARY KEY, type text NOT NULL, amt numeric NOT NULL); +INSERT INTO mvtest_t VALUES + (1, 'x', 2), + (2, 'x', 3), + (3, 'y', 5), + (4, 'y', 7), + (5, 'z', 11); +-- we want a view based on the table, too, since views present additional challenges +CREATE VIEW mvtest_tv AS 
SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type; +SELECT * FROM mvtest_tv ORDER BY type; + type | totamt +------+-------- + x | 5 + y | 12 + z | 11 +(3 rows) + +-- create a materialized view with no data, and confirm correct behavior +EXPLAIN (costs off) + CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA; + QUERY PLAN +---------------------------- + HashAggregate + Group Key: type + -> Seq Scan on mvtest_t +(3 rows) + +CREATE MATERIALIZED VIEW mvtest_tm AS SELECT type, sum(amt) AS totamt FROM mvtest_t GROUP BY type WITH NO DATA; +SELECT relispopulated FROM pg_class WHERE oid = 'mvtest_tm'::regclass; + relispopulated +---------------- + f +(1 row) + +SELECT * FROM mvtest_tm ORDER BY type; +ERROR: materialized view "mvtest_tm" has not been populated +HINT: Use the REFRESH MATERIALIZED VIEW command. +REFRESH MATERIALIZED VIEW mvtest_tm; +SELECT relispopulated FROM pg_class WHERE oid = 'mvtest_tm'::regclass; + relispopulated +---------------- + t +(1 row) + +CREATE UNIQUE INDEX mvtest_tm_type ON mvtest_tm (type); +SELECT * FROM mvtest_tm ORDER BY type; + type | totamt +------+-------- + x | 5 + y | 12 + z | 11 +(3 rows) + +-- create various views +EXPLAIN (costs off) + CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type; + QUERY PLAN +---------------------------------- + Sort + Sort Key: mvtest_t.type + -> HashAggregate + Group Key: mvtest_t.type + -> Seq Scan on mvtest_t +(5 rows) + +CREATE MATERIALIZED VIEW mvtest_tvm AS SELECT * FROM mvtest_tv ORDER BY type; +SELECT * FROM mvtest_tvm; + type | totamt +------+-------- + x | 5 + y | 12 + z | 11 +(3 rows) + +CREATE MATERIALIZED VIEW mvtest_tmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tm; +CREATE MATERIALIZED VIEW mvtest_tvmm AS SELECT sum(totamt) AS grandtot FROM mvtest_tvm; +CREATE UNIQUE INDEX mvtest_tvmm_expr ON mvtest_tvmm ((grandtot > 0)); +CREATE UNIQUE INDEX mvtest_tvmm_pred ON mvtest_tvmm (grandtot) WHERE grandtot < 0; +CREATE VIEW mvtest_tvv AS SELECT sum(totamt) AS grandtot FROM mvtest_tv; +EXPLAIN (costs off) + CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv; + QUERY PLAN +---------------------------------- + Aggregate + -> HashAggregate + Group Key: mvtest_t.type + -> Seq Scan on mvtest_t +(4 rows) + +CREATE MATERIALIZED VIEW mvtest_tvvm AS SELECT * FROM mvtest_tvv; +CREATE VIEW mvtest_tvvmv AS SELECT * FROM mvtest_tvvm; +CREATE MATERIALIZED VIEW mvtest_bb AS SELECT * FROM mvtest_tvvmv; +CREATE INDEX mvtest_aa ON mvtest_bb (grandtot); +-- check that plans seem reasonable +\d+ mvtest_tvm + Materialized view "public.mvtest_tvm" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + type | text | | | | extended | | + totamt | numeric | | | | main | | +View definition: + SELECT type, + totamt + FROM mvtest_tv + ORDER BY type; + +\d+ mvtest_tvm + Materialized view "public.mvtest_tvm" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + type | text | | | | extended | | + totamt | numeric | | | | main | | +View definition: + SELECT type, + totamt + FROM mvtest_tv + ORDER BY type; + +\d+ mvtest_tvvm + Materialized view "public.mvtest_tvvm" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description 
+----------+---------+-----------+----------+---------+---------+--------------+------------- + grandtot | numeric | | | | main | | +View definition: + SELECT grandtot + FROM mvtest_tvv; + +\d+ mvtest_bb + Materialized view "public.mvtest_bb" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +----------+---------+-----------+----------+---------+---------+--------------+------------- + grandtot | numeric | | | | main | | +Indexes: + "mvtest_aa" btree (grandtot) +View definition: + SELECT grandtot + FROM mvtest_tvvmv; + +-- test schema behavior +CREATE SCHEMA mvtest_mvschema; +ALTER MATERIALIZED VIEW mvtest_tvm SET SCHEMA mvtest_mvschema; +\d+ mvtest_tvm +\d+ mvtest_tvmm + Materialized view "public.mvtest_tvmm" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +----------+---------+-----------+----------+---------+---------+--------------+------------- + grandtot | numeric | | | | main | | +Indexes: + "mvtest_tvmm_expr" UNIQUE, btree ((grandtot > 0::numeric)) + "mvtest_tvmm_pred" UNIQUE, btree (grandtot) WHERE grandtot < 0::numeric +View definition: + SELECT sum(totamt) AS grandtot + FROM mvtest_mvschema.mvtest_tvm; + +SET search_path = mvtest_mvschema, public; +\d+ mvtest_tvm + Materialized view "mvtest_mvschema.mvtest_tvm" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + type | text | | | | extended | | + totamt | numeric | | | | main | | +View definition: + SELECT type, + totamt + FROM mvtest_tv + ORDER BY type; + +-- modify the underlying table data +INSERT INTO mvtest_t VALUES (6, 'z', 13); +-- confirm pre- and post-refresh contents of fairly simple materialized views +SELECT * FROM mvtest_tm ORDER BY type; + type | totamt +------+-------- + x | 5 + y | 12 + z | 11 +(3 rows) + +SELECT * FROM mvtest_tvm ORDER BY type; + type | totamt +------+-------- + x | 5 + y | 12 + z | 11 +(3 rows) + +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tm; +REFRESH MATERIALIZED VIEW mvtest_tvm; +SELECT * FROM mvtest_tm ORDER BY type; + type | totamt +------+-------- + x | 5 + y | 12 + z | 24 +(3 rows) + +SELECT * FROM mvtest_tvm ORDER BY type; + type | totamt +------+-------- + x | 5 + y | 12 + z | 24 +(3 rows) + +RESET search_path; +-- confirm pre- and post-refresh contents of nested materialized views +EXPLAIN (costs off) + SELECT * FROM mvtest_tmm; + QUERY PLAN +------------------------ + Seq Scan on mvtest_tmm +(1 row) + +EXPLAIN (costs off) + SELECT * FROM mvtest_tvmm; + QUERY PLAN +------------------------- + Seq Scan on mvtest_tvmm +(1 row) + +EXPLAIN (costs off) + SELECT * FROM mvtest_tvvm; + QUERY PLAN +------------------------- + Seq Scan on mvtest_tvvm +(1 row) + +SELECT * FROM mvtest_tmm; + grandtot +---------- + 28 +(1 row) + +SELECT * FROM mvtest_tvmm; + grandtot +---------- + 28 +(1 row) + +SELECT * FROM mvtest_tvvm; + grandtot +---------- + 28 +(1 row) + +REFRESH MATERIALIZED VIEW mvtest_tmm; +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm; +ERROR: cannot refresh materialized view "public.mvtest_tvmm" concurrently +HINT: Create a unique index with no WHERE clause on one or more columns of the materialized view. 
+REFRESH MATERIALIZED VIEW mvtest_tvmm; +REFRESH MATERIALIZED VIEW mvtest_tvvm; +EXPLAIN (costs off) + SELECT * FROM mvtest_tmm; + QUERY PLAN +------------------------ + Seq Scan on mvtest_tmm +(1 row) + +EXPLAIN (costs off) + SELECT * FROM mvtest_tvmm; + QUERY PLAN +------------------------- + Seq Scan on mvtest_tvmm +(1 row) + +EXPLAIN (costs off) + SELECT * FROM mvtest_tvvm; + QUERY PLAN +------------------------- + Seq Scan on mvtest_tvvm +(1 row) + +SELECT * FROM mvtest_tmm; + grandtot +---------- + 41 +(1 row) + +SELECT * FROM mvtest_tvmm; + grandtot +---------- + 41 +(1 row) + +SELECT * FROM mvtest_tvvm; + grandtot +---------- + 41 +(1 row) + +-- test diemv when the mv does not exist +DROP MATERIALIZED VIEW IF EXISTS no_such_mv; +NOTICE: materialized view "no_such_mv" does not exist, skipping +-- make sure invalid combination of options is prohibited +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA; +ERROR: CONCURRENTLY and WITH NO DATA options cannot be used together +-- no tuple locks on materialized views +SELECT * FROM mvtest_tvvm FOR SHARE; +ERROR: cannot lock rows in materialized view "mvtest_tvvm" +-- test join of mv and view +SELECT type, m.totamt AS mtot, v.totamt AS vtot FROM mvtest_tm m LEFT JOIN mvtest_tv v USING (type) ORDER BY type; + type | mtot | vtot +------+------+------ + x | 5 | 5 + y | 12 | 12 + z | 24 | 24 +(3 rows) + +-- make sure that dependencies are reported properly when they block the drop +DROP TABLE mvtest_t; +ERROR: cannot drop table mvtest_t because other objects depend on it +DETAIL: view mvtest_tv depends on table mvtest_t +materialized view mvtest_mvschema.mvtest_tvm depends on view mvtest_tv +materialized view mvtest_tvmm depends on materialized view mvtest_mvschema.mvtest_tvm +view mvtest_tvv depends on view mvtest_tv +materialized view mvtest_tvvm depends on view mvtest_tvv +view mvtest_tvvmv depends on materialized view mvtest_tvvm +materialized view mvtest_bb depends on view mvtest_tvvmv +materialized view mvtest_tm depends on table mvtest_t +materialized view mvtest_tmm depends on materialized view mvtest_tm +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- make sure dependencies are dropped and reported +-- and make sure that transactional behavior is correct on rollback +-- incidentally leaving some interesting materialized views for pg_dump testing +BEGIN; +DROP TABLE mvtest_t CASCADE; +NOTICE: drop cascades to 9 other objects +DETAIL: drop cascades to view mvtest_tv +drop cascades to materialized view mvtest_mvschema.mvtest_tvm +drop cascades to materialized view mvtest_tvmm +drop cascades to view mvtest_tvv +drop cascades to materialized view mvtest_tvvm +drop cascades to view mvtest_tvvmv +drop cascades to materialized view mvtest_bb +drop cascades to materialized view mvtest_tm +drop cascades to materialized view mvtest_tmm +ROLLBACK; +-- some additional tests not using base tables +CREATE VIEW mvtest_vt1 AS SELECT 1 moo; +CREATE VIEW mvtest_vt2 AS SELECT moo, 2*moo FROM mvtest_vt1 UNION ALL SELECT moo, 3*moo FROM mvtest_vt1; +\d+ mvtest_vt2 + View "public.mvtest_vt2" + Column | Type | Collation | Nullable | Default | Storage | Description +----------+---------+-----------+----------+---------+---------+------------- + moo | integer | | | | plain | + ?column? | integer | | | | plain | +View definition: + SELECT mvtest_vt1.moo, + 2 * mvtest_vt1.moo AS "?column?" 
+ FROM mvtest_vt1 +UNION ALL + SELECT mvtest_vt1.moo, + 3 * mvtest_vt1.moo + FROM mvtest_vt1; + +CREATE MATERIALIZED VIEW mv_test2 AS SELECT moo, 2*moo FROM mvtest_vt2 UNION ALL SELECT moo, 3*moo FROM mvtest_vt2; +\d+ mv_test2 + Materialized view "public.mv_test2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +----------+---------+-----------+----------+---------+---------+--------------+------------- + moo | integer | | | | plain | | + ?column? | integer | | | | plain | | +View definition: + SELECT mvtest_vt2.moo, + 2 * mvtest_vt2.moo AS "?column?" + FROM mvtest_vt2 +UNION ALL + SELECT mvtest_vt2.moo, + 3 * mvtest_vt2.moo + FROM mvtest_vt2; + +CREATE MATERIALIZED VIEW mv_test3 AS SELECT * FROM mv_test2 WHERE moo = 12345; +SELECT relispopulated FROM pg_class WHERE oid = 'mv_test3'::regclass; + relispopulated +---------------- + t +(1 row) + +DROP VIEW mvtest_vt1 CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view mvtest_vt2 +drop cascades to materialized view mv_test2 +drop cascades to materialized view mv_test3 +-- test that duplicate values on unique index prevent refresh +CREATE TABLE mvtest_foo(a, b) AS VALUES(1, 10); +CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo; +CREATE UNIQUE INDEX ON mvtest_mv(a); +INSERT INTO mvtest_foo SELECT * FROM mvtest_foo; +REFRESH MATERIALIZED VIEW mvtest_mv; +ERROR: could not create unique index "mvtest_mv_a_idx" +DETAIL: Key (a)=(1) is duplicated. +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv; +ERROR: new data for materialized view "mvtest_mv" contains duplicate rows without any null columns +DETAIL: Row: (1,10) +DROP TABLE mvtest_foo CASCADE; +NOTICE: drop cascades to materialized view mvtest_mv +-- make sure that all columns covered by unique indexes works +CREATE TABLE mvtest_foo(a, b, c) AS VALUES(1, 2, 3); +CREATE MATERIALIZED VIEW mvtest_mv AS SELECT * FROM mvtest_foo; +CREATE UNIQUE INDEX ON mvtest_mv (a); +CREATE UNIQUE INDEX ON mvtest_mv (b); +CREATE UNIQUE INDEX on mvtest_mv (c); +INSERT INTO mvtest_foo VALUES(2, 3, 4); +INSERT INTO mvtest_foo VALUES(3, 4, 5); +REFRESH MATERIALIZED VIEW mvtest_mv; +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv; +DROP TABLE mvtest_foo CASCADE; +NOTICE: drop cascades to materialized view mvtest_mv +-- allow subquery to reference unpopulated matview if WITH NO DATA is specified +CREATE MATERIALIZED VIEW mvtest_mv1 AS SELECT 1 AS col1 WITH NO DATA; +CREATE MATERIALIZED VIEW mvtest_mv2 AS SELECT * FROM mvtest_mv1 + WHERE col1 = (SELECT LEAST(col1) FROM mvtest_mv1) WITH NO DATA; +DROP MATERIALIZED VIEW mvtest_mv1 CASCADE; +NOTICE: drop cascades to materialized view mvtest_mv2 +-- make sure that types with unusual equality tests work +CREATE TABLE mvtest_boxes (id serial primary key, b box); +INSERT INTO mvtest_boxes (b) VALUES + ('(32,32),(31,31)'), + ('(2.0000004,2.0000004),(1,1)'), + ('(1.9999996,1.9999996),(1,1)'); +CREATE MATERIALIZED VIEW mvtest_boxmv AS SELECT * FROM mvtest_boxes; +CREATE UNIQUE INDEX mvtest_boxmv_id ON mvtest_boxmv (id); +UPDATE mvtest_boxes SET b = '(2,2),(1,1)' WHERE id = 2; +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_boxmv; +SELECT * FROM mvtest_boxmv ORDER BY id; + id | b +----+----------------------------- + 1 | (32,32),(31,31) + 2 | (2,2),(1,1) + 3 | (1.9999996,1.9999996),(1,1) +(3 rows) + +DROP TABLE mvtest_boxes CASCADE; +NOTICE: drop cascades to materialized view mvtest_boxmv +-- make sure that column names are handled correctly +CREATE TABLE mvtest_v (i int, j int); +CREATE MATERIALIZED 
VIEW mvtest_mv_v (ii, jj, kk) AS SELECT i, j FROM mvtest_v; -- error +ERROR: too many column names were specified +CREATE MATERIALIZED VIEW mvtest_mv_v (ii, jj) AS SELECT i, j FROM mvtest_v; -- ok +CREATE MATERIALIZED VIEW mvtest_mv_v_2 (ii) AS SELECT i, j FROM mvtest_v; -- ok +CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj, kk) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- error +ERROR: too many column names were specified +CREATE MATERIALIZED VIEW mvtest_mv_v_3 (ii, jj) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok +CREATE MATERIALIZED VIEW mvtest_mv_v_4 (ii) AS SELECT i, j FROM mvtest_v WITH NO DATA; -- ok +ALTER TABLE mvtest_v RENAME COLUMN i TO x; +INSERT INTO mvtest_v values (1, 2); +CREATE UNIQUE INDEX mvtest_mv_v_ii ON mvtest_mv_v (ii); +REFRESH MATERIALIZED VIEW mvtest_mv_v; +UPDATE mvtest_v SET j = 3 WHERE x = 1; +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_v; +REFRESH MATERIALIZED VIEW mvtest_mv_v_2; +REFRESH MATERIALIZED VIEW mvtest_mv_v_3; +REFRESH MATERIALIZED VIEW mvtest_mv_v_4; +SELECT * FROM mvtest_v; + x | j +---+--- + 1 | 3 +(1 row) + +SELECT * FROM mvtest_mv_v; + ii | jj +----+---- + 1 | 3 +(1 row) + +SELECT * FROM mvtest_mv_v_2; + ii | j +----+--- + 1 | 3 +(1 row) + +SELECT * FROM mvtest_mv_v_3; + ii | jj +----+---- + 1 | 3 +(1 row) + +SELECT * FROM mvtest_mv_v_4; + ii | j +----+--- + 1 | 3 +(1 row) + +DROP TABLE mvtest_v CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to materialized view mvtest_mv_v +drop cascades to materialized view mvtest_mv_v_2 +drop cascades to materialized view mvtest_mv_v_3 +drop cascades to materialized view mvtest_mv_v_4 +-- Check that unknown literals are converted to "text" in CREATE MATVIEW, +-- so that we don't end up with unknown-type columns. +CREATE MATERIALIZED VIEW mv_unspecified_types AS + SELECT 42 as i, 42.5 as num, 'foo' as u, 'foo'::unknown as u2, null as n; +\d+ mv_unspecified_types + Materialized view "public.mv_unspecified_types" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + i | integer | | | | plain | | + num | numeric | | | | main | | + u | text | | | | extended | | + u2 | text | | | | extended | | + n | text | | | | extended | | +View definition: + SELECT 42 AS i, + 42.5 AS num, + 'foo'::text AS u, + 'foo'::text AS u2, + NULL::text AS n; + +SELECT * FROM mv_unspecified_types; + i | num | u | u2 | n +----+------+-----+-----+--- + 42 | 42.5 | foo | foo | +(1 row) + +DROP MATERIALIZED VIEW mv_unspecified_types; +-- make sure that create WITH NO DATA does not plan the query (bug #13907) +create materialized view mvtest_error as select 1/0 as x; -- fail +ERROR: division by zero +create materialized view mvtest_error as select 1/0 as x with no data; +refresh materialized view mvtest_error; -- fail here +ERROR: division by zero +drop materialized view mvtest_error; +-- make sure that matview rows can be referenced as source rows (bug #9398) +CREATE TABLE mvtest_v AS SELECT generate_series(1,10) AS a; +CREATE MATERIALIZED VIEW mvtest_mv_v AS SELECT a FROM mvtest_v WHERE a <= 5; +DELETE FROM mvtest_v WHERE EXISTS ( SELECT * FROM mvtest_mv_v WHERE mvtest_mv_v.a = mvtest_v.a ); +SELECT * FROM mvtest_v; + a +---- + 6 + 7 + 8 + 9 + 10 +(5 rows) + +SELECT * FROM mvtest_mv_v; + a +--- + 1 + 2 + 3 + 4 + 5 +(5 rows) + +DROP TABLE mvtest_v CASCADE; +NOTICE: drop cascades to materialized view mvtest_mv_v +-- make sure running as superuser works when MV owned by another 
role (bug #11208) +CREATE ROLE regress_user_mvtest; +SET ROLE regress_user_mvtest; +-- this test case also checks for ambiguity in the queries issued by +-- refresh_by_match_merge(), by choosing column names that intentionally +-- duplicate all the aliases used in those queries +CREATE TABLE mvtest_foo_data AS SELECT i, + i+1 AS tid, + fipshash(random()::text) AS mv, + fipshash(random()::text) AS newdata, + fipshash(random()::text) AS newdata2, + fipshash(random()::text) AS diff + FROM generate_series(1, 10) i; +CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data; +CREATE MATERIALIZED VIEW mvtest_mv_foo AS SELECT * FROM mvtest_foo_data; +ERROR: relation "mvtest_mv_foo" already exists +CREATE MATERIALIZED VIEW IF NOT EXISTS mvtest_mv_foo AS SELECT * FROM mvtest_foo_data; +NOTICE: relation "mvtest_mv_foo" already exists, skipping +CREATE UNIQUE INDEX ON mvtest_mv_foo (i); +RESET ROLE; +REFRESH MATERIALIZED VIEW mvtest_mv_foo; +REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo; +DROP OWNED BY regress_user_mvtest CASCADE; +DROP ROLE regress_user_mvtest; +-- Concurrent refresh requires a unique index on the materialized +-- view. Test what happens if it's dropped during the refresh. +CREATE OR REPLACE FUNCTION mvtest_drop_the_index() + RETURNS bool AS $$ +BEGIN + EXECUTE 'DROP INDEX IF EXISTS mvtest_drop_idx'; + RETURN true; +END; +$$ LANGUAGE plpgsql; +CREATE MATERIALIZED VIEW drop_idx_matview AS + SELECT 1 as i WHERE mvtest_drop_the_index(); +NOTICE: index "mvtest_drop_idx" does not exist, skipping +CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i); +REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview; +ERROR: could not find suitable unique index on materialized view +DROP MATERIALIZED VIEW drop_idx_matview; -- clean up +-- make sure that create WITH NO DATA works via SPI +BEGIN; +CREATE FUNCTION mvtest_func() + RETURNS void AS $$ +BEGIN + CREATE MATERIALIZED VIEW mvtest1 AS SELECT 1 AS x; + CREATE MATERIALIZED VIEW mvtest2 AS SELECT 1 AS x WITH NO DATA; +END; +$$ LANGUAGE plpgsql; +SELECT mvtest_func(); + mvtest_func +------------- + +(1 row) + +SELECT * FROM mvtest1; + x +--- + 1 +(1 row) + +SELECT * FROM mvtest2; +ERROR: materialized view "mvtest2" has not been populated +HINT: Use the REFRESH MATERIALIZED VIEW command. +ROLLBACK; +-- INSERT privileges if relation owner is not allowed to insert. 
+CREATE SCHEMA matview_schema; +CREATE USER regress_matview_user; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_matview_user + REVOKE INSERT ON TABLES FROM regress_matview_user; +GRANT ALL ON SCHEMA matview_schema TO public; +SET SESSION AUTHORIZATION regress_matview_user; +CREATE MATERIALIZED VIEW matview_schema.mv_withdata1 (a) AS + SELECT generate_series(1, 10) WITH DATA; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE MATERIALIZED VIEW matview_schema.mv_withdata2 (a) AS + SELECT generate_series(1, 10) WITH DATA; + QUERY PLAN +-------------------------------------- + ProjectSet (actual rows=10 loops=1) + -> Result (actual rows=1 loops=1) +(2 rows) + +REFRESH MATERIALIZED VIEW matview_schema.mv_withdata2; +CREATE MATERIALIZED VIEW matview_schema.mv_nodata1 (a) AS + SELECT generate_series(1, 10) WITH NO DATA; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE MATERIALIZED VIEW matview_schema.mv_nodata2 (a) AS + SELECT generate_series(1, 10) WITH NO DATA; + QUERY PLAN +------------------------------- + ProjectSet (never executed) + -> Result (never executed) +(2 rows) + +REFRESH MATERIALIZED VIEW matview_schema.mv_nodata2; +RESET SESSION AUTHORIZATION; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_matview_user + GRANT INSERT ON TABLES TO regress_matview_user; +DROP SCHEMA matview_schema CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to materialized view matview_schema.mv_withdata1 +drop cascades to materialized view matview_schema.mv_withdata2 +drop cascades to materialized view matview_schema.mv_nodata1 +drop cascades to materialized view matview_schema.mv_nodata2 +DROP USER regress_matview_user; +-- CREATE MATERIALIZED VIEW ... IF NOT EXISTS +CREATE MATERIALIZED VIEW matview_ine_tab AS SELECT 1; +CREATE MATERIALIZED VIEW matview_ine_tab AS SELECT 1 / 0; -- error +ERROR: relation "matview_ine_tab" already exists +CREATE MATERIALIZED VIEW IF NOT EXISTS matview_ine_tab AS + SELECT 1 / 0; -- ok +NOTICE: relation "matview_ine_tab" already exists, skipping +CREATE MATERIALIZED VIEW matview_ine_tab AS + SELECT 1 / 0 WITH NO DATA; -- error +ERROR: relation "matview_ine_tab" already exists +CREATE MATERIALIZED VIEW IF NOT EXISTS matview_ine_tab AS + SELECT 1 / 0 WITH NO DATA; -- ok +NOTICE: relation "matview_ine_tab" already exists, skipping +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE MATERIALIZED VIEW matview_ine_tab AS + SELECT 1 / 0; -- error +ERROR: relation "matview_ine_tab" already exists +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE MATERIALIZED VIEW IF NOT EXISTS matview_ine_tab AS + SELECT 1 / 0; -- ok +NOTICE: relation "matview_ine_tab" already exists, skipping + QUERY PLAN +------------ +(0 rows) + +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE MATERIALIZED VIEW matview_ine_tab AS + SELECT 1 / 0 WITH NO DATA; -- error +ERROR: relation "matview_ine_tab" already exists +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE MATERIALIZED VIEW IF NOT EXISTS matview_ine_tab AS + SELECT 1 / 0 WITH NO DATA; -- ok +NOTICE: relation "matview_ine_tab" already exists, skipping + QUERY PLAN +------------ +(0 rows) + +DROP MATERIALIZED VIEW matview_ine_tab; diff --git a/src/test/regress/expected/md5.out b/src/test/regress/expected/md5.out new file mode 100644 index 0000000..c5dd801 --- /dev/null +++ b/src/test/regress/expected/md5.out @@ -0,0 +1,91 @@ +-- +-- MD5 test suite - from IETF RFC 1321 +-- (see: https://www.rfc-editor.org/rfc/rfc1321) +-- +-- (The md5() function will error in 
OpenSSL FIPS mode. By keeping +-- this test in a separate file, it is easier to manage variant +-- results.) +select md5('') = 'd41d8cd98f00b204e9800998ecf8427e' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('a') = '0cc175b9c0f1b6a831c399e269772661' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('abc') = '900150983cd24fb0d6963f7d28e17f72' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('message digest') = 'f96b697d7cb7938d525a2f31aaf161d0' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('abcdefghijklmnopqrstuvwxyz') = 'c3fcd3d76192e4007dfb496cca67e13b' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789') = 'd174ab98d277d9f5a5611c2c9f419d9f' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890') = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5(''::bytea) = 'd41d8cd98f00b204e9800998ecf8427e' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('a'::bytea) = '0cc175b9c0f1b6a831c399e269772661' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('abc'::bytea) = '900150983cd24fb0d6963f7d28e17f72' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('message digest'::bytea) = 'f96b697d7cb7938d525a2f31aaf161d0' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('abcdefghijklmnopqrstuvwxyz'::bytea) = 'c3fcd3d76192e4007dfb496cca67e13b' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'::bytea) = 'd174ab98d277d9f5a5611c2c9f419d9f' AS "TRUE"; + TRUE +------ + t +(1 row) + +select md5('12345678901234567890123456789012345678901234567890123456789012345678901234567890'::bytea) = '57edf4a22be3c955ac49da2e2107b67a' AS "TRUE"; + TRUE +------ + t +(1 row) + diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out new file mode 100644 index 0000000..f520243 --- /dev/null +++ b/src/test/regress/expected/memoize.out @@ -0,0 +1,350 @@ +-- Perform tests on the Memoize node. +-- The cache hits/misses/evictions from the Memoize node can vary between +-- machines. Let's just replace the number with an 'N'. In order to allow us +-- to perform validation when the measure was zero, we replace a zero value +-- with "Zero". All other numbers are replaced with 'N'. 
+create function explain_memoize(query text, hide_hitmiss bool) returns setof text +language plpgsql as +$$ +declare + ln text; +begin + for ln in + execute format('explain (analyze, costs off, summary off, timing off) %s', + query) + loop + if hide_hitmiss = true then + ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero'); + ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N'); + ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero'); + ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N'); + end if; + ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero'); + ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N'); + ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N'); + ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N'); + ln := regexp_replace(ln, 'loops=\d+', 'loops=N'); + return next ln; + end loop; +end; +$$; +-- Ensure we get a memoize node on the inner side of the nested loop +SET enable_hashjoin TO off; +SET enable_bitmapscan TO off; +SELECT explain_memoize(' +SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1 +INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty +WHERE t2.unique1 < 1000;', false); + explain_memoize +------------------------------------------------------------------------------------------- + Aggregate (actual rows=1 loops=N) + -> Nested Loop (actual rows=1000 loops=N) + -> Seq Scan on tenk1 t2 (actual rows=1000 loops=N) + Filter: (unique1 < 1000) + Rows Removed by Filter: 9000 + -> Memoize (actual rows=1 loops=N) + Cache Key: t2.twenty + Cache Mode: logical + Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N) + Index Cond: (unique1 = t2.twenty) + Heap Fetches: N +(12 rows) + +-- And check we get the expected results. +SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1 +INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty +WHERE t2.unique1 < 1000; + count | avg +-------+-------------------- + 1000 | 9.5000000000000000 +(1 row) + +-- Try with LATERAL joins +SELECT explain_memoize(' +SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, +LATERAL (SELECT t2.unique1 FROM tenk1 t2 + WHERE t1.twenty = t2.unique1 OFFSET 0) t2 +WHERE t1.unique1 < 1000;', false); + explain_memoize +------------------------------------------------------------------------------------------- + Aggregate (actual rows=1 loops=N) + -> Nested Loop (actual rows=1000 loops=N) + -> Seq Scan on tenk1 t1 (actual rows=1000 loops=N) + Filter: (unique1 < 1000) + Rows Removed by Filter: 9000 + -> Memoize (actual rows=1 loops=N) + Cache Key: t1.twenty + Cache Mode: binary + Hits: 980 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N) + Index Cond: (unique1 = t1.twenty) + Heap Fetches: N +(12 rows) + +-- And check we get the expected results. +SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, +LATERAL (SELECT t2.unique1 FROM tenk1 t2 + WHERE t1.twenty = t2.unique1 OFFSET 0) t2 +WHERE t1.unique1 < 1000; + count | avg +-------+-------------------- + 1000 | 9.5000000000000000 +(1 row) + +-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions +SET work_mem TO '64kB'; +SET hash_mem_multiplier TO 1.0; +SET enable_mergejoin TO off; +-- Ensure we get some evictions. We're unable to validate the hits and misses +-- here as the number of entries that fit in the cache at once will vary +-- between different machines. 
+SELECT explain_memoize(' +SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1 +INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand +WHERE t2.unique1 < 1200;', true); + explain_memoize +------------------------------------------------------------------------------------------- + Aggregate (actual rows=1 loops=N) + -> Nested Loop (actual rows=1200 loops=N) + -> Seq Scan on tenk1 t2 (actual rows=1200 loops=N) + Filter: (unique1 < 1200) + Rows Removed by Filter: 8800 + -> Memoize (actual rows=1 loops=N) + Cache Key: t2.thousand + Cache Mode: logical + Hits: N Misses: N Evictions: N Overflows: 0 Memory Usage: NkB + -> Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N) + Index Cond: (unique1 = t2.thousand) + Heap Fetches: N +(12 rows) + +CREATE TABLE flt (f float); +CREATE INDEX flt_f_idx ON flt (f); +INSERT INTO flt VALUES('-0.0'::float),('+0.0'::float); +ANALYZE flt; +SET enable_seqscan TO off; +-- Ensure memoize operates in logical mode +SELECT explain_memoize(' +SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f = f2.f;', false); + explain_memoize +------------------------------------------------------------------------------- + Nested Loop (actual rows=4 loops=N) + -> Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N) + Heap Fetches: N + -> Memoize (actual rows=2 loops=N) + Cache Key: f1.f + Cache Mode: logical + Hits: 1 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using flt_f_idx on flt f2 (actual rows=2 loops=N) + Index Cond: (f = f1.f) + Heap Fetches: N +(10 rows) + +-- Ensure memoize operates in binary mode +SELECT explain_memoize(' +SELECT * FROM flt f1 INNER JOIN flt f2 ON f1.f >= f2.f;', false); + explain_memoize +------------------------------------------------------------------------------- + Nested Loop (actual rows=4 loops=N) + -> Index Only Scan using flt_f_idx on flt f1 (actual rows=2 loops=N) + Heap Fetches: N + -> Memoize (actual rows=2 loops=N) + Cache Key: f1.f + Cache Mode: binary + Hits: 0 Misses: 2 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using flt_f_idx on flt f2 (actual rows=2 loops=N) + Index Cond: (f <= f1.f) + Heap Fetches: N +(10 rows) + +DROP TABLE flt; +-- Exercise Memoize in binary mode with a large fixed width type and a +-- varlena type. 
+CREATE TABLE strtest (n name, t text); +CREATE INDEX strtest_n_idx ON strtest (n); +CREATE INDEX strtest_t_idx ON strtest (t); +INSERT INTO strtest VALUES('one','one'),('two','two'),('three',repeat(fipshash('three'),100)); +-- duplicate rows so we get some cache hits +INSERT INTO strtest SELECT * FROM strtest; +ANALYZE strtest; +-- Ensure we get 3 hits and 3 misses +SELECT explain_memoize(' +SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.n >= s2.n;', false); + explain_memoize +---------------------------------------------------------------------------------- + Nested Loop (actual rows=24 loops=N) + -> Seq Scan on strtest s1 (actual rows=6 loops=N) + -> Memoize (actual rows=4 loops=N) + Cache Key: s1.n + Cache Mode: binary + Hits: 3 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Scan using strtest_n_idx on strtest s2 (actual rows=4 loops=N) + Index Cond: (n <= s1.n) +(8 rows) + +-- Ensure we get 3 hits and 3 misses +SELECT explain_memoize(' +SELECT * FROM strtest s1 INNER JOIN strtest s2 ON s1.t >= s2.t;', false); + explain_memoize +---------------------------------------------------------------------------------- + Nested Loop (actual rows=24 loops=N) + -> Seq Scan on strtest s1 (actual rows=6 loops=N) + -> Memoize (actual rows=4 loops=N) + Cache Key: s1.t + Cache Mode: binary + Hits: 3 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Scan using strtest_t_idx on strtest s2 (actual rows=4 loops=N) + Index Cond: (t <= s1.t) +(8 rows) + +DROP TABLE strtest; +-- Ensure memoize works with partitionwise join +SET enable_partitionwise_join TO on; +CREATE TABLE prt (a int) PARTITION BY RANGE(a); +CREATE TABLE prt_p1 PARTITION OF prt FOR VALUES FROM (0) TO (10); +CREATE TABLE prt_p2 PARTITION OF prt FOR VALUES FROM (10) TO (20); +INSERT INTO prt VALUES (0), (0), (0), (0); +INSERT INTO prt VALUES (10), (10), (10), (10); +CREATE INDEX iprt_p1_a ON prt_p1 (a); +CREATE INDEX iprt_p2_a ON prt_p2 (a); +ANALYZE prt; +SELECT explain_memoize(' +SELECT * FROM prt t1 INNER JOIN prt t2 ON t1.a = t2.a;', false); + explain_memoize +------------------------------------------------------------------------------------------ + Append (actual rows=32 loops=N) + -> Nested Loop (actual rows=16 loops=N) + -> Index Only Scan using iprt_p1_a on prt_p1 t1_1 (actual rows=4 loops=N) + Heap Fetches: N + -> Memoize (actual rows=4 loops=N) + Cache Key: t1_1.a + Cache Mode: logical + Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using iprt_p1_a on prt_p1 t2_1 (actual rows=4 loops=N) + Index Cond: (a = t1_1.a) + Heap Fetches: N + -> Nested Loop (actual rows=16 loops=N) + -> Index Only Scan using iprt_p2_a on prt_p2 t1_2 (actual rows=4 loops=N) + Heap Fetches: N + -> Memoize (actual rows=4 loops=N) + Cache Key: t1_2.a + Cache Mode: logical + Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Index Only Scan using iprt_p2_a on prt_p2 t2_2 (actual rows=4 loops=N) + Index Cond: (a = t1_2.a) + Heap Fetches: N +(21 rows) + +-- Ensure memoize works with parameterized union-all Append path +SET enable_partitionwise_join TO off; +SELECT explain_memoize(' +SELECT * FROM prt_p1 t1 INNER JOIN +(SELECT * FROM prt_p1 UNION ALL SELECT * FROM prt_p2) t2 +ON t1.a = t2.a;', false); + explain_memoize +------------------------------------------------------------------------------------- + Nested Loop (actual rows=16 loops=N) + -> Index Only Scan using iprt_p1_a on prt_p1 t1 (actual rows=4 loops=N) + Heap Fetches: N + -> Memoize (actual rows=4 
loops=N) + Cache Key: t1.a + Cache Mode: logical + Hits: 3 Misses: 1 Evictions: Zero Overflows: 0 Memory Usage: NkB + -> Append (actual rows=4 loops=N) + -> Index Only Scan using iprt_p1_a on prt_p1 (actual rows=4 loops=N) + Index Cond: (a = t1.a) + Heap Fetches: N + -> Index Only Scan using iprt_p2_a on prt_p2 (actual rows=0 loops=N) + Index Cond: (a = t1.a) + Heap Fetches: N +(14 rows) + +DROP TABLE prt; +RESET enable_partitionwise_join; +-- Exercise Memoize code that flushes the cache when a parameter changes which +-- is not part of the cache key. +-- Ensure we get a Memoize plan +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 t0 +WHERE unique1 < 3 + AND EXISTS ( + SELECT 1 FROM tenk1 t1 + INNER JOIN tenk1 t2 ON t1.unique1 = t2.hundred + WHERE t0.ten = t1.twenty AND t0.two <> t2.four OFFSET 0); + QUERY PLAN +---------------------------------------------------------------- + Index Scan using tenk1_unique1 on tenk1 t0 + Index Cond: (unique1 < 3) + Filter: (SubPlan 1) + SubPlan 1 + -> Nested Loop + -> Index Scan using tenk1_hundred on tenk1 t2 + Filter: (t0.two <> four) + -> Memoize + Cache Key: t2.hundred + Cache Mode: logical + -> Index Scan using tenk1_unique1 on tenk1 t1 + Index Cond: (unique1 = t2.hundred) + Filter: (t0.ten = twenty) +(13 rows) + +-- Ensure the above query returns the correct result +SELECT unique1 FROM tenk1 t0 +WHERE unique1 < 3 + AND EXISTS ( + SELECT 1 FROM tenk1 t1 + INNER JOIN tenk1 t2 ON t1.unique1 = t2.hundred + WHERE t0.ten = t1.twenty AND t0.two <> t2.four OFFSET 0); + unique1 +--------- + 2 +(1 row) + +RESET enable_seqscan; +RESET enable_mergejoin; +RESET work_mem; +RESET hash_mem_multiplier; +RESET enable_bitmapscan; +RESET enable_hashjoin; +-- Test parallel plans with Memoize +SET min_parallel_table_scan_size TO 0; +SET parallel_setup_cost TO 0; +SET parallel_tuple_cost TO 0; +SET max_parallel_workers_per_gather TO 2; +-- Ensure we get a parallel plan. +EXPLAIN (COSTS OFF) +SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, +LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2 +WHERE t1.unique1 < 1000; + QUERY PLAN +------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Nested Loop + -> Parallel Bitmap Heap Scan on tenk1 t1 + Recheck Cond: (unique1 < 1000) + -> Bitmap Index Scan on tenk1_unique1 + Index Cond: (unique1 < 1000) + -> Memoize + Cache Key: t1.twenty + Cache Mode: logical + -> Index Only Scan using tenk1_unique1 on tenk1 t2 + Index Cond: (unique1 = t1.twenty) +(14 rows) + +-- And ensure the parallel plan gives us the correct results. 
+SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1, +LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2 +WHERE t1.unique1 < 1000; + count | avg +-------+-------------------- + 1000 | 9.5000000000000000 +(1 row) + +RESET max_parallel_workers_per_gather; +RESET parallel_tuple_cost; +RESET parallel_setup_cost; +RESET min_parallel_table_scan_size; diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out new file mode 100644 index 0000000..f87905f --- /dev/null +++ b/src/test/regress/expected/merge.out @@ -0,0 +1,2250 @@ +-- +-- MERGE +-- +CREATE USER regress_merge_privs; +CREATE USER regress_merge_no_privs; +DROP TABLE IF EXISTS target; +NOTICE: table "target" does not exist, skipping +DROP TABLE IF EXISTS source; +NOTICE: table "source" does not exist, skipping +CREATE TABLE target (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source (sid integer, delta integer) -- no index + WITH (autovacuum_enabled=off); +INSERT INTO target VALUES (1, 10); +INSERT INTO target VALUES (2, 20); +INSERT INTO target VALUES (3, 30); +SELECT t.ctid is not null as matched, t.*, s.* FROM source s FULL OUTER JOIN target t ON s.sid = t.tid ORDER BY t.tid, s.sid; + matched | tid | balance | sid | delta +---------+-----+---------+-----+------- + t | 1 | 10 | | + t | 2 | 20 | | + t | 3 | 30 | | +(3 rows) + +ALTER TABLE target OWNER TO regress_merge_privs; +ALTER TABLE source OWNER TO regress_merge_privs; +CREATE TABLE target2 (tid integer, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE source2 (sid integer, delta integer) + WITH (autovacuum_enabled=off); +ALTER TABLE target2 OWNER TO regress_merge_no_privs; +ALTER TABLE source2 OWNER TO regress_merge_no_privs; +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +EXPLAIN (COSTS OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; + QUERY PLAN +---------------------------------------- + Merge on target t + -> Merge Join + Merge Cond: (t.tid = s.sid) + -> Sort + Sort Key: t.tid + -> Seq Scan on target t + -> Sort + Sort Key: s.sid + -> Seq Scan on source s +(9 rows) + +-- +-- Errors +-- +MERGE INTO target t RANDOMWORD +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +ERROR: syntax error at or near "RANDOMWORD" +LINE 1: MERGE INTO target t RANDOMWORD + ^ +-- MATCHED/INSERT error +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + INSERT DEFAULT VALUES; +ERROR: syntax error at or near "INSERT" +LINE 5: INSERT DEFAULT VALUES; + ^ +-- incorrectly specifying INTO target +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT INTO target DEFAULT VALUES; +ERROR: syntax error at or near "INTO" +LINE 5: INSERT INTO target DEFAULT VALUES; + ^ +-- Multiple VALUES clause +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (1,1), (2,2); +ERROR: syntax error at or near "," +LINE 5: INSERT VALUES (1,1), (2,2); + ^ +-- SELECT query for INSERT +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT SELECT (1, 1); +ERROR: syntax error at or near "SELECT" +LINE 5: INSERT SELECT (1, 1); + ^ +-- NOT MATCHED/UPDATE +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; +ERROR: syntax error at or near "UPDATE" +LINE 5: UPDATE SET balance = 0; + ^ +-- UPDATE tablename +MERGE INTO target t +USING 
source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE target SET balance = 0; +ERROR: syntax error at or near "target" +LINE 5: UPDATE target SET balance = 0; + ^ +-- source and target names the same +MERGE INTO target +USING target +ON tid = tid +WHEN MATCHED THEN DO NOTHING; +ERROR: name "target" specified more than once +DETAIL: The name is used both as MERGE target table and data source. +-- used in a CTE +WITH foo AS ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) SELECT * FROM foo; +ERROR: MERGE not supported in WITH query +LINE 1: WITH foo AS ( + ^ +-- used in COPY +COPY ( + MERGE INTO target USING source ON (true) + WHEN MATCHED THEN DELETE +) TO stdout; +ERROR: MERGE not supported in COPY +-- unsupported relation types +-- view +CREATE VIEW tv AS SELECT * FROM target; +MERGE INTO tv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ERROR: cannot execute MERGE on relation "tv" +DETAIL: This operation is not supported for views. +DROP VIEW tv; +-- materialized view +CREATE MATERIALIZED VIEW mv AS SELECT * FROM target; +MERGE INTO mv t +USING source s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ERROR: cannot execute MERGE on relation "mv" +DETAIL: This operation is not supported for materialized views. +DROP MATERIALIZED VIEW mv; +-- permissions +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +ERROR: permission denied for table source2 +GRANT INSERT ON target TO regress_merge_no_privs; +SET SESSION AUTHORIZATION regress_merge_no_privs; +MERGE INTO target +USING source2 +ON target.tid = source2.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +ERROR: permission denied for table target +GRANT UPDATE ON target2 TO regress_merge_privs; +SET SESSION AUTHORIZATION regress_merge_privs; +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN MATCHED THEN + DELETE; +ERROR: permission denied for table target2 +MERGE INTO target2 +USING source +ON target2.tid = source.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ERROR: permission denied for table target2 +-- check if the target can be accessed from source relation subquery; we should +-- not be able to do so +MERGE INTO target t +USING (SELECT * FROM source WHERE t.tid > sid) s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ERROR: invalid reference to FROM-clause entry for table "t" +LINE 2: USING (SELECT * FROM source WHERE t.tid > sid) s + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. 
+-- +-- initial tests +-- +-- zero rows in source has no effect +MERGE INTO target +USING source +ON target.tid = source.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +ROLLBACK; +-- insert some non-matching source rows to work from +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; + sid | delta +-----+------- + 4 | 40 +(1 row) + +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + DO NOTHING; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT DEFAULT VALUES; +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + | +(4 rows) + +ROLLBACK; +-- index plans +INSERT INTO target SELECT generate_series(1000,2500), 0; +ALTER TABLE target ADD PRIMARY KEY (tid); +ANALYZE target; +EXPLAIN (COSTS OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; + QUERY PLAN +---------------------------------------- + Merge on target t + -> Hash Join + Hash Cond: (s.sid = t.tid) + -> Seq Scan on source s + -> Hash + -> Seq Scan on target t +(6 rows) + +EXPLAIN (COSTS OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; + QUERY PLAN +---------------------------------------- + Merge on target t + -> Hash Join + Hash Cond: (s.sid = t.tid) + -> Seq Scan on source s + -> Hash + -> Seq Scan on target t +(6 rows) + +EXPLAIN (COSTS OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); + QUERY PLAN +---------------------------------------- + Merge on target t + -> Hash Left Join + Hash Cond: (s.sid = t.tid) + -> Seq Scan on source s + -> Hash + -> Seq Scan on target t +(6 rows) + +DELETE FROM target WHERE tid > 100; +ANALYZE target; +-- insert some matching source rows to work from +INSERT INTO source VALUES (2, 5); +INSERT INTO source VALUES (3, 20); +SELECT * FROM source ORDER BY sid; + sid | delta +-----+------- + 2 | 5 + 3 | 20 + 4 | 40 +(3 rows) + +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +-- equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 0 + 3 | 0 +(3 rows) + +ROLLBACK; +-- equivalent of a DELETE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 +(1 row) + +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + 
INSERT VALUES (4, NULL); +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | +(4 rows) + +ROLLBACK; +-- duplicate source row causes multiple target row update ERROR +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; + sid | delta +-----+------- + 2 | 5 + 2 | 5 + 3 | 20 + 4 | 40 +(4 rows) + +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0; +ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + DELETE; +ERROR: MERGE command cannot affect row a second time +HINT: Ensure that not more than one source row matches any one target row. +ROLLBACK; +-- remove duplicate MATCHED data from source data +DELETE FROM source WHERE sid = 2; +INSERT INTO source VALUES (2, 5); +SELECT * FROM source ORDER BY sid; + sid | delta +-----+------- + 2 | 5 + 3 | 20 + 4 | 40 +(3 rows) + +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +-- duplicate source row on INSERT should fail because of target_pkey +INSERT INTO source VALUES (4, 40); +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, NULL); +ERROR: duplicate key value violates unique constraint "target_pkey" +DETAIL: Key (tid)=(4) already exists. +SELECT * FROM target ORDER BY tid; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- remove duplicate NOT MATCHED data from source data +DELETE FROM source WHERE sid = 4; +INSERT INTO source VALUES (4, 40); +SELECT * FROM source ORDER BY sid; + sid | delta +-----+------- + 2 | 5 + 3 | 20 + 4 | 40 +(3 rows) + +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +-- remove constraints +alter table target drop CONSTRAINT target_pkey; +alter table target alter column tid drop not null; +-- multiple actions +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4) +WHEN MATCHED THEN + UPDATE SET balance = 0; +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 0 + 3 | 0 + 4 | 4 +(4 rows) + +ROLLBACK; +-- should be equivalent +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = 0 +WHEN NOT MATCHED THEN + INSERT VALUES (4, 4); +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 0 + 3 | 0 + 4 | 4 +(4 rows) + +ROLLBACK; +-- column references +-- do a simple equivalent of an UPDATE join +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta; +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 25 + 3 | 50 +(3 rows) + +ROLLBACK; +-- do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | 40 +(4 rows) + +ROLLBACK; +-- and again with duplicate source rows +INSERT INTO source VALUES (5, 50); +INSERT INTO source VALUES (5, 50); +-- 
do a simple equivalent of an INSERT SELECT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | 40 + 5 | 50 + 5 | 50 +(6 rows) + +ROLLBACK; +-- removing duplicate source rows +DELETE FROM source WHERE sid = 5; +-- and again with explicitly identified column list +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | 40 +(4 rows) + +ROLLBACK; +-- and again with a subtle error: referring to non-existent target row for NOT MATCHED +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +-- and again with a constant ON clause +BEGIN; +MERGE INTO target t +USING source AS s +ON (SELECT true) +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (t.tid, s.delta); +ERROR: invalid reference to FROM-clause entry for table "t" +LINE 5: INSERT (tid, balance) VALUES (t.tid, s.delta); + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM target ORDER BY tid; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +-- now the classic UPSERT +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance + s.delta +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 25 + 3 | 50 + 4 | 40 +(4 rows) + +ROLLBACK; +-- unreachable WHEN clause should ERROR +BEGIN; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED THEN /* Terminal WHEN clause for MATCHED */ + DELETE +WHEN MATCHED THEN + UPDATE SET balance = t.balance - s.delta; +ERROR: unreachable WHEN clause specified after unconditional WHEN clause +ROLLBACK; +-- conditional WHEN clause +CREATE TABLE wq_target (tid integer not null, balance integer DEFAULT -1) + WITH (autovacuum_enabled=off); +CREATE TABLE wq_source (balance integer, sid integer) + WITH (autovacuum_enabled=off); +INSERT INTO wq_source (sid, balance) VALUES (1, 100); +BEGIN; +-- try a simple INSERT with default values first +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | -1 +(1 row) + +ROLLBACK; +-- this time with a FALSE condition +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND FALSE THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; + tid | balance +-----+--------- +(0 rows) + +-- this time with an actual condition which returns false +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance <> 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; + tid | balance +-----+--------- +(0 rows) + +BEGIN; +-- and now with a condition which returns true +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES 
(s.sid); +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | -1 +(1 row) + +ROLLBACK; +-- conditions in the NOT MATCHED clause can only refer to source columns +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND t.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +ERROR: invalid reference to FROM-clause entry for table "t" +LINE 3: WHEN NOT MATCHED AND t.balance = 100 THEN + ^ +DETAIL: There is an entry for table "t", but it cannot be referenced from this part of the query. +SELECT * FROM wq_target; +ERROR: current transaction is aborted, commands ignored until end of transaction block +ROLLBACK; +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN NOT MATCHED AND s.balance = 100 THEN + INSERT (tid) VALUES (s.sid); +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | -1 +(1 row) + +-- conditions in MATCHED clause can refer to both source and target +SELECT * FROM wq_source; + balance | sid +---------+----- + 100 | 1 +(1 row) + +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 99 +(1 row) + +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 99 +(1 row) + +-- check if AND works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 99 +(1 row) + +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 AND s.balance = 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 199 +(1 row) + +-- check if OR works +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 99 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 199 +(1 row) + +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance = 199 OR s.balance > 100 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 299 +(1 row) + +-- check source-side whole-row references +BEGIN; +MERGE INTO wq_target t +USING wq_source s ON (t.tid = s.sid) +WHEN matched and t = s or t.tid = s.sid THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 399 +(1 row) + +ROLLBACK; +-- check if subqueries work in the conditions? 
+MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.balance > (SELECT max(balance) FROM target) THEN + UPDATE SET balance = t.balance + s.balance; +-- check if we can access system columns in the conditions +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.xmin = t.xmax THEN + UPDATE SET balance = t.balance + s.balance; +ERROR: cannot use system column "xmin" in MERGE WHEN condition +LINE 3: WHEN MATCHED AND t.xmin = t.xmax THEN + ^ +MERGE INTO wq_target t +USING wq_source s ON t.tid = s.sid +WHEN MATCHED AND t.tableoid >= 0 THEN + UPDATE SET balance = t.balance + s.balance; +SELECT * FROM wq_target; + tid | balance +-----+--------- + 1 | 499 +(1 row) + +DROP TABLE wq_target, wq_source; +-- test triggers +create or replace function merge_trigfunc () returns trigger +language plpgsql as +$$ +DECLARE + line text; +BEGIN + SELECT INTO line format('%s %s %s trigger%s', + TG_WHEN, TG_OP, TG_LEVEL, CASE + WHEN TG_OP = 'INSERT' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', NEW) + WHEN TG_OP = 'UPDATE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s -> %s', OLD, NEW) + WHEN TG_OP = 'DELETE' AND TG_LEVEL = 'ROW' + THEN format(' row: %s', OLD) + END); + + RAISE NOTICE '%', line; + IF (TG_WHEN = 'BEFORE' AND TG_LEVEL = 'ROW') THEN + IF (TG_OP = 'DELETE') THEN + RETURN OLD; + ELSE + RETURN NEW; + END IF; + ELSE + RETURN NULL; + END IF; +END; +$$; +CREATE TRIGGER merge_bsi BEFORE INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsu BEFORE UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bsd BEFORE DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asi AFTER INSERT ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asu AFTER UPDATE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_asd AFTER DELETE ON target FOR EACH STATEMENT EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bri BEFORE INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_bru BEFORE UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_brd BEFORE DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ari AFTER INSERT ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_aru AFTER UPDATE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +CREATE TRIGGER merge_ard AFTER DELETE ON target FOR EACH ROW EXECUTE PROCEDURE merge_trigfunc (); +-- now the classic UPSERT, with a DELETE +BEGIN; +UPDATE target SET balance = 0 WHERE tid = 3; +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,0) +NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,0) +NOTICE: AFTER UPDATE STATEMENT trigger +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (s.sid, s.delta); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE DELETE STATEMENT trigger +NOTICE: BEFORE DELETE ROW trigger row: (3,0) +NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) +NOTICE: BEFORE INSERT ROW trigger row: (4,40) +NOTICE: AFTER DELETE ROW trigger row: (3,0) +NOTICE: AFTER 
UPDATE ROW trigger row: (2,20) -> (2,15) +NOTICE: AFTER INSERT ROW trigger row: (4,40) +NOTICE: AFTER DELETE STATEMENT trigger +NOTICE: AFTER UPDATE STATEMENT trigger +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 15 + 4 | 40 +(3 rows) + +ROLLBACK; +-- Test behavior of triggers that turn UPDATE/DELETE into no-ops +create or replace function skip_merge_op() returns trigger +language plpgsql as +$$ +BEGIN + RETURN NULL; +END; +$$; +SELECT * FROM target full outer join source on (sid = tid); + tid | balance | sid | delta +-----+---------+-----+------- + 3 | 30 | 3 | 20 + 2 | 20 | 2 | 5 + | | 4 | 40 + 1 | 10 | | +(4 rows) + +create trigger merge_skip BEFORE INSERT OR UPDATE or DELETE + ON target FOR EACH ROW EXECUTE FUNCTION skip_merge_op(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND s.sid = 3 THEN UPDATE SET balance = t.balance + s.delta +WHEN MATCHED THEN DELETE +WHEN NOT MATCHED THEN INSERT VALUES (sid, delta); +IF FOUND THEN + RAISE NOTICE 'Found'; +ELSE + RAISE NOTICE 'Not found'; +END IF; +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE DELETE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,50) +NOTICE: BEFORE DELETE ROW trigger row: (2,20) +NOTICE: BEFORE INSERT ROW trigger row: (4,40) +NOTICE: AFTER DELETE STATEMENT trigger +NOTICE: AFTER UPDATE STATEMENT trigger +NOTICE: AFTER INSERT STATEMENT trigger +NOTICE: Not found +NOTICE: ROW_COUNT = 0 +SELECT * FROM target FULL OUTER JOIN source ON (sid = tid); + tid | balance | sid | delta +-----+---------+-----+------- + 3 | 30 | 3 | 20 + 2 | 20 | 2 | 5 + | | 4 | 40 + 1 | 10 | | +(4 rows) + +DROP TRIGGER merge_skip ON target; +DROP FUNCTION skip_merge_op(); +-- test from PL/pgSQL +-- make sure MERGE INTO isn't interpreted to mean returning variables like SELECT INTO +BEGIN; +DO LANGUAGE plpgsql $$ +BEGIN +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN MATCHED AND t.balance > s.delta THEN + UPDATE SET balance = t.balance - s.delta; +END; +$$; +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,10) +NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,15) +NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,10) +NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,15) +NOTICE: AFTER UPDATE STATEMENT trigger +ROLLBACK; +--source constants +BEGIN; +MERGE INTO target t +USING (SELECT 9 AS sid, 57 AS delta) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE INSERT ROW trigger row: (9,57) +NOTICE: AFTER INSERT ROW trigger row: (9,57) +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 9 | 57 +(4 rows) + +ROLLBACK; +--source query +BEGIN; +MERGE INTO target t +USING (SELECT sid, delta FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE INSERT ROW trigger row: (4,40) +NOTICE: AFTER INSERT ROW trigger row: (4,40) +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | 40 +(4 rows) + +ROLLBACK; 
+BEGIN; +MERGE INTO target t +USING (SELECT sid, delta as newname FROM source WHERE delta > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.newname); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE INSERT ROW trigger row: (4,40) +NOTICE: AFTER INSERT ROW trigger row: (4,40) +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | 40 +(4 rows) + +ROLLBACK; +--self-merge +BEGIN; +MERGE INTO target t1 +USING target t2 +ON t1.tid = t2.tid +WHEN MATCHED THEN + UPDATE SET balance = t1.balance + t2.balance +WHEN NOT MATCHED THEN + INSERT VALUES (t2.tid, t2.balance); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,20) +NOTICE: BEFORE UPDATE ROW trigger row: (2,20) -> (2,40) +NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,60) +NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,20) +NOTICE: AFTER UPDATE ROW trigger row: (2,20) -> (2,40) +NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,60) +NOTICE: AFTER UPDATE STATEMENT trigger +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 20 + 2 | 40 + 3 | 60 +(3 rows) + +ROLLBACK; +BEGIN; +MERGE INTO target t +USING (SELECT tid as sid, balance as delta FROM target WHERE balance > 0) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 +(3 rows) + +ROLLBACK; +BEGIN; +MERGE INTO target t +USING +(SELECT sid, max(delta) AS delta + FROM source + GROUP BY sid + HAVING count(*) = 1 + ORDER BY sid ASC) AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + INSERT (tid, balance) VALUES (s.sid, s.delta); +NOTICE: BEFORE INSERT STATEMENT trigger +NOTICE: BEFORE INSERT ROW trigger row: (4,40) +NOTICE: AFTER INSERT ROW trigger row: (4,40) +NOTICE: AFTER INSERT STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 30 + 4 | 40 +(4 rows) + +ROLLBACK; +-- plpgsql parameters and results +BEGIN; +CREATE FUNCTION merge_func (p_id integer, p_bal integer) +RETURNS INTEGER +LANGUAGE plpgsql +AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO target t +USING (SELECT p_id AS sid) AS s +ON t.tid = s.sid +WHEN MATCHED THEN + UPDATE SET balance = t.balance - p_bal; +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(3, 4); +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (3,30) -> (3,26) +NOTICE: AFTER UPDATE ROW trigger row: (3,30) -> (3,26) +NOTICE: AFTER UPDATE STATEMENT trigger + merge_func +------------ + 1 +(1 row) + +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 10 + 2 | 20 + 3 | 26 +(3 rows) + +ROLLBACK; +-- PREPARE +BEGIN; +prepare foom as merge into target t using (select 1 as sid) s on (t.tid = s.sid) when matched then update set balance = 1; +execute foom; +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1) +NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1) +NOTICE: AFTER UPDATE STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 1 + 2 | 20 + 3 | 30 +(3 rows) + +ROLLBACK; +BEGIN; +PREPARE foom2 (integer, integer) AS 
+MERGE INTO target t +USING (SELECT 1) s +ON t.tid = $1 +WHEN MATCHED THEN +UPDATE SET balance = $2; +--EXPLAIN (ANALYZE ON, COSTS OFF, SUMMARY OFF, TIMING OFF) +execute foom2 (1, 1); +NOTICE: BEFORE UPDATE STATEMENT trigger +NOTICE: BEFORE UPDATE ROW trigger row: (1,10) -> (1,1) +NOTICE: AFTER UPDATE ROW trigger row: (1,10) -> (1,1) +NOTICE: AFTER UPDATE STATEMENT trigger +SELECT * FROM target ORDER BY tid; + tid | balance +-----+--------- + 1 | 1 + 2 | 20 + 3 | 30 +(3 rows) + +ROLLBACK; +-- subqueries in source relation +CREATE TABLE sq_target (tid integer NOT NULL, balance integer) + WITH (autovacuum_enabled=off); +CREATE TABLE sq_source (delta integer, sid integer, balance integer DEFAULT 0) + WITH (autovacuum_enabled=off); +INSERT INTO sq_target(tid, balance) VALUES (1,100), (2,200), (3,300); +INSERT INTO sq_source(sid, delta) VALUES (1,10), (2,20), (4,40); +BEGIN; +MERGE INTO sq_target t +USING (SELECT * FROM sq_source) s +ON tid = sid +WHEN MATCHED AND t.balance > delta THEN + UPDATE SET balance = t.balance + delta; +SELECT * FROM sq_target; + tid | balance +-----+--------- + 3 | 300 + 1 | 110 + 2 | 220 +(3 rows) + +ROLLBACK; +-- try a view +CREATE VIEW v AS SELECT * FROM sq_source WHERE sid < 2; +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = v.balance + delta; +SELECT * FROM sq_target; + tid | balance +-----+--------- + 2 | 200 + 3 | 300 + 1 | 10 +(3 rows) + +ROLLBACK; +-- ambiguous reference to a column +BEGIN; +MERGE INTO sq_target +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ERROR: column reference "balance" is ambiguous +LINE 5: UPDATE SET balance = balance + delta + ^ +ROLLBACK; +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +SELECT * FROM sq_target; + tid | balance +-----+--------- + 2 | 200 + 3 | 300 + -1 | -11 +(3 rows) + +ROLLBACK; +-- CTEs +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +WITH targq AS ( + SELECT * FROM v +) +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE; +ROLLBACK; +-- RETURNING +BEGIN; +INSERT INTO sq_source (sid, balance, delta) VALUES (-1, -1, -10); +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND tid > 2 THEN + UPDATE SET balance = t.balance + delta +WHEN NOT MATCHED THEN + INSERT (balance, tid) VALUES (balance + delta, sid) +WHEN MATCHED AND tid < 2 THEN + DELETE +RETURNING *; +ERROR: syntax error at or near "RETURNING" +LINE 10: RETURNING *; + ^ +ROLLBACK; +-- EXPLAIN +CREATE TABLE ex_mtarget (a int, b int) + WITH (autovacuum_enabled=off); +CREATE TABLE ex_msource (a int, b int) + WITH (autovacuum_enabled=off); +INSERT INTO ex_mtarget SELECT i, i*10 FROM generate_series(1,100,2) i; +INSERT INTO ex_msource SELECT i, i*10 FROM generate_series(1,100,1) i; +CREATE FUNCTION explain_merge(query text) RETURNS SETOF text +LANGUAGE plpgsql AS +$$ +DECLARE ln text; +BEGIN + FOR ln IN + EXECUTE 'explain (analyze, timing off, summary off, costs off) ' || + query 
+ LOOP + ln := regexp_replace(ln, '(Memory( Usage)?|Buckets|Batches): \S*', '\1: xxx', 'g'); + RETURN NEXT ln; + END LOOP; +END; +$$; +-- only updates +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b + 1'); + explain_merge +---------------------------------------------------------------------- + Merge on ex_mtarget t (actual rows=0 loops=1) + Tuples: updated=50 + -> Merge Join (actual rows=50 loops=1) + Merge Cond: (t.a = s.a) + -> Sort (actual rows=50 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_mtarget t (actual rows=50 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: s.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_msource s (actual rows=100 loops=1) +(12 rows) + +-- only updates to selected tuples +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1'); + explain_merge +---------------------------------------------------------------------- + Merge on ex_mtarget t (actual rows=0 loops=1) + Tuples: updated=5 skipped=45 + -> Merge Join (actual rows=50 loops=1) + Merge Cond: (t.a = s.a) + -> Sort (actual rows=50 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_mtarget t (actual rows=50 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: s.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_msource s (actual rows=100 loops=1) +(12 rows) + +-- updates + deletes +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 10 AND t.a <= 20 THEN + DELETE'); + explain_merge +---------------------------------------------------------------------- + Merge on ex_mtarget t (actual rows=0 loops=1) + Tuples: updated=5 deleted=5 skipped=40 + -> Merge Join (actual rows=50 loops=1) + Merge Cond: (t.a = s.a) + -> Sort (actual rows=50 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_mtarget t (actual rows=50 loops=1) + -> Sort (actual rows=100 loops=1) + Sort Key: s.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_msource s (actual rows=100 loops=1) +(12 rows) + +-- only inserts +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN NOT MATCHED AND s.a < 10 THEN + INSERT VALUES (a, b)'); + explain_merge +---------------------------------------------------------------------- + Merge on ex_mtarget t (actual rows=0 loops=1) + Tuples: inserted=4 skipped=96 + -> Merge Left Join (actual rows=100 loops=1) + Merge Cond: (s.a = t.a) + -> Sort (actual rows=100 loops=1) + Sort Key: s.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_msource s (actual rows=100 loops=1) + -> Sort (actual rows=45 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_mtarget t (actual rows=45 loops=1) +(12 rows) + +-- all three +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a +WHEN MATCHED AND t.a < 10 THEN + UPDATE SET b = t.b + 1 +WHEN MATCHED AND t.a >= 30 AND t.a <= 40 THEN + DELETE +WHEN NOT MATCHED AND s.a < 20 THEN + INSERT VALUES (a, b)'); + explain_merge +---------------------------------------------------------------------- + Merge on ex_mtarget t (actual rows=0 loops=1) + Tuples: inserted=10 updated=9 deleted=5 skipped=76 + -> Merge Left Join (actual rows=100 loops=1) + Merge Cond: (s.a = t.a) + -> Sort (actual rows=100 loops=1) + 
Sort Key: s.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_msource s (actual rows=100 loops=1) + -> Sort (actual rows=49 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_mtarget t (actual rows=49 loops=1) +(12 rows) + +-- nothing +SELECT explain_merge(' +MERGE INTO ex_mtarget t USING ex_msource s ON t.a = s.a AND t.a < -1000 +WHEN MATCHED AND t.a < 10 THEN + DO NOTHING'); + explain_merge +-------------------------------------------------------------------- + Merge on ex_mtarget t (actual rows=0 loops=1) + -> Merge Join (actual rows=0 loops=1) + Merge Cond: (t.a = s.a) + -> Sort (actual rows=0 loops=1) + Sort Key: t.a + Sort Method: quicksort Memory: xxx + -> Seq Scan on ex_mtarget t (actual rows=0 loops=1) + Filter: (a < '-1000'::integer) + Rows Removed by Filter: 54 + -> Sort (never executed) + Sort Key: s.a + -> Seq Scan on ex_msource s (never executed) +(12 rows) + +DROP TABLE ex_msource, ex_mtarget; +DROP FUNCTION explain_merge(text); +-- Subqueries +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED THEN + UPDATE SET balance = (SELECT count(*) FROM sq_target); +SELECT * FROM sq_target WHERE tid = 1; + tid | balance +-----+--------- + 1 | 3 +(1 row) + +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid +WHEN MATCHED AND (SELECT count(*) > 0 FROM sq_target) THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; + tid | balance +-----+--------- + 1 | 42 +(1 row) + +ROLLBACK; +BEGIN; +MERGE INTO sq_target t +USING v +ON tid = sid AND (SELECT count(*) > 0 FROM sq_target) +WHEN MATCHED THEN + UPDATE SET balance = 42; +SELECT * FROM sq_target WHERE tid = 1; + tid | balance +-----+--------- + 1 | 42 +(1 row) + +ROLLBACK; +DROP TABLE sq_target, sq_source CASCADE; +NOTICE: drop cascades to view v +CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 PARTITION OF pa_target FOR VALUES IN (1,4) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 PARTITION OF pa_target FOR VALUES IN (2,5,6) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 PARTITION OF pa_target FOR VALUES IN (3,8,9) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 PARTITION OF pa_target DEFAULT + WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 1 | 110 | initial updated by merge + 2 | 20 | inserted by merge + 3 | 330 | initial updated by merge + 4 | 40 | inserted by merge + 5 | 550 | initial updated by merge + 6 | 60 | inserted by merge + 7 | 770 | initial updated by merge + 8 | 80 | inserted by merge + 9 | 990 | initial updated by merge + 10 | 100 | inserted by merge + 11 | 1210 | initial updated by merge + 12 | 120 | inserted by merge + 13 | 1430 | initial updated by merge + 14 | 140 | inserted by merge +(14 rows) + +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid 
= s.sid AND tid = 1 + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 1 | 110 | initial updated by merge + 2 | 20 | inserted by merge + 3 | 30 | inserted by merge + 3 | 300 | initial + 4 | 40 | inserted by merge + 5 | 500 | initial + 5 | 50 | inserted by merge + 6 | 60 | inserted by merge + 7 | 700 | initial + 7 | 70 | inserted by merge + 8 | 80 | inserted by merge + 9 | 90 | inserted by merge + 9 | 900 | initial + 10 | 100 | inserted by merge + 11 | 1100 | initial + 11 | 110 | inserted by merge + 12 | 120 | inserted by merge + 13 | 1300 | initial + 13 | 130 | inserted by merge + 14 | 140 | inserted by merge +(20 rows) + +ROLLBACK; +-- try updating the partition key column +BEGIN; +CREATE FUNCTION merge_func() RETURNS integer LANGUAGE plpgsql AS $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +IF FOUND THEN + GET DIAGNOSTICS result := ROW_COUNT; +END IF; +RETURN result; +END; +$$; +SELECT merge_func(); + merge_func +------------ + 14 +(1 row) + +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 2 | 110 | initial updated by merge + 2 | 20 | inserted by merge + 4 | 40 | inserted by merge + 4 | 330 | initial updated by merge + 6 | 550 | initial updated by merge + 6 | 60 | inserted by merge + 8 | 80 | inserted by merge + 8 | 770 | initial updated by merge + 10 | 990 | initial updated by merge + 10 | 100 | inserted by merge + 12 | 1210 | initial updated by merge + 12 | 120 | inserted by merge + 14 | 1430 | initial updated by merge + 14 | 140 | inserted by merge +(14 rows) + +ROLLBACK; +DROP TABLE pa_target CASCADE; +-- The target table is partitioned in the same way, but this time by attaching +-- partitions which have columns in different order, dropped columns etc. 
+CREATE TABLE pa_target (tid integer, balance float, val text) + PARTITION BY LIST (tid); +CREATE TABLE part1 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part2 (balance float, tid integer, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part3 (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE part4 (extraid text, tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +ALTER TABLE part4 DROP COLUMN extraid; +ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4); +ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6); +ALTER TABLE pa_target ATTACH PARTITION part3 FOR VALUES IN (3,8,9); +ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id; +-- try simple MERGE +BEGIN; +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +NOTICE: ROW_COUNT = 14 +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 1 | 110 | initial updated by merge + 2 | 20 | inserted by merge + 3 | 330 | initial updated by merge + 4 | 40 | inserted by merge + 5 | 550 | initial updated by merge + 6 | 60 | inserted by merge + 7 | 770 | initial updated by merge + 8 | 80 | inserted by merge + 9 | 990 | initial updated by merge + 10 | 100 | inserted by merge + 11 | 1210 | initial updated by merge + 12 | 120 | inserted by merge + 13 | 1430 | initial updated by merge + 14 | 140 | inserted by merge +(14 rows) + +ROLLBACK; +-- same with a constant qual +BEGIN; +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND tid IN (1, 5) + WHEN MATCHED AND tid % 5 = 0 THEN DELETE + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 1 | 110 | initial updated by merge + 2 | 20 | inserted by merge + 3 | 30 | inserted by merge + 3 | 300 | initial + 4 | 40 | inserted by merge + 6 | 60 | inserted by merge + 7 | 700 | initial + 7 | 70 | inserted by merge + 8 | 80 | inserted by merge + 9 | 900 | initial + 9 | 90 | inserted by merge + 10 | 100 | inserted by merge + 11 | 110 | inserted by merge + 11 | 1100 | initial + 12 | 120 | inserted by merge + 13 | 1300 | initial + 13 | 130 | inserted by merge + 14 | 140 | inserted by merge +(18 rows) + +ROLLBACK; +-- try updating the partition key column +BEGIN; +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +NOTICE: ROW_COUNT = 14 +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 2 | 110 | initial updated by merge + 2 | 20 | inserted by merge + 4 | 40 | inserted by merge + 4 | 330 
| initial updated by merge + 6 | 550 | initial updated by merge + 6 | 60 | inserted by merge + 8 | 80 | inserted by merge + 8 | 770 | initial updated by merge + 10 | 990 | initial updated by merge + 10 | 100 | inserted by merge + 12 | 1210 | initial updated by merge + 12 | 120 | inserted by merge + 14 | 1430 | initial updated by merge + 14 | 140 | inserted by merge +(14 rows) + +ROLLBACK; +-- as above, but blocked by BEFORE DELETE ROW trigger +BEGIN; +CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS + $$ BEGIN RETURN NULL; END; $$; +CREATE TRIGGER del_trig BEFORE DELETE ON pa_target + FOR EACH ROW EXECUTE PROCEDURE trig_fn(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +NOTICE: ROW_COUNT = 10 +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 1 | 100 | initial + 2 | 20 | inserted by merge + 3 | 300 | initial + 4 | 40 | inserted by merge + 6 | 550 | initial updated by merge + 6 | 60 | inserted by merge + 7 | 700 | initial + 8 | 80 | inserted by merge + 9 | 900 | initial + 10 | 100 | inserted by merge + 12 | 1210 | initial updated by merge + 12 | 120 | inserted by merge + 14 | 1430 | initial updated by merge + 14 | 140 | inserted by merge +(14 rows) + +ROLLBACK; +-- as above, but blocked by BEFORE INSERT ROW trigger +BEGIN; +CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS + $$ BEGIN RETURN NULL; END; $$; +CREATE TRIGGER ins_trig BEFORE INSERT ON pa_target + FOR EACH ROW EXECUTE PROCEDURE trig_fn(); +DO $$ +DECLARE + result integer; +BEGIN +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (sid, delta, 'inserted by merge'); +GET DIAGNOSTICS result := ROW_COUNT; +RAISE NOTICE 'ROW_COUNT = %', result; +END; +$$; +NOTICE: ROW_COUNT = 3 +SELECT * FROM pa_target ORDER BY tid; + tid | balance | val +-----+---------+-------------------------- + 6 | 550 | initial updated by merge + 12 | 1210 | initial updated by merge + 14 | 1430 | initial updated by merge +(3 rows) + +ROLLBACK; +-- test RLS enforcement +BEGIN; +ALTER TABLE pa_target ENABLE ROW LEVEL SECURITY; +ALTER TABLE pa_target FORCE ROW LEVEL SECURITY; +CREATE POLICY pa_target_pol ON pa_target USING (tid != 0); +MERGE INTO pa_target t + USING pa_source s + ON t.tid = s.sid AND t.tid IN (1,2,3,4) + WHEN MATCHED THEN + UPDATE SET tid = tid - 1; +ERROR: new row violates row-level security policy for table "pa_target" +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Sub-partitioning +CREATE TABLE pa_target (logts timestamp, tid integer, balance float, val text) + PARTITION BY RANGE (logts); +CREATE TABLE part_m01 PARTITION OF pa_target + FOR VALUES FROM ('2017-01-01') TO ('2017-02-01') + PARTITION BY LIST (tid); +CREATE TABLE part_m01_odd PARTITION OF part_m01 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m01_even PARTITION OF part_m01 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02 PARTITION OF pa_target + FOR VALUES FROM ('2017-02-01') TO ('2017-03-01') + PARTITION BY LIST (tid); +CREATE TABLE 
part_m02_odd PARTITION OF part_m02 + FOR VALUES IN (1,3,5,7,9) WITH (autovacuum_enabled=off); +CREATE TABLE part_m02_even PARTITION OF part_m02 + FOR VALUES IN (2,4,6,8) WITH (autovacuum_enabled=off); +CREATE TABLE pa_source (sid integer, delta float) + WITH (autovacuum_enabled=off); +-- insert many rows to the source table +INSERT INTO pa_source SELECT id, id * 10 FROM generate_series(1,14) AS id; +-- insert a few rows in the target table (odd numbered tid) +INSERT INTO pa_target SELECT '2017-01-31', id, id * 100, 'initial' FROM generate_series(1,9,3) AS id; +INSERT INTO pa_target SELECT '2017-02-28', id, id * 100, 'initial' FROM generate_series(2,9,3) AS id; +-- try simple MERGE +BEGIN; +MERGE INTO pa_target t + USING (SELECT '2017-01-15' AS slogts, * FROM pa_source WHERE sid < 10) s + ON t.tid = s.sid + WHEN MATCHED THEN + UPDATE SET balance = balance + delta, val = val || ' updated by merge' + WHEN NOT MATCHED THEN + INSERT VALUES (slogts::timestamp, sid, delta, 'inserted by merge'); +SELECT * FROM pa_target ORDER BY tid; + logts | tid | balance | val +--------------------------+-----+---------+-------------------------- + Tue Jan 31 00:00:00 2017 | 1 | 110 | initial updated by merge + Tue Feb 28 00:00:00 2017 | 2 | 220 | initial updated by merge + Sun Jan 15 00:00:00 2017 | 3 | 30 | inserted by merge + Tue Jan 31 00:00:00 2017 | 4 | 440 | initial updated by merge + Tue Feb 28 00:00:00 2017 | 5 | 550 | initial updated by merge + Sun Jan 15 00:00:00 2017 | 6 | 60 | inserted by merge + Tue Jan 31 00:00:00 2017 | 7 | 770 | initial updated by merge + Tue Feb 28 00:00:00 2017 | 8 | 880 | initial updated by merge + Sun Jan 15 00:00:00 2017 | 9 | 90 | inserted by merge +(9 rows) + +ROLLBACK; +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- Partitioned table with primary key +CREATE TABLE pa_target (tid integer PRIMARY KEY) PARTITION BY LIST (tid); +CREATE TABLE pa_targetp PARTITION OF pa_target DEFAULT; +CREATE TABLE pa_source (sid integer); +INSERT INTO pa_source VALUES (1), (2); +EXPLAIN (VERBOSE, COSTS OFF) +MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid + WHEN NOT MATCHED THEN INSERT VALUES (s.sid); + QUERY PLAN +------------------------------------------------------------- + Merge on public.pa_target t + Merge on public.pa_targetp t_1 + -> Hash Left Join + Output: s.sid, s.ctid, t_1.tableoid, t_1.ctid + Inner Unique: true + Hash Cond: (s.sid = t_1.tid) + -> Seq Scan on public.pa_source s + Output: s.sid, s.ctid + -> Hash + Output: t_1.tid, t_1.tableoid, t_1.ctid + -> Seq Scan on public.pa_targetp t_1 + Output: t_1.tid, t_1.tableoid, t_1.ctid +(12 rows) + +MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid + WHEN NOT MATCHED THEN INSERT VALUES (s.sid); +TABLE pa_target; + tid +----- + 1 + 2 +(2 rows) + +-- Partition-less partitioned table +-- (the bug we are checking for appeared only if table had partitions before) +DROP TABLE pa_targetp; +EXPLAIN (VERBOSE, COSTS OFF) +MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid + WHEN NOT MATCHED THEN INSERT VALUES (s.sid); + QUERY PLAN +-------------------------------------------- + Merge on public.pa_target t + -> Hash Left Join + Output: s.sid, s.ctid, t.ctid + Inner Unique: true + Hash Cond: (s.sid = t.tid) + -> Seq Scan on public.pa_source s + Output: s.sid, s.ctid + -> Hash + Output: t.tid, t.ctid + -> Result + Output: t.tid, t.ctid + One-Time Filter: false +(12 rows) + +MERGE INTO pa_target t USING pa_source s ON t.tid = s.sid + WHEN NOT MATCHED THEN INSERT VALUES (s.sid); +ERROR: no partition of relation 
"pa_target" found for row +DETAIL: Partition key of the failing row contains (tid) = (1). +DROP TABLE pa_source; +DROP TABLE pa_target CASCADE; +-- some complex joins on the source side +CREATE TABLE cj_target (tid integer, balance float, val text) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source1 (sid1 integer, scat integer, delta integer) + WITH (autovacuum_enabled=off); +CREATE TABLE cj_source2 (sid2 integer, sval text) + WITH (autovacuum_enabled=off); +INSERT INTO cj_source1 VALUES (1, 10, 100); +INSERT INTO cj_source1 VALUES (1, 20, 200); +INSERT INTO cj_source1 VALUES (2, 20, 300); +INSERT INTO cj_source1 VALUES (3, 10, 400); +INSERT INTO cj_source2 VALUES (1, 'initial source2'); +INSERT INTO cj_source2 VALUES (2, 'initial source2'); +INSERT INTO cj_source2 VALUES (3, 'initial source2'); +-- source relation is an unaliased join +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid1, delta, sval); +-- try accessing columns from either side of the source join +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta, sval) +WHEN MATCHED THEN + DELETE; +-- some simple expressions in INSERT targetlist +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 +ON t.tid = sid1 +WHEN NOT MATCHED THEN + INSERT VALUES (sid2, delta + scat, sval) +WHEN MATCHED THEN + UPDATE SET val = val || ' updated by merge'; +MERGE INTO cj_target t +USING cj_source2 s2 + INNER JOIN cj_source1 s1 ON sid1 = sid2 AND scat = 20 +ON t.tid = sid1 +WHEN MATCHED THEN + UPDATE SET val = val || ' ' || delta::text; +SELECT * FROM cj_target; + tid | balance | val +-----+---------+---------------------------------- + 3 | 400 | initial source2 updated by merge + 1 | 220 | initial source2 200 + 1 | 110 | initial source2 200 + 2 | 320 | initial source2 300 +(4 rows) + +-- try it with an outer join and PlaceHolderVar +MERGE INTO cj_target t +USING (SELECT *, 'join input'::text AS phv FROM cj_source1) fj + FULL JOIN cj_source2 fj2 ON fj.scat = fj2.sid2 * 10 +ON t.tid = fj.scat +WHEN NOT MATCHED THEN + INSERT (tid, balance, val) VALUES (fj.scat, fj.delta, fj.phv); +SELECT * FROM cj_target; + tid | balance | val +-----+---------+---------------------------------- + 3 | 400 | initial source2 updated by merge + 1 | 220 | initial source2 200 + 1 | 110 | initial source2 200 + 2 | 320 | initial source2 300 + 10 | 100 | join input + 10 | 400 | join input + 20 | 200 | join input + 20 | 300 | join input + | | +(9 rows) + +ALTER TABLE cj_source1 RENAME COLUMN sid1 TO sid; +ALTER TABLE cj_source2 RENAME COLUMN sid2 TO sid; +TRUNCATE cj_target; +MERGE INTO cj_target t +USING cj_source1 s1 + INNER JOIN cj_source2 s2 ON s1.sid = s2.sid +ON t.tid = s1.sid +WHEN NOT MATCHED THEN + INSERT VALUES (s2.sid, delta, sval); +DROP TABLE cj_source2, cj_source1, cj_target; +-- Function scans +CREATE TABLE fs_target (a int, b int, c text) + WITH (autovacuum_enabled=off); +MERGE INTO fs_target t +USING generate_series(1,100,1) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1); +MERGE INTO fs_target t +USING generate_series(1,100,2) AS id +ON t.a = id +WHEN MATCHED THEN + UPDATE SET b = b + id, c = 'updated '|| id.*::text +WHEN NOT MATCHED THEN + INSERT VALUES (id, -1, 'inserted ' || id.*::text); +SELECT count(*) FROM fs_target; + count +------- + 100 +(1 
row) + +DROP TABLE fs_target; +-- SERIALIZABLE test +-- handled in isolation tests +-- Inheritance-based partitioning +CREATE TABLE measurement ( + city_id int not null, + logdate date not null, + peaktemp int, + unitsales int +) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m02 ( + CHECK ( logdate >= DATE '2006-02-01' AND logdate < DATE '2006-03-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2006m03 ( + CHECK ( logdate >= DATE '2006-03-01' AND logdate < DATE '2006-04-01' ) +) INHERITS (measurement) WITH (autovacuum_enabled=off); +CREATE TABLE measurement_y2007m01 ( + filler text, + peaktemp int, + logdate date not null, + city_id int not null, + unitsales int + CHECK ( logdate >= DATE '2007-01-01' AND logdate < DATE '2007-02-01') +) WITH (autovacuum_enabled=off); +ALTER TABLE measurement_y2007m01 DROP COLUMN filler; +ALTER TABLE measurement_y2007m01 INHERIT measurement; +INSERT INTO measurement VALUES (0, '2005-07-21', 5, 15); +CREATE OR REPLACE FUNCTION measurement_insert_trigger() +RETURNS TRIGGER AS $$ +BEGIN + IF ( NEW.logdate >= DATE '2006-02-01' AND + NEW.logdate < DATE '2006-03-01' ) THEN + INSERT INTO measurement_y2006m02 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2006-03-01' AND + NEW.logdate < DATE '2006-04-01' ) THEN + INSERT INTO measurement_y2006m03 VALUES (NEW.*); + ELSIF ( NEW.logdate >= DATE '2007-01-01' AND + NEW.logdate < DATE '2007-02-01' ) THEN + INSERT INTO measurement_y2007m01 (city_id, logdate, peaktemp, unitsales) + VALUES (NEW.*); + ELSE + RAISE EXCEPTION 'Date out of range. Fix the measurement_insert_trigger() function!'; + END IF; + RETURN NULL; +END; +$$ LANGUAGE plpgsql ; +CREATE TRIGGER insert_measurement_trigger + BEFORE INSERT ON measurement + FOR EACH ROW EXECUTE PROCEDURE measurement_insert_trigger(); +INSERT INTO measurement VALUES (1, '2006-02-10', 35, 10); +INSERT INTO measurement VALUES (1, '2006-02-16', 45, 20); +INSERT INTO measurement VALUES (1, '2006-03-17', 25, 10); +INSERT INTO measurement VALUES (1, '2006-03-27', 15, 40); +INSERT INTO measurement VALUES (1, '2007-01-15', 10, 10); +INSERT INTO measurement VALUES (1, '2007-01-17', 10, 10); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; + tableoid | city_id | logdate | peaktemp | unitsales +----------------------+---------+------------+----------+----------- + measurement | 0 | 07-21-2005 | 5 | 15 + measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10 + measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20 + measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10 + measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40 + measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10 + measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10 +(7 rows) + +CREATE TABLE new_measurement (LIKE measurement) WITH (autovacuum_enabled=off); +INSERT INTO new_measurement VALUES (0, '2005-07-21', 25, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-01', 20, 10); +INSERT INTO new_measurement VALUES (1, '2006-02-16', 50, 10); +INSERT INTO new_measurement VALUES (2, '2006-02-10', 20, 20); +INSERT INTO new_measurement VALUES (1, '2006-03-27', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-17', NULL, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-15', 5, NULL); +INSERT INTO new_measurement VALUES (1, '2007-01-16', 10, 10); +BEGIN; +MERGE INTO ONLY measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = 
greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate, peaktemp; + tableoid | city_id | logdate | peaktemp | unitsales +----------------------+---------+------------+----------+----------- + measurement | 0 | 07-21-2005 | 25 | 35 + measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10 + measurement_y2006m02 | 1 | 02-16-2006 | 45 | 20 + measurement_y2006m02 | 1 | 02-16-2006 | 50 | 10 + measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10 + measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10 + measurement_y2006m03 | 1 | 03-27-2006 | 15 | 40 + measurement_y2006m03 | 1 | 03-27-2006 | | + measurement_y2007m01 | 1 | 01-15-2007 | 5 | + measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10 + measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10 + measurement_y2007m01 | 1 | 01-17-2007 | 10 | 10 + measurement_y2007m01 | 1 | 01-17-2007 | | + measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20 +(14 rows) + +ROLLBACK; +MERGE into measurement m + USING new_measurement nm ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN MATCHED AND nm.peaktemp IS NULL THEN DELETE +WHEN MATCHED THEN UPDATE + SET peaktemp = greatest(m.peaktemp, nm.peaktemp), + unitsales = m.unitsales + coalesce(nm.unitsales, 0) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id, logdate, peaktemp, unitsales); +SELECT tableoid::regclass, * FROM measurement ORDER BY city_id, logdate; + tableoid | city_id | logdate | peaktemp | unitsales +----------------------+---------+------------+----------+----------- + measurement | 0 | 07-21-2005 | 25 | 35 + measurement_y2006m02 | 1 | 02-10-2006 | 35 | 10 + measurement_y2006m02 | 1 | 02-16-2006 | 50 | 30 + measurement_y2006m03 | 1 | 03-01-2006 | 20 | 10 + measurement_y2006m03 | 1 | 03-17-2006 | 25 | 10 + measurement_y2007m01 | 1 | 01-15-2007 | 10 | 10 + measurement_y2007m01 | 1 | 01-16-2007 | 10 | 10 + measurement_y2006m02 | 2 | 02-10-2006 | 20 | 20 +(8 rows) + +BEGIN; +MERGE INTO new_measurement nm + USING ONLY measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; + city_id | logdate | peaktemp | unitsales +---------+------------+----------+----------- + 1 | 02-16-2006 | 50 | 10 + 1 | 03-01-2006 | 20 | 10 + 1 | 03-27-2006 | | + 1 | 01-15-2007 | 5 | + 1 | 01-16-2007 | 10 | 10 + 1 | 01-17-2007 | | + 2 | 02-10-2006 | 20 | 20 +(7 rows) + +ROLLBACK; +MERGE INTO new_measurement nm + USING measurement m ON + (nm.city_id = m.city_id and nm.logdate=m.logdate) +WHEN MATCHED THEN DELETE; +SELECT * FROM new_measurement ORDER BY city_id, logdate; + city_id | logdate | peaktemp | unitsales +---------+------------+----------+----------- + 1 | 03-27-2006 | | + 1 | 01-17-2007 | | +(2 rows) + +DROP TABLE measurement, new_measurement CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table measurement_y2006m02 +drop cascades to table measurement_y2006m03 +drop cascades to table measurement_y2007m01 +DROP FUNCTION measurement_insert_trigger(); +-- prepare +RESET SESSION AUTHORIZATION; +DROP TABLE target, target2; +DROP TABLE source, source2; +DROP FUNCTION merge_trigfunc(); +DROP USER regress_merge_privs; +DROP USER regress_merge_no_privs; diff --git a/src/test/regress/expected/misc.out b/src/test/regress/expected/misc.out new file mode 100644 index 
0000000..6e816c5 --- /dev/null +++ b/src/test/regress/expected/misc.out @@ -0,0 +1,398 @@ +-- +-- MISC +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION overpaid(emp) + RETURNS bool + AS :'regresslib' + LANGUAGE C STRICT; +CREATE FUNCTION reverse_name(name) + RETURNS name + AS :'regresslib' + LANGUAGE C STRICT; +-- +-- BTREE +-- +UPDATE onek + SET unique1 = onek.unique1 + 1; +UPDATE onek + SET unique1 = onek.unique1 - 1; +-- +-- BTREE partial +-- +-- UPDATE onek2 +-- SET unique1 = onek2.unique1 + 1; +--UPDATE onek2 +-- SET unique1 = onek2.unique1 - 1; +-- +-- BTREE shutting out non-functional updates +-- +-- the following two tests seem to take a long time on some +-- systems. This non-func update stuff needs to be examined +-- more closely. - jolly (2/22/96) +-- +SELECT two, stringu1, ten, string4 + INTO TABLE tmp + FROM onek; +UPDATE tmp + SET stringu1 = reverse_name(onek.stringu1) + FROM onek + WHERE onek.stringu1 = 'JBAAAA' and + onek.stringu1 = tmp.stringu1; +UPDATE tmp + SET stringu1 = reverse_name(onek2.stringu1) + FROM onek2 + WHERE onek2.stringu1 = 'JCAAAA' and + onek2.stringu1 = tmp.stringu1; +DROP TABLE tmp; +--UPDATE person* +-- SET age = age + 1; +--UPDATE person* +-- SET age = age + 3 +-- WHERE name = 'linda'; +-- +-- copy +-- +\set filename :abs_builddir '/results/onek.data' +COPY onek TO :'filename'; +CREATE TEMP TABLE onek_copy (LIKE onek); +COPY onek_copy FROM :'filename'; +SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- +(0 rows) + +SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- +(0 rows) + +\set filename :abs_builddir '/results/stud_emp.data' +COPY BINARY stud_emp TO :'filename'; +CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); +COPY BINARY stud_emp_copy FROM :'filename'; +SELECT * FROM stud_emp_copy; + name | age | location | salary | manager | gpa | percent +-------+-----+------------+--------+---------+-----+--------- + jeff | 23 | (8,7.7) | 600 | sharon | 3.5 | + cim | 30 | (10.5,4.7) | 400 | | 3.4 | + linda | 19 | (0.9,6.1) | 100 | | 2.9 | +(3 rows) + +-- +-- test data for postquel functions +-- +CREATE TABLE hobbies_r ( + name text, + person text +); +CREATE TABLE equipment_r ( + name text, + hobby text +); +INSERT INTO hobbies_r (name, person) + SELECT 'posthacking', p.name + FROM person* p + WHERE p.name = 'mike' or p.name = 'jeff'; +INSERT INTO hobbies_r (name, person) + SELECT 'basketball', p.name + FROM person p + WHERE p.name = 'joe' or p.name = 'sally'; +INSERT INTO hobbies_r (name) VALUES ('skywalking'); +INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking'); +INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking'); +INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball'); 
+INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking'); +-- +-- postquel functions +-- +CREATE FUNCTION hobbies(person) + RETURNS setof hobbies_r + AS 'select * from hobbies_r where person = $1.name' + LANGUAGE SQL; +CREATE FUNCTION hobby_construct(text, text) + RETURNS hobbies_r + AS 'select $1 as name, $2 as hobby' + LANGUAGE SQL; +CREATE FUNCTION hobby_construct_named(name text, hobby text) + RETURNS hobbies_r + AS 'select name, hobby' + LANGUAGE SQL; +CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) + RETURNS hobbies_r.person%TYPE + AS 'select person from hobbies_r where name = $1' + LANGUAGE SQL; +NOTICE: type reference hobbies_r.name%TYPE converted to text +NOTICE: type reference hobbies_r.person%TYPE converted to text +CREATE FUNCTION equipment(hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = $1.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = hobby' + LANGUAGE SQL; +-- +-- mike does post_hacking, +-- joe and sally play basketball, and +-- everyone else does nothing. +-- +SELECT p.name, name(p.hobbies) FROM ONLY person p; + name | name +-------+------------- + mike | posthacking + joe | basketball + sally | basketball +(3 rows) + +-- +-- as above, but jeff also does post_hacking. +-- +SELECT p.name, name(p.hobbies) FROM person* p; + name | name +-------+------------- + mike | posthacking + joe | basketball + sally | basketball + jeff | posthacking +(4 rows) + +-- +-- the next two queries demonstrate how functions generate bogus duplicates. +-- this is a "feature" .. +-- +SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r + ORDER BY 1,2; + name | name +-------------+--------------- + basketball | hightops + posthacking | advil + posthacking | peet's coffee + skywalking | guts +(4 rows) + +SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; + name | name +-------------+--------------- + posthacking | advil + posthacking | peet's coffee + posthacking | advil + posthacking | peet's coffee + basketball | hightops + basketball | hightops + skywalking | guts +(7 rows) + +-- +-- mike needs advil and peet's coffee, +-- joe and sally need hightops, and +-- everyone else is fine. 
+-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; + name | name | name +-------+-------------+--------------- + mike | posthacking | advil + mike | posthacking | peet's coffee + joe | basketball | hightops + sally | basketball | hightops +(4 rows) + +-- +-- as above, but jeff needs advil and peet's coffee as well. +-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; + name | name | name +-------+-------------+--------------- + mike | posthacking | advil + mike | posthacking | peet's coffee + joe | basketball | hightops + sally | basketball | hightops + jeff | posthacking | advil + jeff | posthacking | peet's coffee +(6 rows) + +-- +-- just like the last two, but make sure that the target list fixup and +-- unflattening is being done correctly. +-- +SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; + name | name | name +---------------+-------+------------- + advil | mike | posthacking + peet's coffee | mike | posthacking + hightops | joe | basketball + hightops | sally | basketball +(4 rows) + +SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; + name | name | name +---------------+-------+------------- + advil | mike | posthacking + peet's coffee | mike | posthacking + hightops | joe | basketball + hightops | sally | basketball + advil | jeff | posthacking + peet's coffee | jeff | posthacking +(6 rows) + +SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; + name | name | name +---------------+-------------+------- + advil | posthacking | mike + peet's coffee | posthacking | mike + hightops | basketball | joe + hightops | basketball | sally +(4 rows) + +SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; + name | name | name +---------------+-------------+------- + advil | posthacking | mike + peet's coffee | posthacking | mike + hightops | basketball | joe + hightops | basketball | sally + advil | posthacking | jeff + peet's coffee | posthacking | jeff +(6 rows) + +SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_2b(text 'skywalking')); + name +--------------- + advil + peet's coffee + hightops + guts +(4 rows) + +SELECT hobbies_by_name('basketball'); + hobbies_by_name +----------------- + joe +(1 row) + +SELECT name, overpaid(emp.*) FROM emp; + name | overpaid +--------+---------- + sharon | t + sam | t + bill | t + jeff | f + cim | f + linda | f +(6 rows) + +-- +-- Try a few cases with SQL-spec row constructor expressions +-- +SELECT * FROM equipment(ROW('skywalking', 'mer')); + name | hobby +------+------------ + guts | skywalking +(1 row) + +SELECT name(equipment(ROW('skywalking', 'mer'))); + name +------ + guts +(1 row) + 
+SELECT *, name(equipment(h.*)) FROM hobbies_r h; + name | person | name +-------------+--------+--------------- + posthacking | mike | advil + posthacking | mike | peet's coffee + posthacking | jeff | advil + posthacking | jeff | peet's coffee + basketball | joe | hightops + basketball | sally | hightops + skywalking | | guts +(7 rows) + +SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; + name | person | name +-------------+--------+--------------- + posthacking | mike | advil + posthacking | mike | peet's coffee + posthacking | jeff | advil + posthacking | jeff | peet's coffee + basketball | joe | hightops + basketball | sally | hightops + skywalking | | guts +(7 rows) + +-- +-- functional joins +-- +-- +-- instance rules +-- +-- +-- rewrite rules +-- diff --git a/src/test/regress/expected/misc_functions.out b/src/test/regress/expected/misc_functions.out new file mode 100644 index 0000000..c669948 --- /dev/null +++ b/src/test/regress/expected/misc_functions.out @@ -0,0 +1,644 @@ +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +-- +-- num_nulls() +-- +SELECT num_nonnulls(NULL); + num_nonnulls +-------------- + 0 +(1 row) + +SELECT num_nonnulls('1'); + num_nonnulls +-------------- + 1 +(1 row) + +SELECT num_nonnulls(NULL::text); + num_nonnulls +-------------- + 0 +(1 row) + +SELECT num_nonnulls(NULL::text, NULL::int); + num_nonnulls +-------------- + 0 +(1 row) + +SELECT num_nonnulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL); + num_nonnulls +-------------- + 4 +(1 row) + +SELECT num_nonnulls(VARIADIC '{1,2,NULL,3}'::int[]); + num_nonnulls +-------------- + 3 +(1 row) + +SELECT num_nonnulls(VARIADIC '{"1","2","3","4"}'::text[]); + num_nonnulls +-------------- + 4 +(1 row) + +SELECT num_nonnulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i)); + num_nonnulls +-------------- + 99 +(1 row) + +SELECT num_nulls(NULL); + num_nulls +----------- + 1 +(1 row) + +SELECT num_nulls('1'); + num_nulls +----------- + 0 +(1 row) + +SELECT num_nulls(NULL::text); + num_nulls +----------- + 1 +(1 row) + +SELECT num_nulls(NULL::text, NULL::int); + num_nulls +----------- + 2 +(1 row) + +SELECT num_nulls(1, 2, NULL::text, NULL::point, '', int8 '9', 1.0 / NULL); + num_nulls +----------- + 3 +(1 row) + +SELECT num_nulls(VARIADIC '{1,2,NULL,3}'::int[]); + num_nulls +----------- + 1 +(1 row) + +SELECT num_nulls(VARIADIC '{"1","2","3","4"}'::text[]); + num_nulls +----------- + 0 +(1 row) + +SELECT num_nulls(VARIADIC ARRAY(SELECT CASE WHEN i <> 40 THEN i END FROM generate_series(1, 100) i)); + num_nulls +----------- + 1 +(1 row) + +-- special cases +SELECT num_nonnulls(VARIADIC NULL::text[]); + num_nonnulls +-------------- + +(1 row) + +SELECT num_nonnulls(VARIADIC '{}'::int[]); + num_nonnulls +-------------- + 0 +(1 row) + +SELECT num_nulls(VARIADIC NULL::text[]); + num_nulls +----------- + +(1 row) + +SELECT num_nulls(VARIADIC '{}'::int[]); + num_nulls +----------- + 0 +(1 row) + +-- should fail, one or more arguments is required +SELECT num_nonnulls(); +ERROR: function num_nonnulls() does not exist +LINE 1: SELECT num_nonnulls(); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +SELECT num_nulls(); +ERROR: function num_nulls() does not exist +LINE 1: SELECT num_nulls(); + ^ +HINT: No function matches the given name and argument types. 
You might need to add explicit type casts. +-- +-- canonicalize_path() +-- +CREATE FUNCTION test_canonicalize_path(text) + RETURNS text + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +SELECT test_canonicalize_path('/'); + test_canonicalize_path +------------------------ + / +(1 row) + +SELECT test_canonicalize_path('/./abc/def/'); + test_canonicalize_path +------------------------ + /abc/def +(1 row) + +SELECT test_canonicalize_path('/./../abc/def'); + test_canonicalize_path +------------------------ + /abc/def +(1 row) + +SELECT test_canonicalize_path('/./../../abc/def/'); + test_canonicalize_path +------------------------ + /abc/def +(1 row) + +SELECT test_canonicalize_path('/abc/.././def/ghi'); + test_canonicalize_path +------------------------ + /def/ghi +(1 row) + +SELECT test_canonicalize_path('/abc/./../def/ghi//'); + test_canonicalize_path +------------------------ + /def/ghi +(1 row) + +SELECT test_canonicalize_path('/abc/def/../..'); + test_canonicalize_path +------------------------ + / +(1 row) + +SELECT test_canonicalize_path('/abc/def/../../..'); + test_canonicalize_path +------------------------ + / +(1 row) + +SELECT test_canonicalize_path('/abc/def/../../../../ghi/jkl'); + test_canonicalize_path +------------------------ + /ghi/jkl +(1 row) + +SELECT test_canonicalize_path('.'); + test_canonicalize_path +------------------------ + . +(1 row) + +SELECT test_canonicalize_path('./'); + test_canonicalize_path +------------------------ + . +(1 row) + +SELECT test_canonicalize_path('./abc/..'); + test_canonicalize_path +------------------------ + . +(1 row) + +SELECT test_canonicalize_path('abc/../'); + test_canonicalize_path +------------------------ + . +(1 row) + +SELECT test_canonicalize_path('abc/../def'); + test_canonicalize_path +------------------------ + def +(1 row) + +SELECT test_canonicalize_path('..'); + test_canonicalize_path +------------------------ + .. +(1 row) + +SELECT test_canonicalize_path('../abc/def'); + test_canonicalize_path +------------------------ + ../abc/def +(1 row) + +SELECT test_canonicalize_path('../abc/..'); + test_canonicalize_path +------------------------ + .. +(1 row) + +SELECT test_canonicalize_path('../abc/../def'); + test_canonicalize_path +------------------------ + ../def +(1 row) + +SELECT test_canonicalize_path('../abc/../../def/ghi'); + test_canonicalize_path +------------------------ + ../../def/ghi +(1 row) + +SELECT test_canonicalize_path('./abc/./def/.'); + test_canonicalize_path +------------------------ + abc/def +(1 row) + +SELECT test_canonicalize_path('./abc/././def/.'); + test_canonicalize_path +------------------------ + abc/def +(1 row) + +SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno'); + test_canonicalize_path +------------------------ + ../jkl/mno +(1 row) + +-- +-- pg_log_backend_memory_contexts() +-- +-- Memory contexts are logged and they are not returned to the function. +-- Furthermore, their contents can vary depending on the timing. However, +-- we can at least verify that the code doesn't fail, and that the +-- permissions are set properly. 
+-- +SELECT pg_log_backend_memory_contexts(pg_backend_pid()); + pg_log_backend_memory_contexts +-------------------------------- + t +(1 row) + +SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity + WHERE backend_type = 'checkpointer'; + pg_log_backend_memory_contexts +-------------------------------- + t +(1 row) + +CREATE ROLE regress_log_memory; +SELECT has_function_privilege('regress_log_memory', + 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no + has_function_privilege +------------------------ + f +(1 row) + +GRANT EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer) + TO regress_log_memory; +SELECT has_function_privilege('regress_log_memory', + 'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- yes + has_function_privilege +------------------------ + t +(1 row) + +SET ROLE regress_log_memory; +SELECT pg_log_backend_memory_contexts(pg_backend_pid()); + pg_log_backend_memory_contexts +-------------------------------- + t +(1 row) + +RESET ROLE; +REVOKE EXECUTE ON FUNCTION pg_log_backend_memory_contexts(integer) + FROM regress_log_memory; +DROP ROLE regress_log_memory; +-- +-- Test some built-in SRFs +-- +-- The outputs of these are variable, so we can't just print their results +-- directly, but we can at least verify that the code doesn't fail. +-- +select setting as segsize +from pg_settings where name = 'wal_segment_size' +\gset +select count(*) > 0 as ok from pg_ls_waldir(); + ok +---- + t +(1 row) + +-- Test ProjectSet as well as FunctionScan +select count(*) > 0 as ok from (select pg_ls_waldir()) ss; + ok +---- + t +(1 row) + +-- Test not-run-to-completion cases. +select * from pg_ls_waldir() limit 0; + name | size | modification +------+------+-------------- +(0 rows) + +select count(*) > 0 as ok from (select * from pg_ls_waldir() limit 1) ss; + ok +---- + t +(1 row) + +select (w).size = :segsize as ok +from (select pg_ls_waldir() w) ss where length((w).name) = 24 limit 1; + ok +---- + t +(1 row) + +select count(*) >= 0 as ok from pg_ls_archive_statusdir(); + ok +---- + t +(1 row) + +-- pg_read_file() +select length(pg_read_file('postmaster.pid')) > 20; + ?column? +---------- + t +(1 row) + +select length(pg_read_file('postmaster.pid', 1, 20)); + length +-------- + 20 +(1 row) + +-- Test missing_ok +select pg_read_file('does not exist'); -- error +ERROR: could not open file "does not exist" for reading: No such file or directory +select pg_read_file('does not exist', true) IS NULL; -- ok + ?column? +---------- + t +(1 row) + +-- Test invalid argument +select pg_read_file('does not exist', 0, -1); -- error +ERROR: requested length cannot be negative +select pg_read_file('does not exist', 0, -1, true); -- error +ERROR: requested length cannot be negative +-- pg_read_binary_file() +select length(pg_read_binary_file('postmaster.pid')) > 20; + ?column? +---------- + t +(1 row) + +select length(pg_read_binary_file('postmaster.pid', 1, 20)); + length +-------- + 20 +(1 row) + +-- Test missing_ok +select pg_read_binary_file('does not exist'); -- error +ERROR: could not open file "does not exist" for reading: No such file or directory +select pg_read_binary_file('does not exist', true) IS NULL; -- ok + ?column? 
+---------- + t +(1 row) + +-- Test invalid argument +select pg_read_binary_file('does not exist', 0, -1); -- error +ERROR: requested length cannot be negative +select pg_read_binary_file('does not exist', 0, -1, true); -- error +ERROR: requested length cannot be negative +-- pg_stat_file() +select size > 20, isdir from pg_stat_file('postmaster.pid'); + ?column? | isdir +----------+------- + t | f +(1 row) + +-- pg_ls_dir() +select * from (select pg_ls_dir('.') a) a where a = 'base' limit 1; + a +------ + base +(1 row) + +-- Test missing_ok (second argument) +select pg_ls_dir('does not exist', false, false); -- error +ERROR: could not open directory "does not exist": No such file or directory +select pg_ls_dir('does not exist', true, false); -- ok + pg_ls_dir +----------- +(0 rows) + +-- Test include_dot_dirs (third argument) +select count(*) = 1 as dot_found + from pg_ls_dir('.', false, true) as ls where ls = '.'; + dot_found +----------- + t +(1 row) + +select count(*) = 1 as dot_found + from pg_ls_dir('.', false, false) as ls where ls = '.'; + dot_found +----------- + f +(1 row) + +-- pg_timezone_names() +select * from (select (pg_timezone_names()).name) ptn where name='UTC' limit 1; + name +------ + UTC +(1 row) + +-- pg_tablespace_databases() +select count(*) > 0 from + (select pg_tablespace_databases(oid) as pts from pg_tablespace + where spcname = 'pg_default') pts + join pg_database db on pts.pts = db.oid; + ?column? +---------- + t +(1 row) + +-- +-- Test replication slot directory functions +-- +CREATE ROLE regress_slot_dir_funcs; +-- Not available by default. +SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_logicalsnapdir()', 'EXECUTE'); + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_logicalmapdir()', 'EXECUTE'); + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_replslotdir(text)', 'EXECUTE'); + has_function_privilege +------------------------ + f +(1 row) + +GRANT pg_monitor TO regress_slot_dir_funcs; +-- Role is now part of pg_monitor, so these are available. 
+SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_logicalsnapdir()', 'EXECUTE'); + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_logicalmapdir()', 'EXECUTE'); + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_slot_dir_funcs', + 'pg_ls_replslotdir(text)', 'EXECUTE'); + has_function_privilege +------------------------ + t +(1 row) + +DROP ROLE regress_slot_dir_funcs; +-- +-- Test adding a support function to a subject function +-- +CREATE FUNCTION my_int_eq(int, int) RETURNS bool + LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE + AS $$int4eq$$; +-- By default, planner does not think that's selective +EXPLAIN (COSTS OFF) +SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1 +WHERE my_int_eq(a.unique2, 42); + QUERY PLAN +---------------------------------------------- + Hash Join + Hash Cond: (b.unique1 = a.unique1) + -> Seq Scan on tenk1 b + -> Hash + -> Seq Scan on tenk1 a + Filter: my_int_eq(unique2, 42) +(6 rows) + +-- With support function that knows it's int4eq, we get a different plan +CREATE FUNCTION test_support_func(internal) + RETURNS internal + AS :'regresslib', 'test_support_func' + LANGUAGE C STRICT; +ALTER FUNCTION my_int_eq(int, int) SUPPORT test_support_func; +EXPLAIN (COSTS OFF) +SELECT * FROM tenk1 a JOIN tenk1 b ON a.unique1 = b.unique1 +WHERE my_int_eq(a.unique2, 42); + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Seq Scan on tenk1 a + Filter: my_int_eq(unique2, 42) + -> Index Scan using tenk1_unique1 on tenk1 b + Index Cond: (unique1 = a.unique1) +(5 rows) + +-- Also test non-default rowcount estimate +CREATE FUNCTION my_gen_series(int, int) RETURNS SETOF integer + LANGUAGE internal STRICT IMMUTABLE PARALLEL SAFE + AS $$generate_series_int4$$ + SUPPORT test_support_func; +EXPLAIN (COSTS OFF) +SELECT * FROM tenk1 a JOIN my_gen_series(1,1000) g ON a.unique1 = g; + QUERY PLAN +---------------------------------------- + Hash Join + Hash Cond: (g.g = a.unique1) + -> Function Scan on my_gen_series g + -> Hash + -> Seq Scan on tenk1 a +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM tenk1 a JOIN my_gen_series(1,10) g ON a.unique1 = g; + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Function Scan on my_gen_series g + -> Index Scan using tenk1_unique1 on tenk1 a + Index Cond: (unique1 = g.g) +(4 rows) + +-- Test functions for control data +SELECT count(*) > 0 AS ok FROM pg_control_checkpoint(); + ok +---- + t +(1 row) + +SELECT count(*) > 0 AS ok FROM pg_control_init(); + ok +---- + t +(1 row) + +SELECT count(*) > 0 AS ok FROM pg_control_recovery(); + ok +---- + t +(1 row) + +SELECT count(*) > 0 AS ok FROM pg_control_system(); + ok +---- + t +(1 row) + +-- pg_split_walfile_name +SELECT * FROM pg_split_walfile_name(NULL); + segment_number | timeline_id +----------------+------------- + | +(1 row) + +SELECT * FROM pg_split_walfile_name('invalid'); +ERROR: invalid WAL file name "invalid" +SELECT segment_number > 0 AS ok_segment_number, timeline_id + FROM pg_split_walfile_name('000000010000000100000000'); + ok_segment_number | timeline_id +-------------------+------------- + t | 1 +(1 row) + +SELECT segment_number > 0 AS ok_segment_number, timeline_id + FROM pg_split_walfile_name('ffffffFF00000001000000af'); + ok_segment_number | timeline_id +-------------------+------------- + t | 4294967295 +(1 row) + diff --git 
a/src/test/regress/expected/misc_sanity.out b/src/test/regress/expected/misc_sanity.out new file mode 100644 index 0000000..a57fd14 --- /dev/null +++ b/src/test/regress/expected/misc_sanity.out @@ -0,0 +1,91 @@ +-- +-- MISC_SANITY +-- Sanity checks for common errors in making system tables that don't fit +-- comfortably into either opr_sanity or type_sanity. +-- +-- Every test failure in this file should be closely inspected. +-- The description of the failing test should be read carefully before +-- adjusting the expected output. In most cases, the queries should +-- not find *any* matching entries. +-- +-- NB: run this test early, because some later tests create bogus entries. +-- **************** pg_depend **************** +-- Look for illegal values in pg_depend fields. +SELECT * +FROM pg_depend as d1 +WHERE refclassid = 0 OR refobjid = 0 OR + classid = 0 OR objid = 0 OR + deptype NOT IN ('a', 'e', 'i', 'n', 'x', 'P', 'S'); + classid | objid | objsubid | refclassid | refobjid | refobjsubid | deptype +---------+-------+----------+------------+----------+-------------+--------- +(0 rows) + +-- **************** pg_shdepend **************** +-- Look for illegal values in pg_shdepend fields. +SELECT * +FROM pg_shdepend as d1 +WHERE refclassid = 0 OR refobjid = 0 OR + classid = 0 OR objid = 0 OR + deptype NOT IN ('a', 'o', 'r', 't'); + dbid | classid | objid | objsubid | refclassid | refobjid | deptype +------+---------+-------+----------+------------+----------+--------- +(0 rows) + +-- **************** pg_class **************** +-- Look for system tables with varlena columns but no toast table. All +-- system tables with toastable columns should have toast tables, with +-- the following exceptions: +-- 1. pg_class, pg_attribute, and pg_index, due to fear of recursive +-- dependencies as toast tables depend on them. +-- 2. pg_largeobject and pg_largeobject_metadata. Large object catalogs +-- and toast tables are mutually exclusive and large object data is handled +-- as user data by pg_upgrade, which would cause failures. +SELECT relname, attname, atttypid::regtype +FROM pg_class c JOIN pg_attribute a ON c.oid = attrelid +WHERE c.oid < 16384 AND + reltoastrelid = 0 AND + relkind = 'r' AND + attstorage != 'p' +ORDER BY 1, 2; + relname | attname | atttypid +-------------------------+---------------+-------------- + pg_attribute | attacl | aclitem[] + pg_attribute | attfdwoptions | text[] + pg_attribute | attmissingval | anyarray + pg_attribute | attoptions | text[] + pg_class | relacl | aclitem[] + pg_class | reloptions | text[] + pg_class | relpartbound | pg_node_tree + pg_index | indexprs | pg_node_tree + pg_index | indpred | pg_node_tree + pg_largeobject | data | bytea + pg_largeobject_metadata | lomacl | aclitem[] +(11 rows) + +-- system catalogs without primary keys +-- +-- Current exceptions: +-- * pg_depend, pg_shdepend don't have a unique key +SELECT relname +FROM pg_class +WHERE relnamespace = 'pg_catalog'::regnamespace AND relkind = 'r' + AND pg_class.oid NOT IN (SELECT indrelid FROM pg_index WHERE indisprimary) +ORDER BY 1; + relname +------------- + pg_depend + pg_shdepend +(2 rows) + +-- system catalog unique indexes not wrapped in a constraint +-- (There should be none.) 
+SELECT relname +FROM pg_class c JOIN pg_index i ON c.oid = i.indexrelid +WHERE relnamespace = 'pg_catalog'::regnamespace AND relkind = 'i' + AND i.indisunique + AND c.oid NOT IN (SELECT conindid FROM pg_constraint) +ORDER BY 1; + relname +--------- +(0 rows) + diff --git a/src/test/regress/expected/money.out b/src/test/regress/expected/money.out new file mode 100644 index 0000000..7fd4e31 --- /dev/null +++ b/src/test/regress/expected/money.out @@ -0,0 +1,530 @@ +-- +-- MONEY +-- +-- Note that we assume lc_monetary has been set to C. +-- +CREATE TABLE money_data (m money); +INSERT INTO money_data VALUES ('123'); +SELECT * FROM money_data; + m +--------- + $123.00 +(1 row) + +SELECT m + '123' FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT m + '123.45' FROM money_data; + ?column? +---------- + $246.45 +(1 row) + +SELECT m - '123.45' FROM money_data; + ?column? +---------- + -$0.45 +(1 row) + +SELECT m / '2'::money FROM money_data; + ?column? +---------- + 61.5 +(1 row) + +SELECT m * 2 FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT 2 * m FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT m / 2 FROM money_data; + ?column? +---------- + $61.50 +(1 row) + +SELECT m * 2::int2 FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT 2::int2 * m FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT m / 2::int2 FROM money_data; + ?column? +---------- + $61.50 +(1 row) + +SELECT m * 2::int8 FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT 2::int8 * m FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT m / 2::int8 FROM money_data; + ?column? +---------- + $61.50 +(1 row) + +SELECT m * 2::float8 FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT 2::float8 * m FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT m / 2::float8 FROM money_data; + ?column? +---------- + $61.50 +(1 row) + +SELECT m * 2::float4 FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT 2::float4 * m FROM money_data; + ?column? +---------- + $246.00 +(1 row) + +SELECT m / 2::float4 FROM money_data; + ?column? +---------- + $61.50 +(1 row) + +-- All true +SELECT m = '$123.00' FROM money_data; + ?column? +---------- + t +(1 row) + +SELECT m != '$124.00' FROM money_data; + ?column? +---------- + t +(1 row) + +SELECT m <= '$123.00' FROM money_data; + ?column? +---------- + t +(1 row) + +SELECT m >= '$123.00' FROM money_data; + ?column? +---------- + t +(1 row) + +SELECT m < '$124.00' FROM money_data; + ?column? +---------- + t +(1 row) + +SELECT m > '$122.00' FROM money_data; + ?column? +---------- + t +(1 row) + +-- All false +SELECT m = '$123.01' FROM money_data; + ?column? +---------- + f +(1 row) + +SELECT m != '$123.00' FROM money_data; + ?column? +---------- + f +(1 row) + +SELECT m <= '$122.99' FROM money_data; + ?column? +---------- + f +(1 row) + +SELECT m >= '$123.01' FROM money_data; + ?column? +---------- + f +(1 row) + +SELECT m > '$124.00' FROM money_data; + ?column? +---------- + f +(1 row) + +SELECT m < '$122.00' FROM money_data; + ?column? 
+---------- + f +(1 row) + +SELECT cashlarger(m, '$124.00') FROM money_data; + cashlarger +------------ + $124.00 +(1 row) + +SELECT cashsmaller(m, '$124.00') FROM money_data; + cashsmaller +------------- + $123.00 +(1 row) + +SELECT cash_words(m) FROM money_data; + cash_words +------------------------------------------------- + One hundred twenty three dollars and zero cents +(1 row) + +SELECT cash_words(m + '1.23') FROM money_data; + cash_words +-------------------------------------------------------- + One hundred twenty four dollars and twenty three cents +(1 row) + +DELETE FROM money_data; +INSERT INTO money_data VALUES ('$123.45'); +SELECT * FROM money_data; + m +--------- + $123.45 +(1 row) + +DELETE FROM money_data; +INSERT INTO money_data VALUES ('$123.451'); +SELECT * FROM money_data; + m +--------- + $123.45 +(1 row) + +DELETE FROM money_data; +INSERT INTO money_data VALUES ('$123.454'); +SELECT * FROM money_data; + m +--------- + $123.45 +(1 row) + +DELETE FROM money_data; +INSERT INTO money_data VALUES ('$123.455'); +SELECT * FROM money_data; + m +--------- + $123.46 +(1 row) + +DELETE FROM money_data; +INSERT INTO money_data VALUES ('$123.456'); +SELECT * FROM money_data; + m +--------- + $123.46 +(1 row) + +DELETE FROM money_data; +INSERT INTO money_data VALUES ('$123.459'); +SELECT * FROM money_data; + m +--------- + $123.46 +(1 row) + +-- input checks +SELECT '1234567890'::money; + money +------------------- + $1,234,567,890.00 +(1 row) + +SELECT '12345678901234567'::money; + money +---------------------------- + $12,345,678,901,234,567.00 +(1 row) + +SELECT '123456789012345678'::money; +ERROR: value "123456789012345678" is out of range for type money +LINE 1: SELECT '123456789012345678'::money; + ^ +SELECT '9223372036854775807'::money; +ERROR: value "9223372036854775807" is out of range for type money +LINE 1: SELECT '9223372036854775807'::money; + ^ +SELECT '-12345'::money; + money +------------- + -$12,345.00 +(1 row) + +SELECT '-1234567890'::money; + money +-------------------- + -$1,234,567,890.00 +(1 row) + +SELECT '-12345678901234567'::money; + money +----------------------------- + -$12,345,678,901,234,567.00 +(1 row) + +SELECT '-123456789012345678'::money; +ERROR: value "-123456789012345678" is out of range for type money +LINE 1: SELECT '-123456789012345678'::money; + ^ +SELECT '-9223372036854775808'::money; +ERROR: value "-9223372036854775808" is out of range for type money +LINE 1: SELECT '-9223372036854775808'::money; + ^ +-- special characters +SELECT '(1)'::money; + money +-------- + -$1.00 +(1 row) + +SELECT '($123,456.78)'::money; + money +-------------- + -$123,456.78 +(1 row) + +-- test non-error-throwing API +SELECT pg_input_is_valid('\x0001', 'money'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('\x0001', 'money'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------+------+---------------- + invalid input syntax for type money: "\x0001" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('192233720368547758.07', 'money'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('192233720368547758.07', 'money'); + message | detail | hint | sql_error_code +--------------------------------------------------------------+--------+------+---------------- + value "192233720368547758.07" is out of range for type money | | | 22003 +(1 row) + +-- documented minimums and maximums +SELECT '-92233720368547758.08'::money; + money 
+----------------------------- + -$92,233,720,368,547,758.08 +(1 row) + +SELECT '92233720368547758.07'::money; + money +---------------------------- + $92,233,720,368,547,758.07 +(1 row) + +SELECT '-92233720368547758.09'::money; +ERROR: value "-92233720368547758.09" is out of range for type money +LINE 1: SELECT '-92233720368547758.09'::money; + ^ +SELECT '92233720368547758.08'::money; +ERROR: value "92233720368547758.08" is out of range for type money +LINE 1: SELECT '92233720368547758.08'::money; + ^ +-- rounding +SELECT '-92233720368547758.085'::money; +ERROR: value "-92233720368547758.085" is out of range for type money +LINE 1: SELECT '-92233720368547758.085'::money; + ^ +SELECT '92233720368547758.075'::money; +ERROR: value "92233720368547758.075" is out of range for type money +LINE 1: SELECT '92233720368547758.075'::money; + ^ +-- rounding vs. truncation in division +SELECT '878.08'::money / 11::float8; + ?column? +---------- + $79.83 +(1 row) + +SELECT '878.08'::money / 11::float4; + ?column? +---------- + $79.83 +(1 row) + +SELECT '878.08'::money / 11::bigint; + ?column? +---------- + $79.82 +(1 row) + +SELECT '878.08'::money / 11::int; + ?column? +---------- + $79.82 +(1 row) + +SELECT '878.08'::money / 11::smallint; + ?column? +---------- + $79.82 +(1 row) + +-- check for precision loss in division +SELECT '90000000000000099.00'::money / 10::bigint; + ?column? +--------------------------- + $9,000,000,000,000,009.90 +(1 row) + +SELECT '90000000000000099.00'::money / 10::int; + ?column? +--------------------------- + $9,000,000,000,000,009.90 +(1 row) + +SELECT '90000000000000099.00'::money / 10::smallint; + ?column? +--------------------------- + $9,000,000,000,000,009.90 +(1 row) + +-- Cast int4/int8/numeric to money +SELECT 1234567890::money; + money +------------------- + $1,234,567,890.00 +(1 row) + +SELECT 12345678901234567::money; + money +---------------------------- + $12,345,678,901,234,567.00 +(1 row) + +SELECT (-12345)::money; + money +------------- + -$12,345.00 +(1 row) + +SELECT (-1234567890)::money; + money +-------------------- + -$1,234,567,890.00 +(1 row) + +SELECT (-12345678901234567)::money; + money +----------------------------- + -$12,345,678,901,234,567.00 +(1 row) + +SELECT 1234567890::int4::money; + money +------------------- + $1,234,567,890.00 +(1 row) + +SELECT 12345678901234567::int8::money; + money +---------------------------- + $12,345,678,901,234,567.00 +(1 row) + +SELECT 12345678901234567::numeric::money; + money +---------------------------- + $12,345,678,901,234,567.00 +(1 row) + +SELECT (-1234567890)::int4::money; + money +-------------------- + -$1,234,567,890.00 +(1 row) + +SELECT (-12345678901234567)::int8::money; + money +----------------------------- + -$12,345,678,901,234,567.00 +(1 row) + +SELECT (-12345678901234567)::numeric::money; + money +----------------------------- + -$12,345,678,901,234,567.00 +(1 row) + +-- Cast from money to numeric +SELECT '12345678901234567'::money::numeric; + numeric +---------------------- + 12345678901234567.00 +(1 row) + +SELECT '-12345678901234567'::money::numeric; + numeric +----------------------- + -12345678901234567.00 +(1 row) + +SELECT '92233720368547758.07'::money::numeric; + numeric +---------------------- + 92233720368547758.07 +(1 row) + +SELECT '-92233720368547758.08'::money::numeric; + numeric +----------------------- + -92233720368547758.08 +(1 row) + diff --git a/src/test/regress/expected/multirangetypes.out b/src/test/regress/expected/multirangetypes.out new file mode 100644 index 
0000000..a0cb875 --- /dev/null +++ b/src/test/regress/expected/multirangetypes.out @@ -0,0 +1,3363 @@ +-- Tests for multirange data types. +-- +-- test input parser +-- +-- negative tests; should fail +select ''::textmultirange; +ERROR: malformed multirange literal: "" +LINE 1: select ''::textmultirange; + ^ +DETAIL: Missing left brace. +select '{,}'::textmultirange; +ERROR: malformed multirange literal: "{,}" +LINE 1: select '{,}'::textmultirange; + ^ +DETAIL: Expected range start. +select '{(,)}.'::textmultirange; +ERROR: malformed multirange literal: "{(,)}." +LINE 1: select '{(,)}.'::textmultirange; + ^ +DETAIL: Junk after closing right brace. +select '{[a,c),}'::textmultirange; +ERROR: malformed multirange literal: "{[a,c),}" +LINE 1: select '{[a,c),}'::textmultirange; + ^ +DETAIL: Expected range start. +select '{,[a,c)}'::textmultirange; +ERROR: malformed multirange literal: "{,[a,c)}" +LINE 1: select '{,[a,c)}'::textmultirange; + ^ +DETAIL: Expected range start. +select '{-[a,z)}'::textmultirange; +ERROR: malformed multirange literal: "{-[a,z)}" +LINE 1: select '{-[a,z)}'::textmultirange; + ^ +DETAIL: Expected range start. +select '{[a,z) - }'::textmultirange; +ERROR: malformed multirange literal: "{[a,z) - }" +LINE 1: select '{[a,z) - }'::textmultirange; + ^ +DETAIL: Expected comma or end of multirange. +select '{(",a)}'::textmultirange; +ERROR: malformed multirange literal: "{(",a)}" +LINE 1: select '{(",a)}'::textmultirange; + ^ +DETAIL: Unexpected end of input. +select '{(,,a)}'::textmultirange; +ERROR: malformed range literal: "(,,a)" +LINE 1: select '{(,,a)}'::textmultirange; + ^ +DETAIL: Too many commas. +select '{(),a)}'::textmultirange; +ERROR: malformed range literal: "()" +LINE 1: select '{(),a)}'::textmultirange; + ^ +DETAIL: Missing comma after lower bound. +select '{(a,))}'::textmultirange; +ERROR: malformed multirange literal: "{(a,))}" +LINE 1: select '{(a,))}'::textmultirange; + ^ +DETAIL: Expected comma or end of multirange. +select '{(],a)}'::textmultirange; +ERROR: malformed range literal: "(]" +LINE 1: select '{(],a)}'::textmultirange; + ^ +DETAIL: Missing comma after lower bound. +select '{(a,])}'::textmultirange; +ERROR: malformed multirange literal: "{(a,])}" +LINE 1: select '{(a,])}'::textmultirange; + ^ +DETAIL: Expected comma or end of multirange. 
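(Editorial sketch, not part of the expected regression output.) Each malformed literal above aborts the statement with a hard ERROR. As this file exercises further down with pg_input_is_valid/pg_input_error_info on int4multirange, PostgreSQL 16's non-error-throwing input checks can probe the same class of bad literals without raising an error; a minimal sketch against the built-in int4multirange, using a trailing-comma literal analogous to '{[a,c),}' above:

-- returns f instead of raising "malformed multirange literal"
SELECT pg_input_is_valid('{[1,2),}', 'int4multirange');
-- reports the message, detail, and SQLSTATE the hard error would have carried
SELECT * FROM pg_input_error_info('{[1,2),}', 'int4multirange');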
+select '{[z,a]}'::textmultirange; +ERROR: range lower bound must be less than or equal to range upper bound +LINE 1: select '{[z,a]}'::textmultirange; + ^ +-- should succeed +select '{}'::textmultirange; + textmultirange +---------------- + {} +(1 row) + +select ' {} '::textmultirange; + textmultirange +---------------- + {} +(1 row) + +select ' { empty, empty } '::textmultirange; + textmultirange +---------------- + {} +(1 row) + +select ' {( " a " " a ", " z " " z " ) }'::textmultirange; + textmultirange +---------------------------- + {(" a a "," z z ")} +(1 row) + +select textrange('\\\\', repeat('a', 200))::textmultirange; + textrange +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {["\\\\\\\\",aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa)} +(1 row) + +select '{(,z)}'::textmultirange; + textmultirange +---------------- + {(,z)} +(1 row) + +select '{(a,)}'::textmultirange; + textmultirange +---------------- + {(a,)} +(1 row) + +select '{[,z]}'::textmultirange; + textmultirange +---------------- + {(,z]} +(1 row) + +select '{[a,]}'::textmultirange; + textmultirange +---------------- + {[a,)} +(1 row) + +select '{(,)}'::textmultirange; + textmultirange +---------------- + {(,)} +(1 row) + +select '{[ , ]}'::textmultirange; + textmultirange +---------------- + {[" "," "]} +(1 row) + +select '{["",""]}'::textmultirange; + textmultirange +---------------- + {["",""]} +(1 row) + +select '{[",",","]}'::textmultirange; + textmultirange +---------------- + {[",",","]} +(1 row) + +select '{["\\","\\"]}'::textmultirange; + textmultirange +---------------- + {["\\","\\"]} +(1 row) + +select '{["""","\""]}'::textmultirange; + textmultirange +---------------- + {["""",""""]} +(1 row) + +select '{(\\,a)}'::textmultirange; + textmultirange +---------------- + {("\\",a)} +(1 row) + +select '{((,z)}'::textmultirange; + textmultirange +---------------- + {("(",z)} +(1 row) + +select '{([,z)}'::textmultirange; + textmultirange +---------------- + {("[",z)} +(1 row) + +select '{(!,()}'::textmultirange; + textmultirange +---------------- + {(!,"(")} +(1 row) + +select '{(!,[)}'::textmultirange; + textmultirange +---------------- + {(!,"[")} +(1 row) + +select '{[a,a]}'::textmultirange; + textmultirange +---------------- + {[a,a]} +(1 row) + +select '{[a,a],[a,b]}'::textmultirange; + textmultirange +---------------- + {[a,b]} +(1 row) + +select '{[a,b), [b,e]}'::textmultirange; + textmultirange +---------------- + {[a,e]} +(1 row) + +select '{[a,d), [b,f]}'::textmultirange; + textmultirange +---------------- + {[a,f]} +(1 row) + +select '{[a,a],[b,b]}'::textmultirange; + textmultirange +---------------- + {[a,a],[b,b]} +(1 row) + +-- without canonicalization, we can't join these: +select '{[a,a], [b,b]}'::textmultirange; + textmultirange +---------------- + {[a,a],[b,b]} +(1 row) + +-- with canonicalization, we can join these: +select '{[1,2], [3,4]}'::int4multirange; + int4multirange +---------------- + {[1,5)} +(1 row) + +select '{[a,a], [b,b], [c,c]}'::textmultirange; + textmultirange +--------------------- + {[a,a],[b,b],[c,c]} +(1 row) + +select '{[a,d], [b,e]}'::textmultirange; + textmultirange +---------------- + {[a,e]} +(1 row) + +select '{[a,d), [d,e)}'::textmultirange; + 
textmultirange +---------------- + {[a,e)} +(1 row) + +-- these are allowed but normalize to empty: +select '{[a,a)}'::textmultirange; + textmultirange +---------------- + {} +(1 row) + +select '{(a,a]}'::textmultirange; + textmultirange +---------------- + {} +(1 row) + +select '{(a,a)}'::textmultirange; + textmultirange +---------------- + {} +(1 row) + +-- Also try it with non-error-throwing API +select pg_input_is_valid('{[1,2], [4,5]}', 'int4multirange'); + pg_input_is_valid +------------------- + t +(1 row) + +select pg_input_is_valid('{[1,2], [4,5]', 'int4multirange'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('{[1,2], [4,5]', 'int4multirange'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------------------------+------+---------------- + malformed multirange literal: "{[1,2], [4,5]" | Unexpected end of input. | | 22P02 +(1 row) + +select pg_input_is_valid('{[1,2], [4,zed]}', 'int4multirange'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('{[1,2], [4,zed]}', 'int4multirange'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + invalid input syntax for type integer: "zed" | | | 22P02 +(1 row) + +-- +-- test the constructor +--- +select textmultirange(); + textmultirange +---------------- + {} +(1 row) + +select textmultirange(textrange('a', 'c')); + textmultirange +---------------- + {[a,c)} +(1 row) + +select textmultirange(textrange('a', 'c'), textrange('f', 'g')); + textmultirange +---------------- + {[a,c),[f,g)} +(1 row) + +select textmultirange(textrange('\\\\', repeat('a', 200)), textrange('c', 'd')); + textmultirange +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + {["\\\\\\\\",aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa),[c,d)} +(1 row) + +-- +-- test casts, both a built-in range type and a user-defined one: +-- +select 'empty'::int4range::int4multirange; + int4multirange +---------------- + {} +(1 row) + +select int4range(1, 3)::int4multirange; + int4range +----------- + {[1,3)} +(1 row) + +select int4range(1, null)::int4multirange; + int4range +----------- + {[1,)} +(1 row) + +select int4range(null, null)::int4multirange; + int4range +----------- + {(,)} +(1 row) + +select 'empty'::textrange::textmultirange; + textmultirange +---------------- + {} +(1 row) + +select textrange('a', 'c')::textmultirange; + textrange +----------- + {[a,c)} +(1 row) + +select textrange('a', null)::textmultirange; + textrange +----------- + {[a,)} +(1 row) + +select textrange(null, null)::textmultirange; + textrange +----------- + {(,)} +(1 row) + +-- +-- test unnest(multirange) function +-- +select unnest(int4multirange(int4range('5', '6'), int4range('1', '2'))); + unnest +-------- + [1,2) + [5,6) +(2 rows) + +select unnest(textmultirange(textrange('a', 'b'), textrange('d', 'e'))); + unnest +-------- + [a,b) + [d,e) +(2 rows) + +select unnest(textmultirange(textrange('\\\\', repeat('a', 200)), textrange('c', 'd'))); + unnest 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + ["\\\\\\\\",aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) + [c,d) +(2 rows) + +-- +-- create some test data and test the operators +-- +CREATE TABLE nummultirange_test (nmr NUMMULTIRANGE); +CREATE INDEX nummultirange_test_btree ON nummultirange_test(nmr); +INSERT INTO nummultirange_test VALUES('{}'); +INSERT INTO nummultirange_test VALUES('{[,)}'); +INSERT INTO nummultirange_test VALUES('{[3,]}'); +INSERT INTO nummultirange_test VALUES('{[,), [3,]}'); +INSERT INTO nummultirange_test VALUES('{[, 5)}'); +INSERT INTO nummultirange_test VALUES(nummultirange()); +INSERT INTO nummultirange_test VALUES(nummultirange(variadic '{}'::numrange[])); +INSERT INTO nummultirange_test VALUES(nummultirange(numrange(1.1, 2.2))); +INSERT INTO nummultirange_test VALUES('{empty}'); +INSERT INTO nummultirange_test VALUES(nummultirange(numrange(1.7, 1.7, '[]'), numrange(1.7, 1.9))); +INSERT INTO nummultirange_test VALUES(nummultirange(numrange(1.7, 1.7, '[]'), numrange(1.9, 2.1))); +SELECT nmr, isempty(nmr), lower(nmr), upper(nmr) FROM nummultirange_test ORDER BY nmr; + nmr | isempty | lower | upper +-----------------------+---------+-------+------- + {} | t | | + {} | t | | + {} | t | | + {} | t | | + {(,5)} | f | | 5 + {(,)} | f | | + {(,)} | f | | + {[1.1,2.2)} | f | 1.1 | 2.2 + {[1.7,1.7],[1.9,2.1)} | f | 1.7 | 2.1 + {[1.7,1.9)} | f | 1.7 | 1.9 + {[3,)} | f | 3 | +(11 rows) + +SELECT nmr, lower_inc(nmr), lower_inf(nmr), upper_inc(nmr), upper_inf(nmr) FROM nummultirange_test ORDER BY nmr; + nmr | lower_inc | lower_inf | upper_inc | upper_inf +-----------------------+-----------+-----------+-----------+----------- + {} | f | f | f | f + {} | f | f | f | f + {} | f | f | f | f + {} | f | f | f | f + {(,5)} | f | t | f | f + {(,)} | f | t | f | t + {(,)} | f | t | f | t + {[1.1,2.2)} | t | f | f | f + {[1.7,1.7],[1.9,2.1)} | t | f | f | f + {[1.7,1.9)} | t | f | f | f + {[3,)} | t | f | f | t +(11 rows) + +SELECT * FROM nummultirange_test WHERE nmr = '{}'; + nmr +----- + {} + {} + {} + {} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr = '{(,5)}'; + nmr +-------- + {(,5)} +(1 row) + +SELECT * FROM nummultirange_test WHERE nmr = '{[3,)}'; + nmr +-------- + {[3,)} +(1 row) + +SELECT * FROM nummultirange_test WHERE nmr = '{[1.7,1.7]}'; + nmr +----- +(0 rows) + +SELECT * FROM nummultirange_test WHERE nmr = '{[1.7,1.7],[1.9,2.1)}'; + nmr +----------------------- + {[1.7,1.7],[1.9,2.1)} +(1 row) + +SELECT * FROM nummultirange_test WHERE nmr < '{}'; + nmr +----- +(0 rows) + +SELECT * FROM nummultirange_test WHERE nmr < '{[-1000.0, -1000.0]}'; + nmr +-------- + {} + {(,)} + {(,)} + {(,5)} + {} + {} + {} +(7 rows) + +SELECT * FROM nummultirange_test WHERE nmr < '{[0.0, 1.0]}'; + nmr +-------- + {} + {(,)} + {(,)} + {(,5)} + {} + {} + {} +(7 rows) + +SELECT * FROM nummultirange_test WHERE nmr < '{[1000.0, 1001.0]}'; + nmr +----------------------- + {} + {(,)} + {[3,)} + {(,)} + {(,5)} + {} + {} + {[1.1,2.2)} + {} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(11 rows) + +SELECT * FROM nummultirange_test WHERE nmr <= '{}'; + nmr +----- + {} + {} + {} + {} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr <= '{[3,)}'; + nmr +----------------------- 
+ {} + {(,)} + {[3,)} + {(,)} + {(,5)} + {} + {} + {[1.1,2.2)} + {} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(11 rows) + +SELECT * FROM nummultirange_test WHERE nmr >= '{}'; + nmr +----------------------- + {} + {(,)} + {[3,)} + {(,)} + {(,5)} + {} + {} + {[1.1,2.2)} + {} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(11 rows) + +SELECT * FROM nummultirange_test WHERE nmr >= '{[3,)}'; + nmr +-------- + {[3,)} +(1 row) + +SELECT * FROM nummultirange_test WHERE nmr > '{}'; + nmr +----------------------- + {(,)} + {[3,)} + {(,)} + {(,5)} + {[1.1,2.2)} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(7 rows) + +SELECT * FROM nummultirange_test WHERE nmr > '{[-1000.0, -1000.0]}'; + nmr +----------------------- + {[3,)} + {[1.1,2.2)} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr > '{[0.0, 1.0]}'; + nmr +----------------------- + {[3,)} + {[1.1,2.2)} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr > '{[1000.0, 1001.0]}'; + nmr +----- +(0 rows) + +SELECT * FROM nummultirange_test WHERE nmr <> '{}'; + nmr +----------------------- + {(,)} + {[3,)} + {(,)} + {(,5)} + {[1.1,2.2)} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(7 rows) + +SELECT * FROM nummultirange_test WHERE nmr <> '{(,5)}'; + nmr +----------------------- + {} + {(,)} + {[3,)} + {(,)} + {} + {} + {[1.1,2.2)} + {} + {[1.7,1.9)} + {[1.7,1.7],[1.9,2.1)} +(10 rows) + +select nummultirange(numrange(2.0, 1.0)); +ERROR: range lower bound must be less than or equal to range upper bound +select nummultirange(numrange(5.0, 6.0), numrange(1.0, 2.0)); + nummultirange +----------------------- + {[1.0,2.0),[5.0,6.0)} +(1 row) + +analyze nummultirange_test; +-- overlaps +SELECT * FROM nummultirange_test WHERE range_overlaps_multirange(numrange(4.0, 4.2), nmr); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE numrange(4.0, 4.2) && nmr; + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE multirange_overlaps_range(nmr, numrange(4.0, 4.2)); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr && numrange(4.0, 4.2); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE multirange_overlaps_multirange(nmr, nummultirange(numrange(4.0, 4.2), numrange(6.0, 7.0))); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr && nummultirange(numrange(4.0, 4.2), numrange(6.0, 7.0)); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr && nummultirange(numrange(6.0, 7.0)); + nmr +-------- + {(,)} + {[3,)} + {(,)} +(3 rows) + +SELECT * FROM nummultirange_test WHERE nmr && nummultirange(numrange(6.0, 7.0), numrange(8.0, 9.0)); + nmr +-------- + {(,)} + {[3,)} + {(,)} +(3 rows) + +-- mr contains x +SELECT * FROM nummultirange_test WHERE multirange_contains_elem(nmr, 4.0); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr @> 4.0; + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE multirange_contains_range(nmr, numrange(4.0, 4.2)); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE nmr @> numrange(4.0, 4.2); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE multirange_contains_multirange(nmr, 
'{[4.0,4.2), [6.0, 8.0)}'); + nmr +-------- + {(,)} + {[3,)} + {(,)} +(3 rows) + +SELECT * FROM nummultirange_test WHERE nmr @> '{[4.0,4.2), [6.0, 8.0)}'::nummultirange; + nmr +-------- + {(,)} + {[3,)} + {(,)} +(3 rows) + +-- x is contained by mr +SELECT * FROM nummultirange_test WHERE elem_contained_by_multirange(4.0, nmr); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE 4.0 <@ nmr; + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE range_contained_by_multirange(numrange(4.0, 4.2), nmr); + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE numrange(4.0, 4.2) <@ nmr; + nmr +-------- + {(,)} + {[3,)} + {(,)} + {(,5)} +(4 rows) + +SELECT * FROM nummultirange_test WHERE multirange_contained_by_multirange('{[4.0,4.2), [6.0, 8.0)}', nmr); + nmr +-------- + {(,)} + {[3,)} + {(,)} +(3 rows) + +SELECT * FROM nummultirange_test WHERE '{[4.0,4.2), [6.0, 8.0)}'::nummultirange <@ nmr; + nmr +-------- + {(,)} + {[3,)} + {(,)} +(3 rows) + +-- overlaps +SELECT 'empty'::numrange && nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT 'empty'::numrange && nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() && 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) && 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange() && nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() && nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) && nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,4)) && nummultirange(numrange(1,2), numrange(7,8)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(7,8)) && nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,4)) && nummultirange(numrange(1,2), numrange(3.5,8)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(3.5,8)) && numrange(3,4); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(3.5,8)) && nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +select '{(10,20),(30,40),(50,60)}'::nummultirange && '(42,92)'::numrange; + ?column? +---------- + t +(1 row) + +-- contains +SELECT nummultirange() @> nummultirange(); + ?column? +---------- + t +(1 row) + +SELECT nummultirange() @> 'empty'::numrange; + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,null)) @> numrange(1,2); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,null)) @> numrange(null,2); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,null)) @> numrange(2,null); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,5)) @> numrange(null,3); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,5)) @> numrange(null,8); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(5,null)) @> numrange(8,null); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(5,null)) @> numrange(3,null); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,5)) @> numrange(8,9); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,5)) @> numrange(3,9); + ?column? 
+---------- + f +(1 row) + +SELECT nummultirange(numrange(1,5)) @> numrange(1,4); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,5)) @> numrange(1,5); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(-4,-2), numrange(1,5)) @> numrange(1,5); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,5), numrange(8,9)) @> numrange(1,5); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,5), numrange(8,9)) @> numrange(6,7); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,5), numrange(6,9)) @> numrange(6,7); + ?column? +---------- + t +(1 row) + +SELECT '{[1,5)}'::nummultirange @> '{[1,5)}'; + ?column? +---------- + t +(1 row) + +SELECT '{[-4,-2), [1,5)}'::nummultirange @> '{[1,5)}'; + ?column? +---------- + t +(1 row) + +SELECT '{[1,5), [8,9)}'::nummultirange @> '{[1,5)}'; + ?column? +---------- + t +(1 row) + +SELECT '{[1,5), [8,9)}'::nummultirange @> '{[6,7)}'; + ?column? +---------- + f +(1 row) + +SELECT '{[1,5), [6,9)}'::nummultirange @> '{[6,7)}'; + ?column? +---------- + t +(1 row) + +select '{(10,20),(30,40),(50,60)}'::nummultirange @> '(52,56)'::numrange; + ?column? +---------- + t +(1 row) + +SELECT numrange(null,null) @> nummultirange(numrange(1,2)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,null) @> nummultirange(numrange(null,2)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,null) @> nummultirange(numrange(2,null)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,5) @> nummultirange(numrange(null,3)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,5) @> nummultirange(numrange(null,8)); + ?column? +---------- + f +(1 row) + +SELECT numrange(5,null) @> nummultirange(numrange(8,null)); + ?column? +---------- + t +(1 row) + +SELECT numrange(5,null) @> nummultirange(numrange(3,null)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,5) @> nummultirange(numrange(8,9)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,5) @> nummultirange(numrange(3,9)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,5) @> nummultirange(numrange(1,4)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,5) @> nummultirange(numrange(1,5)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,9) @> nummultirange(numrange(-4,-2), numrange(1,5)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,9) @> nummultirange(numrange(1,5), numrange(8,9)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,9) @> nummultirange(numrange(1,5), numrange(6,9)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,9) @> nummultirange(numrange(1,5), numrange(6,10)); + ?column? +---------- + f +(1 row) + +SELECT '{[1,9)}' @> '{[1,5)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT '{[1,9)}' @> '{[-4,-2), [1,5)}'::nummultirange; + ?column? +---------- + f +(1 row) + +SELECT '{[1,9)}' @> '{[1,5), [8,9)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT '{[1,9)}' @> '{[1,5), [6,9)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT '{[1,9)}' @> '{[1,5), [6,10)}'::nummultirange; + ?column? +---------- + f +(1 row) + +-- is contained by +SELECT nummultirange() <@ nummultirange(); + ?column? +---------- + t +(1 row) + +SELECT 'empty'::numrange <@ nummultirange(); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,2) <@ nummultirange(numrange(null,null)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,2) <@ nummultirange(numrange(null,null)); + ?column? 
+---------- + t +(1 row) + +SELECT numrange(2,null) <@ nummultirange(numrange(null,null)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,3) <@ nummultirange(numrange(null,5)); + ?column? +---------- + t +(1 row) + +SELECT numrange(null,8) <@ nummultirange(numrange(null,5)); + ?column? +---------- + f +(1 row) + +SELECT numrange(8,null) <@ nummultirange(numrange(5,null)); + ?column? +---------- + t +(1 row) + +SELECT numrange(3,null) <@ nummultirange(numrange(5,null)); + ?column? +---------- + f +(1 row) + +SELECT numrange(8,9) <@ nummultirange(numrange(1,5)); + ?column? +---------- + f +(1 row) + +SELECT numrange(3,9) <@ nummultirange(numrange(1,5)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,4) <@ nummultirange(numrange(1,5)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,5) <@ nummultirange(numrange(1,5)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,5) <@ nummultirange(numrange(-4,-2), numrange(1,5)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,5) <@ nummultirange(numrange(1,5), numrange(8,9)); + ?column? +---------- + t +(1 row) + +SELECT numrange(6,7) <@ nummultirange(numrange(1,5), numrange(8,9)); + ?column? +---------- + f +(1 row) + +SELECT numrange(6,7) <@ nummultirange(numrange(1,5), numrange(6,9)); + ?column? +---------- + t +(1 row) + +SELECT '{[1,5)}' <@ '{[1,5)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT '{[1,5)}' <@ '{[-4,-2), [1,5)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT '{[1,5)}' <@ '{[1,5), [8,9)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT '{[6,7)}' <@ '{[1,5), [8,9)}'::nummultirange; + ?column? +---------- + f +(1 row) + +SELECT '{[6,7)}' <@ '{[1,5), [6,9)}'::nummultirange; + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,2)) <@ numrange(null,null); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,2)) <@ numrange(null,null); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(2,null)) <@ numrange(null,null); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,3)) <@ numrange(null,5); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(null,8)) <@ numrange(null,5); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(8,null)) <@ numrange(5,null); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,null)) <@ numrange(5,null); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(8,9)) <@ numrange(1,5); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,9)) <@ numrange(1,5); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,4)) <@ numrange(1,5); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,5)) <@ numrange(1,5); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(-4,-2), numrange(1,5)) <@ numrange(1,9); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,5), numrange(8,9)) <@ numrange(1,9); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,5), numrange(6,9)) <@ numrange(1,9); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,5), numrange(6,10)) <@ numrange(1,9); + ?column? +---------- + f +(1 row) + +SELECT '{[1,5)}'::nummultirange <@ '{[1,9)}'; + ?column? +---------- + t +(1 row) + +SELECT '{[-4,-2), [1,5)}'::nummultirange <@ '{[1,9)}'; + ?column? +---------- + f +(1 row) + +SELECT '{[1,5), [8,9)}'::nummultirange <@ '{[1,9)}'; + ?column? 
+---------- + t +(1 row) + +SELECT '{[1,5), [6,9)}'::nummultirange <@ '{[1,9)}'; + ?column? +---------- + t +(1 row) + +SELECT '{[1,5), [6,10)}'::nummultirange <@ '{[1,9)}'; + ?column? +---------- + f +(1 row) + +-- overleft +SELECT 'empty'::numrange &< nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT 'empty'::numrange &< nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() &< 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) &< 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange() &< nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) &< nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() &< nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT numrange(6,7) &< nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,2) &< nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,4) &< nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,6) &< nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT numrange(3.5,6) &< nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(6,7)) &< numrange(3,4); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) &< numrange(3,4); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,4)) &< numrange(3,4); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,6)) &< numrange(3,4); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3.5,6)) &< numrange(3,4); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(6,7)) &< nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) &< nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,4)) &< nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,6)) &< nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3.5,6)) &< nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +-- overright +SELECT nummultirange() &> 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) &> 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT 'empty'::numrange &> nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT 'empty'::numrange &> nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() &> nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() &> nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) &> nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,4)) &> numrange(6,7); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,4)) &> numrange(1,2); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,4)) &> numrange(1,4); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,4)) &> numrange(1,6); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,4)) &> numrange(3.5,6); + ?column? +---------- + f +(1 row) + +SELECT numrange(3,4) &> nummultirange(numrange(6,7)); + ?column? 
+---------- + f +(1 row) + +SELECT numrange(3,4) &> nummultirange(numrange(1,2)); + ?column? +---------- + t +(1 row) + +SELECT numrange(3,4) &> nummultirange(numrange(1,4)); + ?column? +---------- + t +(1 row) + +SELECT numrange(3,4) &> nummultirange(numrange(1,6)); + ?column? +---------- + t +(1 row) + +SELECT numrange(3,4) &> nummultirange(numrange(3.5,6)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,4)) &> nummultirange(numrange(6,7)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(3,4)) &> nummultirange(numrange(1,2)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,4)) &> nummultirange(numrange(1,4)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,4)) &> nummultirange(numrange(1,6)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(3,4)) &> nummultirange(numrange(3.5,6)); + ?column? +---------- + f +(1 row) + +-- meets +SELECT 'empty'::numrange -|- nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT 'empty'::numrange -|- nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() -|- 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- 'empty'::numrange; + ?column? +---------- + f +(1 row) + +SELECT nummultirange() -|- nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- nummultirange(); + ?column? +---------- + f +(1 row) + +SELECT nummultirange() -|- nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +SELECT numrange(1,2) -|- nummultirange(numrange(2,4)); + ?column? +---------- + t +(1 row) + +SELECT numrange(1,2) -|- nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- numrange(2,4); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- numrange(3,4); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- nummultirange(numrange(2,4)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(5,6)) -|- nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(5,6)) -|- nummultirange(numrange(6,7)); + ?column? +---------- + t +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(5,6)) -|- nummultirange(numrange(8,9)); + ?column? +---------- + f +(1 row) + +SELECT nummultirange(numrange(1,2)) -|- nummultirange(numrange(2,4), numrange(6,7)); + ?column? +---------- + t +(1 row) + +-- strictly left +select 'empty'::numrange << nummultirange(); + ?column? +---------- + f +(1 row) + +select numrange(1,2) << nummultirange(); + ?column? +---------- + f +(1 row) + +select numrange(1,2) << nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +select numrange(1,2) << nummultirange(numrange(0,4)); + ?column? +---------- + f +(1 row) + +select numrange(1,2) << nummultirange(numrange(0,4), numrange(7,8)); + ?column? +---------- + f +(1 row) + +select nummultirange() << 'empty'::numrange; + ?column? +---------- + f +(1 row) + +select nummultirange() << numrange(1,2); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(3,4)) << numrange(3,6); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(0,2)) << numrange(3,6); + ?column? 
+---------- + t +(1 row) + +select nummultirange(numrange(0,2), numrange(7,8)) << numrange(3,6); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(-4,-2), numrange(0,2)) << numrange(3,6); + ?column? +---------- + t +(1 row) + +select nummultirange() << nummultirange(); + ?column? +---------- + f +(1 row) + +select nummultirange() << nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(1,2)) << nummultirange(); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(1,2)) << nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(1,2)) << nummultirange(numrange(3,4)); + ?column? +---------- + t +(1 row) + +select nummultirange(numrange(1,2)) << nummultirange(numrange(3,4), numrange(7,8)); + ?column? +---------- + t +(1 row) + +select nummultirange(numrange(1,2), numrange(4,5)) << nummultirange(numrange(3,4), numrange(7,8)); + ?column? +---------- + f +(1 row) + +-- strictly right +select nummultirange() >> 'empty'::numrange; + ?column? +---------- + f +(1 row) + +select nummultirange() >> numrange(1,2); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(3,4)) >> numrange(1,2); + ?column? +---------- + t +(1 row) + +select nummultirange(numrange(0,4)) >> numrange(1,2); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(0,4), numrange(7,8)) >> numrange(1,2); + ?column? +---------- + f +(1 row) + +select 'empty'::numrange >> nummultirange(); + ?column? +---------- + f +(1 row) + +select numrange(1,2) >> nummultirange(); + ?column? +---------- + f +(1 row) + +select numrange(3,6) >> nummultirange(numrange(3,4)); + ?column? +---------- + f +(1 row) + +select numrange(3,6) >> nummultirange(numrange(0,2)); + ?column? +---------- + t +(1 row) + +select numrange(3,6) >> nummultirange(numrange(0,2), numrange(7,8)); + ?column? +---------- + f +(1 row) + +select numrange(3,6) >> nummultirange(numrange(-4,-2), numrange(0,2)); + ?column? +---------- + t +(1 row) + +select nummultirange() >> nummultirange(); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(1,2)) >> nummultirange(); + ?column? +---------- + f +(1 row) + +select nummultirange() >> nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(1,2)) >> nummultirange(numrange(1,2)); + ?column? +---------- + f +(1 row) + +select nummultirange(numrange(3,4)) >> nummultirange(numrange(1,2)); + ?column? +---------- + t +(1 row) + +select nummultirange(numrange(3,4), numrange(7,8)) >> nummultirange(numrange(1,2)); + ?column? +---------- + t +(1 row) + +select nummultirange(numrange(3,4), numrange(7,8)) >> nummultirange(numrange(1,2), numrange(4,5)); + ?column? +---------- + f +(1 row) + +-- union +SELECT nummultirange() + nummultirange(); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange() + nummultirange(numrange(1,2)); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2)) + nummultirange(); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2)) + nummultirange(numrange(1,2)); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2)) + nummultirange(numrange(2,4)); + ?column? +---------- + {[1,4)} +(1 row) + +SELECT nummultirange(numrange(1,2)) + nummultirange(numrange(3,4)); + ?column? +--------------- + {[1,2),[3,4)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) + nummultirange(numrange(2,4)); + ?column? 
+---------- + {[1,5)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) + nummultirange(numrange(3,4)); + ?column? +--------------- + {[1,2),[3,5)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) + nummultirange(numrange(0,9)); + ?column? +---------- + {[0,9)} +(1 row) + +-- merge +SELECT range_merge(nummultirange()); + range_merge +------------- + empty +(1 row) + +SELECT range_merge(nummultirange(numrange(1,2))); + range_merge +------------- + [1,2) +(1 row) + +SELECT range_merge(nummultirange(numrange(1,2), numrange(7,8))); + range_merge +------------- + [1,8) +(1 row) + +-- minus +SELECT nummultirange() - nummultirange(); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange() - nummultirange(numrange(1,2)); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange(numrange(1,2)) - nummultirange(); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(3,4)) - nummultirange(); + ?column? +--------------- + {[1,2),[3,4)} +(1 row) + +SELECT nummultirange(numrange(1,2)) - nummultirange(numrange(1,2)); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange(numrange(1,2)) - nummultirange(numrange(2,4)); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2)) - nummultirange(numrange(3,4)); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,4)) - nummultirange(numrange(1,2)); + ?column? +---------- + {[2,4)} +(1 row) + +SELECT nummultirange(numrange(1,4)) - nummultirange(numrange(2,3)); + ?column? +--------------- + {[1,2),[3,4)} +(1 row) + +SELECT nummultirange(numrange(1,4)) - nummultirange(numrange(0,8)); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange(numrange(1,4)) - nummultirange(numrange(0,2)); + ?column? +---------- + {[2,4)} +(1 row) + +SELECT nummultirange(numrange(1,8)) - nummultirange(numrange(0,2), numrange(3,4)); + ?column? +--------------- + {[2,3),[4,8)} +(1 row) + +SELECT nummultirange(numrange(1,8)) - nummultirange(numrange(2,3), numrange(5,null)); + ?column? +--------------- + {[1,2),[3,5)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(-2,0)); + ?column? +--------------- + {[1,2),[4,5)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(2,4)); + ?column? +--------------- + {[1,2),[4,5)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(3,5)); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(0,9)); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange(numrange(1,3), numrange(4,5)) - nummultirange(numrange(2,9)); + ?column? +---------- + {[1,2)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(8,9)); + ?column? +--------------- + {[1,2),[4,5)} +(1 row) + +SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(-2,0), numrange(8,9)); + ?column? +--------------- + {[1,2),[4,5)} +(1 row) + +-- intersection +SELECT nummultirange() * nummultirange(); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange() * nummultirange(numrange(1,2)); + ?column? +---------- + {} +(1 row) + +SELECT nummultirange(numrange(1,2)) * nummultirange(); + ?column? +---------- + {} +(1 row) + +SELECT '{[1,3)}'::nummultirange * '{[1,5)}'::nummultirange; + ?column? +---------- + {[1,3)} +(1 row) + +SELECT '{[1,3)}'::nummultirange * '{[0,5)}'::nummultirange; + ?column? 
+---------- + {[1,3)} +(1 row) + +SELECT '{[1,3)}'::nummultirange * '{[0,2)}'::nummultirange; + ?column? +---------- + {[1,2)} +(1 row) + +SELECT '{[1,3)}'::nummultirange * '{[2,5)}'::nummultirange; + ?column? +---------- + {[2,3)} +(1 row) + +SELECT '{[1,4)}'::nummultirange * '{[2,3)}'::nummultirange; + ?column? +---------- + {[2,3)} +(1 row) + +SELECT '{[1,4)}'::nummultirange * '{[0,2), [3,5)}'::nummultirange; + ?column? +--------------- + {[1,2),[3,4)} +(1 row) + +SELECT '{[1,4), [7,10)}'::nummultirange * '{[0,8), [9,12)}'::nummultirange; + ?column? +---------------------- + {[1,4),[7,8),[9,10)} +(1 row) + +SELECT '{[1,4), [7,10)}'::nummultirange * '{[9,12)}'::nummultirange; + ?column? +---------- + {[9,10)} +(1 row) + +SELECT '{[1,4), [7,10)}'::nummultirange * '{[-5,-4), [5,6), [9,12)}'::nummultirange; + ?column? +---------- + {[9,10)} +(1 row) + +SELECT '{[1,4), [7,10)}'::nummultirange * '{[0,2), [3,8), [9,12)}'::nummultirange; + ?column? +---------------------------- + {[1,2),[3,4),[7,8),[9,10)} +(1 row) + +SELECT '{[1,4), [7,10)}'::nummultirange * '{[0,2), [3,8), [9,12)}'::nummultirange; + ?column? +---------------------------- + {[1,2),[3,4),[7,8),[9,10)} +(1 row) + +-- test GiST index +create table test_multirange_gist(mr int4multirange); +insert into test_multirange_gist select int4multirange(int4range(g, g+10),int4range(g+20, g+30),int4range(g+40, g+50)) from generate_series(1,2000) g; +insert into test_multirange_gist select '{}'::int4multirange from generate_series(1,500) g; +insert into test_multirange_gist select int4multirange(int4range(g, g+10000)) from generate_series(1,1000) g; +insert into test_multirange_gist select int4multirange(int4range(NULL, g*10, '(]'), int4range(g*10, g*20, '(]')) from generate_series(1,100) g; +insert into test_multirange_gist select int4multirange(int4range(g*10, g*20, '(]'), int4range(g*20, NULL, '(]')) from generate_series(1,100) g; +create index test_mulrirange_gist_idx on test_multirange_gist using gist (mr); +-- test statistics and selectivity estimation as well +-- +-- We don't check the accuracy of selectivity estimation, but at least check +-- it doesn't fall. 
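(Editorial sketch, not part of the expected regression output.) As the comment above notes, the suite only verifies that these estimation code paths run; it cannot pin down concrete selectivity numbers, since those depend on the sampled statistics. To eyeball estimate quality by hand, one could compare the planner's estimated row counts against the actual counts for one of the indexed operators used below — illustrative only, to be run after the analyze statement that follows:

-- each plan node prints the estimated "rows=" next to the "actual ... rows=" figure
EXPLAIN (ANALYZE, TIMING OFF)
SELECT count(*) FROM test_multirange_gist WHERE mr && int4range(10, 20);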
+analyze test_multirange_gist; +-- first, verify non-indexed results +SET enable_seqscan = t; +SET enable_indexscan = f; +SET enable_bitmapscan = f; +select count(*) from test_multirange_gist where mr = '{}'::int4multirange; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr @> 'empty'::int4range; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr && 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr <@ 'empty'::int4range; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr >> 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &< 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &> 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr -|- 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr @> '{}'::int4multirange; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr @> '{}'::int4multirange; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr && '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr <@ '{}'::int4multirange; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr >> '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &< '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &> '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr -|- '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr = int4multirange(int4range(10,20), int4range(30,40), int4range(50,60)); + count +------- + 1 +(1 row) + +select count(*) from test_multirange_gist where mr @> 10; + count +------- + 120 +(1 row) + +select count(*) from test_multirange_gist where mr @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_multirange_gist where mr && int4range(10,20); + count +------- + 139 +(1 row) + +select count(*) from test_multirange_gist where mr <@ int4range(10,50); + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << int4range(100,500); + count +------- + 54 +(1 row) + +select count(*) from test_multirange_gist where mr >> int4range(100,500); + count +------- + 2053 +(1 row) + +select count(*) from test_multirange_gist where mr &< int4range(100,500); + count +------- + 474 +(1 row) + +select count(*) from test_multirange_gist where mr &> int4range(100,500); + count +------- + 2893 +(1 row) + +select count(*) from test_multirange_gist where mr -|- int4range(100,500); + count +------- + 3 +(1 row) + +select count(*) from test_multirange_gist where mr @> '{}'::int4multirange; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr @> int4multirange(int4range(10,20), int4range(30,40)); + count +------- + 110 +(1 row) + +select count(*) from test_multirange_gist where mr && 
'{(10,20),(30,40),(50,60)}'::int4multirange; + count +------- + 218 +(1 row) + +select count(*) from test_multirange_gist where mr <@ '{(10,30),(40,60),(70,90)}'::int4multirange; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 54 +(1 row) + +select count(*) from test_multirange_gist where mr >> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 2053 +(1 row) + +select count(*) from test_multirange_gist where mr &< int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 474 +(1 row) + +select count(*) from test_multirange_gist where mr &> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 2893 +(1 row) + +select count(*) from test_multirange_gist where mr -|- int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 3 +(1 row) + +-- now check same queries using index +SET enable_seqscan = f; +SET enable_indexscan = t; +SET enable_bitmapscan = f; +select count(*) from test_multirange_gist where mr = '{}'::int4multirange; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr @> 'empty'::int4range; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr && 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr <@ 'empty'::int4range; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr >> 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &< 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &> 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr -|- 'empty'::int4range; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr @> '{}'::int4multirange; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr @> '{}'::int4multirange; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr && '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr <@ '{}'::int4multirange; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr >> '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &< '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr &> '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr -|- '{}'::int4multirange; + count +------- + 0 +(1 row) + +select count(*) from test_multirange_gist where mr @> 'empty'::int4range; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr = int4multirange(int4range(10,20), int4range(30,40), int4range(50,60)); + count +------- + 1 +(1 row) + +select count(*) from test_multirange_gist where mr @> 10; + count +------- + 120 +(1 row) + +select count(*) from test_multirange_gist where mr @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_multirange_gist where mr && 
int4range(10,20); + count +------- + 139 +(1 row) + +select count(*) from test_multirange_gist where mr <@ int4range(10,50); + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << int4range(100,500); + count +------- + 54 +(1 row) + +select count(*) from test_multirange_gist where mr >> int4range(100,500); + count +------- + 2053 +(1 row) + +select count(*) from test_multirange_gist where mr &< int4range(100,500); + count +------- + 474 +(1 row) + +select count(*) from test_multirange_gist where mr &> int4range(100,500); + count +------- + 2893 +(1 row) + +select count(*) from test_multirange_gist where mr -|- int4range(100,500); + count +------- + 3 +(1 row) + +select count(*) from test_multirange_gist where mr @> '{}'::int4multirange; + count +------- + 3700 +(1 row) + +select count(*) from test_multirange_gist where mr @> int4multirange(int4range(10,20), int4range(30,40)); + count +------- + 110 +(1 row) + +select count(*) from test_multirange_gist where mr && '{(10,20),(30,40),(50,60)}'::int4multirange; + count +------- + 218 +(1 row) + +select count(*) from test_multirange_gist where mr <@ '{(10,30),(40,60),(70,90)}'::int4multirange; + count +------- + 500 +(1 row) + +select count(*) from test_multirange_gist where mr << int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 54 +(1 row) + +select count(*) from test_multirange_gist where mr >> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 2053 +(1 row) + +select count(*) from test_multirange_gist where mr &< int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 474 +(1 row) + +select count(*) from test_multirange_gist where mr &> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 2893 +(1 row) + +select count(*) from test_multirange_gist where mr -|- int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 3 +(1 row) + +drop table test_multirange_gist; +-- +-- range_agg function +-- +create table reservations ( room_id integer not null, booked_during daterange ); +insert into reservations values +-- 1: has a meets and a gap +(1, daterange('2018-07-01', '2018-07-07')), +(1, daterange('2018-07-07', '2018-07-14')), +(1, daterange('2018-07-20', '2018-07-22')), +-- 2: just a single row +(2, daterange('2018-07-01', '2018-07-03')), +-- 3: one null range +(3, NULL), +-- 4: two null ranges +(4, NULL), +(4, NULL), +-- 5: a null range and a non-null range +(5, NULL), +(5, daterange('2018-07-01', '2018-07-03')), +-- 6: has overlap +(6, daterange('2018-07-01', '2018-07-07')), +(6, daterange('2018-07-05', '2018-07-10')), +-- 7: two ranges that meet: no gap or overlap +(7, daterange('2018-07-01', '2018-07-07')), +(7, daterange('2018-07-07', '2018-07-14')), +-- 8: an empty range +(8, 'empty'::daterange) +; +SELECT room_id, range_agg(booked_during) +FROM reservations +GROUP BY room_id +ORDER BY room_id; + room_id | range_agg +---------+--------------------------------------------------- + 1 | {[07-01-2018,07-14-2018),[07-20-2018,07-22-2018)} + 2 | {[07-01-2018,07-03-2018)} + 3 | + 4 | + 5 | {[07-01-2018,07-03-2018)} + 6 | {[07-01-2018,07-10-2018)} + 7 | {[07-01-2018,07-14-2018)} + 8 | {} +(8 rows) + +-- range_agg on a custom range type too +SELECT range_agg(r) +FROM (VALUES + ('[a,c]'::textrange), + ('[b,b]'::textrange), + ('[c,f]'::textrange), + ('[g,h)'::textrange), + ('[h,j)'::textrange) + ) t(r); + range_agg +--------------- + {[a,f],[g,j)} +(1 row) + +-- range_agg with multirange inputs +select 
range_agg(nmr) from nummultirange_test; + range_agg +----------- + {(,)} +(1 row) + +select range_agg(nmr) from nummultirange_test where false; + range_agg +----------- + +(1 row) + +select range_agg(null::nummultirange) from nummultirange_test; + range_agg +----------- + +(1 row) + +select range_agg(nmr) from (values ('{}'::nummultirange)) t(nmr); + range_agg +----------- + {} +(1 row) + +select range_agg(nmr) from (values ('{}'::nummultirange), ('{}'::nummultirange)) t(nmr); + range_agg +----------- + {} +(1 row) + +select range_agg(nmr) from (values ('{[1,2]}'::nummultirange)) t(nmr); + range_agg +----------- + {[1,2]} +(1 row) + +select range_agg(nmr) from (values ('{[1,2], [5,6]}'::nummultirange)) t(nmr); + range_agg +--------------- + {[1,2],[5,6]} +(1 row) + +select range_agg(nmr) from (values ('{[1,2], [2,3]}'::nummultirange)) t(nmr); + range_agg +----------- + {[1,3]} +(1 row) + +select range_agg(nmr) from (values ('{[1,2]}'::nummultirange), ('{[5,6]}'::nummultirange)) t(nmr); + range_agg +--------------- + {[1,2],[5,6]} +(1 row) + +select range_agg(nmr) from (values ('{[1,2]}'::nummultirange), ('{[2,3]}'::nummultirange)) t(nmr); + range_agg +----------- + {[1,3]} +(1 row) + +-- +-- range_intersect_agg function +-- +select range_intersect_agg(nmr) from nummultirange_test; + range_intersect_agg +--------------------- + {} +(1 row) + +select range_intersect_agg(nmr) from nummultirange_test where false; + range_intersect_agg +--------------------- + +(1 row) + +select range_intersect_agg(null::nummultirange) from nummultirange_test; + range_intersect_agg +--------------------- + +(1 row) + +select range_intersect_agg(nmr) from (values ('{[1,3]}'::nummultirange), ('{[6,12]}'::nummultirange)) t(nmr); + range_intersect_agg +--------------------- + {} +(1 row) + +select range_intersect_agg(nmr) from (values ('{[1,6]}'::nummultirange), ('{[3,12]}'::nummultirange)) t(nmr); + range_intersect_agg +--------------------- + {[3,6]} +(1 row) + +select range_intersect_agg(nmr) from (values ('{[1,6], [10,12]}'::nummultirange), ('{[4,14]}'::nummultirange)) t(nmr); + range_intersect_agg +--------------------- + {[4,6],[10,12]} +(1 row) + +-- test with just one input: +select range_intersect_agg(nmr) from (values ('{}'::nummultirange)) t(nmr); + range_intersect_agg +--------------------- + {} +(1 row) + +select range_intersect_agg(nmr) from (values ('{[1,2]}'::nummultirange)) t(nmr); + range_intersect_agg +--------------------- + {[1,2]} +(1 row) + +select range_intersect_agg(nmr) from (values ('{[1,6], [10,12]}'::nummultirange)) t(nmr); + range_intersect_agg +--------------------- + {[1,6],[10,12]} +(1 row) + +select range_intersect_agg(nmr) from nummultirange_test where nmr @> 4.0; + range_intersect_agg +--------------------- + {[3,5)} +(1 row) + +create table nummultirange_test2(nmr nummultirange); +create index nummultirange_test2_hash_idx on nummultirange_test2 using hash (nmr); +INSERT INTO nummultirange_test2 VALUES('{[, 5)}'); +INSERT INTO nummultirange_test2 VALUES(nummultirange(numrange(1.1, 2.2))); +INSERT INTO nummultirange_test2 VALUES(nummultirange(numrange(1.1, 2.2))); +INSERT INTO nummultirange_test2 VALUES(nummultirange(numrange(1.1, 2.2,'()'))); +INSERT INTO nummultirange_test2 VALUES('{}'); +select * from nummultirange_test2 where nmr = '{}'; + nmr +----- + {} +(1 row) + +select * from nummultirange_test2 where nmr = nummultirange(numrange(1.1, 2.2)); + nmr +------------- + {[1.1,2.2)} + {[1.1,2.2)} +(2 rows) + +select * from nummultirange_test2 where nmr = 
nummultirange(numrange(1.1, 2.3)); + nmr +----- +(0 rows) + +set enable_nestloop=t; +set enable_hashjoin=f; +set enable_mergejoin=f; +select * from nummultirange_test natural join nummultirange_test2 order by nmr; + nmr +------------- + {} + {} + {} + {} + {(,5)} + {[1.1,2.2)} + {[1.1,2.2)} +(7 rows) + +set enable_nestloop=f; +set enable_hashjoin=t; +set enable_mergejoin=f; +select * from nummultirange_test natural join nummultirange_test2 order by nmr; + nmr +------------- + {} + {} + {} + {} + {(,5)} + {[1.1,2.2)} + {[1.1,2.2)} +(7 rows) + +set enable_nestloop=f; +set enable_hashjoin=f; +set enable_mergejoin=t; +select * from nummultirange_test natural join nummultirange_test2 order by nmr; + nmr +------------- + {} + {} + {} + {} + {(,5)} + {[1.1,2.2)} + {[1.1,2.2)} +(7 rows) + +set enable_nestloop to default; +set enable_hashjoin to default; +set enable_mergejoin to default; +DROP TABLE nummultirange_test2; +-- +-- Test user-defined multirange of floats +-- +select '{[123.001, 5.e9)}'::float8multirange @> 888.882::float8; + ?column? +---------- + t +(1 row) + +create table float8multirange_test(f8mr float8multirange, i int); +insert into float8multirange_test values(float8multirange(float8range(-100.00007, '1.111113e9')), 42); +select * from float8multirange_test; + f8mr | i +---------------------------+---- + {[-100.00007,1111113000)} | 42 +(1 row) + +drop table float8multirange_test; +-- +-- Test multirange types over domains +-- +create domain mydomain as int4; +create type mydomainrange as range(subtype=mydomain); +select '{[4,50)}'::mydomainmultirange @> 7::mydomain; + ?column? +---------- + t +(1 row) + +drop domain mydomain cascade; +NOTICE: drop cascades to type mydomainrange +-- +-- Test domains over multirange types +-- +create domain restrictedmultirange as int4multirange check (upper(value) < 10); +select '{[4,5)}'::restrictedmultirange @> 7; + ?column? +---------- + f +(1 row) + +select '{[4,50)}'::restrictedmultirange @> 7; -- should fail +ERROR: value for domain restrictedmultirange violates check constraint "restrictedmultirange_check" +drop domain restrictedmultirange; +--- +-- Check automatic naming of multiranges +--- +create type intr as range(subtype=int); +select intr_multirange(intr(1,10)); + intr_multirange +----------------- + {[1,10)} +(1 row) + +drop type intr; +create type intmultirange as (x int, y int); +create type intrange as range(subtype=int); -- should fail +ERROR: type "intmultirange" already exists +DETAIL: Failed while creating a multirange type for type "intrange". +HINT: You can manually specify a multirange type name using the "multirange_type_name" attribute. +drop type intmultirange; +create type intr_multirange as (x int, y int); +create type intr as range(subtype=int); -- should fail +ERROR: type "intr_multirange" already exists +DETAIL: Failed while creating a multirange type for type "intr". +HINT: You can manually specify a multirange type name using the "multirange_type_name" attribute. +drop type intr_multirange; +-- +-- Test multiple multirange types over the same subtype and manual naming of +-- the multirange type. 
+-- +-- should fail +create type textrange1 as range(subtype=text, multirange_type_name=int, collation="C"); +ERROR: type "int4" already exists +-- should pass +create type textrange1 as range(subtype=text, multirange_type_name=multirange_of_text, collation="C"); +-- should pass, because existing _textrange1 is automatically renamed +create type textrange2 as range(subtype=text, multirange_type_name=_textrange1, collation="C"); +select multirange_of_text(textrange2('a','Z')); -- should fail +ERROR: function multirange_of_text(textrange2) does not exist +LINE 1: select multirange_of_text(textrange2('a','Z')); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select multirange_of_text(textrange1('a','Z')) @> 'b'::text; +ERROR: range lower bound must be less than or equal to range upper bound +select unnest(multirange_of_text(textrange1('a','b'), textrange1('d','e'))); + unnest +-------- + [a,b) + [d,e) +(2 rows) + +select _textrange1(textrange2('a','z')) @> 'b'::text; + ?column? +---------- + t +(1 row) + +drop type textrange1; +drop type textrange2; +-- +-- Test polymorphic type system +-- +create function anyarray_anymultirange_func(a anyarray, r anymultirange) + returns anyelement as 'select $1[1] + lower($2);' language sql; +select anyarray_anymultirange_func(ARRAY[1,2], int4multirange(int4range(10,20))); + anyarray_anymultirange_func +----------------------------- + 11 +(1 row) + +-- should fail +select anyarray_anymultirange_func(ARRAY[1,2], nummultirange(numrange(10,20))); +ERROR: function anyarray_anymultirange_func(integer[], nummultirange) does not exist +LINE 1: select anyarray_anymultirange_func(ARRAY[1,2], nummultirange... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyarray_anymultirange_func(anyarray, anymultirange); +-- should fail +create function bogus_func(anyelement) + returns anymultirange as 'select int4multirange(int4range(1,10))' language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anymultirange requires at least one input of type anyrange or anymultirange. +-- should fail +create function bogus_func(int) + returns anymultirange as 'select int4multirange(int4range(1,10))' language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anymultirange requires at least one input of type anyrange or anymultirange. +create function range_add_bounds(anymultirange) + returns anyelement as 'select lower($1) + upper($1)' language sql; +select range_add_bounds(int4multirange(int4range(1, 17))); + range_add_bounds +------------------ + 18 +(1 row) + +select range_add_bounds(nummultirange(numrange(1.0001, 123.123))); + range_add_bounds +------------------ + 124.1231 +(1 row) + +create function multirangetypes_sql(q anymultirange, b anyarray, out c anyelement) + as $$ select upper($1) + $2[1] $$ + language sql; +select multirangetypes_sql(int4multirange(int4range(1,10)), ARRAY[2,20]); + multirangetypes_sql +--------------------- + 12 +(1 row) + +select multirangetypes_sql(nummultirange(numrange(1,10)), ARRAY[2,20]); -- match failure +ERROR: function multirangetypes_sql(nummultirange, integer[]) does not exist +LINE 1: select multirangetypes_sql(nummultirange(numrange(1,10)), AR... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+create function anycompatiblearray_anycompatiblemultirange_func(a anycompatiblearray, mr anycompatiblemultirange) + returns anycompatible as 'select $1[1] + lower($2);' language sql; +select anycompatiblearray_anycompatiblemultirange_func(ARRAY[1,2], multirange(int4range(10,20))); + anycompatiblearray_anycompatiblemultirange_func +------------------------------------------------- + 11 +(1 row) + +select anycompatiblearray_anycompatiblemultirange_func(ARRAY[1,2], multirange(numrange(10,20))); + anycompatiblearray_anycompatiblemultirange_func +------------------------------------------------- + 11 +(1 row) + +-- should fail +select anycompatiblearray_anycompatiblemultirange_func(ARRAY[1.1,2], multirange(int4range(10,20))); +ERROR: function anycompatiblearray_anycompatiblemultirange_func(numeric[], int4multirange) does not exist +LINE 1: select anycompatiblearray_anycompatiblemultirange_func(ARRAY... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anycompatiblearray_anycompatiblemultirange_func(anycompatiblearray, anycompatiblemultirange); +create function anycompatiblerange_anycompatiblemultirange_func(r anycompatiblerange, mr anycompatiblemultirange) + returns anycompatible as 'select lower($1) + lower($2);' language sql; +select anycompatiblerange_anycompatiblemultirange_func(int4range(1,2), multirange(int4range(10,20))); + anycompatiblerange_anycompatiblemultirange_func +------------------------------------------------- + 11 +(1 row) + +-- should fail +select anycompatiblerange_anycompatiblemultirange_func(numrange(1,2), multirange(int4range(10,20))); +ERROR: function anycompatiblerange_anycompatiblemultirange_func(numrange, int4multirange) does not exist +LINE 1: select anycompatiblerange_anycompatiblemultirange_func(numra... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anycompatiblerange_anycompatiblemultirange_func(anycompatiblerange, anycompatiblemultirange); +-- should fail +create function bogus_func(anycompatible) + returns anycompatiblerange as 'select int4range(1,10)' language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. +-- +-- Arrays of multiranges +-- +select ARRAY[nummultirange(numrange(1.1, 1.2)), nummultirange(numrange(12.3, 155.5))]; + array +---------------------------------- + {"{[1.1,1.2)}","{[12.3,155.5)}"} +(1 row) + +create table i8mr_array (f1 int, f2 int8multirange[]); +insert into i8mr_array values (42, array[int8multirange(int8range(1,10)), int8multirange(int8range(2,20))]); +select * from i8mr_array; + f1 | f2 +----+------------------------- + 42 | {"{[1,10)}","{[2,20)}"} +(1 row) + +drop table i8mr_array; +-- +-- Multiranges of arrays +-- +select arraymultirange(arrayrange(ARRAY[1,2], ARRAY[2,1])); + arraymultirange +--------------------- + {["{1,2}","{2,1}")} +(1 row) + +select arraymultirange(arrayrange(ARRAY[2,1], ARRAY[1,2])); -- fail +ERROR: range lower bound must be less than or equal to range upper bound +select array[1,1] <@ arraymultirange(arrayrange(array[1,2], array[2,1])); + ?column? +---------- + f +(1 row) + +select array[1,3] <@ arraymultirange(arrayrange(array[1,2], array[2,1])); + ?column? 
+---------- + t +(1 row) + +-- +-- Ranges of composites +-- +create type two_ints as (a int, b int); +create type two_ints_range as range (subtype = two_ints); +-- with debug_parallel_query on, this exercises tqueue.c's range remapping +select *, row_to_json(upper(t)) as u from + (values (two_ints_multirange(two_ints_range(row(1,2), row(3,4)))), + (two_ints_multirange(two_ints_range(row(5,6), row(7,8))))) v(t); + t | u +---------------------+--------------- + {["(1,2)","(3,4)")} | {"a":3,"b":4} + {["(5,6)","(7,8)")} | {"a":7,"b":8} +(2 rows) + +drop type two_ints cascade; +NOTICE: drop cascades to type two_ints_range +-- +-- Check behavior when subtype lacks a hash function +-- +set enable_sort = off; -- try to make it pick a hash setop implementation +select '{(2,5)}'::cashmultirange except select '{(5,6)}'::cashmultirange; + cashmultirange +----------------- + {($2.00,$5.00)} +(1 row) + +reset enable_sort; +-- +-- OUT/INOUT/TABLE functions +-- +-- infer anymultirange from anymultirange +create function mr_outparam_succeed(i anymultirange, out r anymultirange, out t text) + as $$ select $1, 'foo'::text $$ language sql; +select * from mr_outparam_succeed(int4multirange(int4range(1,2))); + r | t +---------+----- + {[1,2)} | foo +(1 row) + +-- infer anyarray from anymultirange +create function mr_outparam_succeed2(i anymultirange, out r anyarray, out t text) + as $$ select ARRAY[upper($1)], 'foo'::text $$ language sql; +select * from mr_outparam_succeed2(int4multirange(int4range(1,2))); + r | t +-----+----- + {2} | foo +(1 row) + +-- infer anyrange from anymultirange +create function mr_outparam_succeed3(i anymultirange, out r anyrange, out t text) + as $$ select range_merge($1), 'foo'::text $$ language sql; +select * from mr_outparam_succeed3(int4multirange(int4range(1,2))); + r | t +-------+----- + [1,2) | foo +(1 row) + +-- infer anymultirange from anyrange +create function mr_outparam_succeed4(i anyrange, out r anymultirange, out t text) + as $$ select multirange($1), 'foo'::text $$ language sql; +select * from mr_outparam_succeed4(int4range(1,2)); + r | t +---------+----- + {[1,2)} | foo +(1 row) + +-- infer anyelement from anymultirange +create function mr_inoutparam_succeed(out i anyelement, inout r anymultirange) + as $$ select upper($1), $1 $$ language sql; +select * from mr_inoutparam_succeed(int4multirange(int4range(1,2))); + i | r +---+--------- + 2 | {[1,2)} +(1 row) + +-- infer anyelement+anymultirange from anyelement+anymultirange +create function mr_table_succeed(i anyelement, r anymultirange) returns table(i anyelement, r anymultirange) + as $$ select $1, $2 $$ language sql; +select * from mr_table_succeed(123, int4multirange(int4range(1,11))); + i | r +-----+---------- + 123 | {[1,11)} +(1 row) + +-- use anymultirange in plpgsql +create function mr_polymorphic(i anyrange) returns anymultirange + as $$ begin return multirange($1); end; $$ language plpgsql; +select mr_polymorphic(int4range(1, 4)); + mr_polymorphic +---------------- + {[1,4)} +(1 row) + +-- should fail +create function mr_outparam_fail(i anyelement, out r anymultirange, out t text) + as $$ select '[1,10]', 'foo' $$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anymultirange requires at least one input of type anyrange or anymultirange. 
+--should fail +create function mr_inoutparam_fail(inout i anyelement, out r anymultirange) + as $$ select $1, '[1,10]' $$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anymultirange requires at least one input of type anyrange or anymultirange. +--should fail +create function mr_table_fail(i anyelement) returns table(i anyelement, r anymultirange) + as $$ select $1, '[1,10]' $$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anymultirange requires at least one input of type anyrange or anymultirange. diff --git a/src/test/regress/expected/mvcc.out b/src/test/regress/expected/mvcc.out new file mode 100644 index 0000000..225c39f --- /dev/null +++ b/src/test/regress/expected/mvcc.out @@ -0,0 +1,42 @@ +-- +-- Verify that index scans encountering dead rows produced by an +-- aborted subtransaction of the current transaction can utilize the +-- kill_prior_tuple optimization +-- +-- NB: The table size is currently *not* expected to stay the same, we +-- don't have logic to trigger opportunistic pruning in cases like +-- this. +BEGIN; +SET LOCAL enable_seqscan = false; +SET LOCAL enable_indexonlyscan = false; +SET LOCAL enable_bitmapscan = false; +-- Can't easily use a unique index, since dead tuples can be found +-- independent of the kill_prior_tuples optimization. +CREATE TABLE clean_aborted_self(key int, data text); +CREATE INDEX clean_aborted_self_key ON clean_aborted_self(key); +INSERT INTO clean_aborted_self (key, data) VALUES (-1, 'just to allocate metapage'); +-- save index size from before the changes, for comparison +SELECT pg_relation_size('clean_aborted_self_key') AS clean_aborted_self_key_before \gset +DO $$ +BEGIN + -- iterate often enough to see index growth even on larger-than-default page sizes + FOR i IN 1..100 LOOP + BEGIN + -- perform index scan over all the inserted keys to get them to be seen as dead + IF EXISTS(SELECT * FROM clean_aborted_self WHERE key > 0 AND key < 100) THEN + RAISE data_corrupted USING MESSAGE = 'these rows should not exist'; + END IF; + INSERT INTO clean_aborted_self SELECT g.i, 'rolling back in a sec' FROM generate_series(1, 100) g(i); + -- just some error that's not normally thrown + RAISE reading_sql_data_not_permitted USING MESSAGE = 'round and round again'; + EXCEPTION WHEN reading_sql_data_not_permitted THEN END; + END LOOP; +END;$$; +-- show sizes only if they differ +SELECT :clean_aborted_self_key_before AS size_before, pg_relation_size('clean_aborted_self_key') size_after +WHERE :clean_aborted_self_key_before != pg_relation_size('clean_aborted_self_key'); + size_before | size_after +-------------+------------ +(0 rows) + +ROLLBACK; diff --git a/src/test/regress/expected/name.out b/src/test/regress/expected/name.out new file mode 100644 index 0000000..d58df2b --- /dev/null +++ b/src/test/regress/expected/name.out @@ -0,0 +1,197 @@ +-- +-- NAME +-- all inputs are silently truncated at NAMEDATALEN-1 (63) characters +-- +-- fixed-length by reference +SELECT name 'name string' = name 'name string' AS "True"; + True +------ + t +(1 row) + +SELECT name 'name string' = name 'name string ' AS "False"; + False +------- + f +(1 row) + +-- +-- +-- +CREATE TABLE NAME_TBL(f1 name); +INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'); +INSERT INTO NAME_TBL(f1) VALUES ('1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqr'); +INSERT INTO NAME_TBL(f1) VALUES ('asdfghjkl;'); +INSERT INTO NAME_TBL(f1) VALUES ('343f%2a'); +INSERT 
INTO NAME_TBL(f1) VALUES ('d34aaasdf'); +INSERT INTO NAME_TBL(f1) VALUES (''); +INSERT INTO NAME_TBL(f1) VALUES ('1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'); +SELECT * FROM NAME_TBL; + f1 +----------------------------------------------------------------- + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ + 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq + asdfghjkl; + 343f%2a + d34aaasdf + + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ +(7 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <> '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'; + f1 +----------------------------------------------------------------- + 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq + asdfghjkl; + 343f%2a + d34aaasdf + +(5 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'; + f1 +----------------------------------------------------------------- + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ +(2 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 < '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'; + f1 +---- + +(1 row) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 <= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'; + f1 +----------------------------------------------------------------- + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ + + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ +(3 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 > '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'; + f1 +----------------------------------------------------------------- + 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq + asdfghjkl; + 343f%2a + d34aaasdf +(4 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 >= '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQR'; + f1 +----------------------------------------------------------------- + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ + 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq + asdfghjkl; + 343f%2a + d34aaasdf + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ +(6 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*'; + f1 +----------------------------------------------------------------- + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ + 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq + asdfghjkl; + 343f%2a + d34aaasdf + + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ +(7 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 !~ '.*'; + f1 +---- +(0 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '[0-9]'; + f1 +----------------------------------------------------------------- + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ + 1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopq + 343f%2a + d34aaasdf + 1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890ABCDEFGHIJKLMNOPQ +(5 rows) + +SELECT c.f1 FROM NAME_TBL c WHERE c.f1 ~ '.*asdf.*'; + f1 +------------ + asdfghjkl; + d34aaasdf +(2 rows) + +DROP TABLE NAME_TBL; +DO $$ +DECLARE r text[]; +BEGIN + r := parse_ident('Schemax.Tabley'); + RAISE NOTICE '%', format('%I.%I', r[1], r[2]); + r := parse_ident('"SchemaX"."TableY"'); + RAISE NOTICE '%', format('%I.%I', r[1], r[2]); +END; +$$; +NOTICE: schemax.tabley +NOTICE: "SchemaX"."TableY" +SELECT 
parse_ident('foo.boo'); + parse_ident +------------- + {foo,boo} +(1 row) + +SELECT parse_ident('foo.boo[]'); -- should fail +ERROR: string is not a valid identifier: "foo.boo[]" +SELECT parse_ident('foo.boo[]', strict => false); -- ok + parse_ident +------------- + {foo,boo} +(1 row) + +-- should fail +SELECT parse_ident(' '); +ERROR: string is not a valid identifier: " " +SELECT parse_ident(' .aaa'); +ERROR: string is not a valid identifier: " .aaa" +DETAIL: No valid identifier before ".". +SELECT parse_ident(' aaa . '); +ERROR: string is not a valid identifier: " aaa . " +DETAIL: No valid identifier after ".". +SELECT parse_ident('aaa.a%b'); +ERROR: string is not a valid identifier: "aaa.a%b" +SELECT parse_ident(E'X\rXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'); +ERROR: string is not a valid identifier: "X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" +SELECT length(a[1]), length(a[2]) from parse_ident('"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx".yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy') as a ; + length | length +--------+-------- + 414 | 289 +(1 row) + +SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"'); + parse_ident +----------------------------------------------------------------------------------------------------------- + {first," second "," third "," xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"} +(1 row) + +SELECT parse_ident(' first . " second " ." third ". " ' || repeat('x',66) || '"')::name[]; + parse_ident +------------------------------------------------------------------------------------------------------ + {first," second "," third "," xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"} +(1 row) + +SELECT parse_ident(E'"c".X XXXX\002XXXXXX'); +ERROR: string is not a valid identifier: ""c".X XXXXXXXXXX" +SELECT parse_ident('1020'); +ERROR: string is not a valid identifier: "1020" +SELECT parse_ident('10.20'); +ERROR: string is not a valid identifier: "10.20" +SELECT parse_ident('.'); +ERROR: string is not a valid identifier: "." +DETAIL: No valid identifier before ".". +SELECT parse_ident('.1020'); +ERROR: string is not a valid identifier: ".1020" +DETAIL: No valid identifier before ".". +SELECT parse_ident('xxx.1020'); +ERROR: string is not a valid identifier: "xxx.1020" +DETAIL: No valid identifier after ".". 
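-- A minimal illustrative sketch (not part of the upstream expected output):
-- the NAMEDATALEN-1 truncation described at the top of this file can be
-- reproduced directly.  The 65-character literal below is silently cut to
-- 63 characters when cast to name, so the query is expected to return 63.
SELECT length(repeat('x', 65)::name::text);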
diff --git a/src/test/regress/expected/namespace.out b/src/test/regress/expected/namespace.out new file mode 100644 index 0000000..a62fd8d --- /dev/null +++ b/src/test/regress/expected/namespace.out @@ -0,0 +1,116 @@ +-- +-- Regression tests for schemas (namespaces) +-- +-- set the whitespace-only search_path to test that the +-- GUC list syntax is preserved during a schema creation +SELECT pg_catalog.set_config('search_path', ' ', false); + set_config +------------ + +(1 row) + +CREATE SCHEMA test_ns_schema_1 + CREATE UNIQUE INDEX abc_a_idx ON abc (a) + CREATE VIEW abc_view AS + SELECT a+1 AS a, b+1 AS b FROM abc + CREATE TABLE abc ( + a serial, + b int UNIQUE + ); +-- verify that the correct search_path restored on abort +SET search_path to public; +BEGIN; +SET search_path to public, test_ns_schema_1; +CREATE SCHEMA test_ns_schema_2 + CREATE VIEW abc_view AS SELECT c FROM abc; +ERROR: column "c" does not exist +LINE 2: CREATE VIEW abc_view AS SELECT c FROM abc; + ^ +COMMIT; +SHOW search_path; + search_path +------------- + public +(1 row) + +-- verify that the correct search_path preserved +-- after creating the schema and on commit +BEGIN; +SET search_path to public, test_ns_schema_1; +CREATE SCHEMA test_ns_schema_2 + CREATE VIEW abc_view AS SELECT a FROM abc; +SHOW search_path; + search_path +-------------------------- + public, test_ns_schema_1 +(1 row) + +COMMIT; +SHOW search_path; + search_path +-------------------------- + public, test_ns_schema_1 +(1 row) + +DROP SCHEMA test_ns_schema_2 CASCADE; +NOTICE: drop cascades to view test_ns_schema_2.abc_view +-- verify that the objects were created +SELECT COUNT(*) FROM pg_class WHERE relnamespace = + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_1'); + count +------- + 5 +(1 row) + +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +INSERT INTO test_ns_schema_1.abc DEFAULT VALUES; +SELECT * FROM test_ns_schema_1.abc; + a | b +---+--- + 1 | + 2 | + 3 | +(3 rows) + +SELECT * FROM test_ns_schema_1.abc_view; + a | b +---+--- + 2 | + 3 | + 4 | +(3 rows) + +ALTER SCHEMA test_ns_schema_1 RENAME TO test_ns_schema_renamed; +SELECT COUNT(*) FROM pg_class WHERE relnamespace = + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_1'); + count +------- + 0 +(1 row) + +-- test IF NOT EXISTS cases +CREATE SCHEMA test_ns_schema_renamed; -- fail, already exists +ERROR: schema "test_ns_schema_renamed" already exists +CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed; -- ok with notice +NOTICE: schema "test_ns_schema_renamed" already exists, skipping +CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed -- fail, disallowed + CREATE TABLE abc ( + a serial, + b int UNIQUE + ); +ERROR: CREATE SCHEMA IF NOT EXISTS cannot include schema elements +LINE 2: CREATE TABLE abc ( + ^ +DROP SCHEMA test_ns_schema_renamed CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table test_ns_schema_renamed.abc +drop cascades to view test_ns_schema_renamed.abc_view +-- verify that the objects were dropped +SELECT COUNT(*) FROM pg_class WHERE relnamespace = + (SELECT oid FROM pg_namespace WHERE nspname = 'test_ns_schema_renamed'); + count +------- + 0 +(1 row) + diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out new file mode 100644 index 0000000..72f03c8 --- /dev/null +++ b/src/test/regress/expected/numeric.out @@ -0,0 +1,3600 @@ +-- +-- NUMERIC +-- +CREATE TABLE num_data (id int4, val numeric(210,10)); +CREATE TABLE num_exp_add (id1 int4, 
id2 int4, expected numeric(210,10)); +CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(210,10)); +CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(210,10)); +CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(210,10)); +CREATE TABLE num_exp_sqrt (id int4, expected numeric(210,10)); +CREATE TABLE num_exp_ln (id int4, expected numeric(210,10)); +CREATE TABLE num_exp_log10 (id int4, expected numeric(210,10)); +CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(210,10)); +CREATE TABLE num_result (id1 int4, id2 int4, result numeric(210,10)); +-- ****************************** +-- * The following EXPECTED results are computed by bc(1) +-- * with a scale of 200 +-- ****************************** +BEGIN TRANSACTION; +INSERT INTO num_exp_add VALUES (0,0,'0'); +INSERT INTO num_exp_sub VALUES (0,0,'0'); +INSERT INTO num_exp_mul VALUES (0,0,'0'); +INSERT INTO num_exp_div VALUES (0,0,'NaN'); +INSERT INTO num_exp_add VALUES (0,1,'0'); +INSERT INTO num_exp_sub VALUES (0,1,'0'); +INSERT INTO num_exp_mul VALUES (0,1,'0'); +INSERT INTO num_exp_div VALUES (0,1,'NaN'); +INSERT INTO num_exp_add VALUES (0,2,'-34338492.215397047'); +INSERT INTO num_exp_sub VALUES (0,2,'34338492.215397047'); +INSERT INTO num_exp_mul VALUES (0,2,'0'); +INSERT INTO num_exp_div VALUES (0,2,'0'); +INSERT INTO num_exp_add VALUES (0,3,'4.31'); +INSERT INTO num_exp_sub VALUES (0,3,'-4.31'); +INSERT INTO num_exp_mul VALUES (0,3,'0'); +INSERT INTO num_exp_div VALUES (0,3,'0'); +INSERT INTO num_exp_add VALUES (0,4,'7799461.4119'); +INSERT INTO num_exp_sub VALUES (0,4,'-7799461.4119'); +INSERT INTO num_exp_mul VALUES (0,4,'0'); +INSERT INTO num_exp_div VALUES (0,4,'0'); +INSERT INTO num_exp_add VALUES (0,5,'16397.038491'); +INSERT INTO num_exp_sub VALUES (0,5,'-16397.038491'); +INSERT INTO num_exp_mul VALUES (0,5,'0'); +INSERT INTO num_exp_div VALUES (0,5,'0'); +INSERT INTO num_exp_add VALUES (0,6,'93901.57763026'); +INSERT INTO num_exp_sub VALUES (0,6,'-93901.57763026'); +INSERT INTO num_exp_mul VALUES (0,6,'0'); +INSERT INTO num_exp_div VALUES (0,6,'0'); +INSERT INTO num_exp_add VALUES (0,7,'-83028485'); +INSERT INTO num_exp_sub VALUES (0,7,'83028485'); +INSERT INTO num_exp_mul VALUES (0,7,'0'); +INSERT INTO num_exp_div VALUES (0,7,'0'); +INSERT INTO num_exp_add VALUES (0,8,'74881'); +INSERT INTO num_exp_sub VALUES (0,8,'-74881'); +INSERT INTO num_exp_mul VALUES (0,8,'0'); +INSERT INTO num_exp_div VALUES (0,8,'0'); +INSERT INTO num_exp_add VALUES (0,9,'-24926804.045047420'); +INSERT INTO num_exp_sub VALUES (0,9,'24926804.045047420'); +INSERT INTO num_exp_mul VALUES (0,9,'0'); +INSERT INTO num_exp_div VALUES (0,9,'0'); +INSERT INTO num_exp_add VALUES (1,0,'0'); +INSERT INTO num_exp_sub VALUES (1,0,'0'); +INSERT INTO num_exp_mul VALUES (1,0,'0'); +INSERT INTO num_exp_div VALUES (1,0,'NaN'); +INSERT INTO num_exp_add VALUES (1,1,'0'); +INSERT INTO num_exp_sub VALUES (1,1,'0'); +INSERT INTO num_exp_mul VALUES (1,1,'0'); +INSERT INTO num_exp_div VALUES (1,1,'NaN'); +INSERT INTO num_exp_add VALUES (1,2,'-34338492.215397047'); +INSERT INTO num_exp_sub VALUES (1,2,'34338492.215397047'); +INSERT INTO num_exp_mul VALUES (1,2,'0'); +INSERT INTO num_exp_div VALUES (1,2,'0'); +INSERT INTO num_exp_add VALUES (1,3,'4.31'); +INSERT INTO num_exp_sub VALUES (1,3,'-4.31'); +INSERT INTO num_exp_mul VALUES (1,3,'0'); +INSERT INTO num_exp_div VALUES (1,3,'0'); +INSERT INTO num_exp_add VALUES (1,4,'7799461.4119'); +INSERT INTO num_exp_sub VALUES (1,4,'-7799461.4119'); +INSERT INTO num_exp_mul VALUES (1,4,'0'); 
+INSERT INTO num_exp_div VALUES (1,4,'0'); +INSERT INTO num_exp_add VALUES (1,5,'16397.038491'); +INSERT INTO num_exp_sub VALUES (1,5,'-16397.038491'); +INSERT INTO num_exp_mul VALUES (1,5,'0'); +INSERT INTO num_exp_div VALUES (1,5,'0'); +INSERT INTO num_exp_add VALUES (1,6,'93901.57763026'); +INSERT INTO num_exp_sub VALUES (1,6,'-93901.57763026'); +INSERT INTO num_exp_mul VALUES (1,6,'0'); +INSERT INTO num_exp_div VALUES (1,6,'0'); +INSERT INTO num_exp_add VALUES (1,7,'-83028485'); +INSERT INTO num_exp_sub VALUES (1,7,'83028485'); +INSERT INTO num_exp_mul VALUES (1,7,'0'); +INSERT INTO num_exp_div VALUES (1,7,'0'); +INSERT INTO num_exp_add VALUES (1,8,'74881'); +INSERT INTO num_exp_sub VALUES (1,8,'-74881'); +INSERT INTO num_exp_mul VALUES (1,8,'0'); +INSERT INTO num_exp_div VALUES (1,8,'0'); +INSERT INTO num_exp_add VALUES (1,9,'-24926804.045047420'); +INSERT INTO num_exp_sub VALUES (1,9,'24926804.045047420'); +INSERT INTO num_exp_mul VALUES (1,9,'0'); +INSERT INTO num_exp_div VALUES (1,9,'0'); +INSERT INTO num_exp_add VALUES (2,0,'-34338492.215397047'); +INSERT INTO num_exp_sub VALUES (2,0,'-34338492.215397047'); +INSERT INTO num_exp_mul VALUES (2,0,'0'); +INSERT INTO num_exp_div VALUES (2,0,'NaN'); +INSERT INTO num_exp_add VALUES (2,1,'-34338492.215397047'); +INSERT INTO num_exp_sub VALUES (2,1,'-34338492.215397047'); +INSERT INTO num_exp_mul VALUES (2,1,'0'); +INSERT INTO num_exp_div VALUES (2,1,'NaN'); +INSERT INTO num_exp_add VALUES (2,2,'-68676984.430794094'); +INSERT INTO num_exp_sub VALUES (2,2,'0'); +INSERT INTO num_exp_mul VALUES (2,2,'1179132047626883.596862135856320209'); +INSERT INTO num_exp_div VALUES (2,2,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (2,3,'-34338487.905397047'); +INSERT INTO num_exp_sub VALUES (2,3,'-34338496.525397047'); +INSERT INTO num_exp_mul VALUES (2,3,'-147998901.44836127257'); +INSERT INTO num_exp_div VALUES (2,3,'-7967167.56737750510440835266'); +INSERT INTO num_exp_add VALUES (2,4,'-26539030.803497047'); +INSERT INTO num_exp_sub VALUES (2,4,'-42137953.627297047'); +INSERT INTO num_exp_mul VALUES (2,4,'-267821744976817.8111137106593'); +INSERT INTO num_exp_div VALUES (2,4,'-4.40267480046830116685'); +INSERT INTO num_exp_add VALUES (2,5,'-34322095.176906047'); +INSERT INTO num_exp_sub VALUES (2,5,'-34354889.253888047'); +INSERT INTO num_exp_mul VALUES (2,5,'-563049578578.769242506736077'); +INSERT INTO num_exp_div VALUES (2,5,'-2094.18866914563535496429'); +INSERT INTO num_exp_add VALUES (2,6,'-34244590.637766787'); +INSERT INTO num_exp_sub VALUES (2,6,'-34432393.793027307'); +INSERT INTO num_exp_mul VALUES (2,6,'-3224438592470.18449811926184222'); +INSERT INTO num_exp_div VALUES (2,6,'-365.68599891479766440940'); +INSERT INTO num_exp_add VALUES (2,7,'-117366977.215397047'); +INSERT INTO num_exp_sub VALUES (2,7,'48689992.784602953'); +INSERT INTO num_exp_mul VALUES (2,7,'2851072985828710.485883795'); +INSERT INTO num_exp_div VALUES (2,7,'.41357483778485235518'); +INSERT INTO num_exp_add VALUES (2,8,'-34263611.215397047'); +INSERT INTO num_exp_sub VALUES (2,8,'-34413373.215397047'); +INSERT INTO num_exp_mul VALUES (2,8,'-2571300635581.146276407'); +INSERT INTO num_exp_div VALUES (2,8,'-458.57416721727870888476'); +INSERT INTO num_exp_add VALUES (2,9,'-59265296.260444467'); +INSERT INTO num_exp_sub VALUES (2,9,'-9411688.170349627'); +INSERT INTO num_exp_mul VALUES (2,9,'855948866655588.453741509242968740'); +INSERT INTO num_exp_div VALUES (2,9,'1.37757299946438931811'); +INSERT INTO num_exp_add VALUES (3,0,'4.31'); +INSERT INTO 
num_exp_sub VALUES (3,0,'4.31'); +INSERT INTO num_exp_mul VALUES (3,0,'0'); +INSERT INTO num_exp_div VALUES (3,0,'NaN'); +INSERT INTO num_exp_add VALUES (3,1,'4.31'); +INSERT INTO num_exp_sub VALUES (3,1,'4.31'); +INSERT INTO num_exp_mul VALUES (3,1,'0'); +INSERT INTO num_exp_div VALUES (3,1,'NaN'); +INSERT INTO num_exp_add VALUES (3,2,'-34338487.905397047'); +INSERT INTO num_exp_sub VALUES (3,2,'34338496.525397047'); +INSERT INTO num_exp_mul VALUES (3,2,'-147998901.44836127257'); +INSERT INTO num_exp_div VALUES (3,2,'-.00000012551512084352'); +INSERT INTO num_exp_add VALUES (3,3,'8.62'); +INSERT INTO num_exp_sub VALUES (3,3,'0'); +INSERT INTO num_exp_mul VALUES (3,3,'18.5761'); +INSERT INTO num_exp_div VALUES (3,3,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (3,4,'7799465.7219'); +INSERT INTO num_exp_sub VALUES (3,4,'-7799457.1019'); +INSERT INTO num_exp_mul VALUES (3,4,'33615678.685289'); +INSERT INTO num_exp_div VALUES (3,4,'.00000055260225961552'); +INSERT INTO num_exp_add VALUES (3,5,'16401.348491'); +INSERT INTO num_exp_sub VALUES (3,5,'-16392.728491'); +INSERT INTO num_exp_mul VALUES (3,5,'70671.23589621'); +INSERT INTO num_exp_div VALUES (3,5,'.00026285234387695504'); +INSERT INTO num_exp_add VALUES (3,6,'93905.88763026'); +INSERT INTO num_exp_sub VALUES (3,6,'-93897.26763026'); +INSERT INTO num_exp_mul VALUES (3,6,'404715.7995864206'); +INSERT INTO num_exp_div VALUES (3,6,'.00004589912234457595'); +INSERT INTO num_exp_add VALUES (3,7,'-83028480.69'); +INSERT INTO num_exp_sub VALUES (3,7,'83028489.31'); +INSERT INTO num_exp_mul VALUES (3,7,'-357852770.35'); +INSERT INTO num_exp_div VALUES (3,7,'-.00000005190989574240'); +INSERT INTO num_exp_add VALUES (3,8,'74885.31'); +INSERT INTO num_exp_sub VALUES (3,8,'-74876.69'); +INSERT INTO num_exp_mul VALUES (3,8,'322737.11'); +INSERT INTO num_exp_div VALUES (3,8,'.00005755799201399553'); +INSERT INTO num_exp_add VALUES (3,9,'-24926799.735047420'); +INSERT INTO num_exp_sub VALUES (3,9,'24926808.355047420'); +INSERT INTO num_exp_mul VALUES (3,9,'-107434525.43415438020'); +INSERT INTO num_exp_div VALUES (3,9,'-.00000017290624149854'); +INSERT INTO num_exp_add VALUES (4,0,'7799461.4119'); +INSERT INTO num_exp_sub VALUES (4,0,'7799461.4119'); +INSERT INTO num_exp_mul VALUES (4,0,'0'); +INSERT INTO num_exp_div VALUES (4,0,'NaN'); +INSERT INTO num_exp_add VALUES (4,1,'7799461.4119'); +INSERT INTO num_exp_sub VALUES (4,1,'7799461.4119'); +INSERT INTO num_exp_mul VALUES (4,1,'0'); +INSERT INTO num_exp_div VALUES (4,1,'NaN'); +INSERT INTO num_exp_add VALUES (4,2,'-26539030.803497047'); +INSERT INTO num_exp_sub VALUES (4,2,'42137953.627297047'); +INSERT INTO num_exp_mul VALUES (4,2,'-267821744976817.8111137106593'); +INSERT INTO num_exp_div VALUES (4,2,'-.22713465002993920385'); +INSERT INTO num_exp_add VALUES (4,3,'7799465.7219'); +INSERT INTO num_exp_sub VALUES (4,3,'7799457.1019'); +INSERT INTO num_exp_mul VALUES (4,3,'33615678.685289'); +INSERT INTO num_exp_div VALUES (4,3,'1809619.81714617169373549883'); +INSERT INTO num_exp_add VALUES (4,4,'15598922.8238'); +INSERT INTO num_exp_sub VALUES (4,4,'0'); +INSERT INTO num_exp_mul VALUES (4,4,'60831598315717.14146161'); +INSERT INTO num_exp_div VALUES (4,4,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (4,5,'7815858.450391'); +INSERT INTO num_exp_sub VALUES (4,5,'7783064.373409'); +INSERT INTO num_exp_mul VALUES (4,5,'127888068979.9935054429'); +INSERT INTO num_exp_div VALUES (4,5,'475.66281046305802686061'); +INSERT INTO num_exp_add VALUES (4,6,'7893362.98953026'); +INSERT 
INTO num_exp_sub VALUES (4,6,'7705559.83426974'); +INSERT INTO num_exp_mul VALUES (4,6,'732381731243.745115764094'); +INSERT INTO num_exp_div VALUES (4,6,'83.05996138436129499606'); +INSERT INTO num_exp_add VALUES (4,7,'-75229023.5881'); +INSERT INTO num_exp_sub VALUES (4,7,'90827946.4119'); +INSERT INTO num_exp_mul VALUES (4,7,'-647577464846017.9715'); +INSERT INTO num_exp_div VALUES (4,7,'-.09393717604145131637'); +INSERT INTO num_exp_add VALUES (4,8,'7874342.4119'); +INSERT INTO num_exp_sub VALUES (4,8,'7724580.4119'); +INSERT INTO num_exp_mul VALUES (4,8,'584031469984.4839'); +INSERT INTO num_exp_div VALUES (4,8,'104.15808298366741897143'); +INSERT INTO num_exp_add VALUES (4,9,'-17127342.633147420'); +INSERT INTO num_exp_sub VALUES (4,9,'32726265.456947420'); +INSERT INTO num_exp_mul VALUES (4,9,'-194415646271340.1815956522980'); +INSERT INTO num_exp_div VALUES (4,9,'-.31289456112403769409'); +INSERT INTO num_exp_add VALUES (5,0,'16397.038491'); +INSERT INTO num_exp_sub VALUES (5,0,'16397.038491'); +INSERT INTO num_exp_mul VALUES (5,0,'0'); +INSERT INTO num_exp_div VALUES (5,0,'NaN'); +INSERT INTO num_exp_add VALUES (5,1,'16397.038491'); +INSERT INTO num_exp_sub VALUES (5,1,'16397.038491'); +INSERT INTO num_exp_mul VALUES (5,1,'0'); +INSERT INTO num_exp_div VALUES (5,1,'NaN'); +INSERT INTO num_exp_add VALUES (5,2,'-34322095.176906047'); +INSERT INTO num_exp_sub VALUES (5,2,'34354889.253888047'); +INSERT INTO num_exp_mul VALUES (5,2,'-563049578578.769242506736077'); +INSERT INTO num_exp_div VALUES (5,2,'-.00047751189505192446'); +INSERT INTO num_exp_add VALUES (5,3,'16401.348491'); +INSERT INTO num_exp_sub VALUES (5,3,'16392.728491'); +INSERT INTO num_exp_mul VALUES (5,3,'70671.23589621'); +INSERT INTO num_exp_div VALUES (5,3,'3804.41728329466357308584'); +INSERT INTO num_exp_add VALUES (5,4,'7815858.450391'); +INSERT INTO num_exp_sub VALUES (5,4,'-7783064.373409'); +INSERT INTO num_exp_mul VALUES (5,4,'127888068979.9935054429'); +INSERT INTO num_exp_div VALUES (5,4,'.00210232958726897192'); +INSERT INTO num_exp_add VALUES (5,5,'32794.076982'); +INSERT INTO num_exp_sub VALUES (5,5,'0'); +INSERT INTO num_exp_mul VALUES (5,5,'268862871.275335557081'); +INSERT INTO num_exp_div VALUES (5,5,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (5,6,'110298.61612126'); +INSERT INTO num_exp_sub VALUES (5,6,'-77504.53913926'); +INSERT INTO num_exp_mul VALUES (5,6,'1539707782.76899778633766'); +INSERT INTO num_exp_div VALUES (5,6,'.17461941433576102689'); +INSERT INTO num_exp_add VALUES (5,7,'-83012087.961509'); +INSERT INTO num_exp_sub VALUES (5,7,'83044882.038491'); +INSERT INTO num_exp_mul VALUES (5,7,'-1361421264394.416135'); +INSERT INTO num_exp_div VALUES (5,7,'-.00019748690453643710'); +INSERT INTO num_exp_add VALUES (5,8,'91278.038491'); +INSERT INTO num_exp_sub VALUES (5,8,'-58483.961509'); +INSERT INTO num_exp_mul VALUES (5,8,'1227826639.244571'); +INSERT INTO num_exp_div VALUES (5,8,'.21897461960978085228'); +INSERT INTO num_exp_add VALUES (5,9,'-24910407.006556420'); +INSERT INTO num_exp_sub VALUES (5,9,'24943201.083538420'); +INSERT INTO num_exp_mul VALUES (5,9,'-408725765384.257043660243220'); +INSERT INTO num_exp_div VALUES (5,9,'-.00065780749354660427'); +INSERT INTO num_exp_add VALUES (6,0,'93901.57763026'); +INSERT INTO num_exp_sub VALUES (6,0,'93901.57763026'); +INSERT INTO num_exp_mul VALUES (6,0,'0'); +INSERT INTO num_exp_div VALUES (6,0,'NaN'); +INSERT INTO num_exp_add VALUES (6,1,'93901.57763026'); +INSERT INTO num_exp_sub VALUES (6,1,'93901.57763026'); +INSERT INTO 
num_exp_mul VALUES (6,1,'0'); +INSERT INTO num_exp_div VALUES (6,1,'NaN'); +INSERT INTO num_exp_add VALUES (6,2,'-34244590.637766787'); +INSERT INTO num_exp_sub VALUES (6,2,'34432393.793027307'); +INSERT INTO num_exp_mul VALUES (6,2,'-3224438592470.18449811926184222'); +INSERT INTO num_exp_div VALUES (6,2,'-.00273458651128995823'); +INSERT INTO num_exp_add VALUES (6,3,'93905.88763026'); +INSERT INTO num_exp_sub VALUES (6,3,'93897.26763026'); +INSERT INTO num_exp_mul VALUES (6,3,'404715.7995864206'); +INSERT INTO num_exp_div VALUES (6,3,'21786.90896293735498839907'); +INSERT INTO num_exp_add VALUES (6,4,'7893362.98953026'); +INSERT INTO num_exp_sub VALUES (6,4,'-7705559.83426974'); +INSERT INTO num_exp_mul VALUES (6,4,'732381731243.745115764094'); +INSERT INTO num_exp_div VALUES (6,4,'.01203949512295682469'); +INSERT INTO num_exp_add VALUES (6,5,'110298.61612126'); +INSERT INTO num_exp_sub VALUES (6,5,'77504.53913926'); +INSERT INTO num_exp_mul VALUES (6,5,'1539707782.76899778633766'); +INSERT INTO num_exp_div VALUES (6,5,'5.72674008674192359679'); +INSERT INTO num_exp_add VALUES (6,6,'187803.15526052'); +INSERT INTO num_exp_sub VALUES (6,6,'0'); +INSERT INTO num_exp_mul VALUES (6,6,'8817506281.4517452372676676'); +INSERT INTO num_exp_div VALUES (6,6,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (6,7,'-82934583.42236974'); +INSERT INTO num_exp_sub VALUES (6,7,'83122386.57763026'); +INSERT INTO num_exp_mul VALUES (6,7,'-7796505729750.37795610'); +INSERT INTO num_exp_div VALUES (6,7,'-.00113095617281538980'); +INSERT INTO num_exp_add VALUES (6,8,'168782.57763026'); +INSERT INTO num_exp_sub VALUES (6,8,'19020.57763026'); +INSERT INTO num_exp_mul VALUES (6,8,'7031444034.53149906'); +INSERT INTO num_exp_div VALUES (6,8,'1.25401073209839612184'); +INSERT INTO num_exp_add VALUES (6,9,'-24832902.467417160'); +INSERT INTO num_exp_sub VALUES (6,9,'25020705.622677680'); +INSERT INTO num_exp_mul VALUES (6,9,'-2340666225110.29929521292692920'); +INSERT INTO num_exp_div VALUES (6,9,'-.00376709254265256789'); +INSERT INTO num_exp_add VALUES (7,0,'-83028485'); +INSERT INTO num_exp_sub VALUES (7,0,'-83028485'); +INSERT INTO num_exp_mul VALUES (7,0,'0'); +INSERT INTO num_exp_div VALUES (7,0,'NaN'); +INSERT INTO num_exp_add VALUES (7,1,'-83028485'); +INSERT INTO num_exp_sub VALUES (7,1,'-83028485'); +INSERT INTO num_exp_mul VALUES (7,1,'0'); +INSERT INTO num_exp_div VALUES (7,1,'NaN'); +INSERT INTO num_exp_add VALUES (7,2,'-117366977.215397047'); +INSERT INTO num_exp_sub VALUES (7,2,'-48689992.784602953'); +INSERT INTO num_exp_mul VALUES (7,2,'2851072985828710.485883795'); +INSERT INTO num_exp_div VALUES (7,2,'2.41794207151503385700'); +INSERT INTO num_exp_add VALUES (7,3,'-83028480.69'); +INSERT INTO num_exp_sub VALUES (7,3,'-83028489.31'); +INSERT INTO num_exp_mul VALUES (7,3,'-357852770.35'); +INSERT INTO num_exp_div VALUES (7,3,'-19264149.65197215777262180974'); +INSERT INTO num_exp_add VALUES (7,4,'-75229023.5881'); +INSERT INTO num_exp_sub VALUES (7,4,'-90827946.4119'); +INSERT INTO num_exp_mul VALUES (7,4,'-647577464846017.9715'); +INSERT INTO num_exp_div VALUES (7,4,'-10.64541262725136247686'); +INSERT INTO num_exp_add VALUES (7,5,'-83012087.961509'); +INSERT INTO num_exp_sub VALUES (7,5,'-83044882.038491'); +INSERT INTO num_exp_mul VALUES (7,5,'-1361421264394.416135'); +INSERT INTO num_exp_div VALUES (7,5,'-5063.62688881730941836574'); +INSERT INTO num_exp_add VALUES (7,6,'-82934583.42236974'); +INSERT INTO num_exp_sub VALUES (7,6,'-83122386.57763026'); +INSERT INTO num_exp_mul VALUES 
(7,6,'-7796505729750.37795610'); +INSERT INTO num_exp_div VALUES (7,6,'-884.20756174009028770294'); +INSERT INTO num_exp_add VALUES (7,7,'-166056970'); +INSERT INTO num_exp_sub VALUES (7,7,'0'); +INSERT INTO num_exp_mul VALUES (7,7,'6893729321395225'); +INSERT INTO num_exp_div VALUES (7,7,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (7,8,'-82953604'); +INSERT INTO num_exp_sub VALUES (7,8,'-83103366'); +INSERT INTO num_exp_mul VALUES (7,8,'-6217255985285'); +INSERT INTO num_exp_div VALUES (7,8,'-1108.80577182462841041118'); +INSERT INTO num_exp_add VALUES (7,9,'-107955289.045047420'); +INSERT INTO num_exp_sub VALUES (7,9,'-58101680.954952580'); +INSERT INTO num_exp_mul VALUES (7,9,'2069634775752159.035758700'); +INSERT INTO num_exp_div VALUES (7,9,'3.33089171198810413382'); +INSERT INTO num_exp_add VALUES (8,0,'74881'); +INSERT INTO num_exp_sub VALUES (8,0,'74881'); +INSERT INTO num_exp_mul VALUES (8,0,'0'); +INSERT INTO num_exp_div VALUES (8,0,'NaN'); +INSERT INTO num_exp_add VALUES (8,1,'74881'); +INSERT INTO num_exp_sub VALUES (8,1,'74881'); +INSERT INTO num_exp_mul VALUES (8,1,'0'); +INSERT INTO num_exp_div VALUES (8,1,'NaN'); +INSERT INTO num_exp_add VALUES (8,2,'-34263611.215397047'); +INSERT INTO num_exp_sub VALUES (8,2,'34413373.215397047'); +INSERT INTO num_exp_mul VALUES (8,2,'-2571300635581.146276407'); +INSERT INTO num_exp_div VALUES (8,2,'-.00218067233500788615'); +INSERT INTO num_exp_add VALUES (8,3,'74885.31'); +INSERT INTO num_exp_sub VALUES (8,3,'74876.69'); +INSERT INTO num_exp_mul VALUES (8,3,'322737.11'); +INSERT INTO num_exp_div VALUES (8,3,'17373.78190255220417633410'); +INSERT INTO num_exp_add VALUES (8,4,'7874342.4119'); +INSERT INTO num_exp_sub VALUES (8,4,'-7724580.4119'); +INSERT INTO num_exp_mul VALUES (8,4,'584031469984.4839'); +INSERT INTO num_exp_div VALUES (8,4,'.00960079113741758956'); +INSERT INTO num_exp_add VALUES (8,5,'91278.038491'); +INSERT INTO num_exp_sub VALUES (8,5,'58483.961509'); +INSERT INTO num_exp_mul VALUES (8,5,'1227826639.244571'); +INSERT INTO num_exp_div VALUES (8,5,'4.56673929509287019456'); +INSERT INTO num_exp_add VALUES (8,6,'168782.57763026'); +INSERT INTO num_exp_sub VALUES (8,6,'-19020.57763026'); +INSERT INTO num_exp_mul VALUES (8,6,'7031444034.53149906'); +INSERT INTO num_exp_div VALUES (8,6,'.79744134113322314424'); +INSERT INTO num_exp_add VALUES (8,7,'-82953604'); +INSERT INTO num_exp_sub VALUES (8,7,'83103366'); +INSERT INTO num_exp_mul VALUES (8,7,'-6217255985285'); +INSERT INTO num_exp_div VALUES (8,7,'-.00090187120721280172'); +INSERT INTO num_exp_add VALUES (8,8,'149762'); +INSERT INTO num_exp_sub VALUES (8,8,'0'); +INSERT INTO num_exp_mul VALUES (8,8,'5607164161'); +INSERT INTO num_exp_div VALUES (8,8,'1.00000000000000000000'); +INSERT INTO num_exp_add VALUES (8,9,'-24851923.045047420'); +INSERT INTO num_exp_sub VALUES (8,9,'25001685.045047420'); +INSERT INTO num_exp_mul VALUES (8,9,'-1866544013697.195857020'); +INSERT INTO num_exp_div VALUES (8,9,'-.00300403532938582735'); +INSERT INTO num_exp_add VALUES (9,0,'-24926804.045047420'); +INSERT INTO num_exp_sub VALUES (9,0,'-24926804.045047420'); +INSERT INTO num_exp_mul VALUES (9,0,'0'); +INSERT INTO num_exp_div VALUES (9,0,'NaN'); +INSERT INTO num_exp_add VALUES (9,1,'-24926804.045047420'); +INSERT INTO num_exp_sub VALUES (9,1,'-24926804.045047420'); +INSERT INTO num_exp_mul VALUES (9,1,'0'); +INSERT INTO num_exp_div VALUES (9,1,'NaN'); +INSERT INTO num_exp_add VALUES (9,2,'-59265296.260444467'); +INSERT INTO num_exp_sub VALUES (9,2,'9411688.170349627'); 
+INSERT INTO num_exp_mul VALUES (9,2,'855948866655588.453741509242968740'); +INSERT INTO num_exp_div VALUES (9,2,'.72591434384152961526'); +INSERT INTO num_exp_add VALUES (9,3,'-24926799.735047420'); +INSERT INTO num_exp_sub VALUES (9,3,'-24926808.355047420'); +INSERT INTO num_exp_mul VALUES (9,3,'-107434525.43415438020'); +INSERT INTO num_exp_div VALUES (9,3,'-5783481.21694835730858468677'); +INSERT INTO num_exp_add VALUES (9,4,'-17127342.633147420'); +INSERT INTO num_exp_sub VALUES (9,4,'-32726265.456947420'); +INSERT INTO num_exp_mul VALUES (9,4,'-194415646271340.1815956522980'); +INSERT INTO num_exp_div VALUES (9,4,'-3.19596478892958416484'); +INSERT INTO num_exp_add VALUES (9,5,'-24910407.006556420'); +INSERT INTO num_exp_sub VALUES (9,5,'-24943201.083538420'); +INSERT INTO num_exp_mul VALUES (9,5,'-408725765384.257043660243220'); +INSERT INTO num_exp_div VALUES (9,5,'-1520.20159364322004505807'); +INSERT INTO num_exp_add VALUES (9,6,'-24832902.467417160'); +INSERT INTO num_exp_sub VALUES (9,6,'-25020705.622677680'); +INSERT INTO num_exp_mul VALUES (9,6,'-2340666225110.29929521292692920'); +INSERT INTO num_exp_div VALUES (9,6,'-265.45671195426965751280'); +INSERT INTO num_exp_add VALUES (9,7,'-107955289.045047420'); +INSERT INTO num_exp_sub VALUES (9,7,'58101680.954952580'); +INSERT INTO num_exp_mul VALUES (9,7,'2069634775752159.035758700'); +INSERT INTO num_exp_div VALUES (9,7,'.30021990699995814689'); +INSERT INTO num_exp_add VALUES (9,8,'-24851923.045047420'); +INSERT INTO num_exp_sub VALUES (9,8,'-25001685.045047420'); +INSERT INTO num_exp_mul VALUES (9,8,'-1866544013697.195857020'); +INSERT INTO num_exp_div VALUES (9,8,'-332.88556569820675471748'); +INSERT INTO num_exp_add VALUES (9,9,'-49853608.090094840'); +INSERT INTO num_exp_sub VALUES (9,9,'0'); +INSERT INTO num_exp_mul VALUES (9,9,'621345559900192.420120630048656400'); +INSERT INTO num_exp_div VALUES (9,9,'1.00000000000000000000'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_sqrt VALUES (0,'0'); +INSERT INTO num_exp_sqrt VALUES (1,'0'); +INSERT INTO num_exp_sqrt VALUES (2,'5859.90547836712524903505'); +INSERT INTO num_exp_sqrt VALUES (3,'2.07605394920266944396'); +INSERT INTO num_exp_sqrt VALUES (4,'2792.75158435189147418923'); +INSERT INTO num_exp_sqrt VALUES (5,'128.05092147657509145473'); +INSERT INTO num_exp_sqrt VALUES (6,'306.43364311096782703406'); +INSERT INTO num_exp_sqrt VALUES (7,'9111.99676251039939975230'); +INSERT INTO num_exp_sqrt VALUES (8,'273.64392922189960397542'); +INSERT INTO num_exp_sqrt VALUES (9,'4992.67503899937593364766'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_ln VALUES (0,'NaN'); +INSERT INTO num_exp_ln VALUES (1,'NaN'); +INSERT INTO num_exp_ln VALUES (2,'17.35177750493897715514'); +INSERT INTO num_exp_ln VALUES (3,'1.46093790411565641971'); +INSERT INTO num_exp_ln VALUES (4,'15.86956523951936572464'); +INSERT INTO num_exp_ln VALUES (5,'9.70485601768871834038'); +INSERT INTO num_exp_ln VALUES (6,'11.45000246622944403127'); +INSERT INTO num_exp_ln VALUES (7,'18.23469429965478772991'); +INSERT INTO num_exp_ln VALUES (8,'11.22365546576315513668'); +INSERT INTO num_exp_ln VALUES (9,'17.03145425013166006962'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_log10 VALUES (0,'NaN'); +INSERT INTO num_exp_log10 VALUES (1,'NaN'); +INSERT INTO num_exp_log10 VALUES (2,'7.53578122160797276459'); +INSERT INTO num_exp_log10 VALUES (3,'.63447727016073160075'); +INSERT INTO num_exp_log10 VALUES (4,'6.89206461372691743345'); +INSERT INTO num_exp_log10 VALUES 
(5,'4.21476541614777768626'); +INSERT INTO num_exp_log10 VALUES (6,'4.97267288886207207671'); +INSERT INTO num_exp_log10 VALUES (7,'7.91922711353275546914'); +INSERT INTO num_exp_log10 VALUES (8,'4.87437163556421004138'); +INSERT INTO num_exp_log10 VALUES (9,'7.39666659961986567059'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_power_10_ln VALUES (0,'NaN'); +INSERT INTO num_exp_power_10_ln VALUES (1,'NaN'); +INSERT INTO num_exp_power_10_ln VALUES (2,'224790267919917955.13261618583642653184'); +INSERT INTO num_exp_power_10_ln VALUES (3,'28.90266599445155957393'); +INSERT INTO num_exp_power_10_ln VALUES (4,'7405685069594999.07733999469386277636'); +INSERT INTO num_exp_power_10_ln VALUES (5,'5068226527.32127265408584640098'); +INSERT INTO num_exp_power_10_ln VALUES (6,'281839893606.99372343357047819067'); +INSERT INTO num_exp_power_10_ln VALUES (7,'1716699575118597095.42330819910640247627'); +INSERT INTO num_exp_power_10_ln VALUES (8,'167361463828.07491320069016125952'); +INSERT INTO num_exp_power_10_ln VALUES (9,'107511333880052007.04141124673540337457'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_data VALUES (0, '0'); +INSERT INTO num_data VALUES (1, '0'); +INSERT INTO num_data VALUES (2, '-34338492.215397047'); +INSERT INTO num_data VALUES (3, '4.31'); +INSERT INTO num_data VALUES (4, '7799461.4119'); +INSERT INTO num_data VALUES (5, '16397.038491'); +INSERT INTO num_data VALUES (6, '93901.57763026'); +INSERT INTO num_data VALUES (7, '-83028485'); +INSERT INTO num_data VALUES (8, '74881'); +INSERT INTO num_data VALUES (9, '-24926804.045047420'); +COMMIT TRANSACTION; +-- ****************************** +-- * Create indices for faster checks +-- ****************************** +CREATE UNIQUE INDEX num_exp_add_idx ON num_exp_add (id1, id2); +CREATE UNIQUE INDEX num_exp_sub_idx ON num_exp_sub (id1, id2); +CREATE UNIQUE INDEX num_exp_div_idx ON num_exp_div (id1, id2); +CREATE UNIQUE INDEX num_exp_mul_idx ON num_exp_mul (id1, id2); +CREATE UNIQUE INDEX num_exp_sqrt_idx ON num_exp_sqrt (id); +CREATE UNIQUE INDEX num_exp_ln_idx ON num_exp_ln (id); +CREATE UNIQUE INDEX num_exp_log10_idx ON num_exp_log10 (id); +CREATE UNIQUE INDEX num_exp_power_10_ln_idx ON num_exp_power_10_ln (id); +VACUUM ANALYZE num_exp_add; +VACUUM ANALYZE num_exp_sub; +VACUUM ANALYZE num_exp_div; +VACUUM ANALYZE num_exp_mul; +VACUUM ANALYZE num_exp_sqrt; +VACUUM ANALYZE num_exp_ln; +VACUUM ANALYZE num_exp_log10; +VACUUM ANALYZE num_exp_power_10_ln; +-- ****************************** +-- * Now check the behaviour of the NUMERIC type +-- ****************************** +-- ****************************** +-- * Addition check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val + t2.val + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_add t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val + t2.val, 10) + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected + FROM num_result t1, num_exp_add t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 10); + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Subtraction check +-- ****************************** +DELETE 
FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val - t2.val + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_sub t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val - t2.val, 40) + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40) + FROM num_result t1, num_exp_sub t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 40); + id1 | id2 | result | round +-----+-----+--------+------- +(0 rows) + +-- ****************************** +-- * Multiply check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val * t2.val + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_mul t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val * t2.val, 30) + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected + FROM num_result t1, num_exp_mul t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 30); + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Division check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val / t2.val + FROM num_data t1, num_data t2 + WHERE t2.val != '0.0'; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_div t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val / t2.val, 80) + FROM num_data t1, num_data t2 + WHERE t2.val != '0.0'; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected + FROM num_result t1, num_exp_div t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 80); + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Square root check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, SQRT(ABS(val)) + FROM num_data; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_sqrt t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Natural logarithm check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, LN(ABS(val)) + FROM num_data + WHERE val != '0.0'; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_ln t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Logarithm base 10 check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, LOG(numeric '10', ABS(val)) + FROM num_data + WHERE val != '0.0'; +SELECT t1.id1, t1.result, t2.expected + FROM 
num_result t1, num_exp_log10 t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * POWER(10, LN(value)) check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, POWER(numeric '10', LN(ABS(round(val,200)))) + FROM num_data + WHERE val != '0.0'; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_power_10_ln t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Check behavior with Inf and NaN inputs. It's easiest to handle these +-- * separately from the num_data framework used above, because some input +-- * combinations will throw errors. +-- ****************************** +WITH v(x) AS + (VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan')) +SELECT x1, x2, + x1 + x2 AS sum, + x1 - x2 AS diff, + x1 * x2 AS prod +FROM v AS v1(x1), v AS v2(x2); + x1 | x2 | sum | diff | prod +-----------+-----------+-----------+-----------+----------- + 0 | 0 | 0 | 0 | 0 + 0 | 1 | 1 | -1 | 0 + 0 | -1 | -1 | 1 | 0 + 0 | 4.2 | 4.2 | -4.2 | 0.0 + 0 | Infinity | Infinity | -Infinity | NaN + 0 | -Infinity | -Infinity | Infinity | NaN + 0 | NaN | NaN | NaN | NaN + 1 | 0 | 1 | 1 | 0 + 1 | 1 | 2 | 0 | 1 + 1 | -1 | 0 | 2 | -1 + 1 | 4.2 | 5.2 | -3.2 | 4.2 + 1 | Infinity | Infinity | -Infinity | Infinity + 1 | -Infinity | -Infinity | Infinity | -Infinity + 1 | NaN | NaN | NaN | NaN + -1 | 0 | -1 | -1 | 0 + -1 | 1 | 0 | -2 | -1 + -1 | -1 | -2 | 0 | 1 + -1 | 4.2 | 3.2 | -5.2 | -4.2 + -1 | Infinity | Infinity | -Infinity | -Infinity + -1 | -Infinity | -Infinity | Infinity | Infinity + -1 | NaN | NaN | NaN | NaN + 4.2 | 0 | 4.2 | 4.2 | 0.0 + 4.2 | 1 | 5.2 | 3.2 | 4.2 + 4.2 | -1 | 3.2 | 5.2 | -4.2 + 4.2 | 4.2 | 8.4 | 0.0 | 17.64 + 4.2 | Infinity | Infinity | -Infinity | Infinity + 4.2 | -Infinity | -Infinity | Infinity | -Infinity + 4.2 | NaN | NaN | NaN | NaN + Infinity | 0 | Infinity | Infinity | NaN + Infinity | 1 | Infinity | Infinity | Infinity + Infinity | -1 | Infinity | Infinity | -Infinity + Infinity | 4.2 | Infinity | Infinity | Infinity + Infinity | Infinity | Infinity | NaN | Infinity + Infinity | -Infinity | NaN | Infinity | -Infinity + Infinity | NaN | NaN | NaN | NaN + -Infinity | 0 | -Infinity | -Infinity | NaN + -Infinity | 1 | -Infinity | -Infinity | -Infinity + -Infinity | -1 | -Infinity | -Infinity | Infinity + -Infinity | 4.2 | -Infinity | -Infinity | -Infinity + -Infinity | Infinity | NaN | -Infinity | -Infinity + -Infinity | -Infinity | -Infinity | NaN | Infinity + -Infinity | NaN | NaN | NaN | NaN + NaN | 0 | NaN | NaN | NaN + NaN | 1 | NaN | NaN | NaN + NaN | -1 | NaN | NaN | NaN + NaN | 4.2 | NaN | NaN | NaN + NaN | Infinity | NaN | NaN | NaN + NaN | -Infinity | NaN | NaN | NaN + NaN | NaN | NaN | NaN | NaN +(49 rows) + +WITH v(x) AS + (VALUES('0'::numeric),('1'),('-1'),('4.2'),('inf'),('-inf'),('nan')) +SELECT x1, x2, + x1 / x2 AS quot, + x1 % x2 AS mod, + div(x1, x2) AS div +FROM v AS v1(x1), v AS v2(x2) WHERE x2 != 0; + x1 | x2 | quot | mod | div +-----------+-----------+-------------------------+------+----------- + 0 | 1 | 0.00000000000000000000 | 0 | 0 + 1 | 1 | 1.00000000000000000000 | 0 | 1 + -1 | 1 | -1.00000000000000000000 | 0 | -1 + 4.2 | 1 | 4.2000000000000000 | 0.2 | 4 + Infinity | 1 | Infinity | NaN | Infinity + -Infinity | 1 | -Infinity | NaN | -Infinity + NaN | 1 | NaN | NaN | NaN + 0 | -1 
| 0.00000000000000000000 | 0 | 0 + 1 | -1 | -1.00000000000000000000 | 0 | -1 + -1 | -1 | 1.00000000000000000000 | 0 | 1 + 4.2 | -1 | -4.2000000000000000 | 0.2 | -4 + Infinity | -1 | -Infinity | NaN | -Infinity + -Infinity | -1 | Infinity | NaN | Infinity + NaN | -1 | NaN | NaN | NaN + 0 | 4.2 | 0.00000000000000000000 | 0.0 | 0 + 1 | 4.2 | 0.23809523809523809524 | 1.0 | 0 + -1 | 4.2 | -0.23809523809523809524 | -1.0 | 0 + 4.2 | 4.2 | 1.00000000000000000000 | 0.0 | 1 + Infinity | 4.2 | Infinity | NaN | Infinity + -Infinity | 4.2 | -Infinity | NaN | -Infinity + NaN | 4.2 | NaN | NaN | NaN + 0 | Infinity | 0 | 0 | 0 + 1 | Infinity | 0 | 1 | 0 + -1 | Infinity | 0 | -1 | 0 + 4.2 | Infinity | 0 | 4.2 | 0 + Infinity | Infinity | NaN | NaN | NaN + -Infinity | Infinity | NaN | NaN | NaN + NaN | Infinity | NaN | NaN | NaN + 0 | -Infinity | 0 | 0 | 0 + 1 | -Infinity | 0 | 1 | 0 + -1 | -Infinity | 0 | -1 | 0 + 4.2 | -Infinity | 0 | 4.2 | 0 + Infinity | -Infinity | NaN | NaN | NaN + -Infinity | -Infinity | NaN | NaN | NaN + NaN | -Infinity | NaN | NaN | NaN + 0 | NaN | NaN | NaN | NaN + 1 | NaN | NaN | NaN | NaN + -1 | NaN | NaN | NaN | NaN + 4.2 | NaN | NaN | NaN | NaN + Infinity | NaN | NaN | NaN | NaN + -Infinity | NaN | NaN | NaN | NaN + NaN | NaN | NaN | NaN | NaN +(42 rows) + +SELECT 'inf'::numeric / '0'; +ERROR: division by zero +SELECT '-inf'::numeric / '0'; +ERROR: division by zero +SELECT 'nan'::numeric / '0'; + ?column? +---------- + NaN +(1 row) + +SELECT '0'::numeric / '0'; +ERROR: division by zero +SELECT 'inf'::numeric % '0'; +ERROR: division by zero +SELECT '-inf'::numeric % '0'; +ERROR: division by zero +SELECT 'nan'::numeric % '0'; + ?column? +---------- + NaN +(1 row) + +SELECT '0'::numeric % '0'; +ERROR: division by zero +SELECT div('inf'::numeric, '0'); +ERROR: division by zero +SELECT div('-inf'::numeric, '0'); +ERROR: division by zero +SELECT div('nan'::numeric, '0'); + div +----- + NaN +(1 row) + +SELECT div('0'::numeric, '0'); +ERROR: division by zero +WITH v(x) AS + (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan')) +SELECT x, -x as minusx, abs(x), floor(x), ceil(x), sign(x), numeric_inc(x) as inc +FROM v; + x | minusx | abs | floor | ceil | sign | inc +-----------+-----------+----------+-----------+-----------+------+----------- + 0 | 0 | 0 | 0 | 0 | 0 | 1 + 1 | -1 | 1 | 1 | 1 | 1 | 2 + -1 | 1 | 1 | -1 | -1 | -1 | 0 + 4.2 | -4.2 | 4.2 | 4 | 5 | 1 | 5.2 + -7.777 | 7.777 | 7.777 | -8 | -7 | -1 | -6.777 + Infinity | -Infinity | Infinity | Infinity | Infinity | 1 | Infinity + -Infinity | Infinity | Infinity | -Infinity | -Infinity | -1 | -Infinity + NaN | NaN | NaN | NaN | NaN | NaN | NaN +(8 rows) + +WITH v(x) AS + (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('inf'),('-inf'),('nan')) +SELECT x, round(x), round(x,1) as round1, trunc(x), trunc(x,1) as trunc1 +FROM v; + x | round | round1 | trunc | trunc1 +-----------+-----------+-----------+-----------+----------- + 0 | 0 | 0.0 | 0 | 0.0 + 1 | 1 | 1.0 | 1 | 1.0 + -1 | -1 | -1.0 | -1 | -1.0 + 4.2 | 4 | 4.2 | 4 | 4.2 + -7.777 | -8 | -7.8 | -7 | -7.7 + Infinity | Infinity | Infinity | Infinity | Infinity + -Infinity | -Infinity | -Infinity | -Infinity | -Infinity + NaN | NaN | NaN | NaN | NaN +(8 rows) + +-- the large values fall into the numeric abbreviation code's maximal classes +WITH v(x) AS + (VALUES('0'::numeric),('1'),('-1'),('4.2'),('-7.777'),('1e340'),('-1e340'), + ('inf'),('-inf'),('nan'), + ('inf'),('-inf'),('nan')) +SELECT substring(x::text, 1, 32) +FROM v ORDER BY x; + substring 
+---------------------------------- + -Infinity + -Infinity + -1000000000000000000000000000000 + -7.777 + -1 + 0 + 1 + 4.2 + 10000000000000000000000000000000 + Infinity + Infinity + NaN + NaN +(13 rows) + +WITH v(x) AS + (VALUES('0'::numeric),('1'),('4.2'),('inf'),('nan')) +SELECT x, sqrt(x) +FROM v; + x | sqrt +----------+------------------- + 0 | 0.000000000000000 + 1 | 1.000000000000000 + 4.2 | 2.049390153191920 + Infinity | Infinity + NaN | NaN +(5 rows) + +SELECT sqrt('-1'::numeric); +ERROR: cannot take square root of a negative number +SELECT sqrt('-inf'::numeric); +ERROR: cannot take square root of a negative number +WITH v(x) AS + (VALUES('1'::numeric),('4.2'),('inf'),('nan')) +SELECT x, + log(x), + log10(x), + ln(x) +FROM v; + x | log | log10 | ln +----------+--------------------+--------------------+-------------------- + 1 | 0.0000000000000000 | 0.0000000000000000 | 0.0000000000000000 + 4.2 | 0.6232492903979005 | 0.6232492903979005 | 1.4350845252893226 + Infinity | Infinity | Infinity | Infinity + NaN | NaN | NaN | NaN +(4 rows) + +SELECT ln('0'::numeric); +ERROR: cannot take logarithm of zero +SELECT ln('-1'::numeric); +ERROR: cannot take logarithm of a negative number +SELECT ln('-inf'::numeric); +ERROR: cannot take logarithm of a negative number +WITH v(x) AS + (VALUES('2'::numeric),('4.2'),('inf'),('nan')) +SELECT x1, x2, + log(x1, x2) +FROM v AS v1(x1), v AS v2(x2); + x1 | x2 | log +----------+----------+-------------------- + 2 | 2 | 1.0000000000000000 + 2 | 4.2 | 2.0703893278913979 + 2 | Infinity | Infinity + 2 | NaN | NaN + 4.2 | 2 | 0.4830009440873890 + 4.2 | 4.2 | 1.0000000000000000 + 4.2 | Infinity | Infinity + 4.2 | NaN | NaN + Infinity | 2 | 0 + Infinity | 4.2 | 0 + Infinity | Infinity | NaN + Infinity | NaN | NaN + NaN | 2 | NaN + NaN | 4.2 | NaN + NaN | Infinity | NaN + NaN | NaN | NaN +(16 rows) + +SELECT log('0'::numeric, '10'); +ERROR: cannot take logarithm of zero +SELECT log('10'::numeric, '0'); +ERROR: cannot take logarithm of zero +SELECT log('-inf'::numeric, '10'); +ERROR: cannot take logarithm of a negative number +SELECT log('10'::numeric, '-inf'); +ERROR: cannot take logarithm of a negative number +SELECT log('inf'::numeric, '0'); +ERROR: cannot take logarithm of zero +SELECT log('inf'::numeric, '-inf'); +ERROR: cannot take logarithm of a negative number +SELECT log('-inf'::numeric, 'inf'); +ERROR: cannot take logarithm of a negative number +WITH v(x) AS + (VALUES('0'::numeric),('1'),('2'),('4.2'),('inf'),('nan')) +SELECT x1, x2, + power(x1, x2) +FROM v AS v1(x1), v AS v2(x2) WHERE x1 != 0 OR x2 >= 0; + x1 | x2 | power +----------+----------+-------------------- + 0 | 0 | 1.0000000000000000 + 0 | 1 | 0.0000000000000000 + 0 | 2 | 0.0000000000000000 + 0 | 4.2 | 0.0000000000000000 + 0 | Infinity | 0 + 0 | NaN | NaN + 1 | 0 | 1.0000000000000000 + 1 | 1 | 1.0000000000000000 + 1 | 2 | 1.0000000000000000 + 1 | 4.2 | 1.0000000000000000 + 1 | Infinity | 1 + 1 | NaN | 1 + 2 | 0 | 1.0000000000000000 + 2 | 1 | 2.0000000000000000 + 2 | 2 | 4.0000000000000000 + 2 | 4.2 | 18.379173679952560 + 2 | Infinity | Infinity + 2 | NaN | NaN + 4.2 | 0 | 1.0000000000000000 + 4.2 | 1 | 4.2000000000000000 + 4.2 | 2 | 17.640000000000000 + 4.2 | 4.2 | 414.61691860129675 + 4.2 | Infinity | Infinity + 4.2 | NaN | NaN + Infinity | 0 | 1 + Infinity | 1 | Infinity + Infinity | 2 | Infinity + Infinity | 4.2 | Infinity + Infinity | Infinity | Infinity + Infinity | NaN | NaN + NaN | 0 | 1 + NaN | 1 | NaN + NaN | 2 | NaN + NaN | 4.2 | NaN + NaN | Infinity | NaN + NaN | NaN | NaN +(36 rows) + 
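As a quick illustration of the special-case rules the cross-product above exercises (a sketch only, with the expected values read off the result table rather than asserted independently): for numeric power(), zero raised to the zeroth power, one raised to NaN, and NaN raised to the zeroth power all come back as 1 instead of NaN.

SELECT power(0::numeric, 0)              AS zero_to_zero,  -- 1 per the table above
       power(1::numeric, 'NaN'::numeric) AS one_to_nan,    -- 1, not NaN
       power('NaN'::numeric, 0)          AS nan_to_zero;   -- 1, not NaN
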
+SELECT power('0'::numeric, '-1'); +ERROR: zero raised to a negative power is undefined +SELECT power('0'::numeric, '-inf'); +ERROR: zero raised to a negative power is undefined +SELECT power('-1'::numeric, 'inf'); + power +------- + 1 +(1 row) + +SELECT power('-2'::numeric, '3'); + power +--------------------- + -8.0000000000000000 +(1 row) + +SELECT power('-2'::numeric, '3.3'); +ERROR: a negative number raised to a non-integer power yields a complex result +SELECT power('-2'::numeric, '-1'); + power +--------------------- + -0.5000000000000000 +(1 row) + +SELECT power('-2'::numeric, '-1.5'); +ERROR: a negative number raised to a non-integer power yields a complex result +SELECT power('-2'::numeric, 'inf'); + power +---------- + Infinity +(1 row) + +SELECT power('-2'::numeric, '-inf'); + power +------- + 0 +(1 row) + +SELECT power('inf'::numeric, '-2'); + power +------- + 0 +(1 row) + +SELECT power('inf'::numeric, '-inf'); + power +------- + 0 +(1 row) + +SELECT power('-inf'::numeric, '2'); + power +---------- + Infinity +(1 row) + +SELECT power('-inf'::numeric, '3'); + power +----------- + -Infinity +(1 row) + +SELECT power('-inf'::numeric, '4.5'); +ERROR: a negative number raised to a non-integer power yields a complex result +SELECT power('-inf'::numeric, '-2'); + power +------- + 0 +(1 row) + +SELECT power('-inf'::numeric, '-3'); + power +------- + 0 +(1 row) + +SELECT power('-inf'::numeric, '0'); + power +------- + 1 +(1 row) + +SELECT power('-inf'::numeric, 'inf'); + power +---------- + Infinity +(1 row) + +SELECT power('-inf'::numeric, '-inf'); + power +------- + 0 +(1 row) + +-- ****************************** +-- * miscellaneous checks for things that have been broken in the past... +-- ****************************** +-- numeric AVG used to fail on some platforms +SELECT AVG(val) FROM num_data; + avg +------------------------ + -13430913.592242320700 +(1 row) + +SELECT MAX(val) FROM num_data; + max +-------------------- + 7799461.4119000000 +(1 row) + +SELECT MIN(val) FROM num_data; + min +---------------------- + -83028485.0000000000 +(1 row) + +SELECT STDDEV(val) FROM num_data; + stddev +------------------------------- + 27791203.28758835329805617386 +(1 row) + +SELECT VARIANCE(val) FROM num_data; + variance +-------------------------------------- + 772350980172061.69659105821915863601 +(1 row) + +-- Check for appropriate rounding and overflow +CREATE TABLE fract_only (id int, val numeric(4,4)); +INSERT INTO fract_only VALUES (1, '0.0'); +INSERT INTO fract_only VALUES (2, '0.1'); +INSERT INTO fract_only VALUES (3, '1.0'); -- should fail +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 4 must round to an absolute value less than 1. +INSERT INTO fract_only VALUES (4, '-0.9999'); +INSERT INTO fract_only VALUES (5, '0.99994'); +INSERT INTO fract_only VALUES (6, '0.99995'); -- should fail +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 4 must round to an absolute value less than 1. +INSERT INTO fract_only VALUES (7, '0.00001'); +INSERT INTO fract_only VALUES (8, '0.00017'); +INSERT INTO fract_only VALUES (9, 'NaN'); +INSERT INTO fract_only VALUES (10, 'Inf'); -- should fail +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 4 cannot hold an infinite value. +INSERT INTO fract_only VALUES (11, '-Inf'); -- should fail +ERROR: numeric field overflow +DETAIL: A field with precision 4, scale 4 cannot hold an infinite value. 
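The boundary between rows 5 and 6 above comes from rounding happening before the range check: a numeric(4,4) column keeps four fractional digits and no integer digits, so the input is first rounded to scale 4 and the result must then round to an absolute value below 1, as the DETAIL messages state. A minimal standalone sketch of that edge, using the same two values:

SELECT 0.99994::numeric(4,4);  -- rounds to 0.9999 and fits
SELECT 0.99995::numeric(4,4);  -- rounds to 1.0000 and raises "numeric field overflow"
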
+SELECT * FROM fract_only; + id | val +----+--------- + 1 | 0.0000 + 2 | 0.1000 + 4 | -0.9999 + 5 | 0.9999 + 7 | 0.0000 + 8 | 0.0002 + 9 | NaN +(7 rows) + +DROP TABLE fract_only; +-- Check conversion to integers +SELECT (-9223372036854775808.5)::int8; -- should fail +ERROR: bigint out of range +SELECT (-9223372036854775808.4)::int8; -- ok + int8 +---------------------- + -9223372036854775808 +(1 row) + +SELECT 9223372036854775807.4::int8; -- ok + int8 +--------------------- + 9223372036854775807 +(1 row) + +SELECT 9223372036854775807.5::int8; -- should fail +ERROR: bigint out of range +SELECT (-2147483648.5)::int4; -- should fail +ERROR: integer out of range +SELECT (-2147483648.4)::int4; -- ok + int4 +------------- + -2147483648 +(1 row) + +SELECT 2147483647.4::int4; -- ok + int4 +------------ + 2147483647 +(1 row) + +SELECT 2147483647.5::int4; -- should fail +ERROR: integer out of range +SELECT (-32768.5)::int2; -- should fail +ERROR: smallint out of range +SELECT (-32768.4)::int2; -- ok + int2 +-------- + -32768 +(1 row) + +SELECT 32767.4::int2; -- ok + int2 +------- + 32767 +(1 row) + +SELECT 32767.5::int2; -- should fail +ERROR: smallint out of range +-- Check inf/nan conversion behavior +SELECT 'NaN'::float8::numeric; + numeric +--------- + NaN +(1 row) + +SELECT 'Infinity'::float8::numeric; + numeric +---------- + Infinity +(1 row) + +SELECT '-Infinity'::float8::numeric; + numeric +----------- + -Infinity +(1 row) + +SELECT 'NaN'::numeric::float8; + float8 +-------- + NaN +(1 row) + +SELECT 'Infinity'::numeric::float8; + float8 +---------- + Infinity +(1 row) + +SELECT '-Infinity'::numeric::float8; + float8 +----------- + -Infinity +(1 row) + +SELECT 'NaN'::float4::numeric; + numeric +--------- + NaN +(1 row) + +SELECT 'Infinity'::float4::numeric; + numeric +---------- + Infinity +(1 row) + +SELECT '-Infinity'::float4::numeric; + numeric +----------- + -Infinity +(1 row) + +SELECT 'NaN'::numeric::float4; + float4 +-------- + NaN +(1 row) + +SELECT 'Infinity'::numeric::float4; + float4 +---------- + Infinity +(1 row) + +SELECT '-Infinity'::numeric::float4; + float4 +----------- + -Infinity +(1 row) + +SELECT '42'::int2::numeric; + numeric +--------- + 42 +(1 row) + +SELECT 'NaN'::numeric::int2; +ERROR: cannot convert NaN to smallint +SELECT 'Infinity'::numeric::int2; +ERROR: cannot convert infinity to smallint +SELECT '-Infinity'::numeric::int2; +ERROR: cannot convert infinity to smallint +SELECT 'NaN'::numeric::int4; +ERROR: cannot convert NaN to integer +SELECT 'Infinity'::numeric::int4; +ERROR: cannot convert infinity to integer +SELECT '-Infinity'::numeric::int4; +ERROR: cannot convert infinity to integer +SELECT 'NaN'::numeric::int8; +ERROR: cannot convert NaN to bigint +SELECT 'Infinity'::numeric::int8; +ERROR: cannot convert infinity to bigint +SELECT '-Infinity'::numeric::int8; +ERROR: cannot convert infinity to bigint +-- Simple check that ceil(), floor(), and round() work correctly +CREATE TABLE ceil_floor_round (a numeric); +INSERT INTO ceil_floor_round VALUES ('-5.5'); +INSERT INTO ceil_floor_round VALUES ('-5.499999'); +INSERT INTO ceil_floor_round VALUES ('9.5'); +INSERT INTO ceil_floor_round VALUES ('9.4999999'); +INSERT INTO ceil_floor_round VALUES ('0.0'); +INSERT INTO ceil_floor_round VALUES ('0.0000001'); +INSERT INTO ceil_floor_round VALUES ('-0.000001'); +SELECT a, ceil(a), ceiling(a), floor(a), round(a) FROM ceil_floor_round; + a | ceil | ceiling | floor | round +-----------+------+---------+-------+------- + -5.5 | -5 | -5 | -6 | -6 + -5.499999 | -5 | -5 | -6 | 
-5 + 9.5 | 10 | 10 | 9 | 10 + 9.4999999 | 10 | 10 | 9 | 9 + 0.0 | 0 | 0 | 0 | 0 + 0.0000001 | 1 | 1 | 0 | 0 + -0.000001 | 0 | 0 | -1 | 0 +(7 rows) + +DROP TABLE ceil_floor_round; +-- Check rounding, it should round ties away from zero. +SELECT i as pow, + round((-2.5 * 10 ^ i)::numeric, -i), + round((-1.5 * 10 ^ i)::numeric, -i), + round((-0.5 * 10 ^ i)::numeric, -i), + round((0.5 * 10 ^ i)::numeric, -i), + round((1.5 * 10 ^ i)::numeric, -i), + round((2.5 * 10 ^ i)::numeric, -i) +FROM generate_series(-5,5) AS t(i); + pow | round | round | round | round | round | round +-----+----------+----------+----------+---------+---------+--------- + -5 | -0.00003 | -0.00002 | -0.00001 | 0.00001 | 0.00002 | 0.00003 + -4 | -0.0003 | -0.0002 | -0.0001 | 0.0001 | 0.0002 | 0.0003 + -3 | -0.003 | -0.002 | -0.001 | 0.001 | 0.002 | 0.003 + -2 | -0.03 | -0.02 | -0.01 | 0.01 | 0.02 | 0.03 + -1 | -0.3 | -0.2 | -0.1 | 0.1 | 0.2 | 0.3 + 0 | -3 | -2 | -1 | 1 | 2 | 3 + 1 | -30 | -20 | -10 | 10 | 20 | 30 + 2 | -300 | -200 | -100 | 100 | 200 | 300 + 3 | -3000 | -2000 | -1000 | 1000 | 2000 | 3000 + 4 | -30000 | -20000 | -10000 | 10000 | 20000 | 30000 + 5 | -300000 | -200000 | -100000 | 100000 | 200000 | 300000 +(11 rows) + +-- Testing for width_bucket(). For convenience, we test both the +-- numeric and float8 versions of the function in this file. +-- errors +SELECT width_bucket(5.0, 3.0, 4.0, 0); +ERROR: count must be greater than zero +SELECT width_bucket(5.0, 3.0, 4.0, -5); +ERROR: count must be greater than zero +SELECT width_bucket(3.5, 3.0, 3.0, 888); +ERROR: lower bound cannot equal upper bound +SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0); +ERROR: count must be greater than zero +SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5); +ERROR: count must be greater than zero +SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888); +ERROR: lower bound cannot equal upper bound +SELECT width_bucket('NaN', 3.0, 4.0, 888); +ERROR: operand, lower bound, and upper bound cannot be NaN +SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888); +ERROR: operand, lower bound, and upper bound cannot be NaN +SELECT width_bucket(2.0, 3.0, '-inf', 888); +ERROR: lower and upper bounds must be finite +SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); +ERROR: lower and upper bounds must be finite +-- normal operation +CREATE TABLE width_bucket_test (operand_num numeric, operand_f8 float8); +COPY width_bucket_test (operand_num) FROM stdin; +UPDATE width_bucket_test SET operand_f8 = operand_num::float8; +SELECT + operand_num, + width_bucket(operand_num, 0, 10, 5) AS wb_1, + width_bucket(operand_f8, 0, 10, 5) AS wb_1f, + width_bucket(operand_num, 10, 0, 5) AS wb_2, + width_bucket(operand_f8, 10, 0, 5) AS wb_2f, + width_bucket(operand_num, 2, 8, 4) AS wb_3, + width_bucket(operand_f8, 2, 8, 4) AS wb_3f, + width_bucket(operand_num, 5.0, 5.5, 20) AS wb_4, + width_bucket(operand_f8, 5.0, 5.5, 20) AS wb_4f, + width_bucket(operand_num, -25, 25, 10) AS wb_5, + width_bucket(operand_f8, -25, 25, 10) AS wb_5f + FROM width_bucket_test; + operand_num | wb_1 | wb_1f | wb_2 | wb_2f | wb_3 | wb_3f | wb_4 | wb_4f | wb_5 | wb_5f +------------------+------+-------+------+-------+------+-------+------+-------+------+------- + -5.2 | 0 | 0 | 6 | 6 | 0 | 0 | 0 | 0 | 4 | 4 + -0.0000000001 | 0 | 0 | 6 | 6 | 0 | 0 | 0 | 0 | 5 | 5 + 0.000000000001 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6 + 1 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6 + 1.99999999999999 | 1 | 1 | 5 | 5 | 0 | 0 | 0 | 0 | 6 | 6 + 2 | 2 | 2 | 5 | 5 | 1 | 1 | 
0 | 0 | 6 | 6 + 2.00000000000001 | 2 | 2 | 4 | 4 | 1 | 1 | 0 | 0 | 6 | 6 + 3 | 2 | 2 | 4 | 4 | 1 | 1 | 0 | 0 | 6 | 6 + 4 | 3 | 3 | 4 | 4 | 2 | 2 | 0 | 0 | 6 | 6 + 4.5 | 3 | 3 | 3 | 3 | 2 | 2 | 0 | 0 | 6 | 6 + 5 | 3 | 3 | 3 | 3 | 3 | 3 | 1 | 1 | 7 | 7 + 5.5 | 3 | 3 | 3 | 3 | 3 | 3 | 21 | 21 | 7 | 7 + 6 | 4 | 4 | 3 | 3 | 3 | 3 | 21 | 21 | 7 | 7 + 7 | 4 | 4 | 2 | 2 | 4 | 4 | 21 | 21 | 7 | 7 + 8 | 5 | 5 | 2 | 2 | 5 | 5 | 21 | 21 | 7 | 7 + 9 | 5 | 5 | 1 | 1 | 5 | 5 | 21 | 21 | 7 | 7 + 9.99999999999999 | 5 | 5 | 1 | 1 | 5 | 5 | 21 | 21 | 7 | 7 + 10 | 6 | 6 | 1 | 1 | 5 | 5 | 21 | 21 | 8 | 8 + 10.0000000000001 | 6 | 6 | 0 | 0 | 5 | 5 | 21 | 21 | 8 | 8 +(19 rows) + +-- Check positive and negative infinity: we require +-- finite bucket bounds, but allow an infinite operand +SELECT width_bucket(0.0::numeric, 'Infinity'::numeric, 5, 10); -- error +ERROR: lower and upper bounds must be finite +SELECT width_bucket(0.0::numeric, 5, '-Infinity'::numeric, 20); -- error +ERROR: lower and upper bounds must be finite +SELECT width_bucket('Infinity'::numeric, 1, 10, 10), + width_bucket('-Infinity'::numeric, 1, 10, 10); + width_bucket | width_bucket +--------------+-------------- + 11 | 0 +(1 row) + +SELECT width_bucket(0.0::float8, 'Infinity'::float8, 5, 10); -- error +ERROR: lower and upper bounds must be finite +SELECT width_bucket(0.0::float8, 5, '-Infinity'::float8, 20); -- error +ERROR: lower and upper bounds must be finite +SELECT width_bucket('Infinity'::float8, 1, 10, 10), + width_bucket('-Infinity'::float8, 1, 10, 10); + width_bucket | width_bucket +--------------+-------------- + 11 | 0 +(1 row) + +DROP TABLE width_bucket_test; +-- Simple test for roundoff error when results should be exact +SELECT x, width_bucket(x::float8, 10, 100, 9) as flt, + width_bucket(x::numeric, 10, 100, 9) as num +FROM generate_series(0, 110, 10) x; + x | flt | num +-----+-----+----- + 0 | 0 | 0 + 10 | 1 | 1 + 20 | 2 | 2 + 30 | 3 | 3 + 40 | 4 | 4 + 50 | 5 | 5 + 60 | 6 | 6 + 70 | 7 | 7 + 80 | 8 | 8 + 90 | 9 | 9 + 100 | 10 | 10 + 110 | 10 | 10 +(12 rows) + +SELECT x, width_bucket(x::float8, 100, 10, 9) as flt, + width_bucket(x::numeric, 100, 10, 9) as num +FROM generate_series(0, 110, 10) x; + x | flt | num +-----+-----+----- + 0 | 10 | 10 + 10 | 10 | 10 + 20 | 9 | 9 + 30 | 8 | 8 + 40 | 7 | 7 + 50 | 6 | 6 + 60 | 5 | 5 + 70 | 4 | 4 + 80 | 3 | 3 + 90 | 2 | 2 + 100 | 1 | 1 + 110 | 0 | 0 +(12 rows) + +-- Another roundoff-error hazard +SELECT width_bucket(0, -1e100::numeric, 1, 10); + width_bucket +-------------- + 10 +(1 row) + +SELECT width_bucket(0, -1e100::float8, 1, 10); + width_bucket +-------------- + 10 +(1 row) + +SELECT width_bucket(1, 1e100::numeric, 0, 10); + width_bucket +-------------- + 10 +(1 row) + +SELECT width_bucket(1, 1e100::float8, 0, 10); + width_bucket +-------------- + 10 +(1 row) + +-- Check cases that could trigger overflow or underflow within the calculation +SELECT oper, low, high, cnt, width_bucket(oper, low, high, cnt) +FROM + (SELECT 1.797e+308::float8 AS big, 5e-324::float8 AS tiny) as v, + LATERAL (VALUES + (10.5::float8, -big, big, 1), + (10.5::float8, -big, big, 2), + (10.5::float8, -big, big, 3), + (big / 4, -big / 2, big / 2, 10), + (10.5::float8, big, -big, 1), + (10.5::float8, big, -big, 2), + (10.5::float8, big, -big, 3), + (big / 4, big / 2, -big / 2, 10), + (0, 0, tiny, 4), + (tiny, 0, tiny, 4), + (0, 0, 1, 2147483647), + (1, 1, 0, 2147483647) + ) as sample(oper, low, high, cnt); + oper | low | high | cnt | width_bucket 
+-------------+-------------+-------------+------------+-------------- + 10.5 | -1.797e+308 | 1.797e+308 | 1 | 1 + 10.5 | -1.797e+308 | 1.797e+308 | 2 | 2 + 10.5 | -1.797e+308 | 1.797e+308 | 3 | 2 + 4.4925e+307 | -8.985e+307 | 8.985e+307 | 10 | 8 + 10.5 | 1.797e+308 | -1.797e+308 | 1 | 1 + 10.5 | 1.797e+308 | -1.797e+308 | 2 | 2 + 10.5 | 1.797e+308 | -1.797e+308 | 3 | 2 + 4.4925e+307 | 8.985e+307 | -8.985e+307 | 10 | 3 + 0 | 0 | 5e-324 | 4 | 1 + 5e-324 | 0 | 5e-324 | 4 | 5 + 0 | 0 | 1 | 2147483647 | 1 + 1 | 1 | 0 | 2147483647 | 1 +(12 rows) + +-- These fail because the result would be out of int32 range: +SELECT width_bucket(1::float8, 0, 1, 2147483647); +ERROR: integer out of range +SELECT width_bucket(0::float8, 1, 0, 2147483647); +ERROR: integer out of range +-- +-- TO_CHAR() +-- +SELECT to_char(val, '9G999G999G999G999G999') + FROM num_data; + to_char +------------------------ + 0 + 0 + -34,338,492 + 4 + 7,799,461 + 16,397 + 93,902 + -83,028,485 + 74,881 + -24,926,804 +(10 rows) + +SELECT to_char(val, '9G999G999G999G999G999D999G999G999G999G999') + FROM num_data; + to_char +-------------------------------------------- + .000,000,000,000,000 + .000,000,000,000,000 + -34,338,492.215,397,047,000,000 + 4.310,000,000,000,000 + 7,799,461.411,900,000,000,000 + 16,397.038,491,000,000,000 + 93,901.577,630,260,000,000 + -83,028,485.000,000,000,000,000 + 74,881.000,000,000,000,000 + -24,926,804.045,047,420,000,000 +(10 rows) + +SELECT to_char(val, '9999999999999999.999999999999999PR') + FROM num_data; + to_char +------------------------------------ + .000000000000000 + .000000000000000 + <34338492.215397047000000> + 4.310000000000000 + 7799461.411900000000000 + 16397.038491000000000 + 93901.577630260000000 + <83028485.000000000000000> + 74881.000000000000000 + <24926804.045047420000000> +(10 rows) + +SELECT to_char(val, '9999999999999999.999999999999999S') + FROM num_data; + to_char +----------------------------------- + .000000000000000+ + .000000000000000+ + 34338492.215397047000000- + 4.310000000000000+ + 7799461.411900000000000+ + 16397.038491000000000+ + 93901.577630260000000+ + 83028485.000000000000000- + 74881.000000000000000+ + 24926804.045047420000000- +(10 rows) + +SELECT to_char(val, 'MI9999999999999999.999999999999999') FROM num_data; + to_char +----------------------------------- + .000000000000000 + .000000000000000 + - 34338492.215397047000000 + 4.310000000000000 + 7799461.411900000000000 + 16397.038491000000000 + 93901.577630260000000 + - 83028485.000000000000000 + 74881.000000000000000 + - 24926804.045047420000000 +(10 rows) + +SELECT to_char(val, 'FMS9999999999999999.999999999999999') FROM num_data; + to_char +--------------------- + +0. + +0. + -34338492.215397047 + +4.31 + +7799461.4119 + +16397.038491 + +93901.57763026 + -83028485. + +74881. + -24926804.04504742 +(10 rows) + +SELECT to_char(val, 'FM9999999999999999.999999999999999THPR') FROM num_data; + to_char +---------------------- + 0. + 0. + <34338492.215397047> + 4.31 + 7799461.4119 + 16397.038491 + 93901.57763026 + <83028485.> + 74881. 
+ <24926804.04504742> +(10 rows) + +SELECT to_char(val, 'SG9999999999999999.999999999999999th') FROM num_data; + to_char +----------------------------------- + + .000000000000000 + + .000000000000000 + - 34338492.215397047000000 + + 4.310000000000000 + + 7799461.411900000000000 + + 16397.038491000000000 + + 93901.577630260000000 + - 83028485.000000000000000 + + 74881.000000000000000 + - 24926804.045047420000000 +(10 rows) + +SELECT to_char(val, '0999999999999999.999999999999999') FROM num_data; + to_char +----------------------------------- + 0000000000000000.000000000000000 + 0000000000000000.000000000000000 + -0000000034338492.215397047000000 + 0000000000000004.310000000000000 + 0000000007799461.411900000000000 + 0000000000016397.038491000000000 + 0000000000093901.577630260000000 + -0000000083028485.000000000000000 + 0000000000074881.000000000000000 + -0000000024926804.045047420000000 +(10 rows) + +SELECT to_char(val, 'S0999999999999999.999999999999999') FROM num_data; + to_char +----------------------------------- + +0000000000000000.000000000000000 + +0000000000000000.000000000000000 + -0000000034338492.215397047000000 + +0000000000000004.310000000000000 + +0000000007799461.411900000000000 + +0000000000016397.038491000000000 + +0000000000093901.577630260000000 + -0000000083028485.000000000000000 + +0000000000074881.000000000000000 + -0000000024926804.045047420000000 +(10 rows) + +SELECT to_char(val, 'FM0999999999999999.999999999999999') FROM num_data; + to_char +----------------------------- + 0000000000000000. + 0000000000000000. + -0000000034338492.215397047 + 0000000000000004.31 + 0000000007799461.4119 + 0000000000016397.038491 + 0000000000093901.57763026 + -0000000083028485. + 0000000000074881. + -0000000024926804.04504742 +(10 rows) + +SELECT to_char(val, 'FM9999999999999999.099999999999999') FROM num_data; + to_char +--------------------- + .0 + .0 + -34338492.215397047 + 4.31 + 7799461.4119 + 16397.038491 + 93901.57763026 + -83028485.0 + 74881.0 + -24926804.04504742 +(10 rows) + +SELECT to_char(val, 'FM9999999999990999.990999999999999') FROM num_data; + to_char +--------------------- + 0000.000 + 0000.000 + -34338492.215397047 + 0004.310 + 7799461.4119 + 16397.038491 + 93901.57763026 + -83028485.000 + 74881.000 + -24926804.04504742 +(10 rows) + +SELECT to_char(val, 'FM0999999999999999.999909999999999') FROM num_data; + to_char +----------------------------- + 0000000000000000.00000 + 0000000000000000.00000 + -0000000034338492.215397047 + 0000000000000004.31000 + 0000000007799461.41190 + 0000000000016397.038491 + 0000000000093901.57763026 + -0000000083028485.00000 + 0000000000074881.00000 + -0000000024926804.04504742 +(10 rows) + +SELECT to_char(val, 'FM9999999990999999.099999999999999') FROM num_data; + to_char +--------------------- + 0000000.0 + 0000000.0 + -34338492.215397047 + 0000004.31 + 7799461.4119 + 0016397.038491 + 0093901.57763026 + -83028485.0 + 0074881.0 + -24926804.04504742 +(10 rows) + +SELECT to_char(val, 'L9999999999999999.099999999999999') FROM num_data; + to_char +------------------------------------ + .000000000000000 + .000000000000000 + -34338492.215397047000000 + 4.310000000000000 + 7799461.411900000000000 + 16397.038491000000000 + 93901.577630260000000 + -83028485.000000000000000 + 74881.000000000000000 + -24926804.045047420000000 +(10 rows) + +SELECT to_char(val, 'FM9999999999999999.99999999999999') FROM num_data; + to_char +--------------------- + 0. + 0. + -34338492.215397047 + 4.31 + 7799461.4119 + 16397.038491 + 93901.57763026 + -83028485. + 74881. 
+ -24926804.04504742 +(10 rows) + +SELECT to_char(val, 'S 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data; + to_char +----------------------------------------------------------------------- + +. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + +. 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + -3 4 3 3 8 4 9 2 . 2 1 5 3 9 7 0 4 7 0 0 0 0 0 0 0 0 + +4 . 3 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + +7 7 9 9 4 6 1 . 4 1 1 9 0 0 0 0 0 0 0 0 0 0 0 0 0 + +1 6 3 9 7 . 0 3 8 4 9 1 0 0 0 0 0 0 0 0 0 0 0 + +9 3 9 0 1 . 5 7 7 6 3 0 2 6 0 0 0 0 0 0 0 0 0 + -8 3 0 2 8 4 8 5 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + +7 4 8 8 1 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + -2 4 9 2 6 8 0 4 . 0 4 5 0 4 7 4 2 0 0 0 0 0 0 0 0 0 +(10 rows) + +SELECT to_char(val, 'FMS 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 . 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9') FROM num_data; + to_char +------------------------------------------------------- + +0 . + +0 . + -3 4 3 3 8 4 9 2 . 2 1 5 3 9 7 0 4 7 + +4 . 3 1 + +7 7 9 9 4 6 1 . 4 1 1 9 + +1 6 3 9 7 . 0 3 8 4 9 1 + +9 3 9 0 1 . 5 7 7 6 3 0 2 6 + -8 3 0 2 8 4 8 5 . + +7 4 8 8 1 . + -2 4 9 2 6 8 0 4 . 0 4 5 0 4 7 4 2 +(10 rows) + +SELECT to_char(val, E'99999 "text" 9999 "9999" 999 "\\"text between quote marks\\"" 9999') FROM num_data; + to_char +----------------------------------------------------------- + text 9999 "text between quote marks" 0 + text 9999 "text between quote marks" 0 + text -3 9999 433 "text between quote marks" 8492 + text 9999 "text between quote marks" 4 + text 9999 779 "text between quote marks" 9461 + text 9999 1 "text between quote marks" 6397 + text 9999 9 "text between quote marks" 3902 + text -8 9999 302 "text between quote marks" 8485 + text 9999 7 "text between quote marks" 4881 + text -2 9999 492 "text between quote marks" 6804 +(10 rows) + +SELECT to_char(val, '999999SG9999999999') FROM num_data; + to_char +------------------- + + 0 + + 0 + - 34338492 + + 4 + + 7799461 + + 16397 + + 93902 + - 83028485 + + 74881 + - 24926804 +(10 rows) + +SELECT to_char(val, 'FM9999999999999999.999999999999999') FROM num_data; + to_char +--------------------- + 0. + 0. + -34338492.215397047 + 4.31 + 7799461.4119 + 16397.038491 + 93901.57763026 + -83028485. + 74881. 
+ -24926804.04504742 +(10 rows) + +SELECT to_char(val, '9.999EEEE') FROM num_data; + to_char +------------ + 0.000e+00 + 0.000e+00 + -3.434e+07 + 4.310e+00 + 7.799e+06 + 1.640e+04 + 9.390e+04 + -8.303e+07 + 7.488e+04 + -2.493e+07 +(10 rows) + +WITH v(val) AS + (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan')) +SELECT val, + to_char(val, '9.999EEEE') as numeric, + to_char(val::float8, '9.999EEEE') as float8, + to_char(val::float4, '9.999EEEE') as float4 +FROM v; + val | numeric | float8 | float4 +------------+------------+------------+------------ + 0 | 0.000e+00 | 0.000e+00 | 0.000e+00 + -4.2 | -4.200e+00 | -4.200e+00 | -4.200e+00 + 4200000000 | 4.200e+09 | 4.200e+09 | 4.200e+09 + 0.000012 | 1.200e-05 | 1.200e-05 | 1.200e-05 + Infinity | #.####### | #.####### | #.####### + -Infinity | #.####### | #.####### | #.####### + NaN | #.####### | #.####### | #.####### +(7 rows) + +WITH v(exp) AS + (VALUES(-16379),(-16378),(-1234),(-789),(-45),(-5),(-4),(-3),(-2),(-1),(0), + (1),(2),(3),(4),(5),(38),(275),(2345),(45678),(131070),(131071)) +SELECT exp, + to_char(('1.2345e'||exp)::numeric, '9.999EEEE') as numeric +FROM v; + exp | numeric +--------+---------------- + -16379 | 1.235e-16379 + -16378 | 1.235e-16378 + -1234 | 1.235e-1234 + -789 | 1.235e-789 + -45 | 1.235e-45 + -5 | 1.235e-05 + -4 | 1.235e-04 + -3 | 1.235e-03 + -2 | 1.235e-02 + -1 | 1.235e-01 + 0 | 1.235e+00 + 1 | 1.235e+01 + 2 | 1.235e+02 + 3 | 1.235e+03 + 4 | 1.235e+04 + 5 | 1.235e+05 + 38 | 1.235e+38 + 275 | 1.235e+275 + 2345 | 1.235e+2345 + 45678 | 1.235e+45678 + 131070 | 1.235e+131070 + 131071 | 1.235e+131071 +(22 rows) + +WITH v(val) AS + (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan')) +SELECT val, + to_char(val, 'MI9999999999.99') as numeric, + to_char(val::float8, 'MI9999999999.99') as float8, + to_char(val::float4, 'MI9999999999.99') as float4 +FROM v; + val | numeric | float8 | float4 +------------+----------------+----------------+---------------- + 0 | .00 | .00 | .00 + -4.2 | - 4.20 | - 4.20 | - 4.20 + 4200000000 | 4200000000.00 | 4200000000.00 | 4200000000 + 0.000012 | .00 | .00 | .00 + Infinity | Infinity | Infinity | Infinity + -Infinity | - Infinity | - Infinity | - Infinity + NaN | NaN | NaN | NaN +(7 rows) + +WITH v(val) AS + (VALUES('0'::numeric),('-4.2'),('4.2e9'),('1.2e-5'),('inf'),('-inf'),('nan')) +SELECT val, + to_char(val, 'MI99.99') as numeric, + to_char(val::float8, 'MI99.99') as float8, + to_char(val::float4, 'MI99.99') as float4 +FROM v; + val | numeric | float8 | float4 +------------+---------+--------+-------- + 0 | .00 | .00 | .00 + -4.2 | - 4.20 | - 4.20 | - 4.20 + 4200000000 | ##.## | ##.## | ##. + 0.000012 | .00 | .00 | .00 + Infinity | ##.## | ##.## | ##. + -Infinity | -##.## | -##.## | -##. + NaN | ##.## | ##.## | ##.## +(7 rows) + +SELECT to_char('100'::numeric, 'FM999.9'); + to_char +--------- + 100. 
+(1 row) + +SELECT to_char('100'::numeric, 'FM999.'); + to_char +--------- + 100 +(1 row) + +SELECT to_char('100'::numeric, 'FM999'); + to_char +--------- + 100 +(1 row) + +SELECT to_char('12345678901'::float8, 'FM9999999999D9999900000000000000000'); + to_char +----------------- + ##########.#### +(1 row) + +-- Check parsing of literal text in a format string +SELECT to_char('100'::numeric, 'foo999'); + to_char +--------- + foo 100 +(1 row) + +SELECT to_char('100'::numeric, 'f\oo999'); + to_char +---------- + f\oo 100 +(1 row) + +SELECT to_char('100'::numeric, 'f\\oo999'); + to_char +----------- + f\\oo 100 +(1 row) + +SELECT to_char('100'::numeric, 'f\"oo999'); + to_char +---------- + f"oo 100 +(1 row) + +SELECT to_char('100'::numeric, 'f\\"oo999'); + to_char +----------- + f\"oo 100 +(1 row) + +SELECT to_char('100'::numeric, 'f"ool"999'); + to_char +---------- + fool 100 +(1 row) + +SELECT to_char('100'::numeric, 'f"\ool"999'); + to_char +---------- + fool 100 +(1 row) + +SELECT to_char('100'::numeric, 'f"\\ool"999'); + to_char +----------- + f\ool 100 +(1 row) + +SELECT to_char('100'::numeric, 'f"ool\"999'); + to_char +---------- + fool"999 +(1 row) + +SELECT to_char('100'::numeric, 'f"ool\\"999'); + to_char +----------- + fool\ 100 +(1 row) + +-- TO_NUMBER() +-- +SET lc_numeric = 'C'; +SELECT to_number('-34,338,492', '99G999G999'); + to_number +----------- + -34338492 +(1 row) + +SELECT to_number('-34,338,492.654,878', '99G999G999D999G999'); + to_number +------------------ + -34338492.654878 +(1 row) + +SELECT to_number('<564646.654564>', '999999.999999PR'); + to_number +---------------- + -564646.654564 +(1 row) + +SELECT to_number('0.00001-', '9.999999S'); + to_number +----------- + -0.00001 +(1 row) + +SELECT to_number('5.01-', 'FM9.999999S'); + to_number +----------- + -5.01 +(1 row) + +SELECT to_number('5.01-', 'FM9.999999MI'); + to_number +----------- + -5.01 +(1 row) + +SELECT to_number('5 4 4 4 4 8 . 7 8', '9 9 9 9 9 9 . 9 9'); + to_number +----------- + 544448.78 +(1 row) + +SELECT to_number('.01', 'FM9.99'); + to_number +----------- + 0.01 +(1 row) + +SELECT to_number('.0', '99999999.99999999'); + to_number +----------- + 0.0 +(1 row) + +SELECT to_number('0', '99.99'); + to_number +----------- + 0 +(1 row) + +SELECT to_number('.-01', 'S99.99'); + to_number +----------- + -0.01 +(1 row) + +SELECT to_number('.01-', '99.99S'); + to_number +----------- + -0.01 +(1 row) + +SELECT to_number(' . 0 1-', ' 9 9 . 
9 9 S'); + to_number +----------- + -0.01 +(1 row) + +SELECT to_number('34,50','999,99'); + to_number +----------- + 3450 +(1 row) + +SELECT to_number('123,000','999G'); + to_number +----------- + 123 +(1 row) + +SELECT to_number('123456','999G999'); + to_number +----------- + 123456 +(1 row) + +SELECT to_number('$1234.56','L9,999.99'); + to_number +----------- + 1234.56 +(1 row) + +SELECT to_number('$1234.56','L99,999.99'); + to_number +----------- + 1234.56 +(1 row) + +SELECT to_number('$1,234.56','L99,999.99'); + to_number +----------- + 1234.56 +(1 row) + +SELECT to_number('1234.56','L99,999.99'); + to_number +----------- + 1234.56 +(1 row) + +SELECT to_number('1,234.56','L99,999.99'); + to_number +----------- + 1234.56 +(1 row) + +SELECT to_number('42nd', '99th'); + to_number +----------- + 42 +(1 row) + +RESET lc_numeric; +-- +-- Input syntax +-- +CREATE TABLE num_input_test (n1 numeric); +-- good inputs +INSERT INTO num_input_test(n1) VALUES (' 123'); +INSERT INTO num_input_test(n1) VALUES (' 3245874 '); +INSERT INTO num_input_test(n1) VALUES (' -93853'); +INSERT INTO num_input_test(n1) VALUES ('555.50'); +INSERT INTO num_input_test(n1) VALUES ('-555.50'); +INSERT INTO num_input_test(n1) VALUES ('NaN '); +INSERT INTO num_input_test(n1) VALUES (' nan'); +INSERT INTO num_input_test(n1) VALUES (' inf '); +INSERT INTO num_input_test(n1) VALUES (' +inf '); +INSERT INTO num_input_test(n1) VALUES (' -inf '); +INSERT INTO num_input_test(n1) VALUES (' Infinity '); +INSERT INTO num_input_test(n1) VALUES (' +inFinity '); +INSERT INTO num_input_test(n1) VALUES (' -INFINITY '); +INSERT INTO num_input_test(n1) VALUES ('12_000_000_000'); +INSERT INTO num_input_test(n1) VALUES ('12_000.123_456'); +INSERT INTO num_input_test(n1) VALUES ('23_000_000_000e-1_0'); +INSERT INTO num_input_test(n1) VALUES ('.000_000_000_123e1_0'); +INSERT INTO num_input_test(n1) VALUES ('.000_000_000_123e+1_1'); +INSERT INTO num_input_test(n1) VALUES ('0b10001110111100111100001001010'); +INSERT INTO num_input_test(n1) VALUES (' -0B_1010_1011_0101_0100_1010_1001_1000_1100_1110_1011_0001_1111_0000_1010_1101_0010 '); +INSERT INTO num_input_test(n1) VALUES (' +0o112402761777 '); +INSERT INTO num_input_test(n1) VALUES ('-0O0012_5524_5230_6334_3167_0261'); +INSERT INTO num_input_test(n1) VALUES ('-0x0000000000000000000000000deadbeef'); +INSERT INTO num_input_test(n1) VALUES (' 0X_30b1_F33a_6DF0_bD4E_64DF_9BdA_7D15 '); +-- bad inputs +INSERT INTO num_input_test(n1) VALUES (' '); +ERROR: invalid input syntax for type numeric: " " +LINE 1: INSERT INTO num_input_test(n1) VALUES (' '); + ^ +INSERT INTO num_input_test(n1) VALUES (' 1234 %'); +ERROR: invalid input syntax for type numeric: " 1234 %" +LINE 1: INSERT INTO num_input_test(n1) VALUES (' 1234 %'); + ^ +INSERT INTO num_input_test(n1) VALUES ('xyz'); +ERROR: invalid input syntax for type numeric: "xyz" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('xyz'); + ^ +INSERT INTO num_input_test(n1) VALUES ('- 1234'); +ERROR: invalid input syntax for type numeric: "- 1234" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('- 1234'); + ^ +INSERT INTO num_input_test(n1) VALUES ('5 . 0'); +ERROR: invalid input syntax for type numeric: "5 . 0" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('5 . 0'); + ^ +INSERT INTO num_input_test(n1) VALUES ('5. 0 '); +ERROR: invalid input syntax for type numeric: "5. 0 " +LINE 1: INSERT INTO num_input_test(n1) VALUES ('5. 
0 '); + ^ +INSERT INTO num_input_test(n1) VALUES (''); +ERROR: invalid input syntax for type numeric: "" +LINE 1: INSERT INTO num_input_test(n1) VALUES (''); + ^ +INSERT INTO num_input_test(n1) VALUES (' N aN '); +ERROR: invalid input syntax for type numeric: " N aN " +LINE 1: INSERT INTO num_input_test(n1) VALUES (' N aN '); + ^ +INSERT INTO num_input_test(n1) VALUES ('+NaN'); +ERROR: invalid input syntax for type numeric: "+NaN" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('+NaN'); + ^ +INSERT INTO num_input_test(n1) VALUES ('-NaN'); +ERROR: invalid input syntax for type numeric: "-NaN" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('-NaN'); + ^ +INSERT INTO num_input_test(n1) VALUES ('+ infinity'); +ERROR: invalid input syntax for type numeric: "+ infinity" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('+ infinity'); + ^ +INSERT INTO num_input_test(n1) VALUES ('_123'); +ERROR: invalid input syntax for type numeric: "_123" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('_123'); + ^ +INSERT INTO num_input_test(n1) VALUES ('123_'); +ERROR: invalid input syntax for type numeric: "123_" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('123_'); + ^ +INSERT INTO num_input_test(n1) VALUES ('12__34'); +ERROR: invalid input syntax for type numeric: "12__34" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('12__34'); + ^ +INSERT INTO num_input_test(n1) VALUES ('123_.456'); +ERROR: invalid input syntax for type numeric: "123_.456" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('123_.456'); + ^ +INSERT INTO num_input_test(n1) VALUES ('123._456'); +ERROR: invalid input syntax for type numeric: "123._456" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('123._456'); + ^ +INSERT INTO num_input_test(n1) VALUES ('1.2e_34'); +ERROR: invalid input syntax for type numeric: "1.2e_34" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('1.2e_34'); + ^ +INSERT INTO num_input_test(n1) VALUES ('1.2e34_'); +ERROR: invalid input syntax for type numeric: "1.2e34_" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('1.2e34_'); + ^ +INSERT INTO num_input_test(n1) VALUES ('1.2e3__4'); +ERROR: invalid input syntax for type numeric: "1.2e3__4" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('1.2e3__4'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0b1112'); +ERROR: invalid input syntax for type numeric: "0b1112" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0b1112'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0c1112'); +ERROR: invalid input syntax for type numeric: "0c1112" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0c1112'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0o12345678'); +ERROR: invalid input syntax for type numeric: "0o12345678" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0o12345678'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0x1eg'); +ERROR: invalid input syntax for type numeric: "0x1eg" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x1eg'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0x12.34'); +ERROR: invalid input syntax for type numeric: "0x12.34" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x12.34'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0x__1234'); +ERROR: invalid input syntax for type numeric: "0x__1234" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x__1234'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0x1234_'); +ERROR: invalid input syntax for type numeric: "0x1234_" +LINE 1: INSERT INTO num_input_test(n1) VALUES ('0x1234_'); + ^ +INSERT INTO num_input_test(n1) VALUES ('0x12__34'); +ERROR: invalid input syntax for type numeric: "0x12__34" +LINE 1: INSERT 
INTO num_input_test(n1) VALUES ('0x12__34'); + ^ +SELECT * FROM num_input_test; + n1 +----------------------------------- + 123 + 3245874 + -93853 + 555.50 + -555.50 + NaN + NaN + Infinity + Infinity + -Infinity + Infinity + Infinity + -Infinity + 12000000000 + 12000.123456 + 2.3000000000 + 1.23 + 12.3 + 299792458 + -12345678901234567890 + 9999999999 + -12345678900987654321 + -3735928559 + 987654321234567898765432123456789 +(24 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('34.5', 'numeric'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('34xyz', 'numeric'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('1e400000', 'numeric'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1e400000', 'numeric'); + message | detail | hint | sql_error_code +--------------------------------+--------+------+---------------- + value overflows numeric format | | | 22003 +(1 row) + +SELECT pg_input_is_valid('1234.567', 'numeric(8,4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('1234.567', 'numeric(7,4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1234.567', 'numeric(7,4)'); + message | detail | hint | sql_error_code +------------------------+-----------------------------------------------------------------------------------+------+---------------- + numeric field overflow | A field with precision 7, scale 4 must round to an absolute value less than 10^3. | | 22003 +(1 row) + +SELECT * FROM pg_input_error_info('0x1234.567', 'numeric'); + message | detail | hint | sql_error_code +-----------------------------------------------------+--------+------+---------------- + invalid input syntax for type numeric: "0x1234.567" | | | 22P02 +(1 row) + +-- +-- Test precision and scale typemods +-- +CREATE TABLE num_typemod_test ( + millions numeric(3, -6), + thousands numeric(3, -3), + units numeric(3, 0), + thousandths numeric(3, 3), + millionths numeric(3, 6) +); +\d num_typemod_test + Table "public.num_typemod_test" + Column | Type | Collation | Nullable | Default +-------------+---------------+-----------+----------+--------- + millions | numeric(3,-6) | | | + thousands | numeric(3,-3) | | | + units | numeric(3,0) | | | + thousandths | numeric(3,3) | | | + millionths | numeric(3,6) | | | + +-- rounding of valid inputs +INSERT INTO num_typemod_test VALUES (123456, 123, 0.123, 0.000123, 0.000000123); +INSERT INTO num_typemod_test VALUES (654321, 654, 0.654, 0.000654, 0.000000654); +INSERT INTO num_typemod_test VALUES (2345678, 2345, 2.345, 0.002345, 0.000002345); +INSERT INTO num_typemod_test VALUES (7654321, 7654, 7.654, 0.007654, 0.000007654); +INSERT INTO num_typemod_test VALUES (12345678, 12345, 12.345, 0.012345, 0.000012345); +INSERT INTO num_typemod_test VALUES (87654321, 87654, 87.654, 0.087654, 0.000087654); +INSERT INTO num_typemod_test VALUES (123456789, 123456, 123.456, 0.123456, 0.000123456); +INSERT INTO num_typemod_test VALUES (987654321, 987654, 987.654, 0.987654, 0.000987654); +INSERT INTO num_typemod_test VALUES ('NaN', 'NaN', 'NaN', 'NaN', 'NaN'); +SELECT scale(millions), * FROM num_typemod_test ORDER BY millions; + scale | millions | thousands | units | thousandths | millionths +-------+-----------+-----------+-------+-------------+------------ + 0 | 0 | 0 | 0 | 0.000 | 0.000000 + 0 | 1000000 | 1000 | 1 | 0.001 | 0.000001 + 0 | 2000000 | 2000 | 2 | 0.002 | 0.000002 + 0 | 
8000000 | 8000 | 8 | 0.008 | 0.000008 + 0 | 12000000 | 12000 | 12 | 0.012 | 0.000012 + 0 | 88000000 | 88000 | 88 | 0.088 | 0.000088 + 0 | 123000000 | 123000 | 123 | 0.123 | 0.000123 + 0 | 988000000 | 988000 | 988 | 0.988 | 0.000988 + | NaN | NaN | NaN | NaN | NaN +(9 rows) + +-- invalid inputs +INSERT INTO num_typemod_test (millions) VALUES ('inf'); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale -6 cannot hold an infinite value. +INSERT INTO num_typemod_test (millions) VALUES (999500000); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale -6 must round to an absolute value less than 10^9. +INSERT INTO num_typemod_test (thousands) VALUES (999500); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale -3 must round to an absolute value less than 10^6. +INSERT INTO num_typemod_test (units) VALUES (999.5); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale 0 must round to an absolute value less than 10^3. +INSERT INTO num_typemod_test (thousandths) VALUES (0.9995); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale 3 must round to an absolute value less than 1. +INSERT INTO num_typemod_test (millionths) VALUES (0.0009995); +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale 6 must round to an absolute value less than 10^-3. +-- +-- Test some corner cases for multiplication +-- +select 4790999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999; + ?column? +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 47909999999999999999999999999999999999999999999999999999999999999999999999999999999999985209000000000000000000000000000000000000000000000000000000000000000000000000000000000001 +(1 row) + +select 4789999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999; + ?column? +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 47899999999999999999999999999999999999999999999999999999999999999999999999999999999999985210000000000000000000000000000000000000000000000000000000000000000000000000000000000001 +(1 row) + +select 4770999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999; + ?column? +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 47709999999999999999999999999999999999999999999999999999999999999999999999999999999999985229000000000000000000000000000000000000000000000000000000000000000000000000000000000001 +(1 row) + +select 4769999999999999999999999999999999999999999999999999999999999999999999999999999999999999 * 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999; + ?column? 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 47699999999999999999999999999999999999999999999999999999999999999999999999999999999999985230000000000000000000000000000000000000000000000000000000000000000000000000000000000001 +(1 row) + +select trim_scale((0.1 - 2e-16383) * (0.1 - 3e-16383)); + trim_scale +------------ + 0.01 +(1 row) + +-- +-- Test some corner cases for division +-- +select 999999999999999999999::numeric/1000000000000000000000; + ?column? +------------------------ + 1.00000000000000000000 +(1 row) + +select div(999999999999999999999::numeric,1000000000000000000000); + div +----- + 0 +(1 row) + +select mod(999999999999999999999::numeric,1000000000000000000000); + mod +----------------------- + 999999999999999999999 +(1 row) + +select div(-9999999999999999999999::numeric,1000000000000000000000); + div +----- + -9 +(1 row) + +select mod(-9999999999999999999999::numeric,1000000000000000000000); + mod +------------------------ + -999999999999999999999 +(1 row) + +select div(-9999999999999999999999::numeric,1000000000000000000000)*1000000000000000000000 + mod(-9999999999999999999999::numeric,1000000000000000000000); + ?column? +------------------------- + -9999999999999999999999 +(1 row) + +select mod (70.0,70) ; + mod +----- + 0.0 +(1 row) + +select div (70.0,70) ; + div +----- + 1 +(1 row) + +select 70.0 / 70 ; + ?column? +------------------------ + 1.00000000000000000000 +(1 row) + +select 12345678901234567890 % 123; + ?column? +---------- + 78 +(1 row) + +select 12345678901234567890 / 123; + ?column? +-------------------- + 100371373180768845 +(1 row) + +select div(12345678901234567890, 123); + div +-------------------- + 100371373180768844 +(1 row) + +select div(12345678901234567890, 123) * 123 + 12345678901234567890 % 123; + ?column? 
+---------------------- + 12345678901234567890 +(1 row) + +-- +-- Test some corner cases for square root +-- +select sqrt(1.000000000000003::numeric); + sqrt +------------------- + 1.000000000000001 +(1 row) + +select sqrt(1.000000000000004::numeric); + sqrt +------------------- + 1.000000000000002 +(1 row) + +select sqrt(96627521408608.56340355805::numeric); + sqrt +--------------------- + 9829929.87811248648 +(1 row) + +select sqrt(96627521408608.56340355806::numeric); + sqrt +--------------------- + 9829929.87811248649 +(1 row) + +select sqrt(515549506212297735.073688290367::numeric); + sqrt +------------------------ + 718017761.766585921184 +(1 row) + +select sqrt(515549506212297735.073688290368::numeric); + sqrt +------------------------ + 718017761.766585921185 +(1 row) + +select sqrt(8015491789940783531003294973900306::numeric); + sqrt +------------------- + 89529278953540017 +(1 row) + +select sqrt(8015491789940783531003294973900307::numeric); + sqrt +------------------- + 89529278953540018 +(1 row) + +-- +-- Test code path for raising to integer powers +-- +select 10.0 ^ -2147483648 as rounds_to_zero; + rounds_to_zero +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(1 row) + +select 10.0 ^ -2147483647 as rounds_to_zero; + rounds_to_zero 
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(1 row) + +select 10.0 ^ 2147483647 as overflows; +ERROR: value overflows numeric format +select 117743296169.0 ^ 1000000000 as overflows; +ERROR: value overflows numeric format +-- cases that used to return inaccurate results +select 3.789 ^ 21.0000000000000000; + ?column? +-------------------------------- + 1409343026052.8716016316022141 +(1 row) + +select 3.789 ^ 35.0000000000000000; + ?column? +---------------------------------------- + 177158169650516670809.3820586142670135 +(1 row) + +select 1.2 ^ 345; + ?column? +-------------------------------- + 2077446682327378559843444695.6 +(1 row) + +select 0.12 ^ (-20); + ?column? +------------------------ + 2608405330458882702.55 +(1 row) + +select 1.000000000123 ^ (-2147483648); + ?column? +-------------------- + 0.7678656556403084 +(1 row) + +select coalesce(nullif(0.9999999999 ^ 23300000000000, 0), 0) as rounds_to_zero; + rounds_to_zero +---------------- + 0 +(1 row) + +select round(((1 - 1.500012345678e-1000) ^ 1.45e1003) * 1e1000); + round +---------------------------------------------------------- + 25218976308958387188077465658068501556514992509509282366 +(1 row) + +-- cases that used to error out +select 0.12 ^ (-25); + ?column? +----------------------------- + 104825960103961013959336.50 +(1 row) + +select 0.5678 ^ (-85); + ?column? 
+---------------------------- + 782333637740774446257.7719 +(1 row) + +select coalesce(nullif(0.9999999999 ^ 70000000000000, 0), 0) as underflows; + underflows +------------ + 0 +(1 row) + +-- negative base to integer powers +select (-1.0) ^ 2147483646; + ?column? +-------------------- + 1.0000000000000000 +(1 row) + +select (-1.0) ^ 2147483647; + ?column? +--------------------- + -1.0000000000000000 +(1 row) + +select (-1.0) ^ 2147483648; + ?column? +-------------------- + 1.0000000000000000 +(1 row) + +select (-1.0) ^ 1000000000000000; + ?column? +-------------------- + 1.0000000000000000 +(1 row) + +select (-1.0) ^ 1000000000000001; + ?column? +--------------------- + -1.0000000000000000 +(1 row) + +-- integer powers of 10 +select n, 10.0 ^ n as "10^n", (10.0 ^ n) * (10.0 ^ (-n)) = 1 as ok +from generate_series(-20, 20) n; + n | 10^n | ok +-----+----------------------------------------+---- + -20 | 0.000000000000000000010000000000000000 | t + -19 | 0.00000000000000000010000000000000000 | t + -18 | 0.0000000000000000010000000000000000 | t + -17 | 0.000000000000000010000000000000000 | t + -16 | 0.00000000000000010000000000000000 | t + -15 | 0.0000000000000010000000000000000 | t + -14 | 0.000000000000010000000000000000 | t + -13 | 0.00000000000010000000000000000 | t + -12 | 0.0000000000010000000000000000 | t + -11 | 0.000000000010000000000000000 | t + -10 | 0.00000000010000000000000000 | t + -9 | 0.0000000010000000000000000 | t + -8 | 0.000000010000000000000000 | t + -7 | 0.00000010000000000000000 | t + -6 | 0.0000010000000000000000 | t + -5 | 0.000010000000000000000 | t + -4 | 0.00010000000000000000 | t + -3 | 0.0010000000000000000 | t + -2 | 0.010000000000000000 | t + -1 | 0.10000000000000000 | t + 0 | 1.0000000000000000 | t + 1 | 10.000000000000000 | t + 2 | 100.00000000000000 | t + 3 | 1000.0000000000000 | t + 4 | 10000.000000000000 | t + 5 | 100000.00000000000 | t + 6 | 1000000.0000000000 | t + 7 | 10000000.000000000 | t + 8 | 100000000.00000000 | t + 9 | 1000000000.0000000 | t + 10 | 10000000000.000000 | t + 11 | 100000000000.00000 | t + 12 | 1000000000000.0000 | t + 13 | 10000000000000.000 | t + 14 | 100000000000000.00 | t + 15 | 1000000000000000.0 | t + 16 | 10000000000000000.0 | t + 17 | 100000000000000000.0 | t + 18 | 1000000000000000000.0 | t + 19 | 10000000000000000000.0 | t + 20 | 100000000000000000000.0 | t +(41 rows) + +-- +-- Tests for raising to non-integer powers +-- +-- special cases +select 0.0 ^ 0.0; + ?column? +-------------------- + 1.0000000000000000 +(1 row) + +select (-12.34) ^ 0.0; + ?column? +-------------------- + 1.0000000000000000 +(1 row) + +select 12.34 ^ 0.0; + ?column? +-------------------- + 1.0000000000000000 +(1 row) + +select 0.0 ^ 12.34; + ?column? +-------------------- + 0.0000000000000000 +(1 row) + +-- NaNs +select 'NaN'::numeric ^ 'NaN'::numeric; + ?column? +---------- + NaN +(1 row) + +select 'NaN'::numeric ^ 0; + ?column? +---------- + 1 +(1 row) + +select 'NaN'::numeric ^ 1; + ?column? +---------- + NaN +(1 row) + +select 0 ^ 'NaN'::numeric; + ?column? +---------- + NaN +(1 row) + +select 1 ^ 'NaN'::numeric; + ?column? +---------- + 1 +(1 row) + +-- invalid inputs +select 0.0 ^ (-12.34); +ERROR: zero raised to a negative power is undefined +select (-12.34) ^ 1.2; +ERROR: a negative number raised to a non-integer power yields a complex result +-- cases that used to generate inaccurate results +select 32.1 ^ 9.8; + ?column? +-------------------- + 580429286790711.10 +(1 row) + +select 32.1 ^ (-9.8); + ?column? 
+---------------------------------- + 0.000000000000001722862754788209 +(1 row) + +select 12.3 ^ 45.6; + ?column? +------------------------------------------------------ + 50081010321492803393171165777624533697036806969694.9 +(1 row) + +select 12.3 ^ (-45.6); + ?column? +--------------------------------------------------------------------- + 0.00000000000000000000000000000000000000000000000001996764828785491 +(1 row) + +-- big test +select 1.234 ^ 5678; + ?column? +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 307239295662090741644584872593956173493568238595074141254349565406661439636598896798876823220904084953233015553994854875890890858118656468658643918169805277399402542281777901029346337707622181574346585989613344285010764501017625366742865066948856161360224801370482171458030533346309750557140549621313515752078638620714732831815297168231790779296290266207315344008883935010274044001522606235576584215999260117523114297033944018699691024106823438431754073086813382242140602291215149759520833200152654884259619588924545324.597 +(1 row) + +-- +-- Tests for EXP() +-- +-- special cases +select exp(0.0); + exp +-------------------- + 1.0000000000000000 +(1 row) + +select exp(1.0); + exp +-------------------- + 2.7182818284590452 +(1 row) + +select exp(1.0::numeric(71,70)); + exp +-------------------------------------------------------------------------- + 2.7182818284590452353602874713526624977572470936999595749669676277240766 +(1 row) + +select exp('nan'::numeric); + exp +----- + NaN +(1 row) + +select exp('inf'::numeric); + exp +---------- + Infinity +(1 row) + +select exp('-inf'::numeric); + exp +----- + 0 +(1 row) + +select coalesce(nullif(exp(-5000::numeric), 0), 0) as rounds_to_zero; + rounds_to_zero +---------------- + 0 +(1 row) + +select coalesce(nullif(exp(-10000::numeric), 0), 0) as underflows; + underflows +------------ + 0 +(1 row) + +-- cases that used to generate inaccurate results +select exp(32.999); + exp +--------------------- + 214429043492155.053 +(1 row) + +select exp(-32.999); + exp +---------------------------------- + 0.000000000000004663547361468248 +(1 row) + +select exp(123.456); + exp +------------------------------------------------------------ + 413294435277809344957685441227343146614594393746575438.725 +(1 row) + +select exp(-123.456); + exp +------------------------------------------------------------------------- + 0.000000000000000000000000000000000000000000000000000002419582541264601 +(1 row) + +-- big test +select exp(1234.5678); + exp +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
146549072930959479983482138503979804217622199675223653966270157446954995433819741094410764947112047906012815540251009949604426069672532417736057033099274204598385314594846509975629046864798765888104789074984927709616261452461385220475510438783429612447831614003668421849727379202555580791042606170523016207262965336641214601082882495255771621327088265411334088968112458492660609809762865582162764292604697957813514621259353683899630997077707406305730694385703091201347848855199354307506425820147289848677003277208302716466011827836279231.9667 +(1 row) + +-- +-- Tests for generate_series +-- +select * from generate_series(0.0::numeric, 4.0::numeric); + generate_series +----------------- + 0.0 + 1.0 + 2.0 + 3.0 + 4.0 +(5 rows) + +select * from generate_series(0.1::numeric, 4.0::numeric, 1.3::numeric); + generate_series +----------------- + 0.1 + 1.4 + 2.7 + 4.0 +(4 rows) + +select * from generate_series(4.0::numeric, -1.5::numeric, -2.2::numeric); + generate_series +----------------- + 4.0 + 1.8 + -0.4 +(3 rows) + +-- Trigger errors +select * from generate_series(-100::numeric, 100::numeric, 0::numeric); +ERROR: step size cannot equal zero +select * from generate_series(-100::numeric, 100::numeric, 'nan'::numeric); +ERROR: step size cannot be NaN +select * from generate_series('nan'::numeric, 100::numeric, 10::numeric); +ERROR: start value cannot be NaN +select * from generate_series(0::numeric, 'nan'::numeric, 10::numeric); +ERROR: stop value cannot be NaN +select * from generate_series('inf'::numeric, 'inf'::numeric, 10::numeric); +ERROR: start value cannot be infinity +select * from generate_series(0::numeric, 'inf'::numeric, 10::numeric); +ERROR: stop value cannot be infinity +select * from generate_series(0::numeric, '42'::numeric, '-inf'::numeric); +ERROR: step size cannot be infinity +-- Checks maximum, output is truncated +select (i / (10::numeric ^ 131071))::numeric(1,0) + from generate_series(6 * (10::numeric ^ 131071), + 9 * (10::numeric ^ 131071), + 10::numeric ^ 131071) as a(i); + numeric +--------- + 6 + 7 + 8 + 9 +(4 rows) + +-- Check usage with variables +select * from generate_series(1::numeric, 3::numeric) i, generate_series(i,3) j; + i | j +---+--- + 1 | 1 + 1 | 2 + 1 | 3 + 2 | 2 + 2 | 3 + 3 | 3 +(6 rows) + +select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,i) j; + i | j +---+--- + 1 | 1 + 2 | 1 + 2 | 2 + 3 | 1 + 3 | 2 + 3 | 3 +(6 rows) + +select * from generate_series(1::numeric, 3::numeric) i, generate_series(1,5,i) j; + i | j +---+--- + 1 | 1 + 1 | 2 + 1 | 3 + 1 | 4 + 1 | 5 + 2 | 1 + 2 | 3 + 2 | 5 + 3 | 1 + 3 | 4 +(10 rows) + +-- +-- Tests for LN() +-- +-- Invalid inputs +select ln(-12.34); +ERROR: cannot take logarithm of a negative number +select ln(0.0); +ERROR: cannot take logarithm of zero +-- Some random tests +select ln(1.2345678e-28); + ln +----------------------------------------- + -64.26166165451762991204894255882820859 +(1 row) + +select ln(0.0456789); + ln +--------------------- + -3.0861187944847439 +(1 row) + +select ln(0.349873948359354029493948309745709580730482050975); + ln +----------------------------------------------------- + -1.050182336912082775693991697979750253056317885460 +(1 row) + +select ln(0.99949452); + ln +------------------------- + -0.00050560779808326467 +(1 row) + +select ln(1.00049687395); + ln +------------------------ + 0.00049675054901370394 +(1 row) + +select ln(1234.567890123456789); + ln +-------------------- + 7.1184763012977896 +(1 row) + +select ln(5.80397490724e5); + ln +-------------------- + 
13.271468476626518 +(1 row) + +select ln(9.342536355e34); + ln +-------------------- + 80.522470935524187 +(1 row) + +-- +-- Tests for LOG() (base 10) +-- +-- invalid inputs +select log(-12.34); +ERROR: cannot take logarithm of a negative number +CONTEXT: SQL function "log" statement 1 +select log(0.0); +ERROR: cannot take logarithm of zero +CONTEXT: SQL function "log" statement 1 +-- some random tests +select log(1.234567e-89); + log +----------------------------------------------------------------------------------------------------- + -88.90848533591373725637496492944925187293052336306443143312825869985819779294142441287021741054275 +(1 row) + +select log(3.4634998359873254962349856073435545); + log +-------------------------------------- + 0.5395151714070134409152404011959981 +(1 row) + +select log(9.999999999999999999); + log +---------------------- + 1.000000000000000000 +(1 row) + +select log(10.00000000000000000); + log +--------------------- + 1.00000000000000000 +(1 row) + +select log(10.00000000000000001); + log +--------------------- + 1.00000000000000000 +(1 row) + +select log(590489.45235237); + log +------------------- + 5.771212144411727 +(1 row) + +-- +-- Tests for LOG() (arbitrary base) +-- +-- invalid inputs +select log(-12.34, 56.78); +ERROR: cannot take logarithm of a negative number +select log(-12.34, -56.78); +ERROR: cannot take logarithm of a negative number +select log(12.34, -56.78); +ERROR: cannot take logarithm of a negative number +select log(0.0, 12.34); +ERROR: cannot take logarithm of zero +select log(12.34, 0.0); +ERROR: cannot take logarithm of zero +select log(1.0, 12.34); +ERROR: division by zero +-- some random tests +select log(1.23e-89, 6.4689e45); + log +------------------------------------------------------------------------------------------------ + -0.5152489207781856983977054971756484879653568168479201885425588841094788842469115325262329756 +(1 row) + +select log(0.99923, 4.58934e34); + log +--------------------- + -103611.55579544132 +(1 row) + +select log(1.000016, 8.452010e18); + log +-------------------- + 2723830.2877097365 +(1 row) + +select log(3.1954752e47, 9.4792021e-73); + log +------------------------------------------------------------------------------------- + -1.51613372350688302142917386143459361608600157692779164475351842333265418126982165 +(1 row) + +-- +-- Tests for scale() +-- +select scale(numeric 'NaN'); + scale +------- + +(1 row) + +select scale(numeric 'inf'); + scale +------- + +(1 row) + +select scale(NULL::numeric); + scale +------- + +(1 row) + +select scale(1.12); + scale +------- + 2 +(1 row) + +select scale(0); + scale +------- + 0 +(1 row) + +select scale(0.00); + scale +------- + 2 +(1 row) + +select scale(1.12345); + scale +------- + 5 +(1 row) + +select scale(110123.12475871856128); + scale +------- + 14 +(1 row) + +select scale(-1123.12471856128); + scale +------- + 11 +(1 row) + +select scale(-13.000000000000000); + scale +------- + 15 +(1 row) + +-- +-- Tests for min_scale() +-- +select min_scale(numeric 'NaN') is NULL; -- should be true + ?column? +---------- + t +(1 row) + +select min_scale(numeric 'inf') is NULL; -- should be true + ?column? 
+---------- + t +(1 row) + +select min_scale(0); -- no digits + min_scale +----------- + 0 +(1 row) + +select min_scale(0.00); -- no digits again + min_scale +----------- + 0 +(1 row) + +select min_scale(1.0); -- no scale + min_scale +----------- + 0 +(1 row) + +select min_scale(1.1); -- scale 1 + min_scale +----------- + 1 +(1 row) + +select min_scale(1.12); -- scale 2 + min_scale +----------- + 2 +(1 row) + +select min_scale(1.123); -- scale 3 + min_scale +----------- + 3 +(1 row) + +select min_scale(1.1234); -- scale 4, filled digit + min_scale +----------- + 4 +(1 row) + +select min_scale(1.12345); -- scale 5, 2 NDIGITS + min_scale +----------- + 5 +(1 row) + +select min_scale(1.1000); -- 1 pos in NDIGITS + min_scale +----------- + 1 +(1 row) + +select min_scale(1e100); -- very big number + min_scale +----------- + 0 +(1 row) + +-- +-- Tests for trim_scale() +-- +select trim_scale(numeric 'NaN'); + trim_scale +------------ + NaN +(1 row) + +select trim_scale(numeric 'inf'); + trim_scale +------------ + Infinity +(1 row) + +select trim_scale(1.120); + trim_scale +------------ + 1.12 +(1 row) + +select trim_scale(0); + trim_scale +------------ + 0 +(1 row) + +select trim_scale(0.00); + trim_scale +------------ + 0 +(1 row) + +select trim_scale(1.1234500); + trim_scale +------------ + 1.12345 +(1 row) + +select trim_scale(110123.12475871856128000); + trim_scale +----------------------- + 110123.12475871856128 +(1 row) + +select trim_scale(-1123.124718561280000000); + trim_scale +------------------- + -1123.12471856128 +(1 row) + +select trim_scale(-13.00000000000000000000); + trim_scale +------------ + -13 +(1 row) + +select trim_scale(1e100); + trim_scale +------------------------------------------------------------------------------------------------------- + 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(1 row) + +-- +-- Tests for SUM() +-- +-- cases that need carry propagation +SELECT SUM(9999::numeric) FROM generate_series(1, 100000); + sum +----------- + 999900000 +(1 row) + +SELECT SUM((-9999)::numeric) FROM generate_series(1, 100000); + sum +------------ + -999900000 +(1 row) + +-- +-- Tests for VARIANCE() +-- +CREATE TABLE num_variance (a numeric); +INSERT INTO num_variance VALUES (0); +INSERT INTO num_variance VALUES (3e-500); +INSERT INTO num_variance VALUES (-3e-500); +INSERT INTO num_variance VALUES (4e-500 - 1e-16383); +INSERT INTO num_variance VALUES (-4e-500 + 1e-16383); +-- variance is just under 12.5e-1000 and so should round down to 12e-1000 +SELECT trim_scale(variance(a) * 1e1000) FROM num_variance; + trim_scale +------------ + 12 +(1 row) + +-- check that parallel execution produces the same result +BEGIN; +ALTER TABLE num_variance SET (parallel_workers = 4); +SET LOCAL parallel_setup_cost = 0; +SET LOCAL max_parallel_workers_per_gather = 4; +SELECT trim_scale(variance(a) * 1e1000) FROM num_variance; + trim_scale +------------ + 12 +(1 row) + +ROLLBACK; +-- case where sum of squares would overflow but variance does not +DELETE FROM num_variance; +INSERT INTO num_variance SELECT 9e131071 + x FROM generate_series(1, 5) x; +SELECT variance(a) FROM num_variance; + variance +-------------------- + 2.5000000000000000 +(1 row) + +-- check that parallel execution produces the same result +BEGIN; +ALTER TABLE num_variance SET (parallel_workers = 4); +SET LOCAL parallel_setup_cost = 0; +SET LOCAL max_parallel_workers_per_gather = 4; +SELECT variance(a) FROM num_variance; + variance +-------------------- + 
2.5000000000000000 +(1 row) + +ROLLBACK; +DROP TABLE num_variance; +-- +-- Tests for GCD() +-- +SELECT a, b, gcd(a, b), gcd(a, -b), gcd(-b, a), gcd(-b, -a) +FROM (VALUES (0::numeric, 0::numeric), + (0::numeric, numeric 'NaN'), + (0::numeric, 46375::numeric), + (433125::numeric, 46375::numeric), + (43312.5::numeric, 4637.5::numeric), + (4331.250::numeric, 463.75000::numeric), + ('inf', '0'), + ('inf', '42'), + ('inf', 'inf') + ) AS v(a, b); + a | b | gcd | gcd | gcd | gcd +----------+-----------+---------+---------+---------+--------- + 0 | 0 | 0 | 0 | 0 | 0 + 0 | NaN | NaN | NaN | NaN | NaN + 0 | 46375 | 46375 | 46375 | 46375 | 46375 + 433125 | 46375 | 875 | 875 | 875 | 875 + 43312.5 | 4637.5 | 87.5 | 87.5 | 87.5 | 87.5 + 4331.250 | 463.75000 | 8.75000 | 8.75000 | 8.75000 | 8.75000 + Infinity | 0 | NaN | NaN | NaN | NaN + Infinity | 42 | NaN | NaN | NaN | NaN + Infinity | Infinity | NaN | NaN | NaN | NaN +(9 rows) + +-- +-- Tests for LCM() +-- +SELECT a,b, lcm(a, b), lcm(a, -b), lcm(-b, a), lcm(-b, -a) +FROM (VALUES (0::numeric, 0::numeric), + (0::numeric, numeric 'NaN'), + (0::numeric, 13272::numeric), + (13272::numeric, 13272::numeric), + (423282::numeric, 13272::numeric), + (42328.2::numeric, 1327.2::numeric), + (4232.820::numeric, 132.72000::numeric), + ('inf', '0'), + ('inf', '42'), + ('inf', 'inf') + ) AS v(a, b); + a | b | lcm | lcm | lcm | lcm +----------+-----------+--------------+--------------+--------------+-------------- + 0 | 0 | 0 | 0 | 0 | 0 + 0 | NaN | NaN | NaN | NaN | NaN + 0 | 13272 | 0 | 0 | 0 | 0 + 13272 | 13272 | 13272 | 13272 | 13272 | 13272 + 423282 | 13272 | 11851896 | 11851896 | 11851896 | 11851896 + 42328.2 | 1327.2 | 1185189.6 | 1185189.6 | 1185189.6 | 1185189.6 + 4232.820 | 132.72000 | 118518.96000 | 118518.96000 | 118518.96000 | 118518.96000 + Infinity | 0 | NaN | NaN | NaN | NaN + Infinity | 42 | NaN | NaN | NaN | NaN + Infinity | Infinity | NaN | NaN | NaN | NaN +(10 rows) + +SELECT lcm(9999 * (10::numeric)^131068 + (10::numeric^131068 - 1), 2); -- overflow +ERROR: value overflows numeric format +-- +-- Tests for factorial +-- +SELECT factorial(4); + factorial +----------- + 24 +(1 row) + +SELECT factorial(15); + factorial +--------------- + 1307674368000 +(1 row) + +SELECT factorial(100000); +ERROR: value overflows numeric format +SELECT factorial(0); + factorial +----------- + 1 +(1 row) + +SELECT factorial(-4); +ERROR: factorial of a negative number is undefined +-- +-- Tests for pg_lsn() +-- +SELECT pg_lsn(23783416::numeric); + pg_lsn +----------- + 0/16AE7F8 +(1 row) + +SELECT pg_lsn(0::numeric); + pg_lsn +-------- + 0/0 +(1 row) + +SELECT pg_lsn(18446744073709551615::numeric); + pg_lsn +------------------- + FFFFFFFF/FFFFFFFF +(1 row) + +SELECT pg_lsn(-1::numeric); +ERROR: pg_lsn out of range +SELECT pg_lsn(18446744073709551616::numeric); +ERROR: pg_lsn out of range +SELECT pg_lsn('NaN'::numeric); +ERROR: cannot convert NaN to pg_lsn diff --git a/src/test/regress/expected/numeric_big.out b/src/test/regress/expected/numeric_big.out new file mode 100644 index 0000000..12ff35b --- /dev/null +++ b/src/test/regress/expected/numeric_big.out @@ -0,0 +1,2082 @@ +-- ****************************** +-- * Test suite for the Postgres NUMERIC data type +-- ****************************** +-- Must drop tables created by short numeric test. 
+DROP TABLE num_data; +DROP TABLE num_exp_add; +DROP TABLE num_exp_sub; +DROP TABLE num_exp_div; +DROP TABLE num_exp_mul; +DROP TABLE num_exp_sqrt; +DROP TABLE num_exp_ln; +DROP TABLE num_exp_log10; +DROP TABLE num_exp_power_10_ln; +DROP TABLE num_result; +CREATE TABLE num_data (id int4, val numeric(1000,800)); +CREATE TABLE num_exp_add (id1 int4, id2 int4, expected numeric(1000,800)); +CREATE TABLE num_exp_sub (id1 int4, id2 int4, expected numeric(1000,800)); +CREATE TABLE num_exp_div (id1 int4, id2 int4, expected numeric(1000,800)); +CREATE TABLE num_exp_mul (id1 int4, id2 int4, expected numeric(1000,800)); +CREATE TABLE num_exp_sqrt (id int4, expected numeric(1000,800)); +CREATE TABLE num_exp_ln (id int4, expected numeric(1000,800)); +CREATE TABLE num_exp_log10 (id int4, expected numeric(1000,800)); +CREATE TABLE num_exp_power_10_ln (id int4, expected numeric(1000,800)); +CREATE TABLE num_result (id1 int4, id2 int4, result numeric(1000,800)); +-- ****************************** +-- * The following EXPECTED results are computed by bc(1) +-- * with a scale of 1000 +-- ****************************** +BEGIN TRANSACTION; +INSERT INTO num_exp_add VALUES (0,0,'0'); +INSERT INTO num_exp_sub VALUES (0,0,'0'); +INSERT INTO num_exp_mul VALUES (0,0,'0'); +INSERT INTO num_exp_div VALUES (0,0,'NaN'); +INSERT INTO num_exp_add VALUES (0,1,'85243.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES (0,1,'-85243.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES (0,1,'0'); +INSERT INTO num_exp_div VALUES (0,1,'0'); +INSERT INTO num_exp_add VALUES (0,2,'-994877526002806872754342148749241.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES (0,2,'994877526002806872754342148749241.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (0,2,'0'); +INSERT INTO num_exp_div VALUES (0,2,'0'); +INSERT INTO num_exp_add VALUES 
(0,3,'-60302029489319384367663884408085757480.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (0,3,'60302029489319384367663884408085757480.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (0,3,'0'); +INSERT INTO num_exp_div VALUES (0,3,'0'); +INSERT INTO num_exp_add VALUES (0,4,'5329378275943663322215245.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (0,4,'-5329378275943663322215245.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (0,4,'0'); +INSERT INTO num_exp_div VALUES (0,4,'0'); +INSERT INTO num_exp_add VALUES (0,5,'-652755630.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES 
(0,5,'652755630.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES (0,5,'0'); +INSERT INTO num_exp_div VALUES (0,5,'0'); +INSERT INTO num_exp_add VALUES (0,6,'.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_exp_sub VALUES (0,6,'-.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_exp_mul VALUES (0,6,'0'); +INSERT INTO num_exp_div VALUES (0,6,'0'); +INSERT INTO num_exp_add VALUES (0,7,'-818934540071845742'); +INSERT INTO num_exp_sub VALUES (0,7,'818934540071845742'); +INSERT INTO num_exp_mul VALUES (0,7,'0'); +INSERT INTO num_exp_div VALUES (0,7,'0'); +INSERT INTO num_exp_add VALUES (0,8,'8496986223.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_exp_sub VALUES (0,8,'-8496986223.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_exp_mul VALUES (0,8,'0'); +INSERT INTO num_exp_div VALUES (0,8,'0'); +INSERT INTO num_exp_add VALUES (0,9,'54863480.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_sub VALUES (0,9,'-54863480.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (0,9,'0'); +INSERT INTO num_exp_div VALUES (0,9,'0'); +INSERT INTO num_exp_add VALUES (1,0,'85243.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES 
(1,0,'85243.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES (1,0,'0'); +INSERT INTO num_exp_div VALUES (1,0,'NaN'); +INSERT INTO num_exp_add VALUES (1,1,'170486.79080049955252152479695727201571965474311716541919780029226071455736587237347615553466832461907447637054203186991790701615551214692555785671028648640897898741246882118067609728317430043806625387779037980513762118868084887015059202190301421555269486602797852927777567694581746398790609996101506730430853942556475840126871131898407356048450541232591147357021858041662012293323494543567675306406079659294204054863522259037763051870433216859794083051717080761509518250300466106939998045710070'); +INSERT INTO num_exp_sub VALUES (1,1,'0'); +INSERT INTO num_exp_mul VALUES (1,1,'7266436459.363324713115467666113895787027372854351303425444968800459979742082292257107107767894843498525848597439323325297125474674300428669958003640228730876886174255457103020291514229439701871032118057857763809224712818579091741996335014138185389554630910658876423205103697147288306070059640369158894028731728589073730895396494400175420670713113234800826523252075036892246807434088405522834549449664122407363485486902219500109237667016524913027290777216477989904700729228025571098410870506256758678625928245828210775042611512394316804583459576285681159178280400209217948833631961377519855502763611693070238579591463373484424582723121059964236704135695706864890193388054537703767833595331866551990460050750959493829603581882430597105627056085260296454181999581594565113210481151487049158699087454047624433576922179904629'); +INSERT INTO num_exp_div VALUES (1,1,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES (1,2,'-994877526002806872754342148663997.64812998474240514147207095573950146764154822009863493316394610578375247334825932838513167168342610420582834742950389452212867974756590355021495169819086060202117180229196935525386766373096687306110481009743118940565957556492470398904849289222365256698601073536111216152709126800604695001949246634784573028721762079936564434050796321975774729383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_sub VALUES 
(1,2,'994877526002806872754342148834484.43893048429492666626902822775522112238466538551783273345620682034111834572173548391979999630250058057637037929942180153828419189449146140692523818459983958943364062347264545253704196416903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES (1,2,'-84806738323879544552397401815149740513.8505875535743013876823142649666132764556588225959336097903898464616542203793600590311980154402068027051522932586050753865288419084437796768749509032177577451738712965496693249429231838833655025794915864261585848007162358912070811805298210095333433397862313304655108809804359760907473898420016370058274978588765092161529583480924554820756527238472641797198545539410039895140087686344382628317530286295498797849942258314364503000942821309916954725689781458590617068629906894951122301020797266469357701283289275708774593896770378558232444454118891917258610753077932026885574920166837998049508644891327208474213193224700658584824407382455480657734911543930195324144216374573825'); +INSERT INTO num_exp_div VALUES (1,2,'-.000000000000000000000000000085682300757901809257711279577127388124986344391495296640171942990079130291883279872719240502687189411421655284515420074848478500192127657883342858267913417679786356766341637336955924836847768457039175660279784295612167899455618405343686908907695358239088351870495830739180518509859269437015797489301844593920484927630172344269378248455657186218762679357609204333669024237648538465053048724383898528808961206696787294681884412485427843796696788390072124570957047672341581447744981862017791206857428430183366004980966398716823512288330174863890117558744630102020144500158878244146399686532935435591262767487823942606452349972401012308378888947381934278131785907155692007064636085000405504866631011593239041758448995933095907216863744502344014999804306234830774259496097549717476344048'); +INSERT INTO num_exp_add VALUES (1,3,'-60302029489319384367663884408085672236.83687099063256754698860828386302509843815398979402006244388708674093244201278399438376682321121138429850885935540924586964982855913223221441591310211730902799041126800414795030815514254713522692405212716783388698431088814919226444677188004928663343696636297536500970117716818423689175692808344185016908913828066250587407384563498516598672584120143890364303296142744031320345312431817858545326010704685255237541162931904446804064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_sub VALUES (1,3,'60302029489319384367663884408085842723.62767149018508907178556555587874475318127115521321786273614780129829831438626014991843514783028586066905089122532715288580534070605779007112619958852628801540288008918482404759132944298520148080184250697297150817299173701934285646867489426483932830299434150464278537812298564822479785688909850915447762856384542090714278516461905872647123125352735037721325154184406043613668806975385533851732090363979459292404685190942209855935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES 
(1,3,'-5140349743195574373979577554212527512597024.162480344833040409158673429491690439298506850052285119390701002577176786023622062742050099464897084793357329597395417632908812044304066963549928478520702505283307379218587635434673128958824348493758429380623577527186462464399974242800361134191519694694139153279582776168995426125926314513926640766117733774558011741611075336271613675760116784769700605008122422944290652448956922432960815546502965310676913079866511016221573557684245901002643719965652152439520727383305120298495304784052489867651462175349450610643411043707261107569691076730261762793560088893354750383257372118118753366377402045596735023445172252225346164608897913115394905485106225627590643805003075069931177395059698550161546962768768895596088478488887530518018212441345360153523733317120037436403475909117998647781920105313938836144009539683'); +INSERT INTO num_exp_div VALUES (1,3,'-.000000000000000000000000000000001413607404628860353773457807436398753936801768769045711604884548436548520368932184112069166807060840219636509423284498981041814526856251281381511288768719259120481595036745286884246627534964287523188738499223075292690431699417313258943941279343383979626641848305343592679057491670166887054819766294147341982669243114259272404203080347707713358471397866402657818267495050115642987782080912962056565478445923456884713049272637646637760989004917643369240372476411912794578381690666695711891846833983534126217706309741885844723208036219144146342212915129560758201609824034610223907791643110990898577049488934294259106725414517181607988173722432655731491050637087261030314548853334338835938120502930424813699221083197863303458179445322810087784892821862085562891180364134284641396475'); +INSERT INTO num_exp_add VALUES (1,4,'5329378275943663322300488.64471790965256505869684245785528331091076155554650629138833809683459634328609777839510066435612911583108717191216693735823717997111970662575497378762952496582183738308720094529950793570383580785385569873278068217936841324404119828637880370718028782103860007754579779716996004352284614661690063919125301052941328989181561787543541920734755989452320799185700078241880935083616978140555713297241612718277766918005268951861880490889884082730841740604517529391011862694381726143520658746305661338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (1,4,'-5329378275943663322130001.85391741010004353389988518583956365616764439012730849109607738227723047091262162286043233973705463946054514004224903034208166782419414876904468730122054597840936856190652484801633363526576955397606531892764306099068756437389060626447578949162759295501062154826802212022414257953494004665588557188694447110384853149054690655645134564686305448219729651828678220200218922790293483596988037990835533058983562863141746692824117439019450865871047657552800448629502344444081260036580660700595591338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(1,4,'454294299613767152878025320780.534199313974295807138790763501115780294529340799108297697573066187975311338382917022391830256203305238757334106943821060545424417350991354829668286194840925251162479496893943917530660694097932059166013476064988623431110002057735318529554555260199417935495388243829261809007709919225000608711536928171687251088217591210419208480251102484043683131687013687838713055660405381318396419588727500715930145098362997142075433472039319292466570912777345841400769387321465602989947078951135489852486382469990409873227894248208197179481868230244584527040573428134962626267135732247029762468417273891700661832893497067151409134724061246612631376075173287264787886064622106855886785805818642123776489793586531950438285720668411465570116161790343538663297713926678759640594912243360541590368666922379919514826022141331900181'); +INSERT INTO num_exp_div VALUES (1,4,'.000000000000000000015994998100440878014888861029956505927201309704413242103407885948184870841766875212766910686894450511886242468216220470061916924303252919423028993720180330014505454865704155281502763018913215741264982350384245753394656021401865680441649920273268554396350483440173848850052788410943178207336328451359951614056237100465802151856198860908371340425459435127133071447273887829397881221098443685586506647314622864702873235212396755866459409263439958011711379929751157260020133239574261188528305921244365838405372320186907437842180388704854605498842516581811515413843298370501194935797268161171428747542997504369133579105180311662221854071962295818264211400101689450830279979372422749150894553349570063000769685274875561760334738424509532610467832951796852051505383374693614022043010735004494395190'); +INSERT INTO num_exp_add VALUES (1,5,'-652670387.03916046850422757312745971450663862747133703839829692066597367760104802542475264601221776157515632293978442027199108085723617181683235487266149426304575903892721468296143475297345699313102262188759506518376019936160961709578829069446312051432780603656651983414612264636232727512091101057374054475214114364113300402823059519499217878746766275164739724770556122895799337810694888119810524986616938847385753562624139431982468828696587199570410008890188532132652095915565323400735066310142303225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (1,5,'652840873.82996096805674909792441698652235828221445420381749472095823439215841389779822880154688608619423079931032645214190898787339168396375791272937178074945473802633968350414211085025663129356908887576538544498889782055029046596593888271636613472988050090259449836342389832330814473910881711053475561205644968306669776242949930651397625234795216816397330872127577980937461350104018382663378200293023018506679957617487661691020231880567020416430204091941905612894161614165865789507675064355852373225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES 
(1,5,'-55643106304872.575994253221940844841058071061962511162776681458310912066379595519265546225338405882027547140476045378015935579066580347282075024392379464189067155567624835346798806677988850250198082355055954078446421075165109896091047534711081616362392995575466807084807876544560268050611445006601394735810211678919646667455478469014906335433468365011768049600750224822391684377238242162320161552720449713229523135506671063115436813348612986916614320012995541575293478341408982118538094438068036422562665160411591652618670802973618768526197813319204816293073794413317669922144705633308090832805914096147659820167569140291210526520361556881576175809360614782817717579318298657744021133210954279487777567785280633309576696708168342539425395482429923273623865667723482418178781573723597156804085501875735112311466228778929147929'); +INSERT INTO num_exp_div VALUES (1,5,'-.000130590057635351941758745900947472461593749814351229292370661147301124533787181489468804246182606762727711479707901680546780430454163647774077629503207962424213266902732555945190365467801995495570282501722505521485829885605904543846887348545254658726343578684749830307120625129857380290225370772763609458975555029415082569247186899112975387051141777417911244576134390940441209829852154391377911942082738699481875795620569383196133124499983396562167632007454221121465745085962247988140942672429187053671899537331280701003778040796615094903602095098880716919238394057384949891444700347825726273725378453454782330181608182747900774711384845635284701538541452235224216112380245660177463043471814071809869894647262285332580556739424040615194137651616350340752691170045698234853734471923738591898290468792787543896'); +INSERT INTO num_exp_add VALUES (1,6,'85243.44233732197133191329295927531563604777955507322414928382967007765263923984471408038635831036097817458527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES (1,6,'85243.34846317758118961150399799670008360696356209219504851646259063690472663252876207514831001425809630178527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES (1,6,'4001.075404054519813215296429095020391062109905613738157927030437221793757373268325953178030040276107574363822832168160758728653712686313134828282109532831190239521843808940611025488601517574653932032236616573457735900045655665690517797280666732780030171712864961531623060353548802466577910774711998056232872212688464691036260746751992072745518373073825852119460094113694393273456369345499434994672730920070410547163082189385645712866100999708173472360864669110044660667614583576570496399103026286828660558854973376227247132815728164629722965145778698957093136175449225024685874279280018547740'); +INSERT INTO num_exp_div VALUES 
(1,6,'1816120.848909727306817960620941575637231136442992819290405125420545200026620306446043740992108329883383706060582482495616151605111275635501481354526017831484915013545483361715432312183101964395505340188909970344423950565285639911521082834494088840596716495422427543520536844348040681236845850482165744696068209384509064196671206362539077218412355776790921130042376467606683622970728503408501481791356294886150690067651815776445750760428874351556866105285911902433352126498951242195408782804314174041618879250740246352525074791310920062276490422853700893340860452528740673590486626464460321410814395342850270921486724297414692313177440726749004398703147904603937755702369682956482832074779404350351752662820773690162594400557957241676636030332988289683112176900913522668426137377289536793838959751008646843014106876005'); +INSERT INTO num_exp_add VALUES (1,7,'-818934540071760498.60459975022373923760152136399214017262844141729040109985386964272131706381326192223266583769046276181472898406504104649192224392653722107164485675679551050629376558940966195135841284978096687306110481009743118940565957556492470398904849289222365256698601073536111216152709126800604695001949246634784573028721762079936564434050796321975774729383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_sub VALUES (1,7,'818934540071930985.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES (1,7,'-69808760806266041400340.70700818693892852138813934414383886494691670042143650609934777814995087699409404201920249076407981012095999320858479644760715204999741683528746097757549835956359129287002171391961763797857794730120426599135099619822532290339000466211195776337667123320942107370731349851576864242697412616810236323676004067839744992733887503405311090677026008324895177587064547630828026123718296429295638934384446325302964896473296829265805737112709269803814942537657996725913938408781715328945194948010970'); +INSERT INTO num_exp_div VALUES (1,7,'-.000000000000104090609479936344103210175655521317012597986331111866307697262848964666360492361638117930801818899121383806224630563676018240181412174154250663423230239912527388431901852952893943812666142740182651125508583527237123596541789628675379232473721293630968882045044077795828674268595016625198802475186587918019739056755398151182369187670251750080227679555002307777300392769289647975058449905106584837938556260801229545589323224752038795423164214112897202147313792076165011373139219134850954217300915326944185918762838321705825423789073869940092569940135329697980600082436317664012683589681419530904283106912171330819469065141821685734295058255484933744156717782754922568796985634397878149984177882018261742637463462647452140104146195353696596211873925359508622779658904411330975862442989437933211964821'); +INSERT INTO num_exp_add VALUES 
(1,8,'8497071467.03603749330791582407836434318377133169438097066269854720538319012928851657498035372443556191720308219530866834905045144302106406146277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES (1,8,'-8496900980.24523699375539429928140707116805167695126380524350074691312247557192264420150419818976723729812860582476663647913254442686555191453722107164485675679551050629376558940966195135841284978096687306110481009743118940565957556492470398904849289222365256698601073536111216152709126800604695001949246634784573028721762079936564434050796321975774729383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_mul VALUES (1,8,'724311956372274.0135050255361637906710330203036651743488213007179039756514944640108625580172737414192938789413338554327986697518463087452612658955180411327002900979574347739956600177846996063741787205122007268468674386396156638261992679442768654367111433834151087792255469957061758837789341439211010331332174981459471333376067541234901538285101103690622656631026001337239036711179989456674399137008584021283568040818388709554256523118702728176420022080138548890713013682480239784198421500241995499841675772793497485550923152267616622892846304530712344886979674416990935007952941652591352603797627920865960622077762568060903908151958000'); +INSERT INTO num_exp_div VALUES (1,8,'.000010032191786198542900505683562217892317481076466949299850809276743457759270150820565375820388277409258249926696079166209409657808406245382887790534127749833677458375931047385994887406206232330491317602830654688957983804698568410728278089250379255157030886262396950539100566975000094268415749476738358914633948867977798590927055566888255636132486899287919515638902721543629183577900872078173883974905921239149419877613723476347774771230668479296621531969573505480695490386225866950545725121902534610730154727385072738079149623798073810167706094070842646222833137345669922898403368997676634709281456818189049718956207208697021706186341405575300648248555331280690778367620868775005181264547924615247991795542738868003191757946979714250339430363902549866892041102771965653407197094250270379367437342632741280710'); +INSERT INTO num_exp_add VALUES (1,9,'54948723.74225051983134098996071145685528795757427462111901537365053896571438476055974853245403475510333627298551845046116291696445177112567064282766115207407461565363967417615506303416694032848457927390574251904212425813072768882213388082765916956736282110801611726537663292922699021333445658549608928179155685881583228490235606377831724593358583903616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES 
(1,9,'-54778236.95145002027881946516375418483956830283115745569981757335827825115701888818627237691936643048426179661497641859124500994829625897874508497095086558766563666622720535497438693688376602804651302002795213923698663694204683995198328880575615535181012624198813873609885725228117274934655048553507421448724831939026752650108735245933317237310133362383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_mul VALUES (1,9,'4676749348240.390309875431213992853550297086049749814750492488995108783145961719774217441193547534210468967573344456866203963659951312519988497979489304488948342258375915152429008993288817366720647491166024151209542534474867042837694499222928509320280684557676243780452100132238968233413333851595648146954975713386711764268506890884764704949969602122157394714663532141060559896359465918874990769222345665160127552795532197771168442486088776803398878354288847069602460071745966589164282641033852314335279121191855487126430176047553895892632834940595958394834437871886013513058514896870683979585091413977173250824451205330441299000850618134248917380244749589254309567551846327349592529960432446947239714236828401206843011440433362544797025114476612133622499094287321570559088587999417440664282418005102546343020409520421747216'); +INSERT INTO num_exp_div VALUES (1,9,'.001553736563217204408368240901181555234014339476186598647410198373122572205209277343865051610898136462487966496673511261433286284257044548634547569923035899634327495195510767312478861719221916387940027268721306540663743713345337497285507595251328382906111997524508729275471287648008479480805967901972481289402930660848950039779707354469389216931774094174326513465502460315792834278614886136688161679443873815113442220055827192996984074129528034845339130162104547166079591654852164993577408422015514100323825529286511720963047269483211930770803479398243069649400360625259869765138545866815758888670363356947311319523139395191102286838888146829667276592755438606664644975648828848738708349790766370694194763606850690923803984129157519048493985198591771429264967247245289970213262206709011468289046840862597010969'); +INSERT INTO num_exp_add VALUES (2,0,'-994877526002806872754342148749241.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES (2,0,'-994877526002806872754342148749241.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (2,0,'0'); +INSERT INTO num_exp_div VALUES (2,0,'NaN'); +INSERT INTO num_exp_add VALUES (2,1,'-994877526002806872754342148663997.64812998474240514147207095573950146764154822009863493316394610578375247334825932838513167168342610420582834742950389452212867974756590355021495169819086060202117180229196935525386766373096687306110481009743118940565957556492470398904849289222365256698601073536111216152709126800604695001949246634784573028721762079936564434050796321975774729383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_sub VALUES 
(2,1,'-994877526002806872754342148834484.43893048429492666626902822775522112238466538551783273345620682034111834572173548391979999630250058057637037929942180153828419189449146140692523818459983958943364062347264545253704196416903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES (2,1,'-84806738323879544552397401815149740513.8505875535743013876823142649666132764556588225959336097903898464616542203793600590311980154402068027051522932586050753865288419084437796768749509032177577451738712965496693249429231838833655025794915864261585848007162358912070811805298210095333433397862313304655108809804359760907473898420016370058274978588765092161529583480924554820756527238472641797198545539410039895140087686344382628317530286295498797849942258314364503000942821309916954725689781458590617068629906894951122301020797266469357701283289275708774593896770378558232444454118891917258610753077932026885574920166837998049508644891327208474213193224700658584824407382455480657734911543930195324144216374573825'); +INSERT INTO num_exp_div VALUES (2,1,'-11671021799770914903865020509.301561107153561058074179843542446420696517132461554451075945807420674211966679216615407057626541711186781735967334896541890595771915856783008831770988426637435694856170266346306640678577376310547806764332837625966429200996250687908930748245035578756314083608655163891041399241377675534416837659335561005203219889972336214863417948542956735403991871098341470996860469878038840964359144637726669728240650066795729910649523281308716277906908340457162235831526838308777581569974551673352306004330423694524256415657620427590352277556907586751621496248973165690360552007637570957980230685679819820147036159174977086193494572117089582758015847544798464543446227632367713941117001423437766840744488426025388612316819120660814681298624293065972395923651314350558006567251033289878238407790871784676348196394482477767774'); +INSERT INTO num_exp_add VALUES (2,2,'-1989755052005613745508684297498482.08706046903733180774109918349472259002621360561646766662015292612487081906999481230493166798592668478219872672892569606041287164205736495714018988279070019145481242576461480779090962790'); +INSERT INTO num_exp_sub VALUES (2,2,'0'); +INSERT INTO num_exp_mul VALUES (2,2,'989781291745465665243281323944996915810556285052564220274237162526.1617859904902612197894543199389468971679632139059029459520163585971122643624316475417489000981872666677202334180945949860058384424993911721081868337499377890298636260338063268639283065887210924895929155083478140340889209440025415565915964293989840603863813531303253038823629712989041722072693449251635519992922148998556112923060331794396659338057474019846675262291146025'); +INSERT INTO num_exp_div VALUES 
(2,2,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES (2,3,'-60303024366845387174536638750234506721.2758014749274942132576365116182462208228193753118527959000939070820507877345194783035668195137119648748792386548310474079340204536236936213411512867171486174240518914767934028451971067161683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (2,3,'60301034611793381560791130065937008239.1887410058901624055165373281235236307966057696953851292799409809571799686645246659986351515277852800926805119259053513475211488115663286642009614039264484259692394657121785950542874788161683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (2,3,'59993133911282372667149627097418449223835595194300848703012380022306762.154418449236691515146061305380465061074531890529497774836941002526095632166401249277270674802626154774328055399254982998368191676630276960361274433270795772477146870294928855773172789856196219950097157391050424577381777627004101100872747943673762087675405200265837631665464736842180920496158545887039337399558993437594084473932658319914390365451919627956823980800124880375978662052111797881386060353490432427832058851094210488804887183034572364751639107535041308434932952695103493677600969712634416241541391613699710826602011076372592299807609658979777598672141389319098817824624950794758296679318319299142035'); +INSERT INTO num_exp_div VALUES (2,3,'.000016498242835741013709859217005931279826178662180173096568520102488480129191427472581644597420895622947234184547373944996197105916093347103336318249582032230903680989710242610024298937774441533502282949127537125997753002819456724709929935850697744632904111143787011103837624936502324835260843148595669524694347566421203164808527739207590986975750648112133699756328511947175496694080071202064255118777680958612315513441989609682655431197367166056616661045712867189326408877133865572680407329449150282415810958772293869902662884761202424695742898573841869524376684740249281181605067345203479719345061595919652192297531638467223956758315591610733251562492794891852151639643060692698365496208796638230566761231611376199140556503620471090364900792180618741355091923808605890415081571900697282725022629812561702118'); +INSERT INTO num_exp_add VALUES 
(2,4,'-994877520673428596810678826533995.79421257464236160757218576989993781147390382997132644206786872350652200243563770552469933194637146474528320738725486418004701192337175478117026439697031462361180324038544450723753402846519731908503949116978812841497201119103409772457270340059605961197538918709309004130294868847110690336360689446090125918336908930881873778405661757289469281163974774492810850778950071063044769131228124355961427111369335109426492177657001035045332525699055300921341010989742896430768506909949340276549373661076950964959025967328861569387160956730002517417236732463510495205173523163676450203614971844583064927040066684531931069310935516821795449174271052747559395296525950219449541557191520903507653089998307641491381797101485104546410643'); +INSERT INTO num_exp_sub VALUES (2,4,'-994877531332185148698005470964486.29284789439497020016891341359478477855230977564514122455228420261834881663435710678023233603955522003691551934167083188036585971868561017596992548582038556784300918537917030055337559943480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (2,4,'-5302078674303935968062773235453828254014583744527466365136.236414807326868572353809920518232561005161225922028750078608989965741402418802255050636954800114792425419735155504035469350521800895164087027043476055514245942961100610551646034472084954313670284875310691807937254054948742125729353864014122131419164449567115006621212424805182687707372956385102095255735458593389920872596796806885847543910224476727171570873698525606016990229936284811067826588349092841322512643043008589065847223683467371925773023109720951609815041012521485326120380123169545818055967455575736140138663815073081494226676896278654189873597341203197903408668523514375373841493189836809506003729379742035629498519683885268256481104619815130659628225053833297766479068686119691010593208135616363994230674606991733148502293102108193522604968743948323130517040609601859735899914987426089053869350663'); +INSERT INTO num_exp_div VALUES (2,4,'-186677971.517539861245390308778107722315862721823627804195528485535806132067679059453022306691281662574091826898288146790399178357754908901382135796783067563944022498807930452234032896817601590728156392188660701355670595952594500812333935362955625137944589981298793332621503315902294100258945995827423279442031218510259915311555745581797315793010762585658196457363672908315687720174516274528662385172326028870945153551774300419158584379602045442200523311437013776079979639415633358878239012925000523542907592866797199229858272764668664323316251874027468128770456766875866492004650352654523634716923150212263912760225390093339729495231675627059805624175587380165509763048913150826017167286786277908970769297060278191518730887417202276531151575412404467497036737825989088867451153485938272367300939127313445244028528055624'); +INSERT INTO num_exp_add VALUES 
(2,5,'-994877526002806872754342801504871.47809095279915423939648794226185974985600242391612965412218049794216637114648812993201775787765690351615479957141288239552036371132381627958673244764559862836085530643408020551049895730005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (2,5,'-994877526002806872754341495993610.60896951623817756834461124123286284017021118170033801249797242818270444792350668237291391010826978126604392715751281366489250793073354867755345743514510156309395711933053460228041067059994425117350974491367099004404995846913641329458537237789584653041949090121498951516476399288513593944575192159570458664608461677113504914551578443229008454218964701550932948083369656042643364608405637360180021322967144409944099438498649645368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_mul VALUES (2,5,'649411906691138274293985410502516861224852.2323455192714410716272307781034189160865613770320102043319541634113746032638191509585045862973333645830298922352816245477556264222094036953195419857712804755170632292914187367964994214922001758104594052499795564860466055599417895782179851297585155129541589802249540436678824225950907268084876110445460948679383611117263673106597132046331719468816839434908155684738864149955129235751738204036443603521478609787295079710078973503970964790273461142497259987849074597264522099648376356902360358310245001183020992360260836105404118742418040965190000718736837422434593694808973939805954329718232693154128543253581495885789333274488461716809104532693754070810202831113003978085636579574171344721710232931261731022478029314435363413498991740750878099825781577297965642009156858479681236085226911858782115'); +INSERT INTO num_exp_div VALUES (2,5,'1524119409495532727030986.638577103454261465522025182901477334004986357902177024959076085490119358611626688213654669281670407680244740174673394111775678935383154847014211641601227316639834450258566053805263858706381900273201146454036688771735398324537667996974210741719621449948660517037619359095556637235980122706739013220201060795557114248610410815988952748489854367480813823114296393315170621979351958306734282429929421779129764262568942699813166237466796852578307944635545174715298176546980314973426586923195248536376403319094417073026382024413817222396402299695717290716014320518777088811749776114378145110676170242861393274018655137797545194817703831240390631723050378397773341835222892981773205967439339460305257986693600088957772328044922955990976285151896366292514128607363007421484320868718566256882080399264346243272770200676'); +INSERT INTO num_exp_add VALUES (2,6,'-994877526002806872754342148749240.99659316232359475297606895243958507460511031229368344962653674268847910587702140353344168594152240599109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES 
(2,6,'-994877526002806872754342148749241.09046730671373705476503023105513751542110329332278421699361618343639171319297340877148998204440427879109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (2,6,'-46696638263247522384986521136500.479312417066793299922708112595886608370451213741279484136907754744903470430131032928908162742687359367826808123516519335458861613010646992354378739165872253762686683966945711430182491860196341344982195078000259063231136011430995647812149294224699587849791008794261026932467933475782780'); +INSERT INTO num_exp_div VALUES (2,6,'-21195986018643887410662481595901800.342199657994285865579781485758715114242459388977583220756870314514884887803267837816669111279417861218648323488364513921592045485003563036021370174294475403630933854767386355037781881144701319212711655881277140183173924089814927297045029394618083349813549439341772734606115369911736164723942330187830605893993276674913563980890459604886172701331890746621222114280438198802989678877404376001410627722336243835841751052795437979198996482216031399073597399901975686733315751292369326904428230195579137225651689857057115970784985439417129044974524632220457594191305254649113470116960582543784928547885740020507755033347968928034294570497118410435615856155184563329718831512839630769097935523279881940380220955993456451396417879773380305142918906742431812580562496634831735169817705720949712410595406012323294829461'); +INSERT INTO num_exp_add VALUES (2,7,'-994877526002807691688882220594983.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES (2,7,'-994877526002806053819802076903499.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (2,7,'814739569184924399102711674444306584731316176345067.39834031417849342571224916231092924046722938910652929295271097903377854123984307101079073134405782275535446337229706620713104545454319555885847481531722101704765783025789147453570970090'); +INSERT INTO num_exp_div VALUES (2,7,'1214843772391778.127361407585140553741220126410637250571020684739034685508176000812180032686291124045768750332493129822580347351032145964983629059968936201592138368806173099130176852606440296388856520582890650384142745607345709716826703676313341953999327129144154152914234659001555055379537780751567782847296067128932113870102563522810980359433259696591977617184951677390423898232135100000764121508662830515405980450892222598485287609657612482190264517684867291774820716746063133066053446257163185646067618679478975882247893469409405379034723543061767846895135644429012095930584952053545016706315299076691015196261253199176743281648949731423486208098120903720124071047872917636988241710583721537777321338769039241700203546247947405745989053846970910400831817998342969657501678430211657755864160072525313889413731419647001970593'); +INSERT INTO num_exp_add VALUES (2,8,'-994877526002806872754333651763017.40289299098701084219066388457144979069028441485513418625082363021182982914675513019536443438529749838106171095037135009526312783302868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES 
(2,8,'-994877526002806872754350645735464.68416747805032096555043529892327279933592919076133348036932929591304098992323968210956723360062918640113701577855434596514974380902868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (2,8,'-8453460632655529853033389979024265783461224.3195241893307807116624750282852146303290708492834695194274289713076935297734670940696121761483641291930931061232942894577813178566088927221374036301485916497770984757492912292002695944367308880163698595015497307574177176409203214324418237020500352652934909632442547242092296504047310806151851207329042221920888326000'); +INSERT INTO num_exp_div VALUES (2,8,'-117085929036205907700251.219065234073336548829793284434494573185718678644093751558890746941383215425734761534822966779511801033216479269605150574332107020180872343673157350081102818832254463561564431056604957702984438484261858890324442581609284935850435611342611117035589511568432559140282381526487115307554496353616929034919886387903446436924514812698404129456069856633480965357915969548215985452939172313964007318881987188665231550330515412104367728617802960792164260429920719961650164518261501571220901151359208484337831586551714193024143212288426326740373893030225940355268499071669300664200888186064836443459131985786957267268845966279576380786883200277187591448294590370986026461176853573555996139940001165172158855197070946665074838360933025833716166930231164328918316437195201546383664484983447934244744303265471044295601062898'); +INSERT INTO num_exp_add VALUES (2,9,'-994877526002806872754342093885760.69667996446358567630831677089993316481039076439881735980566785462673358516198695146576524119916430759085192883825888457383242076882081857926408611052522393579396644731758241837010163568445385303315267086044455246361273561294141518329233754041352632499787199926225490924591851865949646448441825186059741089695009429827829188117479084665641367'); +INSERT INTO num_exp_sub VALUES (2,9,'-994877526002806872754342203612721.39038050457374613143278241259478942521582284121765030681448507149813723390800786083916642678676237719134679789066681148658045087323654637787610377226547625566084597844703238942080799221554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (2,9,'-54582443595378013373024060492546032003692.4875677735896411267274323339692558458420972958075073392126734000341372096298914875892612108329218081214550050039133117695428196702128258481789017059073444323729583900855712795086447886053552786449313809589992185978097430132940882612817775035217244553616977182049775786664446683332098226841743818600819221587510039430478859412452506872131851471967577741190323481953867845129745440745526578327709351120432530702446916035797432129052518980799424635406993848916727957825620638983706180841278402925286540375225365057191075559133035'); +INSERT INTO num_exp_div VALUES 
(2,9,'-18133693300409132895168796.074616314168631402221003009151140409826855230810646429042722071403306917323628118792142878282108022292754325022530103525285999179488507720688317761243448898240836430183645778132937666952111134601563043980164547020295727057908447220163534134835130866457657964382363853570827467081988390359191484798677813656413640874450449802233520570178139244957518604566383671867773821069602665918688868868894979351219381089954104823746091972754649316823714354000113723793845707472924569647945844436702275724514171940901057842455729977729388911537391920702753167125695758365521631000334183494148229356487592577177344247694925635113222720411958290166668659311154664393442690740373285505786584987609789805525300762074682544164213490532272590665630428583216403362629445153016404037983825555019274338559686335405719430737559715778'); +INSERT INTO num_exp_add VALUES (3,0,'-60302029489319384367663884408085757480.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (3,0,'-60302029489319384367663884408085757480.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (3,0,'0'); +INSERT INTO num_exp_div VALUES (3,0,'NaN'); +INSERT INTO num_exp_add VALUES (3,1,'-60302029489319384367663884408085672236.83687099063256754698860828386302509843815398979402006244388708674093244201278399438376682321121138429850885935540924586964982855913223221441591310211730902799041126800414795030815514254713522692405212716783388698431088814919226444677188004928663343696636297536500970117716818423689175692808344185016908913828066250587407384563498516598672584120143890364303296142744031320345312431817858545326010704685255237541162931904446804064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_sub VALUES (3,1,'-60302029489319384367663884408085842723.62767149018508907178556555587874475318127115521321786273614780129829831438626014991843514783028586066905089122532715288580534070605779007112619958852628801540288008918482404759132944298520148080184250697297150817299173701934285646867489426483932830299434150464278537812298564822479785688909850915447762856384542090714278516461905872647123125352735037721325154184406043613668806975385533851732090363979459292404685190942209855935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES 
(3,1,'-5140349743195574373979577554212527512597024.162480344833040409158673429491690439298506850052285119390701002577176786023622062742050099464897084793357329597395417632908812044304066963549928478520702505283307379218587635434673128958824348493758429380623577527186462464399974242800361134191519694694139153279582776168995426125926314513926640766117733774558011741611075336271613675760116784769700605008122422944290652448956922432960815546502965310676913079866511016221573557684245901002643719965652152439520727383305120298495304784052489867651462175349450610643411043707261107569691076730261762793560088893354750383257372118118753366377402045596735023445172252225346164608897913115394905485106225627590643805003075069931177395059698550161546962768768895596088478488887530518018212441345360153523733317120037436403475909117998647781920105313938836144009539683'); +INSERT INTO num_exp_div VALUES (3,1,'-707409990019504668223608170643582.082425157530076679823177950190511141917761066423266390864536360056345386873500583953954967225431526056199231768143978526582904071798714789552447782850723926323452633811653766838064983821149041415149067433978085927687765773012158659685363079191901396502099956189371719135315616249471739677995520904113581848295732911534266040260836644379296158092198514963023001686666281725991605685524015227112003429486755206848316731257322742428352116058878710728614841247581716185886403744830796740424927494009978599974431617064012221450054532987372285996679180090592706458366967534834069977644215413076082570497451654516268857039718730203921980307096740864747006176117071983875364434497517026142488015705391255750729200497229031250705777282987863242056223584453312226818451807347197583925624299372040413470456696588043062815'); +INSERT INTO num_exp_add VALUES (3,2,'-60303024366845387174536638750234506721.2758014749274942132576365116182462208228193753118527959000939070820507877345194783035668195137119648748792386548310474079340204536236936213411512867171486174240518914767934028451971067161683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (3,2,'-60301034611793381560791130065937008239.1887410058901624055165373281235236307966057696953851292799409809571799686645246659986351515277852800926805119259053513475211488115663286642009614039264484259692394657121785950542874788161683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (3,2,'59993133911282372667149627097418449223835595194300848703012380022306762.154418449236691515146061305380465061074531890529497774836941002526095632166401249277270674802626154774328055399254982998368191676630276960361274433270795772477146870294928855773172789856196219950097157391050424577381777627004101100872747943673762087675405200265837631665464736842180920496158545887039337399558993437594084473932658319914390365451919627956823980800124880375978662052111797881386060353490432427832058851094210488804887183034572364751639107535041308434932952695103493677600969712634416241541391613699710826602011076372592299807609658979777598672141389319098817824624950794758296679318319299142035'); +INSERT INTO num_exp_div VALUES 
(3,2,'60612.515523995516156897729403721504966784736064970538891936016753206905080265887046037910122269129293912171105589512464185386239562077778499936203155976336284324712221812806801062157592930664021782540155687632208890794166119782594464410498356083266087045927038416810562596141871858142749062925965665039981381277808608946877852933015970874447235220989360704166270479475802673572039541121473138382812420076284458769543418652217394352637294823914346726065145538710933281768776286965107974980550163605068693568717671571780028113969794125200592691656568731359981803586296135840575095063824258761205175762907549288801963550628589530419118771779395037240198270853609924445368393952404606326559485235840170339343865253618184271158932135392539396160392488927771488269959497352568205940636180870805982484030168838833607478593'); +INSERT INTO num_exp_add VALUES (3,3,'-120604058978638768735327768816171514960.4645424808176566187741738397417698516194251450072379251800348880392307563990441443022019710414972449675597505807363987554551692651900222855421126906435970433932913571889719978994845855323367077258946341408053951573026251685351209154467743141259617399607044800077950793001538324616896138171819510046467177021260834130168590102540438924579570947287892808562845032715007493401411940720339239705810106866471452994584812284665666'); +INSERT INTO num_exp_sub VALUES (3,3,'0'); +INSERT INTO num_exp_mul VALUES (3,3,'3636334760530744652235488357607657374520053530993537920755375319352615385278.023608692512217812784472508939511216316773023870624171279878340621219698109986095090336065266376220109007718694455520948311677863167090936408887147442375455695868593092154861636486745490748828207939155392396090682312136290864359484540126174821846208064763823279315343506148025281475729723686566174395516982893064510403581479746673749128344955124070957545815390178764940816628194640888255387443237798761377617383817511745005525149990207764725040109364671749403389999498572538135588695345112358160274671918953118753964073105250116426665508214894805722798842017943220605600452911496071424281587802689830031742105619630787641205011894680546049982654601956546154572720177337696285354350903475239411654436042931409507429892682706228354459580412759920815932840348933425754970917910500027837428631661182510071352138858'); +INSERT INTO num_exp_div VALUES (3,3,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES 
(3,4,'-60302029489314054989387940744763542234.98295358053252401308872309802346144227050959966671157134780970446370197110016237152333448347415674483796371931316021552756816073493808344537122580089676304958104270609762310229182150728136567294798680824019082599362332377530165818229609055765904048195574142709698758095302560470195171027219786996322461803443213101532716728918363951912367135900414238535625075942525108530051828834829820554490477645701692374399416239080329365045332525699055300921341010989742896430768506909949340276549373661076950964959025967328861569387160956730002517417236732463510495205173523163676450203614971844583064927040066684531931069310935516821795449174271052747559395296525950219449541557191520903507653089998307641491381797101485104546410643'); +INSERT INTO num_exp_sub VALUES (3,4,'-60302029489324713745939828071407972725.48158890028513260568545074171830840934891554534052635383222518357552878529888177277886748756734050012959603126757618322788700853025193884017088688974683399381224865109134889560766307825097103477790782590061456916367930139323346273315068375646692125800496305291080749834712822775973790354498408104142209966769395239768969172107040437333428573572464689550003374384624966403962290572373571842567623422963022155546431883766327294954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (3,4,'-321372325955692885069615337209737469749246561535004445508427591.072860243358366933071485495726715620133686420023451450292996945184959542770492705998350644739298629407567812798540119555932604687814429669592481327761428042980782672136901602006622227365754036664912989085940235439697789102358431343119457114603363936544931303133371137532006899162833369543279729021228901466728220729625107362063321334489394782322741444425117731922691457341543446841167138481424319752111748042440994701571955325673470021626946676976482516292402239416632497972073915818846704053624707839813514171497746804751780741682011937606462260710753056669269928580460921188286249923152921382198282201761171043384698319895970192114563900025573490442674225227682235790590616707857188385274186584856872573669591460447105688151281208238908470285147895678001948902280493477604361481216667716971590499226735103039'); +INSERT INTO num_exp_div VALUES (3,4,'-11315021446594.877643290091276308982961654569173523687151347727612592478433578066762912541361898899908505997444632820107356713116459078630334224890355872486337973552333755378190316811715776951317058334754704988120078733912131691682869448731717816749620336196719541702138949084375907248656748314375183301372633028246109596775255074617515860012417935744433243071057057560464360663978361945666099558526069794464437818864063206829678640156992474597480916575712563493776637239091589972373682399519931569163592317107392231951775499293572134702843085474656152913351183535194499521618027894129537558509428098859715020703897463518891082573242502356303078754574312965093639182648263511466558336912294702019648266054331227425119096294871153811412169351624751542166779635702042223762951850816568617453355571302500885410532963789364822647'); +INSERT INTO num_exp_add VALUES 
(3,5,'-60302029489319384367663884408738513110.66683195868931664491302527038538338065260819361151478340212147889934633981101279593065290940544218360883531149731823374304151252289014494378769385157204705433009477214625880056478643611622410268943757215673170753460135411513114716313801477916713433956086133878890802448531292334570886746283905390661877220497842493537338035961123751393889400517474762491881277080205381424363695095196058838349029211365212855028824622924678684631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (3,5,'-60302029489319384367663884407433001849.79771052212833997386114856935638647096681695139572314177791340913988441658803134837154906163605506135872443908341816501241365674229987734175441883907154998906319658504271319733469814941611260503645706198407368762270127105340397375230875953495882740039984314121888705481484090911598074635434289709802794549714765847764347865064280637851906308955404165593747173246944693509650424312007333558709071857299501674917023499921977975368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_mul VALUES (3,5,'39362489275784146262776411377472433635883331946.794473520543457442955620133347015506556162839462623905489255080102447195050109095701660164272430316804466254467810714209179752718730906325952685817112992943656292503112803950215110778476301809440329937774061163668461957943313261962261081942055908935814323069621279128270849852239727888939033546870208376394878842958202403235309372240005941467570230067124830916866857395233038346727879951123599893174252558078732888910139309038957525961212820831321973219557165558911222848692996406741318948607549825343491479728117062814094258484536263158005174429922237853707635743736923521032098496725445243775790161216159399180889906705265012270270348146530113428221072591696851818281866095288773371414866822270689959827332258348570976075184933893434327278299820594014788148344260948638847457822697682605612771344335201258128'); +INSERT INTO num_exp_div VALUES (3,5,'92380711368470856513514428781.033155715252174277753317877861994356621252232374386687048394529670637693505779282500567256835271428113529026462111032257747830329068594622091282098767000694818101994264352932243278144124687156236926607422077479412495979777588932692081795130282128890441931602671468684153168580234070246201722180460130467506344034452687371838907269162119534950946217165384250603250357360223255177692065141037447374172264943732616165429783010079281851748804739433821308362193703012671569249508710820679009084891198169587484117171861141580870066764275087111843275285564262902405980617569581840831518012986031156042600391943605532635833608358301306456966765206853910579231447150839538731157206153540873916893579943906851149770881336811951119112558311734171557608362620988555075663589827484854016702489324791126228380209309587206299'); +INSERT INTO num_exp_add VALUES 
(3,6,'-60302029489319384367663884408085757480.1853341682137571584926062805631087054017160819890685789064777236456590745415460695320768374693076860837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (3,6,'-60302029489319384367663884408085757480.2792083126038994602815675591786611462177090630181693462735571643935716818574980747701251335721895588837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (3,6,'-2830400711649493468815157129316992649.40542786074520931471973065281957756940496588853021620372179463538053123396140685749478530925306163968207226329985017644835203709485594362663495728106061878665324856417118064730721101615473194292620972173690618491026470353143141125614124440035267592258385099934706896692953497971326605145704135723011753705907329979207428661473172503098296622281647255008204864404416199384701720347319806375450632245634238172654086373193251877533131784268854289406126119630708578053354762596511353053106459297339360827562281168219966099848212'); +INSERT INTO num_exp_div VALUES (3,6,'-1284742031601444539630782308463065726620.121021225455596762466053504195700643301310745151565435123335541550963124666304408503436412726848834604336377169205828654564329888653766451656774534718709065521243637375270687684572524302099749018591530352756390467862377335526634920857924031482455373589053524922608255779040656019538392173139295812160325688504210040741075388404155144782519528791757450256668977268409265390016721724966592135644698341754332845002439113523127047593325646484654291494607100188094186116001064043796216982681807318598789324900462932294782971663150070521334398542559480877366424630693734132836518604260869235580641521264976411493166969530737254118968281271908306432918913600567757535151861421384835424322504855607676315840963696944683182767935565256136130185809101891760917733694553800748568697830680328155128016670099315391685422333'); +INSERT INTO num_exp_add VALUES (3,7,'-60302029489319384368482818948157603222.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (3,7,'-60302029489319384366844949868013911738.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES 
(3,7,'49383414785234649002982046297226894664526726187218771083.0993243619030008310875293647868815940421844461627295157812843657782639833900543200310573708100000958929315945039020410482966753145208427035917753919085618457760620513481628641658765820294863970581642745379331727722585319163262763708386199720411053619449096019862596221607526610103408936214184850115071874430846697061554769773328338028749631552202705583855831155461651414320570061181212214810086436100771547030013079997847086'); +INSERT INTO num_exp_div VALUES (3,7,'73634737013325927185.787791148221519354461791539553527545166847382784629235192342551464898036004011575416717008403527685470842765455409054592207142526523023201841973047779202013398235864494503216973882479116841765663948294836180515686647139678530220909072497288527276378202532400736141014848907023234659020093073127450778982904578906877634654521825977382116752537063128793631412296206704078569268566614023846282524151679028060869175439188773864994186109445961525301841201265289707928211114515861536069733921800160245586536759625418951427346236213019358749196674633237197452976517130405065120577692737021174118093373953642724512531935525024447977867020930500433287279183436509990047372809400167546185096048971157700858970777301410692908939206693154161335335755844997198191427289546263182822280127912118140820265025555165337881999926'); +INSERT INTO num_exp_add VALUES (3,8,'-60302029489319384367663884399588771256.5916339968771732477072012126949734214868901845505193155307646111690097978112797961939995859130827784737422228762767014427842766445950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (3,8,'-60302029489319384367663884416582743703.8729084839404833710669726270467964301325349604567186096492702768702209585877643481082023851284144664938175277044596973126708926205950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (3,8,'-512385513828318260570283740065493064477880918352.732624553690077857674083796435724202494963885926573907185100543184828131859183999195040110586155435203949963570735841632689374488877298209082579317039061893012560130258753218955057387206477423088065663401594359617882154814262843273526859406265633827109554791772242178864873774889091687515990672487380368975556580539271333144212685871370972163560839446696514092637412587953506052848750866803569213269271165856310101244342151576488190595936869490659700946174362872797854591188391982770203203644172999264143929484089237665313698600170041324566984832357000400'); +INSERT INTO num_exp_div VALUES 
(3,8,'-7096872691348467943606706217.907270287823269424282176534343841939501231816905820949045946136373255017076943323578903040918266385724756894003692978391468202345397178445216069294845721607024056189567609414049207292919519881725733381453217071918292453682942046440563446278374996563501512335133749731529362537349288419883140401056747081065947774593869673146309163791076953204291951821124894409171722911526435445719071769008713367057971351892550570642991097981458696464929009464411568672010548002196406312721789582428747564855324072212842315229302959908665089850886951261233852165624100634055045684536311382452553544676139507899503993644452161529145849579200003677255968757773363970434791501820320494192909660871475590637419913907191608957830524390049664686282439567943053924245852983990958276537000732363895444894582579142752920882750130052682'); +INSERT INTO num_exp_add VALUES (3,9,'-60302029489319384367663884408030893999.8854209703537480818248540990234567956069965340942024890856088355839135538265116174644003927269495876835324407641642359213535695803871472434650475144516723617632059130297610134243891145006222068960999879308472500422640481972089756410157246974765071949782242392661524488959954348903412713930092273629207697480131360047867213863018127928853922173643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (3,9,'-60302029489319384367663884408140620960.5791215104639085369493197407183130560124286109130354360944260524553172025725325268378015783145476572840273098165721628341015996848028750420770651761919246816300854441592109844750954710317145008297946462099581451150385769713261452744310496166494545449824802407416426304041583975713483424241727236417259479541129474082301376239522310995725648773643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (3,9,'-3308379209762459471107480259839508279070920437.883503980178028214343751083865562028455061662673132221930429904398963590401793045470444301883103141901787466923883803951815572606105617157736442670792467625964359169270739534412932791178258858918086886061702512427989129732248215348301444245772127142869263635282888226326427510486246184233225114523636171202034558843515894542952126988613018789833835507734620046994907453602573865012044120483116345444810078666601100257620969379968264504287700045822481492526688635364586344704730579892342786173395802035361824932075736340405960099542224953439044947229246847140957298841482874444906129049023002897135347878048572628834749795298712449864571996898774444932083319581439741625832405434317985988163261591679157437224404970927012111196724239860528859217322132733404472897289'); +INSERT INTO num_exp_div VALUES 
(3,9,'-1099128766678422054524173986658.839339966689456265703816212189145237878729886466041806078542573981227645802109969871638687985985845489422516004202630099080709709893022100481258818112345013009059633421290241583864468453396484606925071369550998772875840640325758308835852391176503689677263605949075815552026731067384737231681068134099746550363063940273625924224721503126912810251607546172009765059506591787282558727077669973711491157840340631805422942099954647016059576777054339588421998882440726473698513560202030309804089250300097589174314677765341104767702983421063649104691583044460507666600260994707192787133590502137391691330098102374713996115782701417107878938473243874299874872852713499024851414757892169376458916467621226859152075901273014182163212783658933754507272478777304254191033562324994395916168496097385872331012258027431094381'); +INSERT INTO num_exp_add VALUES (4,0,'5329378275943663322215245.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (4,0,'5329378275943663322215245.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (4,0,'0'); +INSERT INTO num_exp_div VALUES (4,0,'NaN'); +INSERT INTO num_exp_add VALUES (4,1,'5329378275943663322300488.64471790965256505869684245785528331091076155554650629138833809683459634328609777839510066435612911583108717191216693735823717997111970662575497378762952496582183738308720094529950793570383580785385569873278068217936841324404119828637880370718028782103860007754579779716996004352284614661690063919125301052941328989181561787543541920734755989452320799185700078241880935083616978140555713297241612718277766918005268951861880490889884082730841740604517529391011862694381726143520658746305661338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES 
(4,1,'5329378275943663322130001.85391741010004353389988518583956365616764439012730849109607738227723047091262162286043233973705463946054514004224903034208166782419414876904468730122054597840936856190652484801633363526576955397606531892764306099068756437389060626447578949162759295501062154826802212022414257953494004665588557188694447110384853149054690655645134564686305448219729651828678220200218922790293483596988037990835533058983562863141746692824117439019450865871047657552800448629502344444081260036580660700595591338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (4,1,'454294299613767152878025320780.534199313974295807138790763501115780294529340799108297697573066187975311338382917022391830256203305238757334106943821060545424417350991354829668286194840925251162479496893943917530660694097932059166013476064988623431110002057735318529554555260199417935495388243829261809007709919225000608711536928171687251088217591210419208480251102484043683131687013687838713055660405381318396419588727500715930145098362997142075433472039319292466570912777345841400769387321465602989947078951135489852486382469990409873227894248208197179481868230244584527040573428134962626267135732247029762468417273891700661832893497067151409134724061246612631376075173287264787886064622106855886785805818642123776489793586531950438285720668411465570116161790343538663297713926678759640594912243360541590368666922379919514826022141331900181'); +INSERT INTO num_exp_div VALUES (4,1,'62519544780217042176.800424689664850775296526267109332647921183817056683200043718160298562843864918741523494444361916531159341418970534833628106062976341639276761669219281771109561175175033739624472497927501467465456946098280878993371659461957361369508794842102784763955539708800574418468150309301129490186416766691183270872711413796386178009615777589066235359283212636467980113350635181915492452697347977967985810294150853782607014649150457138118264698071689065469752702524632313088938504181640435324554007553994564705401249228914199354821595855823113730697333390936834057091883654016371107974899726642500486005445063301647520527084320363513388355471718583708935211830796440056542408492723718088396437530207347815505844074508948817594746824098278470533148171941442049323578854023683167934569551595335539887777638716651319134577441'); +INSERT INTO num_exp_add VALUES (4,2,'-994877520673428596810678826533995.79421257464236160757218576989993781147390382997132644206786872350652200243563770552469933194637146474528320738725486418004701192337175478117026439697031462361180324038544450723753402846519731908503949116978812841497201119103409772457270340059605961197538918709309004130294868847110690336360689446090125918336908930881873778405661757289469281163974774492810850778950071063044769131228124355961427111369335109426492177657001035045332525699055300921341010989742896430768506909949340276549373661076950964959025967328861569387160956730002517417236732463510495205173523163676450203614971844583064927040066684531931069310935516821795449174271052747559395296525950219449541557191520903507653089998307641491381797101485104546410643'); +INSERT INTO num_exp_sub VALUES 
(4,2,'994877531332185148698005470964486.29284789439497020016891341359478477855230977564514122455228420261834881663435710678023233603955522003691551934167083188036585971868561017596992548582038556784300918537917030055337559943480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (4,2,'-5302078674303935968062773235453828254014583744527466365136.236414807326868572353809920518232561005161225922028750078608989965741402418802255050636954800114792425419735155504035469350521800895164087027043476055514245942961100610551646034472084954313670284875310691807937254054948742125729353864014122131419164449567115006621212424805182687707372956385102095255735458593389920872596796806885847543910224476727171570873698525606016990229936284811067826588349092841322512643043008589065847223683467371925773023109720951609815041012521485326120380123169545818055967455575736140138663815073081494226676896278654189873597341203197903408668523514375373841493189836809506003729379742035629498519683885268256481104619815130659628225053833297766479068686119691010593208135616363994230674606991733148502293102108193522604968743948323130517040609601859735899914987426089053869350663'); +INSERT INTO num_exp_div VALUES (4,2,'-.000000005356818439105666775800262590702859770599410113087721172791624002387236505438218124867814437523686300450045582100868990117124343222534568799037421944272316277130975314766456260710406160143182498931595199129228915695802952695510723443157825968340043198200740606202264287904755124946591110599335909404657109057432686191440989434662797205973563889238804413861126260401987949920244286377128599413927273444061572120561496904543200956508673923547626768641271397088562966176629018606103663605145666976048261236691866387601532424530473754175270500777679603569715192364542901360534980926452487443629100484491344001509360344122933911316486556042277769848194790964257060927912344609376571637126617813506411190014141992988288983968823792971270853369317867326071952900448455162898476163801382836761898292684175721846'); +INSERT INTO num_exp_add VALUES (4,3,'-60302029489314054989387940744763542234.98295358053252401308872309802346144227050959966671157134780970446370197110016237152333448347415674483796371931316021552756816073493808344537122580089676304958104270609762310229182150728136567294798680824019082599362332377530165818229609055765904048195574142709698758095302560470195171027219786996322461803443213101532716728918363951912367135900414238535625075942525108530051828834829820554490477645701692374399416239080329365045332525699055300921341010989742896430768506909949340276549373661076950964959025967328861569387160956730002517417236732463510495205173523163676450203614971844583064927040066684531931069310935516821795449174271052747559395296525950219449541557191520903507653089998307641491381797101485104546410643'); +INSERT INTO num_exp_sub VALUES 
(4,3,'60302029489324713745939828071407972725.48158890028513260568545074171830840934891554534052635383222518357552878529888177277886748756734050012959603126757618322788700853025193884017088688974683399381224865109134889560766307825097103477790782590061456916367930139323346273315068375646692125800496305291080749834712822775973790354498408104142209966769395239768969172107040437333428573572464689550003374384624966403962290572373571842567623422963022155546431883766327294954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (4,3,'-321372325955692885069615337209737469749246561535004445508427591.072860243358366933071485495726715620133686420023451450292996945184959542770492705998350644739298629407567812798540119555932604687814429669592481327761428042980782672136901602006622227365754036664912989085940235439697789102358431343119457114603363936544931303133371137532006899162833369543279729021228901466728220729625107362063321334489394782322741444425117731922691457341543446841167138481424319752111748042440994701571955325673470021626946676976482516292402239416632497972073915818846704053624707839813514171497746804751780741682011937606462260710753056669269928580460921188286249923152921382198282201761171043384698319895970192114563900025573490442674225227682235790590616707857188385274186584856872573669591460447105688151281208238908470285147895678001948902280493477604361481216667716971590499226735103039'); +INSERT INTO num_exp_div VALUES (4,3,'-.000000000000088378091435340426596348183959201660680284222502095357746364378698792730669202270228092348823133529449019715406417264278615046537007844589547485282959556860316942508808911542109265489435572674031608663747132688980867386885961271358592278360097086532747883342438036287136994589308551796702164612609710942175900921197001888540314760352113821737014875886635147123114456910985089625906448913621495025509697742196814421833448856595853403450682101743559369637786458968714240975228615283970739279506239628546165569688434254286341567486905374255702980370754235630955328837646999003123103831262789115646588779721625156078607919060762857866951417867378220773543985422722165221371084387943737083254760594128718841665355053236168688218864433967871311858292181233490194833547273501436630325295640020916257836404'); +INSERT INTO num_exp_add VALUES (4,4,'10658756551887326644430490.49863531975260859259672764369484696707840594567381478248441547911182681419871940125553300409318375529163231195441596770031884779531385539479966108885007094423120594499372579331584157096960536182992101766042374317005597761793180455085459319880788077604922162581381991739410262305778619327278621107819748163326182138236252443188676485421061437672050451014378298442099857873910461737543751288077145777261329781147015644685997929909334948601889398157317978020514207138462986180101319446901252677846098070081948065342276861225678086539994965165526535072979009589652953672647099592770056310833870145919866630936137861378128966356409101651457894504881209406948099561100916885616958192984693820003384717017236405797029790907178714'); +INSERT INTO num_exp_sub VALUES (4,4,'0'); +INSERT INTO num_exp_mul VALUES 
(4,4,'28402272808100253242547006276715304015308580784958.804614276533085644370816876160290159450291717634111299841065255625515058118012211808741402904995080624675460593676923639082981788732031193774047612589113654423166826140872334380708795266307037944059108148612979119729408762532396036043629484049508789880964586236575769826806092391573178899640321403656891487586452524427223891405519836671312830183895761747460911777623703557946796784873885800089025388390522992806365773290733075927321101736155663727528284512100509273076328103465333687228713897893434161293693971954442699482857938492961830350598789444266860160794913830991304996676299650460125000959751177037694425217989910261807246272771711816326991282202653917488360776928533800529297474279497910326579608191975246060946079639658615178160271122713225105861574160788280907842327681375920919676063500116492292319'); +INSERT INTO num_exp_div VALUES (4,4,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES (4,5,'5329378275943662669459614.81475694159581596077242547133292502869630735172901157043010370467618244548786897684821457816189831652076071977025794948484549600736179389638319303817478693948215387894509009504287664213474693208847025374388286162907794727810231557001266897729978691844410171412189947386181530441402903608214502713480332746271552746231631136145916685939539173054989927058122097304419584979598595477177513004218594211597809300517607260841648610322863666300637648662611916496850248528515936635845594390453288113296413254893687029540384176335735114863908372780241463999450547422213639667099644505472777149095004849805371205203850993689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (4,5,'5329378275943663974970875.68387837815679263182430217236192193838209859394480321205431177443564436871085042440731842593128543877087159218415801821547335178795206149841646805067528400474905206604863569827296492883485842974145076391654088154097803033982948898084192422150809385760511991169192044353228731864375715719064118394339415417054629392004621307042759799481522264617060523956256201137680272894311866260366238283858551565663520480629408383844349319586471282301251749494706061523663958609947049544255725056447964564549684815188261035801892684889942971676086592385285071073528462167439314005547455087297279161738865296114495425732286867689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(4,5,'-3478781676337858247983014311182511.567538638808357215203593479841446379226774481291286361639429856698999485760647422501864626078375852610019829111004807806660731243672830787729048847342063218718651165150612717759770504648306347926061960607388621011846314969634048226452709389995594961695723139571002939804473057725442880410434039783304583526414509590532906062732322732569475349107437896717416548237633532805602064623969799081086996320156575550896200848758685986331692388099427314008504506503745527468550106879602399030419569897808150076298414568875477195447656904373310322813412927463518325927626891046356679526447117311923853482118502868148386882363449163182892615259995945992014431502761210899772725227648729095696228388558331052524469604046072203605897109629560683446827492904111565278516043939137760721315953500281379039771826554155511347152'); +INSERT INTO num_exp_div VALUES (4,5,'-8164430956184510.184223536017248184022252663660196916321116266103608317725855237211273642694947892658721606226082017525816544904635887836163201565923338826779819876742736219975639586566502584026349778499211535661173597356253186281116862244165796632756909578140184577853088376334255860281874385669242675881761388233070861374295536603371778669602656670852115614651462552069294889723058758969660566508798011830996965570446030123780674316363670374970480994905368006454513642480180066435609577311074332150098288374616437489163254821095377348025470309665651059603665062887597814064136313866690824972464351274062540825405003954064175728198182815347642172934453828192850870808373638597839434504241236228591053696481146252072190903430582534862988719805163692697482513169856291048966811374872266165034373412719593685881972700171726777938'); +INSERT INTO num_exp_add VALUES (4,6,'5329378275943663322215245.29625473207137544719284446115519970394719946335145777492574745992986971075733570324679065009803281404581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (4,6,'5329378275943663322215245.20238058768123314540388318253964726313120648232235700755866801918195710344138369800874235399515094124581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(4,6,'250145412892811547138949.592621291590152419206270097656346630226508074074623894951308487425470437268130465956063593951784820669318897182831355375451719125809800516979013437732298382708070979871283132689492336823087794373113039154669229889503700598930220858275174342776478898670277868700384853696009897221747924643343353942154528501454689084608965009561564638167714973711022212547096732831847202912862290958304510651828842182545311077713664465815992616213663619529378061133917572474298028065850515876361609671565914027186063801852554353160801534696062207299890867876199323530337336273950892723090754719547285920090419070001019943385293110663922226230169381423410428577990604776655422105400452217085311617728003688836185608912367677734364834577573255789160419371322775733777518997638403409000055707558465286469808848200141192627396502735'); +INSERT INTO num_exp_div VALUES (4,6,'113543048739697485358574290.758354267447744932153707340542459183720907885610125346262898114677742971240785031722334497858930434531517077525413654346644836353208132641713415396062580605566225794048569430676355036264762949452090151450855446984773994337170590068740235544320694721909983307239491151139099779296496785240814600627140543144068640768857707110930453204162312973998304574796413938461971472337040811785231390930046688391955000749644938061585377150632133417156866197053052425576957646564943278156977176976876921235395711611898108821587442609611001702344783440618040704066809035404237786023075676374788819144406909313755996914145273176359246052899650387182222905558751208368173052381982668563471143298720677965028880626152749773712037769548408324298835212547215352657271696665387200792785056233953536347605130973626194099064678842085'); +INSERT INTO num_exp_add VALUES (4,7,'5329377457009123250369503.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (4,7,'5329379094878203394060987.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(4,7,'-4364411947278810125327066890819882483326918.05664098958260550284395870948992407314161088028674246708928421994893923699743452802989464864039994566042797942433140378990308345483670828497915478397481687305406460330009319949623844175096007381662809083363069100235985794575399268709260901964834244796150883807308976949196661411035264619638771824190014274817662519438658481432363824187693821267613212631153175155634316128036152465184903927860719447693468054624663668062006049759837326188252927823612718163916100588143128358998656306593393889422386501730237442526450419990376323903182669190482615734972147533221144682538647497701130447816148459762464395194383090936159579764712919396391813914821973715879062992249315474841639591907249142779103650773383644785606333916967894'); +INSERT INTO num_exp_div VALUES (4,7,'-6507697.520580964829176145824902679560705744817573189143227837387224410616222039115571544850095278317993922427931439719549137387753697989249394347047436951117850128104928719365703899136632100669607126357491484781141296021264049762417528697619931558728863308905257358126654378784709213859234056696519305650316810797382293500878834933984458810656133463638442959750083607649924453935287420620424368291770694630751828333903156364366745210911640207075765008558904788350844410055253643515389003711759818446776538393914018427075074171758415188027562645239606914126802490579848138218395145734902830046359100742374008993296019987093605275289913663224324033923096998194326249508491872193747944673057257521552387923218450155737056841633810711295424578984452176016198348344913655301417872189073133147510027427530833694019910340299'); +INSERT INTO num_exp_add VALUES (4,8,'5329378275943671819201468.88995490340795935797824952902333498786202536079000703830146057240651898748760197658486790165425772165585380839129948178510273188565692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (4,8,'5329378275943654825229021.60868041634464923461847811467151197921638058488380774418295490670530782671111742467066510243892603363577850356311648591521611590965692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(4,8,'45283653791262997781451381354094822.762732909505051438036873220502792213670540454778361182993875916509061144859281577740137081988678361247725064336120451090222456518107029158304937620179032477664627949959143233370320432203497828243297406462513350790251761540074946469824444452248386782451723637769289822576372357189700319768797708375563651655860093365309717823602754924352327588945034832436331911584742966378275504545736896430718939807674966738116698454215555860047859161126694019895490767779791933882712567492115664113775047192011252893773389940988533801360010782816196288710063568554147458866942816721046004257953642508395867837127678980002737669139369781058046396738606563716339660654364541530532834806205571191828994250708412638796240377704994928921528330863683630622922959130920715261879547446054261914770022377059156125037157979236658010950'); +INSERT INTO num_exp_div VALUES (4,8,'627208063620965.397582272040628872773601055303353339700043792111288801181637510303989399395425313995651311362368773096988861977687484912995632130587762386590996099363383976320342247076516604162469063709298438133327434461462906199160715395064249299615054970359309619951777972710299484596875999967582794277241285253106817446259313281064844416249524876385699646393555435017820686376877981018047574348711991428666249794623006175739581915209218834701034964043360823844816042368184094857692062884223864639972005010863342567608351008172649209459933114800143792514183138995700133608613158857147417653998048890116531052767737435620558349226865105888201598712435680481803901906613772821370519525404423549161696526405320391828194356063547089626322474164332505209233143121068245585662919687001395119229263995765376465304715643388771609446'); +INSERT INTO num_exp_add VALUES (4,9,'5329378275943663377078725.59616792993138452386059664269485161374191901124632386474661634799161523147237015531446709484039091244606359050341194730653343894986479159670583937529516163204904273806158788218327396375034882788180783796976731912141525319602448709213495905899041406302673881364465504945113279286939663215197485367850132991968081639290297033476859158044889351836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (4,9,'5329378275943663267351764.90246738982122406873613100099999535333648693442749091773779913112021158272634924594106590925279284284556872145100402039378540884544906379809382171355490931218216320693213791113256760721925653394811317969065642404864072442190731745871963413981746671302248281216916486794296983018838956112081135739969615171358100498945955409711817327376172085836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(4,9,'292388240303165948041827159734686.255558469787242316676287235194652580157149226950109397295920730296960145548003120827363226435916209781396711693581454960342091452830648929118261388933297036933167543189308061917640517578583521401267417187854611829815212778183983326568586118831109538377828156118900313778053576483381085207892754728937946691892849474364477434665960112125254104966566712906532318984871145605839506991591027939136026602051635433295687547552796828217859648186757719639965988287173297286034098497871707197092627676226053609131138590878743560287292934815277894463305001278326023708395571840850120055316276256138004565442099731931051413153564744766098053176049414330146267604802971221161572130161432525297614616942172815141372973870720928125699420370428856022295499447755488148545048400795053604349570217878099721865670458104653570360'); +INSERT INTO num_exp_div VALUES (4,9,'97138902640718538.241246716463110895614166618530828908023040947887095196830690221211560526562522274118188963051412359798837957512805692731972838989047910709158995922699598619854907969493232150042212406549916252602794415099066259707018021422154933830674786488990033885447289593742424717170197810316367637885248684134204152352748803532396210051700193575105804898183523770153431536054848843504020390623875664696278263569145547515663340450903772852615789980257449146000410036925975898331113013857953289990299253584950458042598491897496393582249411290555264437893099880371008957017323366523688894303458743415715114628052487518110654201696604914159777300997374156315186315524817636714210119873791848535246674326877611945112249137224923201544452904111118569299934059002046318394345055859769572070097973298522564724884895879226870720839'); +INSERT INTO num_exp_add VALUES (5,0,'-652755630.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (5,0,'-652755630.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES (5,0,'0'); +INSERT INTO num_exp_div VALUES (5,0,'NaN'); +INSERT INTO num_exp_add VALUES 
(5,1,'-652670387.03916046850422757312745971450663862747133703839829692066597367760104802542475264601221776157515632293978442027199108085723617181683235487266149426304575903892721468296143475297345699313102262188759506518376019936160961709578829069446312051432780603656651983414612264636232727512091101057374054475214114364113300402823059519499217878746766275164739724770556122895799337810694888119810524986616938847385753562624139431982468828696587199570410008890188532132652095915565323400735066310142303225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (5,1,'-652840873.82996096805674909792441698652235828221445420381749472095823439215841389779822880154688608619423079931032645214190898787339168396375791272937178074945473802633968350414211085025663129356908887576538544498889782055029046596593888271636613472988050090259449836342389832330814473910881711053475561205644968306669776242949930651397625234795216816397330872127577980937461350104018382663378200293023018506679957617487661691020231880567020416430204091941905612894161614165865789507675064355852373225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES (5,1,'-55643106304872.575994253221940844841058071061962511162776681458310912066379595519265546225338405882027547140476045378015935579066580347282075024392379464189067155567624835346798806677988850250198082355055954078446421075165109896091047534711081616362392995575466807084807876544560268050611445006601394735810211678919646667455478469014906335433468365011768049600750224822391684377238242162320161552720449713229523135506671063115436813348612986916614320012995541575293478341408982118538094438068036422562665160411591652618670802973618768526197813319204816293073794413317669922144705633308090832805914096147659820167569140291210526520361556881576175809360614782817717579318298657744021133210954279487777567785280633309576696708168342539425395482429923273623865667723482418178781573723597156804085501875735112311466228778929147929'); +INSERT INTO num_exp_div VALUES (5,1,'-7657.550797567691019915353529993301413746369700087741672762343206271266232635965032053368224472333368713006346867984576168784127503674579531243603836945595880917241997606783133673324236134063757452734295148763280059050480246827193380861494669624151921824660313516974440913733511526807313019192263170823268678149435664224184903925632177789052038092611394447709922076676981043877747276056677801802695466205531230350209787298926245402046182150996849906836743231861317120171583577624262765589605263477198809166390259128339127005924586833372241946051704497188891325715185091060185547236923494393813210904033520844572880475265306843414506359253445517738473745552980984097762509546161690823646176501838559393690565709795724159196133663168004773260451322595899506776323262195323943138344537866088159583331807728944620284996'); +INSERT INTO num_exp_add VALUES 
(5,2,'-994877526002806872754342801504871.47809095279915423939648794226185974985600242391612965412218049794216637114648812993201775787765690351615479957141288239552036371132381627958673244764559862836085530643408020551049895730005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (5,2,'994877526002806872754341495993610.60896951623817756834461124123286284017021118170033801249797242818270444792350668237291391010826978126604392715751281366489250793073354867755345743514510156309395711933053460228041067059994425117350974491367099004404995846913641329458537237789584653041949090121498951516476399288513593944575192159570458664608461677113504914551578443229008454218964701550932948083369656042643364608405637360180021322967144409944099438498649645368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_mul VALUES (5,2,'649411906691138274293985410502516861224852.2323455192714410716272307781034189160865613770320102043319541634113746032638191509585045862973333645830298922352816245477556264222094036953195419857712804755170632292914187367964994214922001758104594052499795564860466055599417895782179851297585155129541589802249540436678824225950907268084876110445460948679383611117263673106597132046331719468816839434908155684738864149955129235751738204036443603521478609787295079710078973503970964790273461142497259987849074597264522099648376356902360358310245001183020992360260836105404118742418040965190000718736837422434593694808973939805954329718232693154128543253581495885789333274488461716809104532693754070810202831113003978085636579574171344721710232931261731022478029314435363413498991740750878099825781577297965642009156858479681236085226911858782115'); +INSERT INTO num_exp_div VALUES (5,2,'.000000000000000000000000656116570506105776235076334177868550033347254561166417969910286926369599900073757929714260350320362090452092025380232792749476245042480546813848702351830607516880397305138543526307608094143028291193163613755680419049060162928958489964834941920423432354996040147818253087783193280640282263490705632002572757216731766513434035163528102590524432221718194164133959630768718395847710529339782880381264265894322494716854757290930538739000043383104085867828258790010654331660516512156519838978751447311068903958136482041673109857552178367614498426226323001399275980281507353231821022591045797658991388304873240910526149138339658220844723880158150606035181559877351791752701872877147074033569061408920725522180134133183999181370354585872214368766629114773129541658653693832843354053701079334077'); +INSERT INTO num_exp_add VALUES 
(5,3,'-60302029489319384367663884408738513110.66683195868931664491302527038538338065260819361151478340212147889934633981101279593065290940544218360883531149731823374304151252289014494378769385157204705433009477214625880056478643611622410268943757215673170753460135411513114716313801477916713433956086133878890802448531292334570886746283905390661877220497842493537338035961123751393889400517474762491881277080205381424363695095196058838349029211365212855028824622924678684631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (5,3,'60302029489319384367663884407433001849.79771052212833997386114856935638647096681695139572314177791340913988441658803134837154906163605506135872443908341816501241365674229987734175441883907154998906319658504271319733469814941611260503645706198407368762270127105340397375230875953495882740039984314121888705481484090911598074635434289709802794549714765847764347865064280637851906308955404165593747173246944693509650424312007333558709071857299501674917023499921977975368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_mul VALUES (5,3,'39362489275784146262776411377472433635883331946.794473520543457442955620133347015506556162839462623905489255080102447195050109095701660164272430316804466254467810714209179752718730906325952685817112992943656292503112803950215110778476301809440329937774061163668461957943313261962261081942055908935814323069621279128270849852239727888939033546870208376394878842958202403235309372240005941467570230067124830916866857395233038346727879951123599893174252558078732888910139309038957525961212820831321973219557165558911222848692996406741318948607549825343491479728117062814094258484536263158005174429922237853707635743736923521032098496725445243775790161216159399180889906705265012270270348146530113428221072591696851818281866095288773371414866822270689959827332258348570976075184933893434327278299820594014788148344260948638847457822697682605612771344335201258128'); +INSERT INTO num_exp_div VALUES (5,3,'.000000000000000000000000000010824770508763323320533297369674519056450544793568147911931789010432012750062661590994728968589403602468229106206242395792957238667714358401601098858606386995096923432407249369639633268143022787987190106724545750803196130511146323174462918572423414631798141263222875752767731279138952850500369328934959764805948568471324562210715908420467881411844098258193571194910997918428786213948547748701831331312040839544355427357749520227124858111324859160114175254197992204974033767300989488517391063188153561391320190653403747521648794370679322504188364455328709488846777004202196382575648619395139553279192346251133156445942281048959845827006761160755031086836046398020850814350246219929303018051720203943879538087954853996826539712240458022307680912400297508925714946398031304516583939283'); +INSERT INTO num_exp_add VALUES 
(5,4,'5329378275943662669459614.81475694159581596077242547133292502869630735172901157043010370467618244548786897684821457816189831652076071977025794948484549600736179389638319303817478693948215387894509009504287664213474693208847025374388286162907794727810231557001266897729978691844410171412189947386181530441402903608214502713480332746271552746231631136145916685939539173054989927058122097304419584979598595477177513004218594211597809300517607260841648610322863666300637648662611916496850248528515936635845594390453288113296413254893687029540384176335735114863908372780241463999450547422213639667099644505472777149095004849805371205203850993689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (5,4,'-5329378275943663974970875.68387837815679263182430217236192193838209859394480321205431177443564436871085042440731842593128543877087159218415801821547335178795206149841646805067528400474905206604863569827296492883485842974145076391654088154097803033982948898084192422150809385760511991169192044353228731864375715719064118394339415417054629392004621307042759799481522264617060523956256201137680272894311866260366238283858551565663520480629408383844349319586471282301251749494706061523663958609947049544255725056447964564549684815188261035801892684889942971676086592385285071073528462167439314005547455087297279161738865296114495425732286867689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (5,4,'-3478781676337858247983014311182511.567538638808357215203593479841446379226774481291286361639429856698999485760647422501864626078375852610019829111004807806660731243672830787729048847342063218718651165150612717759770504648306347926061960607388621011846314969634048226452709389995594961695723139571002939804473057725442880410434039783304583526414509590532906062732322732569475349107437896717416548237633532805602064623969799081086996320156575550896200848758685986331692388099427314008504506503745527468550106879602399030419569897808150076298414568875477195447656904373310322813412927463518325927626891046356679526447117311923853482118502868148386882363449163182892615259995945992014431502761210899772725227648729095696228388558331052524469604046072203605897109629560683446827492904111565278516043939137760721315953500281379039771826554155511347152'); +INSERT INTO num_exp_div VALUES (5,4,'-.000000000000000122482510461124748279475400009367345900846466958806966807399903713411658400733717078392550780910604704603123670767210550800752620037863340961255721285160854785449315208955654408132775022766783343331151895973970395232686910362226184006990485313002943710214511418310741271074710741339586430026286272098156531835438969774325517509155992092194349661122678547097423264670055720422496527272118788005921590521726691666219504214087867030003203385360001614199656989667055583749577099440092378355805901262289841168751608673297446473709956390142112843400255748161809121986096092991616144443486023218404881798896685413932215981950393130292001833627899480153863300557853617312991880655905907971211246077450786084079040513198340644157868678782195341316027563717617074364438885981635394382733697473265872796207'); +INSERT INTO num_exp_add VALUES 
(5,5,'-1305511260.86912143656097667105187670102899690968579124221579164162420806975946192322298144755910384776938712225011087241390006873062785578059026760203327501250049706526689818710354560323008828670011149765298051017265801991190008306172717341082925524420830693916101819757002096967047201422972812110849615680859082670783076645772990170896843113541983091562070596898134103833260687914713270783188725279639957354065711180111801123002700709263607616000614100832094145026813710081431112908410130665994676451253271560294574006261508508554207856812178219605043607074077914745225674338447810581824502012643860446309124220528435874'); +INSERT INTO num_exp_sub VALUES (5,5,'0'); +INSERT INTO num_exp_mul VALUES (5,5,'426089913064020811.057708378200224487694731586862745370027417544052374884336177893807736467646454486029424673621605232432043672119510371547153895504456723242262639262542904151307250842477327375961936454637964429999741717244285121019840463692418987118402683746281993192269229200465080358289645050337976214115902915692028162689089167194843185708212911364017271332623359100711545479273675423617018342297822477514128997410642005300368966199980354369928371655155437291469427189561877718971914040675572136507472590254222870537216617260612835805368361975725573009455402822669103118872235140158440342063571894152305875004532651814592458133460160514384171804043127771746596286988679698684698755896736275307574630777027620558428909546664763675431701332632828281070572045822129984625797185173815273651376003614106277727279230096226977335510'); +INSERT INTO num_exp_div VALUES (5,5,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES (5,6,'-652755630.38762364608541718463145771120672223443489913059334543712856431450577465795351472116052777583325262472505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES 
(5,6,'-652755630.48149779047555948642041898982227467525089211162244620449564375525368726526946672639857607193613449752505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES (5,6,'-30638438.151446159804025029882398388155309149089870990062944469684482366692824338098201222171115395923414887930224163525189097571163687285244255335505387733673499447610577050114902372990462064696637481657064525319516004273769831260452832960893174173254560250804003884280384718123289136453955482855362019158401218620018346500189769819687260476334734259702665316562988639223597110627626759216850014150105605927773639897638043177685498804811787888811168524202700283461266793154726325540776914500415140842975457394524215869103737379109516024460317825645645301237375972914247141703084877141866316168268901439172491577729880760950895760711857112463508064820414904611059588717092145484656103798852859978690742216940980929562068'); +INSERT INTO num_exp_div VALUES (5,6,'-13907037655.047994416383638650569341223199042786813441967582376077478024677494832069402897226848055043557486983268019376307288565911231748501636517992289743940159005664424461285010295150828744259113760652210086696250085454819340987566229400805422509198052317518991183515696724846560872057916862620762789778660622787735923967096950195583369113574365386627110408307941105082873469072519133330718161987781080307947247163619814890462416622144825161521790673339279047700672881113718394727610096366361422482794458375587355933614201638489194194834709433413694420512869179976485096875057742460003147602405353823942488343056906912173170809084207937229591627643451380735179767199816663168139837088183577975769442341678933576388936845704303859241320794255052627716474860113993958556604381707826493168941926878481079724185426298004604'); +INSERT INTO num_exp_add VALUES (5,7,'-818934540724601372.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES 
(5,7,'818934539419090111.56543928171951166447406164948550154515710437889210417918789596512026903838850927622044807611530643887494456379304996563468607210970486619898336249374975146736655090644822719838495585664994425117350974491367099004404995846913641329458537237789584653041949090121498951516476399288513593944575192159570458664608461677113504914551578443229008454218964701550932948083369656042643364608405637360180021322967144409944099438498649645368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_mul VALUES (5,7,'534564131989234694540350103.27821462973515555648644772098605028371173048154132108733819196629002548296868548691993248746628993380136454426833349407578676005545111508293942736555269938962058196496152360848131645787941032968937794930046928523006455386861100809286408671908320322523368135203881520526880998279355848280412933152306299256343179622513731096363088094541514890135766460631462465021694553063366717467560655272004461368865264059368514271105464855575429914212085797297268595943955105608543373940035636033207568676745293499106348500559628723682588033431457023964317090780615020801564861497990103549650624438425421690193862533733474254'); +INSERT INTO num_exp_div VALUES (5,7,'.000000000797079129642393611556079160915147221153735075943759104977169600937534508973732991117540626046659124172765761873705978811124901421049332579161931652390647472911517923131800238903184679028518657818755558526885018755394697157094867449047655737107085020874974955627907737126958129710597811740696534189608639914753884882702680512272194316887744972931453458445314561564591875764930680945589486999586667912816485821717403892703364322658245615895415781719033810595358092343690359557942948213374234065052300866661453767599465059289920067095083062096458980564265691295895672503728815182981118876144075942348853666085714846210822847053889733510154276933759200630639642310562242207518883342516103725757482864105340008709446643820864294556778969997115586027866760708448174502158738150605938364482719960251612464993'); +INSERT INTO num_exp_add VALUES (5,8,'7844230593.20607652525116672615394735666141304947992676684520382624714879797087461877675155217754947572297228288498221620714146356962938009770486619898336249374975146736655090644822719838495585664994425117350974491367099004404995846913641329458537237789584653041949090121498951516476399288513593944575192159570458664608461677113504914551578443229008454218964701550932948083369656042643364608405637360180021322967144409944099438498649645368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_sub VALUES (5,8,'-9149741854.07519796181214339720582405769040995916571800906099546787135686773033654199973299973665332349235940513509308862104153230025723587829513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES 
(5,8,'-5546455599206321494.0676583421119904300307105296377723816472192007866147764761501865875232824814135783697976183493106885436876081315217834621720906478074798596116645640251460842350553806256223963023430631066024389364515688765194373161385579258482225808660340732705687558150699172147896486727530192499184101617379930846663835628510376484675411350654979679181852179924386290069790336316958202582966248703889464308649631486542724072047294216362186036638115240070658004553260251510288423749333873893917690832829128021808383128393431810674177390352413548658782609064839524756041501835115152819802758773711821322162752064589750295542985780512921839490040396053737870038534216948323935020460307350020911362024271167085905714873548388570602799432705061561572854498075600'); +INSERT INTO num_exp_div VALUES (5,8,'-.076822018213756690975099471985461347542955923191183223634407380481978143225129486622351714276452369661632980197282261508936298649901018470846144321441236073683990324039849865750139470288565622579952182053792815638469841531577235191276257498209844422440366423136595067535337374223115507557306455001792362506235886189722508617024948653046102060677266555476719102193278190540414934812073355995577639986512222998268934000209944414236509139290657402937840986061987219441410741189615344050459067454369371094189930607834375561948483494321255500497786795636801854613881105643003358210407867114145806225724880370339074242480071595684502491827709175732777776915682786771730423733673667248186336046898260378049328204094804755195626798951644386924178161926128482002518979482630732440619051262620098544265763306253807191182'); +INSERT INTO num_exp_add VALUES (5,9,'-597892150.08771044822540810796370552966707032464017958269847934730769542644402913723848026909285133109089452632480800168074607090893991283808726990171062867538012237270000932798704781608969096508450960185964292594677356241956277714380500188870696516251767979457838109804726539408115452577436052503866633026489282425086547752714324273565900641436632912781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (5,9,'-707619110.78141098833556856308817117136192658504561165951731229431651264331543278598450117846625251667849259592530287073315399782168794294250299770032264633712037469256688885911649778714039732161560189579333758422588445749233730591792217152212229008169062714458263709952275557558931748845536759606982982654369800245696528893058665897330942472105350178781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES 
(5,9,'-35812445701642379.972368737320206275515144213236752803936806738624588812089615098329765811617509505790110909629109400553415312470540217508070421816878544125783329593128638405659896184248784794258084116406472768709113030915308410565617764394827427154923321461158387012978726512246146545834669665093228316853342805604075936530371665576147966721599968786161939347726656168798065647411457701453987215491345496003650288850096338695703984042549594979897253521041581573388369367579323607093487743440894765114619634001789457486407909224339065748496715380572175183589195611952939575073075140094901024063428239223964510824958346570603142906309198033196987949067156046076497974760641964978711558209708743776024313916111738542765749928287600981397080809041007714387564206594515733287925008053261840295560398311905155157989225181164097547541'); +INSERT INTO num_exp_div VALUES (5,9,'-11.897816658873986795664687519069203701902563457968097729876034796143085813450454323128600602495745166997629078984618283588337379184733369491549230343315369634754204412939757136108898254582353378508832611703989221079986765793923635928759179573599208612516427628403686659479459867527627014558600521732194240404211484706621458983727740143568799713006127585168144158660566534382037451913967363675002134687952374080694449905223371627606557311710348820900963340884001770733452314715448053233208783321215998063958966729954113843581448912079950334969908657535514847005768455377990262943747367245613296497099716892292154137652893990339292671106003657659470243633112063075297194691349631518467702876183897580432003030164590920118726657290102377710611324297862045849839571689192181090062958059281673245670440852080202548743'); +INSERT INTO num_exp_add VALUES (6,0,'.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_exp_sub VALUES (6,0,'.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_exp_mul VALUES (6,0,'0'); +INSERT INTO num_exp_div VALUES (6,0,'NaN'); +INSERT INTO num_exp_add VALUES (6,1,'85243.44233732197133191329295927531563604777955507322414928382967007765263923984471408038635831036097817458527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES (6,1,'-85243.34846317758118961150399799670008360696356209219504851646259063690472663252876207514831001425809630178527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES 
(6,1,'4001.075404054519813215296429095020391062109905613738157927030437221793757373268325953178030040276107574363822832168160758728653712686313134828282109532831190239521843808940611025488601517574653932032236616573457735900045655665690517797280666732780030171712864961531623060353548802466577910774711998056232872212688464691036260746751992072745518373073825852119460094113694393273456369345499434994672730920070410547163082189385645712866100999708173472360864669110044660667614583576570496399103026286828660558854973376227247132815728164629722965145778698957093136175449225024685874279280018547740'); +INSERT INTO num_exp_div VALUES (6,1,'.000000550624150700285432940805295709861455424264970126953321538967550091614148982212874391026630805836518138806917934859138493583812313778188030836027246840794439412443826640206464415527687555214009725107630387889854278497875708390050387195108441635824296563108288712340902423706104029452615686971019125750530034798026103476074158922893374911891438688457439945897348811702908216883650280617098402133628688982793791562476980709924382381505517834196446365877784931355599480881104446907801805570471686295270927836995181422963320376948188855989986414581755633425437161760674162177776773597848142496583128607548351599750592863590334617838124741567654525843413232313914310487355539260264225486180000012813397807525203822863232682089295055713257835007742845010741137213301116647610033909062369843750685396196342928455'); +INSERT INTO num_exp_add VALUES (6,2,'-994877526002806872754342148749240.99659316232359475297606895243958507460511031229368344962653674268847910587702140353344168594152240599109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES (6,2,'994877526002806872754342148749241.09046730671373705476503023105513751542110329332278421699361618343639171319297340877148998204440427879109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (6,2,'-46696638263247522384986521136500.479312417066793299922708112595886608370451213741279484136907754744903470430131032928908162742687359367826808123516519335458861613010646992354378739165872253762686683966945711430182491860196341344982195078000259063231136011430995647812149294224699587849791008794261026932467933475782780'); +INSERT INTO num_exp_div VALUES (6,2,'-.000000000000000000000000000000000047178744084866106587600962473825168237820701199970144691815329658682341685812472535816245052671243808078367856957579485152424914481414614360809698177236664771558713606961423658442962083541733004775309314926918118528217478256885324362912426275407382550929085958089798861918760121727491366034496581249711153289495601712583077918760003840368008056353090552282274780428335438032908213783490070198414584291402513547386013689752310173492320159738977752795528725029134841933604057954874523842273790958618375118974623107241366036640538085329921129023905888674299774726871808862832797230915933851225308164365269753526489223540580759951230801125605963901491073619448437890841032149898629231552019804656219062534881074125995130202820302133432951999011667568746004715268323913437054078537'); +INSERT INTO num_exp_add VALUES 
(6,3,'-60302029489319384367663884408085757480.1853341682137571584926062805631087054017160819890685789064777236456590745415460695320768374693076860837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (6,3,'60302029489319384367663884408085757480.2792083126038994602815675591786611462177090630181693462735571643935716818574980747701251335721895588837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (6,3,'-2830400711649493468815157129316992649.40542786074520931471973065281957756940496588853021620372179463538053123396140685749478530925306163968207226329985017644835203709485594362663495728106061878665324856417118064730721101615473194292620972173690618491026470353143141125614124440035267592258385099934706896692953497971326605145704135723011753705907329979207428661473172503098296622281647255008204864404416199384701720347319806375450632245634238172654086373193251877533131784268854289406126119630708578053354762596511353053106459297339360827562281168219966099848212'); +INSERT INTO num_exp_div VALUES (6,3,'-.000000000000000000000000000000000000000778366376597400971124059102619954214055884926284646546105035591052258074563706355894551049631537984053410850060739107742208523938741961208742831871056600773325053133977559789796700130019975964192371715826863472981072974742704091801166438465082519558956925444635729210849210496466189037623555622901738570979273502405907969114110345815802999687171113749364073269902319653450479463404003706147915064100959774312307195946966281098140229199529866429134937742584938255441169541436021827079647129394362379406256722903991353136733939395366152312959281905058592776286736536360235356737359904478313225848562436632109470589310799000750518904145312512621838935796912993778920622238202744037977772169066929474233952081158212174549695244127987299282384885288897893503991509410567351494'); +INSERT INTO num_exp_add VALUES (6,4,'5329378275943663322215245.29625473207137544719284446115519970394719946335145777492574745992986971075733570324679065009803281404581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES 
(6,4,'-5329378275943663322215245.20238058768123314540388318253964726313120648232235700755866801918195710344138369800874235399515094124581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (6,4,'250145412892811547138949.592621291590152419206270097656346630226508074074623894951308487425470437268130465956063593951784820669318897182831355375451719125809800516979013437732298382708070979871283132689492336823087794373113039154669229889503700598930220858275174342776478898670277868700384853696009897221747924643343353942154528501454689084608965009561564638167714973711022212547096732831847202912862290958304510651828842182545311077713664465815992616213663619529378061133917572474298028065850515876361609671565914027186063801852554353160801534696062207299890867876199323530337336273950892723090754719547285920090419070001019943385293110663922226230169381423410428577990604776655422105400452217085311617728003688836185608912367677734364834577573255789160419371322775733777518997638403409000055707558465286469808848200141192627396502735'); +INSERT INTO num_exp_div VALUES (6,4,'.000000000000000000000000008807232244507937251856465017967626593430084223212999583902527587737263981869382895220711835510154989851222501080395520249593128253795609198666884523792646863341248402687314509176781281863891589925961900674092953408613128961234166906173266411035009516545964362406728942021813644419154548354247112601793685146960840364604115937119024575638240439041250900118977183124605578660115160551830946251713350556181960983267689939549506518185340972020820080460565392359379680036788592213479105831301723237102710863182596413567756605711230290883888612188805367801369264231165178487334557824054205160222371548005742602736713668548450400926514169967213301919971189065307721110805424950794015852531342286935114651278691214233054575660712537044810163930633456573860895791198853393107188289695511873068'); +INSERT INTO num_exp_add VALUES (6,5,'-652755630.38762364608541718463145771120672223443489913059334543712856431450577465795351472116052777583325262472505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES 
(6,5,'652755630.48149779047555948642041898982227467525089211162244620449564375525368726526946672639857607193613449752505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES (6,5,'-30638438.151446159804025029882398388155309149089870990062944469684482366692824338098201222171115395923414887930224163525189097571163687285244255335505387733673499447610577050114902372990462064696637481657064525319516004273769831260452832960893174173254560250804003884280384718123289136453955482855362019158401218620018346500189769819687260476334734259702665316562988639223597110627626759216850014150105605927773639897638043177685498804811787888811168524202700283461266793154726325540776914500415140842975457394524215869103737379109516024460317825645645301237375972914247141703084877141866316168268901439172491577729880760950895760711857112463508064820414904611059588717092145484656103798852859978690742216940980929562068'); +INSERT INTO num_exp_div VALUES (6,5,'-.000000000071906039575366987930696117572143566208825430801491864851999044659045681114433294052065377679745375399878664822361548237094424148992770296383642432040129230180142339557437679166815114510467763288057917694948929009212876391059413439647163295629904270262780935228234994930653489111444964446097124407804311494588517082748514970905563707392765567625639455978464081409330528324962333492925267647686759704415549221137291475247571296491073010175087298752769122449499990102435819414671847617062560524758344361194566796343756743243766853291113852464023843527189221162680613675369708907935197867458588904367993736363321133720345058432019986643353417257503619558797249295232894674255060861358071309619524800424087896023710729815248847792174290644245138831518072176198607255346603270853333176255533974364728342822'); +INSERT INTO num_exp_add VALUES (6,6,'.0938741443901423017889612786155524408159929810291007673670794407479126073159520052380482961028818728'); +INSERT INTO num_exp_sub VALUES (6,6,'0'); +INSERT INTO num_exp_mul VALUES (6,6,'.00220308874624532134736695825088747995945783791378828770826401323533973395137378460250799184832278118133622563295093909508983301127615815865216895482784469538070133388154961402881325731054433770884496'); +INSERT INTO num_exp_div VALUES (6,6,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO 
num_exp_add VALUES (6,7,'-818934540071845741.9530629278049288491055193606922237795920035094854496163164602796260436963420239973809758519485590636'); +INSERT INTO num_exp_sub VALUES (6,7,'818934540071845742.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_exp_mul VALUES (6,7,'-38438389630389612.0042045464692275627184627672063157323631169405883031379129843031477339360597564128205768842448328088'); +INSERT INTO num_exp_div VALUES (6,7,'-.000000000000000000057314803440765029050667129936880528769333499793237773980613524885506515999851858649385968476426313207429914995755091541422893944525222307473169425244462149015717526718376299808423552027796204632286454853167559026787019718806449038446612978917236245943248168920696452018925986743620392955122431521581268518101342690974749463089739042586011924590503136498488946387508310209984849243014542648765897536338824721211252335866349509669538308454367849024503312249951727948786393404944555844863805495937835281927012430439403132382055464307180153473189842433614777883826783689904293115204700185380661601223693428304020047393499702811581067120117405280772944184877279069842269329959037186324135435468322336398566440055479142909170224780318371473684868152271947368867666706912563225912012901437076773416'); +INSERT INTO num_exp_add VALUES (6,8,'8496986223.68757431572672621257436634648368772473081887846765003074279255322456188404621827857612554765910678041003765241409149793494330798800'); +INSERT INTO num_exp_sub VALUES (6,8,'-8496986223.59370017133658391078540506786813528391482589743854926337571311247664927673026627333807725155622490761003765241409149793494330798800'); +INSERT INTO num_exp_mul VALUES (6,8,'398823655.819545574205652791249227663407026876411660299394659390409794761643751582473390322547798567169668246138880832642141417531427935520467563318363116897177899262525720710134129529640376020947774470933902793259531840625444267816319963200'); +INSERT INTO num_exp_div VALUES (6,8,'.000000000005523967081937952184172713994498918048454262874017009201501812494019618863622631634736130436187167745347383745890248619882896153083428308074678908731005176810208100004498415662458272149380846809398637385270265351808328466537502823071145089961996689711299405627596294988646826454676198092260759424935699382655736524042353938814268760468122584678267125994645166955751211397353140569987758938572953312303398024147927938612934833827734142292697389251052485981023756760420972614486278837214553818521196182883489483756785207650821722660455451660719560529693418375773124813290305501923899840247103166971466167032437598057958226806335324315214908788839919408525748236713611579486768218564733151121028172253396652755590051310396973181595992981076269789287489208817712754098019817792758730835341151711523474207'); +INSERT INTO num_exp_add VALUES (6,9,'54863480.39378734225015137845671346015520435061071252892396685718794832880965812803098645730572474084523997120024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_sub VALUES 
(6,9,'-54863480.29991319786000907666775218153965190979471954789486608982086888806174552071503445206767644474235809840024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (6,9,'2575131.137912978352131546639620215541477987701194164886305951830806120142596646541302305984776928560906754259789485960991272272782091464270104432109904222200473616116525297615725803495463468272171161659654385929185160689572943852767523792651123455283534072794326647404332228203001469884016996499768656263775233430922446983838511590562929268821678518640501686017030536100955531423152839988008496919169395159653034847677470665418765966542111749439412'); +INSERT INTO num_exp_div VALUES (6,9,'.000000000855524875533453524582534418967571681572635027972658867593464437484123442242521660317156546196609749230372398872487667521984251509483676665788527375343148382604836976332389890799079878151841905152004537926201190193814594954194044560537664560344224646197027029681984683465852110060077865421064400958821808374370779297676624123638191407441015008434084079839721156870032377372497814037418047056438760664237367081226979226606227037631073946209105678283624370820396871058367779887709720661001099338250009251834581804647326512873792849059661525874160414378459696930831877643599421297749483849526695657467708603491876916749718079725746259119898269814551222336219537198318796277931946529242436502235147453584237994498566122973953203597470078105606906752099294162422474758048436539653041606499637623370030079916'); +INSERT INTO num_exp_add VALUES (7,0,'-818934540071845742'); +INSERT INTO num_exp_sub VALUES (7,0,'-818934540071845742'); +INSERT INTO num_exp_mul VALUES (7,0,'0'); +INSERT INTO num_exp_div VALUES (7,0,'NaN'); +INSERT INTO num_exp_add VALUES (7,1,'-818934540071760498.60459975022373923760152136399214017262844141729040109985386964272131706381326192223266583769046276181472898406504104649192224392653722107164485675679551050629376558940966195135841284978096687306110481009743118940565957556492470398904849289222365256698601073536111216152709126800604695001949246634784573028721762079936564434050796321975774729383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_sub VALUES (7,1,'-818934540071930985.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_mul VALUES 
(7,1,'-69808760806266041400340.70700818693892852138813934414383886494691670042143650609934777814995087699409404201920249076407981012095999320858479644760715204999741683528746097757549835956359129287002171391961763797857794730120426599135099619822532290339000466211195776337667123320942107370731349851576864242697412616810236323676004067839744992733887503405311090677026008324895177587064547630828026123718296429295638934384446325302964896473296829265805737112709269803814942537657996725913938408781715328945194948010970'); +INSERT INTO num_exp_div VALUES (7,1,'-9607014551997.140858001442365669993007297071681832468350855627077185145567261170534005832165603932891201648027598773639089125980996652005412450490063683624648655909636499261774535015914730479401090227915382926027949990128880284298688443593909017437720828163877690126019616194376778317148693270900349151496295698078575648169637635898560612738481294674167553369445426793073304518646116539082953755973571046622684332425840412198776081251646424875405772676893185726872613804612566569794177506268399878105117763696990094108960076591684779180089885283939385808214239337829666227427148603057941899878123459708920227867371285837642561064461118016739395972994827327543594846953341750907541716807985738518071480209106185726125017342997283356926976052909493074301401955202616191210810331245427141945840542129607439703255628683506772979'); +INSERT INTO num_exp_add VALUES (7,2,'-994877526002807691688882220594983.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES (7,2,'994877526002806053819802076903499.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (7,2,'814739569184924399102711674444306584731316176345067.39834031417849342571224916231092924046722938910652929295271097903377854123984307101079073134405782275535446337229706620713104545454319555885847481531722101704765783025789147453570970090'); +INSERT INTO num_exp_div VALUES (7,2,'.000000000000000823151110229758332661330617426417726331211894330147399760458555778324097596176117291103184653828305857999638466183347321835058943563347767579219763002258622507889760416640758842509635599414768344140175277742935564567127659688612699366182158030839083982896107176174766408199870924563237827899202849733606842856491701660599599211106794572237923985121475458446997860253437578966578617985764298513928307852082168209458400544457824307270777530312648199364084272310536024283945598340590403612752287693234647719354745060851129534452514828239800716088248915975054881011343555492596002595181046121935660176097475159074973635534016835214952415720717896518544064238656360099884889450237541254761746029507300068198731306211736696956568648033834554273602524147075895460874922913883751452403825099444642503437'); +INSERT INTO num_exp_add VALUES (7,3,'-60302029489319384368482818948157603222.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); 
+INSERT INTO num_exp_sub VALUES (7,3,'60302029489319384366844949868013911738.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (7,3,'49383414785234649002982046297226894664526726187218771083.0993243619030008310875293647868815940421844461627295157812843657782639833900543200310573708100000958929315945039020410482966753145208427035917753919085618457760620513481628641658765820294863970581642745379331727722585319163262763708386199720411053619449096019862596221607526610103408936214184850115071874430846697061554769773328338028749631552202705583855831155461651414320570061181212214810086436100771547030013079997847086'); +INSERT INTO num_exp_div VALUES (7,3,'.000000000000000000013580546907080371873577430837141172674171921610919544849037647398734065712983603204704663262116138799357430947986241590690589753181299773842880079777640016786921825609617596862828930939366173224366864448436461306602680780407912534492687474933386043505172346330210659476505435994582446405414027199938970759003336829722057241708213838318628292667946636226143164221380503228191376939596663443230082698085439531600756771639601022064620204571458766303985028143400866776954225590745596639602613498355332049777798367675438365442468743270334407716567057368347458892075084694158566383133325959042076573734408841629149903649365079563374278550978052491499304166424686842598833319515705663176855033865872333988551611996194856472662292344160194821687681312501127516922809221030420253714666026321243515830'); +INSERT INTO num_exp_add VALUES (7,4,'5329377457009123250369503.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (7,4,'-5329379094878203394060987.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES 
(7,4,'-4364411947278810125327066890819882483326918.05664098958260550284395870948992407314161088028674246708928421994893923699743452802989464864039994566042797942433140378990308345483670828497915478397481687305406460330009319949623844175096007381662809083363069100235985794575399268709260901964834244796150883807308976949196661411035264619638771824190014274817662519438658481432363824187693821267613212631153175155634316128036152465184903927860719447693468054624663668062006049759837326188252927823612718163916100588143128358998656306593393889422386501730237442526450419990376323903182669190482615734972147533221144682538647497701130447816148459762464395194383090936159579764712919396391813914821973715879062992249315474841639591907249142779103650773383644785606333916967894'); +INSERT INTO num_exp_div VALUES (7,4,'-.000000153664179510102140733858340480800294287837601105047285453457000254577644933901525444082336054243749405512900867540483190494113677173628646221933766421338612376123824684592850465460156248403574333545090544920568230979754949827013129083778435107488003838746926270955224758508832133483591156567868631938590248213604979638895901933775098150684618378235712437137852195098700137765601802898366867034641606131280434771339920637353140131159441790904703083143627590062236537714415872864218260252838432414759890832271190606933534662897006726154587341385852258168335058931957995901987808602365467861573344491265289043037273815504867254228957776127752540924854546837197432384563153608878864912196453587628891285275067452280357349897203095502806923463147414086919014592380804424300739713935051357374227246098303140106'); +INSERT INTO num_exp_add VALUES (7,5,'-818934540724601372.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (7,5,'-818934539419090111.56543928171951166447406164948550154515710437889210417918789596512026903838850927622044807611530643887494456379304996563468607210970486619898336249374975146736655090644822719838495585664994425117350974491367099004404995846913641329458537237789584653041949090121498951516476399288513593944575192159570458664608461677113504914551578443229008454218964701550932948083369656042643364608405637360180021322967144409944099438498649645368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_mul VALUES 
(7,5,'534564131989234694540350103.27821462973515555648644772098605028371173048154132108733819196629002548296868548691993248746628993380136454426833349407578676005545111508293942736555269938962058196496152360848131645787941032968937794930046928523006455386861100809286408671908320322523368135203881520526880998279355848280412933152306299256343179622513731096363088094541514890135766460631462465021694553063366717467560655272004461368865264059368514271105464855575429914212085797297268595943955105608543373940035636033207568676745293499106348500559628723682588033431457023964317090780615020801564861497990103549650624438425421690193862533733474254'); +INSERT INTO num_exp_div VALUES (7,5,'1254580584.048971438599349046867230181719371038956756285986415773300837165755558702217197735811549684202279755101552533605390208155708695952004683670878589028717509749282693444655857296902117478518511492735290086040573521482737598395369632843374456793385511847676556826348943588519880411018079886373631771830925920986588708409208527042927229627786932908015502292313887561198156623702404977221789649731458241770690830680067801377815840764873662400590343236662968218256211697981048576328148435241545372543075051594952109757428031762469834781538302930957095080167901199455226976113347018972534334210416375400979738414416582588689496706548495076287263281908191770792203069614447622517839588243746755480572371988630084226963919158931419126724681617069720048557166545204944250492282054791996953359013543036918134163144772567093'); +INSERT INTO num_exp_add VALUES (7,6,'-818934540071845741.9530629278049288491055193606922237795920035094854496163164602796260436963420239973809758519485590636'); +INSERT INTO num_exp_sub VALUES (7,6,'-818934540071845742.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_exp_mul VALUES (7,6,'-38438389630389612.0042045464692275627184627672063157323631169405883031379129843031477339360597564128205768842448328088'); +INSERT INTO num_exp_div VALUES (7,6,'-17447499423661151023.558342555162228919125358089491573318627107322332520978657843895009110781773496490472817700487707134216424855867015781267287628022535529641238372370292374146871103236048507252055787621394728096799222976387108688980537900309311204203302960751747509648304056939321473462375648710590981564101023812800603438271190184064874290215309040519813024962909469701968804925443161094255632624090623433640078421818321246597728308302979223833487133268472455479442002005374793705431817866798804822885690193667521606781156962792120052947767160957903073698536973292205899421787948529970837601521657406211962967291912148632072929662185840265855612193255596825032457033402506154930851214421895488796227471490998190312007513478459049382774782886773158311656817014322925167278223360446454868236479549745612973293185989975394307678926'); +INSERT INTO num_exp_add VALUES (7,7,'-1637869080143691484'); +INSERT INTO num_exp_sub VALUES (7,7,'0'); +INSERT INTO num_exp_mul VALUES (7,7,'670653780922685519356619170643530564'); +INSERT INTO num_exp_div VALUES 
(7,7,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES (7,8,'-818934531574859518.35936275646834493832011429282408849567717761204690035294074716714939441961175772404289860039233415598996234758590850206505669201200'); +INSERT INTO num_exp_sub VALUES (7,8,'-818934548568831965.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_exp_mul VALUES (7,8,'-6958475505053954666339703437.48985528725312694198056665033448258303533387675711770743843194274181580881296671866212320171337132096489224277825857521033238709600'); +INSERT INTO num_exp_div VALUES (7,8,'-96379412.478435590945480884955616049873645089637121682284625533034225619945532704111492738646389632607594293500930307222576571876059094206480673293295865214240456906965855425738072430281475736130342229749511650392658808510082775031098547507966544723255869156056349218776847523349173551313282283869146710349521487706884633419341568648959204688757523312579312713453540395840470692533267158388401676533369105590789036132185107859069994833345453200014884023709597817280132465224778002071890368479648934317322270613208789859930618055792958996389145963056607200020526949699302565905917600478429628844015684879886549766473809801710003649193772354147104446894109928903223843036925147624639466770660174828940577089095480826473544099693433597812637069287644606693066736302793687011165899362920686114156254982709172925265118077531'); +INSERT INTO num_exp_add VALUES (7,9,'-818934540016982261.65314972994491977243776717915257186979728396159058352649559139156429817562698954531329940720620096519975256547379603654362598494779213610069399116912987384006656023443527501447464682173445385303315267086044455246361273561294141518329233754041352632499787199926225490924591851865949646448441825186059741089695009429827829188117479084665641367'); +INSERT INTO num_exp_sub VALUES (7,9,'-818934540126709222.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (7,9,'-44929599044588573810654775.83678007633232843418115790847152455559258007804727916986432256198687661496804050903769496933400455947645400628259699874770581538122521805603947464462448454681701547899144129061961394870320463199545502030106801911915987309444301341575451240764927967432593181449618816978119423290767783843864768557371257918447461479570164065303599994081990686'); +INSERT INTO num_exp_div VALUES 
(7,9,'-14926769772.797708334489652004325241753714626257641081061212878627972973992233480868793527325656854681817156284203427388055525855608883067129036717726368707982450450575794623567027457808927082390474261155500697096284790656757163047499531247323702909360444831707029353441147768321257650234732286165724178549576948957405037843360446785505536809409054071975214796532504678683693402401018726571884721963641317944453797513145055081061680091585467186975354801535734149952115333241283186621720677488342266420359417174224757781125498130120775969091933838082305123652811689513300403051544682523761263183781206840940347226802620226164265210810994106136738030959199259066517106713585343004140573604437146025585149934286364795122716971496775012412420105368351774715982565252533025207453326002101655121126631180162560463548157187175671'); +INSERT INTO num_exp_add VALUES (8,0,'8496986223.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_exp_sub VALUES (8,0,'8496986223.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_exp_mul VALUES (8,0,'0'); +INSERT INTO num_exp_div VALUES (8,0,'NaN'); +INSERT INTO num_exp_add VALUES (8,1,'8497071467.03603749330791582407836434318377133169438097066269854720538319012928851657498035372443556191720308219530866834905045144302106406146277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES (8,1,'8496900980.24523699375539429928140707116805167695126380524350074691312247557192264420150419818976723729812860582476663647913254442686555191453722107164485675679551050629376558940966195135841284978096687306110481009743118940565957556492470398904849289222365256698601073536111216152709126800604695001949246634784573028721762079936564434050796321975774729383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_mul VALUES (8,1,'724311956372274.0135050255361637906710330203036651743488213007179039756514944640108625580172737414192938789413338554327986697518463087452612658955180411327002900979574347739956600177846996063741787205122007268468674386396156638261992679442768654367111433834151087792255469957061758837789341439211010331332174981459471333376067541234901538285101103690622656631026001337239036711179989456674399137008584021283568040818388709554256523118702728176420022080138548890713013682480239784198421500241995499841675772793497485550923152267616622892846304530712344886979674416990935007952941652591352603797627920865960622077762568060903908151958000'); +INSERT INTO num_exp_div VALUES 
(8,1,'99679.115123747637190903598543851248555278745675862923884476564848911494649941770503156134872464666625927195645517181131678518619856156844072856993813601495176097972982587061507650426363887871820112714099226501603733968262566093655417466145183587899155614471697804006772915054739361437054029183182533671508695646413074668188590846200362324428338974890534273352188276373478524543505805545661569395314989170104140776362043880099775594658817242753124957385625811310332354760117110779649164022618274859298031549851269619167173746259018497289174255201452265070501056913033329291819570027877856677145579673495987354805150868813877928857472561883332547900866904764950837506993759536410161752469488392566682723027340638271076406246129989851281210810196699482980833204884400423019400653089825859983062096326294783573417554749'); +INSERT INTO num_exp_add VALUES (8,2,'-994877526002806872754333651763017.40289299098701084219066388457144979069028441485513418625082363021182982914675513019536443438529749838106171095037135009526312783302868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_sub VALUES (8,2,'994877526002806872754350645735464.68416747805032096555043529892327279933592919076133348036932929591304098992323968210956723360062918640113701577855434596514974380902868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_exp_mul VALUES (8,2,'-8453460632655529853033389979024265783461224.3195241893307807116624750282852146303290708492834695194274289713076935297734670940696121761483641291930931061232942894577813178566088927221374036301485916497770984757492912292002695944367308880163698595015497307574177176409203214324418237020500352652934909632442547242092296504047310806151851207329042221920888326000'); +INSERT INTO num_exp_div VALUES (8,2,'-.000000000000000000000008540735921314463871578184793632135730756619558669911183806487803411545406462244216408739432325839683804021466133071768612386706692296158696852363349481716813410857655324486448455846562309041306880675446880859847445987588059144788756984750993583865748280824370754934966494724951583311563735533173023858438364336214213295786266815116844775733072416507474834701984381586060478606371028156925222726225495235702395502085206072985373035972506738983640539009567237336002073370431753469632428303255926718930619221521257726366850472572830063284204851204189447233044832163423057501488364913539948261528280564870049935369825245920984413480757133585498984374354957754078525161296201228031555280486615145365039415418251448980923331334883673792135893857917681235883506783408111446970710546686739582471'); +INSERT INTO num_exp_add VALUES (8,3,'-60302029489319384367663884399588771256.5916339968771732477072012126949734214868901845505193155307646111690097978112797961939995859130827784737422228762767014427842766445950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES 
(8,3,'60302029489319384367663884416582743703.8729084839404833710669726270467964301325349604567186096492702768702209585877643481082023851284144664938175277044596973126708926205950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (8,3,'-512385513828318260570283740065493064477880918352.732624553690077857674083796435724202494963885926573907185100543184828131859183999195040110586155435203949963570735841632689374488877298209082579317039061893012560130258753218955057387206477423088065663401594359617882154814262843273526859406265633827109554791772242178864873774889091687515990672487380368975556580539271333144212685871370972163560839446696514092637412587953506052848750866803569213269271165856310101244342151576488190595936869490659700946174362872797854591188391982770203203644172999264143929484089237665313698600170041324566984832357000400'); +INSERT INTO num_exp_div VALUES (8,3,'-.000000000000000000000000000140907135225782279761112255989433531718277338909398600029580768021365259747075253760824424092983497958717844671162530550507041138147836569244869107757945370200122955794509365120853536859837243314494576053441804831018954867623755033888264275704547752628348151132333655667171970175829826792355986148522268067032057293494927558322394395160508723637192234110428953945018965078022622950949911124494740703606109543716688008516750321047603009424529696862953094999450658951089435460411028678817795100630449046993274191915359520936265372754315076684798942557329584282177053819106884196674660057281227248874819417305259132106690385871316407455034281900110779740008476645291647094776093567400422266906817555937149628005629880142615126571231411138926043531449659320501743591992888328328980526602'); +INSERT INTO num_exp_add VALUES (8,4,'5329378275943671819201468.88995490340795935797824952902333498786202536079000703830146057240651898748760197658486790165425772165585380839129948178510273188565692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (8,4,'-5329378275943654825229021.60868041634464923461847811467151197921638058488380774418295490670530782671111742467066510243892603363577850356311648591521611590965692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); 
+INSERT INTO num_exp_mul VALUES (8,4,'45283653791262997781451381354094822.762732909505051438036873220502792213670540454778361182993875916509061144859281577740137081988678361247725064336120451090222456518107029158304937620179032477664627949959143233370320432203497828243297406462513350790251761540074946469824444452248386782451723637769289822576372357189700319768797708375563651655860093365309717823602754924352327588945034832436331911584742966378275504545736896430718939807674966738116698454215555860047859161126694019895490767779791933882712567492115664113775047192011252893773389940988533801360010782816196288710063568554147458866942816721046004257953642508395867837127678980002737669139369781058046396738606563716339660654364541530532834806205571191828994250708412638796240377704994928921528330863683630622922959130920715261879547446054261914770022377059156125037157979236658010950'); +INSERT INTO num_exp_div VALUES (8,4,'.000000000000001594367257057971052149628499448029056279649281098852958322409409919964709324200796473211884339143791758566019217634542932882694487712398244322522748736692741288668885362384266615527166964187404128216235057387796054457728789109537338988453837993084016408244895452291151218602815057669592284587317035387004942691671916981967449109983992675125005085762403043329820872839739877674121174083273716295673230993049263574856197011389828478636779342320299895806297835595427859271617831720398457416685435560152182883615601663820189195644140652141180949257192740185075408019971747810015931542757445763460947106918998459997631117642552273815713467150465548031203738878873114842844016176922502916339025283749846225376341878386377192605865913018132981323065698049618379727531925408677611856682983907951667054819'); +INSERT INTO num_exp_add VALUES (8,5,'7844230593.20607652525116672615394735666141304947992676684520382624714879797087461877675155217754947572297228288498221620714146356962938009770486619898336249374975146736655090644822719838495585664994425117350974491367099004404995846913641329458537237789584653041949090121498951516476399288513593944575192159570458664608461677113504914551578443229008454218964701550932948083369656042643364608405637360180021322967144409944099438498649645368196191999692949583952927486593144959284443545794934667002661774373364219852712996869245745722896071593910890197478196462961042627387162830776094709087748993678069776845437889735782063'); +INSERT INTO num_exp_sub VALUES (8,5,'9149741854.07519796181214339720582405769040995916571800906099546787135686773033654199973299973665332349235940513509308862104153230025723587829513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES 
(8,5,'-5546455599206321494.0676583421119904300307105296377723816472192007866147764761501865875232824814135783697976183493106885436876081315217834621720906478074798596116645640251460842350553806256223963023430631066024389364515688765194373161385579258482225808660340732705687558150699172147896486727530192499184101617379930846663835628510376484675411350654979679181852179924386290069790336316958202582966248703889464308649631486542724072047294216362186036638115240070658004553260251510288423749333873893917690832829128021808383128393431810674177390352413548658782609064839524756041501835115152819802758773711821322162752064589750295542985780512921839490040396053737870038534216948323935020460307350020911362024271167085905714873548388570602799432705061561572854498075600'); +INSERT INTO num_exp_div VALUES (8,5,'-13.017101389051085341042057308965769356145255575582875626848796382322826525772114256699384710400140437710569924703769685567402446691691210934185000959063158239023412379691360587119206695513775971704926722817528818197919265145207032750407924774510773427697188520818450702875142190949766251178733262143962213111236591970766836685919581025629742334704854852196126735685421250263035895756028805974153787560164935038227108975229771590754808331856162035119882347418116049174638416621093907738608991987582465865527947015457540650512339263071898410531735438556948115098562123055444965056347091625748703503220861221718449714020622377233272042277814766996198081939221253025243417993701684007826177845003391944496774674489538520354606358872276671998045196738090133576377830721671972381371985771591052597345572374064920279182'); +INSERT INTO num_exp_add VALUES (8,6,'8496986223.68757431572672621257436634648368772473081887846765003074279255322456188404621827857612554765910678041003765241409149793494330798800'); +INSERT INTO num_exp_sub VALUES (8,6,'8496986223.59370017133658391078540506786813528391482589743854926337571311247664927673026627333807725155622490761003765241409149793494330798800'); +INSERT INTO num_exp_mul VALUES (8,6,'398823655.819545574205652791249227663407026876411660299394659390409794761643751582473390322547798567169668246138880832642141417531427935520467563318363116897177899262525720710134129529640376020947774470933902793259531840625444267816319963200'); +INSERT INTO num_exp_div VALUES (8,6,'181029319177.110996740664566780784253502559986936959009611748146099327460471609593148344991059106574612143724330935988823134137686051475120980257829276671900076859337187540608483895641504622910361858962883971613675309676443079313179200981488761707281247447120551917205792352229666049191991270809865110506639390610910481490688182068719005593641339338678014189749279508731647492051879768743158839680867283217578754666643688259810863605002821607490100820241093473083445658378988069593782353275713240897038366242558466047071334385431080003439842348547427066389352198560236731403235927478177780757802759046212921140424771887928786549573201311120885052685761195784207710933764480136690216943336587118385525047554334029388869436622866247240903231799829259264158812528305210833683370536416861544931420820452512390255774498188962903'); +INSERT INTO num_exp_add VALUES (8,7,'-818934531574859518.35936275646834493832011429282408849567717761204690035294074716714939441961175772404289860039233415598996234758590850206505669201200'); +INSERT INTO num_exp_sub VALUES (8,7,'818934548568831965.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_exp_mul 
VALUES (8,7,'-6958475505053954666339703437.48985528725312694198056665033448258303533387675711770743843194274181580881296671866212320171337132096489224277825857521033238709600'); +INSERT INTO num_exp_div VALUES (8,7,'-.000000010375659845651632013446652385870617923988120764298690164486716047614260682259722116360931978511176121353975789418625836899338225571166376573732227571704071000348895791547943896682585450808398324252224265156214259224488248639550967292466343168350213394398101712526534464002532408445204630441167137710565437434313424987517531891145368203998329086865151248833625645567863740298397742783405267970015165358620026813812552194344790169289440822038223606218360105618852154152168496637886434061050281055613760360200323363465925493033734895631921307644481639236601187225135325401868178006133838932915485272554505684060229409404902185944047523033315868230944723282246159741659387362889777495094736963530708159604929268812778894177095572578862150793098548829744006499229853198046828954650334595737117597239208825268'); +INSERT INTO num_exp_add VALUES (8,8,'16993972447.28127448706331012335977141435182300864564477590619929411850566570121116077648455191420279921533168802007530482818299586988661597600'); +INSERT INTO num_exp_sub VALUES (8,8,'0'); +INSERT INTO num_exp_mul VALUES (8,8,'72198774884738777393.8687539247642452953425155400068591498151280875559609979248583367700231031634872342122563819478919600402159024059794279536786611373504966204744811722007869415559012475160471227957857756325962941799428857291371597146319816910515366298862558849452235442246081440000'); +INSERT INTO num_exp_div VALUES (8,8,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +INSERT INTO num_exp_add VALUES (8,9,'8551849703.98748751358673528924211852802333963452553842636251612056366144128630740476125273064380199240146487881028508694029546139131732304020786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_sub VALUES (8,9,'8442122743.29378697347657483411765288632848337412010634954368317355484422441490375601523182127040080681386680920979021788788753447856929293579213610069399116912987384006656023443527501447464682173445385303315267086044455246361273561294141518329233754041352632499787199926225490924591851865949646448441825186059741089695009429827829188117479084665641367'); +INSERT INTO num_exp_mul VALUES 
(8,9,'466174236688165594.9218054325256670866060556227711696100465581464881295978997280335378678072434776702952026828137140986670189756965420183565968027969700090735690246176791371115610886533930223141650377886909408268207750238603105232560663571044993507074695683027062426288270199495225881785499139012931143826099668999261931834700467395442768201666740663642498098541516326470052372008385656719236306238735524802875519713512894448940917708118676095378518264553310312628830009314653641136566040400'); +INSERT INTO num_exp_div VALUES (8,9,'154.875085756903716715488911525453064308758123952566428258639786597308109810869086867746263482721081985848551254298524280231489145092826397833394044637104667137816928932471315095067524966582810436282901424423215992139000153713476369887383242289102867530775908269805285313842050961754114751975054515055089553180717444020378611767296609130477264722612784088270193199394531972594028420402254831778715196248487757266330454269044609134602570688339750190391651801546906342796660819535014295618246236706572780627362908121159003488810140236665846928586992082180006454824311789091323774002510945263351862712964422865623934112293184149374573706760114682326698881257123280119140924775171374360283137569618025005229268057970275164869735173660958715166148344076027212231446680947914004346760896298312286730627916684448923824769'); +INSERT INTO num_exp_add VALUES (9,0,'54863480.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_sub VALUES (9,0,'54863480.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (9,0,'0'); +INSERT INTO num_exp_div VALUES (9,0,'NaN'); +INSERT INTO num_exp_add VALUES (9,1,'54948723.74225051983134098996071145685528795757427462111901537365053896571438476055974853245403475510333627298551845046116291696445177112567064282766115207407461565363967417615506303416694032848457927390574251904212425813072768882213388082765916956736282110801611726537663292922699021333445658549608928179155685881583228490235606377831724593358583903616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_exp_sub VALUES (9,1,'54778236.95145002027881946516375418483956830283115745569981757335827825115701888818627237691936643048426179661497641859124500994829625897874508497095086558766563666622720535497438693688376602804651302002795213923698663694204683995198328880575615535181012624198813873609885725228117274934655048553507421448724831939026752650108735245933317237310133362383704426321489070979168993853338252728216162346796960170352897972568238870481118474064783391570102958474141459619245240874849766946530000977144965'); +INSERT INTO num_exp_mul VALUES 
(9,1,'4676749348240.390309875431213992853550297086049749814750492488995108783145961719774217441193547534210468967573344456866203963659951312519988497979489304488948342258375915152429008993288817366720647491166024151209542534474867042837694499222928509320280684557676243780452100132238968233413333851595648146954975713386711764268506890884764704949969602122157394714663532141060559896359465918874990769222345665160127552795532197771168442486088776803398878354288847069602460071745966589164282641033852314335279121191855487126430176047553895892632834940595958394834437871886013513058514896870683979585091413977173250824451205330441299000850618134248917380244749589254309567551846327349592529960432446947239714236828401206843011440433362544797025114476612133622499094287321570559088587999417440664282418005102546343020409520421747216'); +INSERT INTO num_exp_div VALUES (9,1,'643.609749344751131516972294140174556703217311736700045690413622699888869645595256683013323517984528456698303984909359393772036036540901870537096836621035845014213031549051156299974682317824766457362427063305495772666640279328909129870227828460705733995380145417663304348663705694070309475835826101153850359826502235923289787750107778906593010060115662191620280031872002110849782776325630424918493602259707267214006217268630948545349980430128422952869610116216278256812581821942763705098526140427280008360043829906543029486315209818099697988089748683904695870401517598840185535891464842870210715421728852789815860153472208176465166954851895457846723102438114697692610933532992841803219018495137378534010155991355251803548866919409031477821173935696065078362044927492034445482457329200246282082707380974745411383781'); +INSERT INTO num_exp_add VALUES (9,2,'-994877526002806872754342093885760.69667996446358567630831677089993316481039076439881735980566785462673358516198695146576524119916430759085192883825888457383242076882081857926408611052522393579396644731758241837010163568445385303315267086044455246361273561294141518329233754041352632499787199926225490924591851865949646448441825186059741089695009429827829188117479084665641367'); +INSERT INTO num_exp_sub VALUES (9,2,'994877526002806872754342203612721.39038050457374613143278241259478942521582284121765030681448507149813723390800786083916642678676237719134679789066681148658045087323654637787610377226547625566084597844703238942080799221554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (9,2,'-54582443595378013373024060492546032003692.4875677735896411267274323339692558458420972958075073392126734000341372096298914875892612108329218081214550050039133117695428196702128258481789017059073444323729583900855712795086447886053552786449313809589992185978097430132940882612817775035217244553616977182049775786664446683332098226841743818600819221587510039430478859412452506872131851471967577741190323481953867845129745440745526578327709351120432530702446916035797432129052518980799424635406993848916727957825620638983706180841278402925286540375225365057191075559133035'); +INSERT INTO num_exp_div VALUES 
(9,2,'-.000000000000000000000000055145964114074763360265614481666934002579974728749248345352023099030383962250681574081874554842623852433135871821620640200582985140388676650602814646133317791813938390695683843848260103199745295436998313216878337673674660966362155480524935736646623766057029148471463569162153009963312016563281545776175277904913263614668092319707343286073000287493274965714031678784835459999763925833141049057636632430975424499618419962303087175237320046300285962065818926167792812657620724550768858763098967149546312995222223400007044549870620849992226072041407997925405957501929449911416474388622107825120486594723448780503829317691081601820425151593487431389373265285594626753418140874747955925763163132984655078996173911578832035721963554569605730262976354029623260224710106409129114204296314733036'); +INSERT INTO num_exp_add VALUES (9,3,'-60302029489319384367663884408030893999.8854209703537480818248540990234567956069965340942024890856088355839135538265116174644003927269495876835324407641642359213535695803871472434650475144516723617632059130297610134243891145006222068960999879308472500422640481972089756410157246974765071949782242392661524488959954348903412713930092273629207697480131360047867213863018127928853922173643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_sub VALUES (9,3,'60302029489319384367663884408140620960.5791215104639085369493197407183130560124286109130354360944260524553172025725325268378015783145476572840273098165721628341015996848028750420770651761919246816300854441592109844750954710317145008297946462099581451150385769713261452744310496166494545449824802407416426304041583975713483424241727236417259479541129474082301376239522310995725648773643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_exp_mul VALUES (9,3,'-3308379209762459471107480259839508279070920437.883503980178028214343751083865562028455061662673132221930429904398963590401793045470444301883103141901787466923883803951815572606105617157736442670792467625964359169270739534412932791178258858918086886061702512427989129732248215348301444245772127142869263635282888226326427510486246184233225114523636171202034558843515894542952126988613018789833835507734620046994907453602573865012044120483116345444810078666601100257620969379968264504287700045822481492526688635364586344704730579892342786173395802035361824932075736340405960099542224953439044947229246847140957298841482874444906129049023002897135347878048572628834749795298712449864571996898774444932083319581439741625832405434317985988163261591679157437224404970927012111196724239860528859217322132733404472897289'); +INSERT INTO num_exp_div VALUES (9,3,'-.000000000000000000000000000000909811507365065002714756487495210579371808512079908127938523896001746219475805196061435010714649189975968123072269549018826343830061696154665503565341929634172463095299662727352635590451263034658630449260378893723785917860125051787451512267088404686342938118993621396641623525252649748977992770709930435013456855344203854749977414354164157192885125263071636468941596567220391082793700307461350484216679632552883058303710297475827456761138832914743429330069022439380297715971317819244718196187172770061156794130040674050533617155253444764036426045091327368023602807193742585178432544430741520636125146531502042579276206322507516332917325631822606079220413965396706334639331097621824106950192993127113903265025719013680733760540930122186345919977470628988674677630636632053583144327'); +INSERT INTO 
num_exp_add VALUES (9,4,'5329378275943663377078725.59616792993138452386059664269485161374191901124632386474661634799161523147237015531446709484039091244606359050341194730653343894986479159670583937529516163204904273806158788218327396375034882788180783796976731912141525319602448709213495905899041406302673881364465504945113279286939663215197485367850132991968081639290297033476859158044889351836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_sub VALUES (9,4,'-5329378275943663267351764.90246738982122406873613100099999535333648693442749091773779913112021158272634924594106590925279284284556872145100402039378540884544906379809382171355490931218216320693213791113256760721925653394811317969065642404864072442190731745871963413981746671302248281216916486794296983018838956112081135739969615171358100498945955409711817327376172085836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_exp_mul VALUES (9,4,'292388240303165948041827159734686.255558469787242316676287235194652580157149226950109397295920730296960145548003120827363226435916209781396711693581454960342091452830648929118261388933297036933167543189308061917640517578583521401267417187854611829815212778183983326568586118831109538377828156118900313778053576483381085207892754728937946691892849474364477434665960112125254104966566712906532318984871145605839506991591027939136026602051635433295687547552796828217859648186757719639965988287173297286034098497871707197092627676226053609131138590878743560287292934815277894463305001278326023708395571840850120055316276256138004565442099731931051413153564744766098053176049414330146267604802971221161572130161432525297614616942172815141372973870720928125699420370428856022295499447755488148545048400795053604349570217878099721865670458104653570360'); +INSERT INTO num_exp_div VALUES (9,4,'.000000000000000010294536718194523982241053267404812827031741197656209184880073175960433631103885281961037127283726462743623757855378209281373475473018922090781553213750339001555832360656399849031527008437303091226051008068950896796359518673740801770866360774945096397034708173365378527676779736929035450380795854046109380272505550244458858231227568118355064007614608452292270378691774826689216790090661497154742954386244856792006376222923780801296832612827123778915598893970651480451509706836620045721191411824060983487064555397842027454385628620582036592315345973096405447742002746762099231557054678593446667904250189208490698468539396733604833688133512716508825505666644390119877423938820483653319376926639295680552194966870285838815705038244628263602997511842285889300557188773128635554621378148419364876651'); +INSERT INTO num_exp_add VALUES 
(9,5,'-597892150.08771044822540810796370552966707032464017958269847934730769542644402913723848026909285133109089452632480800168074607090893991283808726990171062867538012237270000932798704781608969096508450960185964292594677356241956277714380500188870696516251767979457838109804726539408115452577436052503866633026489282425086547752714324273565900641436632912781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_sub VALUES (9,5,'707619110.78141098833556856308817117136192658504561165951731229431651264331543278598450117846625251667849259592530287073315399782168794294250299770032264633712037469256688885911649778714039732161560189579333758422588445749233730591792217152212229008169062714458263709952275557558931748845536759606982982654369800245696528893058665897330942472105350178781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_exp_mul VALUES (9,5,'-35812445701642379.972368737320206275515144213236752803936806738624588812089615098329765811617509505790110909629109400553415312470540217508070421816878544125783329593128638405659896184248784794258084116406472768709113030915308410565617764394827427154923321461158387012978726512246146545834669665093228316853342805604075936530371665576147966721599968786161939347726656168798065647411457701453987215491345496003650288850096338695703984042549594979897253521041581573388369367579323607093487743440894765114619634001789457486407909224339065748496715380572175183589195611952939575073075140094901024063428239223964510824958346570603142906309198033196987949067156046076497974760641964978711558209708743776024313916111738542765749928287600981397080809041007714387564206594515733287925008053261840295560398311905155157989225181164097547541'); +INSERT INTO num_exp_div VALUES (9,5,'-.084049034261605466896663277055600903951276881294745183935726262038673990196778002490449355450474227878560465916800470848046625257516764244432096856845087412397406701521972651300484716852035267197801389708234913163750232707469240634303111868882057393120649919262424619226282082184091177505826009374043368623853156698509808569378758387708910629731005691079770517679511879694426434724918004419953301426679939010592502325130576915399009756468717124460489039474155719834555522581553817856854607844133431854471292027873672356863673617090151801474016666978499651970627896504709551656249007718965259502928591648533670568214972768900993459927860068104745163979267716597907297073374689384723943955361288974065531322408839914599555769945298758102515352082822617428033648130099822033393662643586331479103933840387663729387'); +INSERT INTO num_exp_add VALUES (9,6,'54863480.39378734225015137845671346015520435061071252892396685718794832880965812803098645730572474084523997120024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_sub VALUES 
(9,6,'54863480.29991319786000907666775218153965190979471954789486608982086888806174552071503445206767644474235809840024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (9,6,'2575131.137912978352131546639620215541477987701194164886305951830806120142596646541302305984776928560906754259789485960991272272782091464270104432109904222200473616116525297615725803495463468272171161659654385929185160689572943852767523792651123455283534072794326647404332228203001469884016996499768656263775233430922446983838511590562929268821678518640501686017030536100955531423152839988008496919169395159653034847677470665418765966542111749439412'); +INSERT INTO num_exp_div VALUES (9,6,'1168873084.346566233232746391559830634361431940000227460271861554316197556566224118756340501278103405856646766537018954185964066240457859194626558143313125824412559635129130086906976028635444060218797992547370132082916380788496584864016645155338102476357490305222392452114945853620686975383081427840791892729407194179236897452655907829255937027286698570784397487382242990326347080472574546312522326038419753951437799831430690304084087684303035538181812523230890783372773953961677974396907303758903934808035747944477277528267001070234880092255363221274303820343225415479126819937070570562654065195009839593938440374000473302075568746771126391307584779249330981594640387657042725725493800876630516005713789705652827210295338592985225924959199657729900181287069808881130884115897407246324220524401243575641227725030779990490'); +INSERT INTO num_exp_add VALUES (9,7,'-818934540016982261.65314972994491977243776717915257186979728396159058352649559139156429817562698954531329940720620096519975256547379603654362598494779213610069399116912987384006656023443527501447464682173445385303315267086044455246361273561294141518329233754041352632499787199926225490924591851865949646448441825186059741089695009429827829188117479084665641367'); +INSERT INTO num_exp_sub VALUES (9,7,'818934540126709222.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_mul VALUES (9,7,'-44929599044588573810654775.83678007633232843418115790847152455559258007804727916986432256198687661496804050903769496933400455947645400628259699874770581538122521805603947464462448454681701547899144129061961394870320463199545502030106801911915987309444301341575451240764927967432593181449618816978119423290767783843864768557371257918447461479570164065303599994081990686'); +INSERT INTO num_exp_div VALUES 
(9,7,'-.000000000066993731076524206362744068866774567920404984046399050881532938231826344009126898802592302273719505485084766150904380671495128604515800845609713368334606489445184535043833069145643553083555507533900955661105251251918425885537513359541698046533092111969478225528665278023069818968531644884466229545497943710817187632203193468836772459599856856811131193744272314519908999458320275710240994009061040198159739169960258978462113813370513611735006229733329565083659159456172425715216475781507996483885669437855000029758892126410922067202159414570164537031153818197618428471046051340835826664787585016361564969663413176434498159140395476980277574789931364078570781760777773379636490084338326576889857824344578398580499610233575273027387501809967324874264742269453420400624883982643066864175851881870402856698'); +INSERT INTO num_exp_add VALUES (9,8,'8551849703.98748751358673528924211852802333963452553842636251612056366144128630740476125273064380199240146487881028508694029546139131732304020786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +INSERT INTO num_exp_sub VALUES (9,8,'-8442122743.29378697347657483411765288632848337412010634954368317355484422441490375601523182127040080681386680920979021788788753447856929293579213610069399116912987384006656023443527501447464682173445385303315267086044455246361273561294141518329233754041352632499787199926225490924591851865949646448441825186059741089695009429827829188117479084665641367'); +INSERT INTO num_exp_mul VALUES (9,8,'466174236688165594.9218054325256670866060556227711696100465581464881295978997280335378678072434776702952026828137140986670189756965420183565968027969700090735690246176791371115610886533930223141650377886909408268207750238603105232560663571044993507074695683027062426288270199495225881785499139012931143826099668999261931834700467395442768201666740663642498098541516326470052372008385656719236306238735524802875519713512894448940917708118676095378518264553310312628830009314653641136566040400'); +INSERT INTO num_exp_div VALUES (9,8,'.006456816440893715330247418029019114736889626790871612141686117271826070935285769018710680035004320626745647926106882508048159628931624522666638442625219959259156539178378186912871506893482633695438850964052285542425753626455183282159259999492971992739484319464700978750304962671213318202670228197968646486740006148091321740497272644910882302412140576608739962605210964504469426861972705740810533465451230811358870068391007718532021526225893542801514255726272411690175555142385382688220121052891017808391607717500701760375927811435030512071347521837090721052128992926357375527600337655573639413811262412492632491693179011503973930804928749370652038245414768103001067902012962988384812280453070895781287237746786414435546976395632454474312533482077585837153357017362048554313154580576238549196250793055676215164'); +INSERT INTO num_exp_add VALUES (9,9,'109726960.69370054011016045512446564169485626040543207681883294700881721687140364874602090937340118558759806960049486905240792691274803010441572779861201766174025231986687953112944997105070635653109229393369465827911089507277452877411716963341532491917294735000425600147549018150816296268100707103116349627880517820609981140344341623765041830668717266'); +INSERT INTO num_exp_sub VALUES (9,9,'0'); +INSERT INTO num_exp_mul VALUES 
(9,9,'3010001475769225.8286280957637941018500905354415197182850820227163907782811814730309044010416886791014702373809932926301368137684091094408663914110947072451332976891128659038142954192986392936981664792370678656287232795203974766040821110221158579481177539669363513848425151485663431478439528936592701070340012569297177488556353760756495238304538439278682066056721729656193616571456456325016960870401748115848423105783116854283646624807603476682295234280408938557209608025246638166902335016025467565869375885610813662767004038102486303756741615124814580306266901273803721191779461890468156043551004644728343579032524687612403663816107770451694666844862368101122025340182510019516924578414085461628689'); +INSERT INTO num_exp_div VALUES (9,9,'1.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_sqrt VALUES (0,'0'); +INSERT INTO num_exp_sqrt VALUES (1,'291.964716019333021494947753821238960905461614737525349376826064492714634914263808902604580614735501799528494357560837535773816469841426747889103714048646989532842972129124080559131220979335403729022278994440514872845756198274805589586120535745968205107562348427941379641465378272611453955517402598409789621997041856848783989993820946766177453801729783316269310186191833995557234577548740940419224137195404391193633808203715191863638616433190672511651125299379882126530500870287424768024674231651229908224729856278167033444719242144302972892419034855417126978468296581589282861879645409909873113678361180607775255758820910366926076380306290306477790931129670172989289536405788838857428768869345763784112862591549008321546447442552533919976570125718481191724503352619626562352280522949665158335559389298720990302071'); +INSERT INTO num_exp_sqrt VALUES (2,'31541679188064906.712574384704440356216787857626740375004266523720148374188511622980520374202725176835435173058936870163875556102907654264048353814040480579464700545975346621546520503928314632418705230212623378642743044255181848913683862360044189531298446109955034944189751302497670367665492719604026161836224535961347218522748523360100432275693829501972749859329753224444694962089604095212784768854310289429208671271394086829270986183171968944659703708706544668326267327938226750760690620258967209626420981505237183055363540806281098871221581265173394406715458619627534396065960117454160969749739483126059760636526242783235685190739315590041294766649891987044641492234243404608847939002062827210734973778130441825067858641461599799772535304379732674727995848518807202053316225824685704785148921785964036119338754973714515974054'); +INSERT INTO num_exp_sqrt VALUES 
(3,'7765438138915239878.949520541017683429203286303188179443533225547096446554008374834292278237558244698868300666061834105683999048386497322007336816482648302911579331582895326423063492240235074387242190187374869842856897538718280497895072291181675294000739548676781615025944675912072664211455701112700937190832332966000160156597821149428032612782336278939437593991008833233156511435294360065004167893309428565243314846456225604669764879344135321428948841659419438769652686215993544390780212859309497190065178705035652106614050448518931820975038314187040226298661787490226917902356569717171481159691409131778764973037046501816919243659681416263730519167614043077472097520207347950292377914586524327206547377189493301153212000966249655331053184913579513686655963686155890934436604123384536027235444923674128269748280097789270784333442'); +INSERT INTO num_exp_sqrt VALUES (4,'2308544622905.016172868282330339228589083058636874526727829838244942341440716909466939214393597311710652963849541394758298277969240038668406494621950956862959196896847352631445328917063551082418729435554972200530109505384839391233286173517804321019323644218483570886304028175359854335870835404627608254205407525763332087823548640923282031978903399118139052814618531713327991857575390136755426466065839913887477577516426991104516201265995293600539957187007068885368699949673989051443005684755994465547159213587471972139403333249259808344536605314911144950465968669770276463111776581675944967401948957460097365849699783091843609965345747287667911324039374314413430490112443463386381631812537639503425989372084906324702158112088898424705684574998783112519152403201231176840068666882123684602080460378627639651465436618032671756'); +INSERT INTO num_exp_sqrt VALUES (5,'25549.082770905117529972076915050747181125832857399138345044265535151111965091602789684342996759657333588444489085160336703294705499665424408218434077722506748278242942379566431768762487954917389137120540138359870652558814224523699917122023018717544160579704907452934297025088008618627873220397030397424422097405152321366495319708580932627092620533785271831833326130796638935296720064431288560292191928489034307645738331451165431755179025359993690642194334018457793169983249853388987495489562746304107188105521296156525984787815685365255240654972150342496329030279439124533240114879332406941960563154881888172285475336782757262639979527682925214971861707635327995621436598536743180180978457735632181738067997521785965451385630326464388080990200265186437768409003553910194212076755448477164192901658547251079126833187'); +INSERT INTO num_exp_sqrt VALUES (6,'.216649653115510782473161631235601739254284877523828136703593069337209747459679979369185882839688430004369697316986054374456779366220242645866798278985273820408495361607183119980716020227424205519727777568954933592987351750339481522149106749713967143685591960510946511796062486795368200503801097611436787402191532618456991115230272084771674098613479989808680789347124789253499967359190605681912854639520917409710307182238065185749856554472717209097115325999946728168357936779767099041518574001682560265549916593333117469681763348860131760281253987626822958726920016922608371657319505153308390495179319529587670415367205193280809809356733443291197315823747505896510820272670040485083775482983378341120809542502350385555577946098824446199419354197416933858522419312733314383889554606932774046771497129486979593226'); +INSERT INTO num_exp_sqrt VALUES 
(7,'904950020.759072496304165474991957396337281699986101765045213964054286624338102141970514306010139529492299343393832200631760194440206005974547202512275476562767685193838576516154915404389465528270010938533075930081897392863141132529694804621418663424569202655893682412466871297412964570322984865326770090075582481194532433411398133265643849129084449161396724635797324126396071308557057830046688990212282866035593809633839882468628249964862932050189148498591642162462777480125024786829078066012617362076651920045684345679767223337287825546294839320770903419463644110383560050404456170063805115223954191445548226706113970164823214416171441655706141596091717118495955441099867737827763335880891937222647408575142200256804313345924443344596462585960919126827045197885802122062165934504665811115031150357820196176799560314653'); +INSERT INTO num_exp_sqrt VALUES (8,'92179.098626752893864900181023972781406074846653380680747862421481598042923358730531575438403865501429843141967819802251116774924400485954931201776260931315313253827346015775662310076094882239170765060649024538403329505426563390044695320714825481746233901773893996663258170360232639353378395244461670781152793416950717050461856097473105730100523010642696332151571372764781034028324977128554099993021459338419164426784774496292405945103200724413639660488309795423335142455569853549710795692020963174011003447023610692365550245567840477105794884132665155376243735213346877116105595296043532605899184658904822980397411096930267453332143879534914237169761039374689145860503772331147367757318826885494994339695470190886515765452545019167989882527248872835783707554463866334705735781549392895480816605355996057201589681125'); +INSERT INTO num_exp_sqrt VALUES (9,'7406.988615277484686670011157489572203134420118818648711986549881046321377798441006745317356200279801348355202517703531020643333388857073977704009782384103170022716610432579974132111487533733493986910583223121269323909760573942980360508642443245341392335557152177332615977623338526935953706604224108508582338123915133189529507760875123300397933931420500010248194253078118618381590347297853307090813639981736227771834732256867579490224181748450683295253634852775448770576585177080941820456051588076218688792321741398867304684922665590162004919486643750098085197190000638539994723704724550600891137853975703823903659121582583388450687255538838161486019214242094423895463814933532217776443473765708693285683261505695170847285063013324823850724236845500162436661946026097459146424122412596018946436589967013641971183281'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_ln VALUES (0,'NaN'); +INSERT INTO num_exp_ln VALUES (1,'11.353265918833698201334218522735144514838241118349715803442713722607336732214173255618762341321138898556011520430414052782971985419141860417968593746833898952016980791997105866598425597066404919489902082738711038276194174786383758877067916049129476352925010880025206629976454341252818402788928939407784629386362069592202090897264194883276572978998896242281239126931595483958092059051047739223830394259082355969005503976135238921488192773135287876801394308064862257453262299764712613486466254696464150007113953810688169396432889052881763511661127351872408811370081346456019961324265446884877073712053408327408917588393884214304220369626106333713688792094943405258431214313197283237071070354654837081449831786573831004911008790533179001070424813584405346221388686999574752038655226138085374176702005198770598232862'); +INSERT INTO num_exp_ln VALUES 
(2,'75.980172429959420723484178622920965327708652620924912610122049843800380131746381968266727388919414524075492921510147435877107720844487333947572033626887969846858337336557672107987074468763307953130616555202495401302128216460637786993535376622372745654109623249396257174895352222213037880060756992073605135503615371392439827458529942230210514752764526895030759481226199720092008002458654297737883219558685499445394647863430593136350562417924068100891680398878483362058595716232013516337079804607378041880078724811071904523716775991447489914128580100888252698281559809224785596795038122963619830942475652745611551345360922016753939774272970008770647516790944335173711498988149783075646985898883858697162003144539047532603946093022417842140993960433780913606807466518632121884254341907122163281927271483110212890483'); +INSERT INTO num_exp_ln VALUES (3,'86.992429107491709045555322727377654177072455841678650084144967727028762699430180506209786297136121512625728883607972513154010138109866327600596617277403558404624813332464431424791338402731178416819791932126837396086742033973404980654712734845137075562739300866280737071167943367603243180515859476717635339619107593771719314284984269343476343816253634799874584843436046260962736006310389088154751401911743739429257286834178656182340416539923956100441369280015412718483971113838923221170027312390404790743389872757674342133486652087007983701950040432125562287337697971646750563062524010514537132255605131615248097901911480464339325353279118429890601202554448469387179349495284716473293965884844451619766312048304583068386805927433174443889441171878078987788018564357316138422561213329104267180509029624308926098065'); +INSERT INTO num_exp_ln VALUES (4,'56.935276817066740776567329017240462885579486075188456418197311631774373422196025180114152248099799048545382060930401786002025479108787121595516444894009593031141335985913019897883627990503003577804436730367402618412514152465206336556967419434371593632864308139215157721913158949066717186782560422199668568894551013785702491365073449320535603830475158258853167712460432995074161536886421366716995573365924430692151761737886552457036412140640821310927642146210426044265504978418405684030862182425702683702307323138985481047994648222224089112998195621687911787785594701557252468626097576375468916953563766801336922479861708649876362257086586679701715813254414915314296890025577780265459584203893089574567331742100451277992780400302806430264717887468808962517029442262560742822875484362427192693300423729233467613910'); +INSERT INTO num_exp_ln VALUES (5,'20.296713391219923821414834924710998522858242536565236229645868008008504475111229451635162536658197320282791428572861452713483981402773630985812066048575864982038046409484905688236579134672910905547858248343712686247795669280482288748331949478864729205285910525962001251260319741279139167559906461672936902355959755164523720443059989357054368460911050707727029320725144824995614445423492687177126412520389766864793826362309254124276325522276592246655562770110024099522184080118637524912964002223613671995639705240767929562023556724031894855094820328152633412077228479168557819219970917880393852962560319397442566813746504969336443969816954424715197797253670026862362130664772772977978222813915593329422557592316429203293264572088112274848838446633519530653849595288125585730314673691986554304725866754516304420665'); +INSERT INTO num_exp_ln VALUES 
(6,'-3.058947463851998053084898503420969773173569760507671013593014983772013099601022840164736581595033399273677583253456908293015637115395777673836877852797643436458673662566205707359569792482081945396989472318998080581824382006377064185813936544714612287417301161454496258176319380348780934551188852900784476213986897306897793456700682073399936398243222895442594762628402487110466705108765286617060826203345783502301472192906817785365563881556293576463515218574477264521950513789471494214626744754200844840310516235570475410854073969787604451971790833680742315518808178608136598148628107328076871698598743664423452623124027059698038466681488746505289551548778131621576387262707147068500249466398507704796800459013580425992071957391417767257856002976954566094297724379688683375704613872658653366052459242767328235849'); +INSERT INTO num_exp_ln VALUES (7,'41.246780548917246608934265057073076900048579756649769602488660179351587788197892095257027979113051775079905924990472069951828742350559917110289416201523653941731339141666097617614477426376799479821365070373247490598890520285155435501242427296281987676879064510605563522117334502131946383957407685328562874307957108543536378261847119286989184256009392692140821396916222386573424618796707564187152459973446833193743614720624765332006827171872712331032607870580880807058576154429597725560836582655488602546786785520452359711161305828045237044625934404295366273012300148250900116489718279757540843657039519736455668388572899273464839528462223812926410544976290646668870192676914370659142463304861500879195867873346447316374869974900582948166687948531910220128160490935170837209017355954301127162240133341813847180541'); +INSERT INTO num_exp_ln VALUES (8,'22.862977375646110045361670561177818139082238721442691850491173190000619222046296383571431877856442345505931635735363450488731186880557789439424987680284612480261693386095598289519783790826332183796775862215503493910816035128476952347072320869461206895223935484838130924268616681347949695029657753251443811448783435000569829291535036468240771401957519222523032235686030017496209956550934543164421459898155836108824017735809352580723262896259290484291175350770265895317482371895188221452083719817251845416195168686335127805092334984596224320638378502008767433534450949989322562311171685891891122105437154553106840103473941148230953978989145470651955269817951560544095229079088083494695756914405635176899994279484466773598435268700064279990885608144109747858515514066444373797446449729058958270758597627587968112958'); +INSERT INTO num_exp_ln VALUES (9,'17.820358481980064387183481028572263407130633079314879566896470101569251997264841660326428805413719418277889123643557369421967068805165885825106611310020187894256310674762734896979157570968168599492401269694048046876387337971177513661006711375440365724346137980004810780215236524986274043416621637509807126148966029923572853117418545426960105154053049098579812135003711132897895016476695223444397389521434633067499404903493027304737402519428197015899833229473322655155458942323004249812974150129789653469524573801259946118454333405580647485894435301530550214095993989552176497867244278699359917247910082169086524111229983698975613609318418313798992088206507831757327320958918656453341769110558376097374227592021075267882222057385413453949580066342977546145482215220982989992069525148522710254796105001938615214263'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_log10 VALUES (0,'NaN'); +INSERT INTO num_exp_log10 VALUES 
(1,'4.930660740129727276654889314296515979425461685461970306647398411855044094312185293195497201658739777714943974003690119189101973212927970410047992001003936259467465542044528955416040460487922970233600641954269411521809500203864460110903973264337093883907933081597350982496469748131390809569321256206859934619579029279954574676601709408712255490686948453752571699579252140062805776361984468580258289509013081691778727372026090522694670379557247829136504595898935235926069699309392675806881162434168418505908116911054206058735257796918687777716036307205415038158583184624809880157060625643069601549803887864772092583549388533013233603450097615537162442973385137488450178790573546382354482351187412256794374383453695483855501587939419102008302408157959291557415763034668013452188944554607063362933134950906875499201'); +INSERT INTO num_exp_log10 VALUES (2,'32.997769620388965086774969704518222090258389987679691893351902336370051104718852164011301929506188893338106627980171059175447833290713847317665944354651476245003161501753612545484635275306181777040447675475670149066399611203341262105766118892586541910243351018829302798733989560900125591073082441126709911019648451232244139674063434385451279378543163944005973452562993913383659295688375546058256196254319767218634546732685705517341998116744642480938405113447415486950667007645850519659606476727681944251201236366198374488204017630268083077471516734133869728427050843306716313813724061560369884508660845630727190444623729815564381063131729592825825486515070406390371638817503915214206586939112681762984038333298146999891250107667687034785493312416966635780188163871680959873288697497561452228182734430749066579749'); +INSERT INTO num_exp_log10 VALUES (3,'37.780331928743475574895606142114739140772838801045013007323050327909196792739138159615327729728110344767302636436234256468332011934881494997184865617793179255006442447189720642997935223133982347184994174261506212652322213673745795726283311685835974151422721233207287206894148660531800622455957268888702309499182978182878524951883775154983702898237404558813230370364953160102391101897560104513279410610948028599674950811462114131673380477843456965645417025376374320207504913806546872166094337441573669261285052323206348035827948287081776955945081345131570610652073053464020209215624179904586956137079321655773178387441622685682721151900601340680061607114354850640946256225260430676099781727317540719923791064452012925902993317349390523278687089530234444415688602090547516647302454865526291471706301790881694022223'); +INSERT INTO num_exp_log10 VALUES (4,'24.726676547286224970759328746582840552419566534667446425423046931401641497155587075591229106937829957279943690528061985864558314570189069764367933957499905044566413640017549478921384160584906257607957223101377816440084188042395098536074479064548620374152344954289432050971466476174493306432228880930006524504974367146536665170956555486181410864034862861231267121149652317599303804477688621597163730470970207231328339082779056152481480926452142005969020950341307977091850953883445808399574256295803245530993204179747743812544604144379381347499056545148243304041538981954204310612049423688645476667184129189153715486929216331980316967699254518020077226689317148303152585009031597809279387172427408557115400021035692880631275593381822805377317270568779655383061987766693697518921188619814204902583361096973421134004'); +INSERT INTO num_exp_log10 VALUES 
(5,'8.814750626578650238811431417807018895270298639823442501111235973209197727215795256506525221092818797578008152140054383421240180435087611869193019443372556081555311825248667278358330916098378127100899126895012782320751838528480712942601038190627182482614147263228588284866661508052724762701223357327343090598060805245853527435948381893458352744679795853650453594546267600486696643924152372736774331080527157374379043696696647158270918245668579680394279565181670004245143555617589138267976417280970718829942998800499312890580011246294669585429723974582350357991472101919333996770115834067969654217063942059882195268353998096891812525364797586486311202350700339609637274043915687880562465121559531284337603363356183320193656553931871200575467929714875483123706358278876389849119105053294688326141759401230994901405'); +INSERT INTO num_exp_log10 VALUES (6,'-1.328484003982869642690619298690906747763234110040562640557173509402512757735587333095924652711056556491908059708986413635120656426593745303715671199761364516107844087845783714418487426723538440387069985879601248897538855843115404484229652166941838283489828419407478748732927617251897244190697443966424660881366993754577233476597163021768156814527570512834684713730559883782625870597080940193303268818336816535968869931456641949301731046034660616615392129109391145214470757259042172416816936479713743188047425796931722546185493217275537303458837771965375448968719169174136287532752370175863826715450565025635651343928205805494319778539652563499901671319955144823432132740582617949774638538594081514904904341299199113721131520557004571803778698005652464301037962272085633628653321081368256925971558076970172779715'); +INSERT INTO num_exp_log10 VALUES (7,'17.913249188669140643510654105014358282516966474257460687880559542190804665566625978925406311113121982595279826214959603627387555578965653325278444455875162277940655989601428868642914577248262147833499137348602966573601719040813549936948178463592211685237720748377879836890106515699728652218324794927458352954247096536337594789471529493944292143186953509162522579060020018226817623648563806559917579317916242706559131476179714031602207057714677845347616752450567251644277767418397621490301286115159509360375419599968738067461569666699939732107480135216621373057421990702923042287910730395998082514702629760389192370666675364405730936537832803383367187639209534697198515928978064543150195911463663617683085348965065679311986715357338675515370634753254774665197233934933271954463040729779956682570415317734489164385'); +INSERT INTO num_exp_log10 VALUES (8,'9.929264914121995501917993119394933531225401243275938207624866270551448544301376913376130982251708700134720886862945040266148728213253651323129942781577143957084726727561987639140151337848818195806259935747329665025823709044567138449084349729747202164413995795609659711723455165142329822773177102845804114214340046404641970845707372809306219463962664551623665322610139794354769767829380018857313559373283673392337954610346290037758389035140213224696023751541663171574697035012610534455189013755134090933979479069288110010954211669067225249755249337768792642303351914884187159646984708862430789018895140670365476746734456807215043628059581947593694929159076346249490593187993386780521089745819640214783614157516171005086731241769146397577246387886107367648843380733370112546792442909347322732196805316614555689762'); +INSERT INTO num_exp_log10 VALUES 
(9,'7.739283354261751283625223433456284905560931805428759681411970457812279544250432389511382263439324085689734710188041049046660480575958686859942980599595036769090747781359217248301544587434077376812293034848418204834388504169166350770257248896025815531248627658465029806509131631454856186387892627989218208026727504548130018922325585619738185507999433763118148418722504204066578294826264005398891049629199412773138457218976050467479292777172717500219850781664314597312411301296201533610562886229900497272268364496763758868455934979903774531992886483396489868888731578355541611359130188566524240259770918423445785338175040098706500034487703124623745259139247432324145633151895802637182446905097253961951018926565652497920605819785424451050191604602898777804133717341512568151920576684198443843944721398831404081859'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_exp_power_10_ln VALUES (0,'NaN'); +INSERT INTO num_exp_power_10_ln VALUES (1,'225561990715.277245515991117670624124484084762557459065170589803293759247930753528436379932442146759103295277479258327642314622036941865221478746258727236601688778946696303277607709407496616423493315166963938393760548678730128692212077086588682984700837334554241405763691119669847463520746595280034536307041368063462023793177898200220207765205127584303464304601759554817607633012272490650155253979182893585119965271975927569080191838676053084168631217591768468344106219831174026139608715965691941366334940196517120885214887008671956523579678156919416435031020452971977153991139145404842034138317592877675821045409772456977018293365238179815614004574330200783530118851005077771478448804470170641452481992602803877112958872108069738434946694089025321283178188028224338756015337492913115267362635647236447601252924834642796058'); +INSERT INTO num_exp_power_10_ln VALUES (2,'9553718264533556311125292459627965006385666643531070061102266984368939757379.536714147420215784125170401370065894858487440153494392538261078415409784085960333028254155527328359894197540839556987826344995348426293585457768226283066583722499658006242709930685932246087653832230889613022921575445199055131152661556678809191264086381976922223866204038615136758192929883317207903579770917317641181652055458721731297347443662717939116561947785705140374908203404860090658919334137955075887697259604047657534191202566335372150375993361370075961180728155127447781364264047857624746079509591666068708743260905728661917791822925979235918475633100283148558978385583805341715868143937062092264994833222352433299015979561976964779350640064096690062929265992966564232453102431600199173711947391200249130712039686700111791790265309426741120465259677894665532560198051256215915373145226284270408649736509'); +INSERT INTO num_exp_power_10_ln VALUES 
(3,'982718444846268846508445482774217796844461660819285525931206164100817251856409365450682.362683768066405322653747385034480250394145008573806022660379219602846285813744865438912887625784087005970975437905783802114553690522787857272953842288090141945268495451006273685577260054069522075046955466204804067271437138871789034722069934693546671607506851844248427950939791205412350536883779850165603116191193657054604569586553874805856647223849267039531773072343908345333155562072887754900969504551717514980465801806565999410206735831440712124661645970935112535081991606671600328471264697018198676317466846450405861359235297846597981143547119390922405594115478086038680663368675222949247096131378724350715530605691796680604309063173515781378545860473572389718345696107553363715518601596249508215455106779522851210398208919496668879040223859884166805448827948087400426315425231119801173387715922086154065273'); +INSERT INTO num_exp_power_10_ln VALUES (4,'861542720105376650266753999919217194383259935058507531116.774511336660822591851369622743235084609149542494189385785321912210129989390054947787009383210009523204976629456268332186620016067379702483800883493431423160815760933380418976582725913410929214462739708321325884209636272001805871036779154087677637129248122540412937033791526383240502286607736226090213753913654673523613612439527815137888202973659987501649474772884055648603290154867585312925699571949539600328906295652872654314913539778815035321695215634102441494403825526533235061083947035338872599854931230001361227174477274708230470794066733245241594719912710139298949856243576688344051439047966427547889756037265151798639614843866387316916203238068277912991427278268083231579195846744438643659745041780103653332041031419793815914447232121937821142169172566753399257291244398531365781832297786941359729799400'); +INSERT INTO num_exp_power_10_ln VALUES (5,'198021976607570296508.271597639984889464620426933601643322058775615235389194561064983706229795978402690473201671702614911129095149240715527556855309177671128442458698638704394974473956869419481315262823632891676087912529523219333012290621046361106033860210270638559271706082115529424772192777643046125905852037759566224116373416253787241195450409652089019290072319861181399387753223422998872180810295299831487867222464355713552301775702554189470264147325049133532522718679336524769566984150923939420759804463781082299907043016120177416779442865059261387111806785876531152192378576258351599534512031062777609734092707165605364139201322351960602280089186180302246827234844736393745487324460438448807241887783263546165171099497316415863122023114646876909575845860402164818094500541234974716577550807551946414081410743197768993152975501'); +INSERT INTO num_exp_power_10_ln VALUES 
(6,'.000873076977206566818052116526263730226812004454463281371489634779519089200224205946321120805055212090024554381349223642352209212670470260295303361873760972918129853308169576675500721645609379420329169271088810484607337679253503247351324049221970104335289487989027621978310506220905131150125321713385148268584530413680037620544212746920563790371941626294733473967065607791756894237438288480748407449237446113996117912144587258434808327522518688617394025018756570740098795745692805352377041347367240475846033282850136270250633825482156304826383360291164928049344226886150285595932088884965511963310715773499733217615863523253012606066583814112265708693122563204149232245895551314975524172504103194858904869273185785182598234060315036187756490539352752560361560286717869643902435677448962235275054804452967413005'); +INSERT INTO num_exp_power_10_ln VALUES (7,'176514565873872717825163931126806100435750.096278384530154766967061948052237623936423931849868926020451465515367348890410352640552194499619062823622476972850692557798609619250753020363520533767813563613425606228355802781302735485038377521515850536680425059519814786118919994914180918228654298075183514200191737597656810036850772127169441661576862538643715648802139886576391427423689320082366572297580054381937437005879583216745596935643579262248665490169331304003204939561361718554509909313409421397022626924406091551900222555950699170864234411017062042057683304265485826061096835531732950909546314722726990314852356462874701181085379772134121978510387397276859318242238150439474660772561390798432890789762504242822787017140808209820627435991445529404692793744568204608385843245177656436105160780897472099970336514833257055017279707999437302548655364559'); +INSERT INTO num_exp_power_10_ln VALUES (8,'72941951052009383458167.300747500436981484566111756088702608000390737594784514635592222758882092500858797317505303492923829092720870826490477962201959426813271424853341826896270963213736922458746003100613943600855942721319226948714369219316345322636075285343544788982588956431405042577296229122673590336976893594798942025893296105815818487227300314490440902574022885833779324177053242170024559675073866612316965636832258283516275906085642459351367507561963945012828379111856700009391438637054015804558386733558956649061672420804826896303889067785497738203077050774825608647969196321506624991188638449047860249367840775936911749905927108478444112230174584693363226143549933224252679398881354887872642908328737917862751077365602631600279486028043329404269490375935308156815477700961014566228692743960491745353377403533037122586797765130'); +INSERT INTO num_exp_power_10_ln VALUES (9,'661239032819374816.097553651299556484820492272269662685578275493609248662925676004753503494252951243895572437264999063878330704584509915845096232798927524470286655554736724913758600775591269525423912692080421094644542553026831758426157681271572808657664918053119324646138457659418857926209701677786068580819823633713337632456905824562235373422309621872998037966404189020165296080436871220718574009921789858751384547836431858428729570977259373272041837411903005303672798845573379758630607982213326716018594073712340609488043353995410508475153538231445235003980586600882223782814368245305160648543466496726973755388826656879616734762068443462618454921858705377028522664844761719759342490380417060255776725333319537746890406213693117052223545525717132695297770810635066731941724108167146710297146989770382041617889670713111888375717'); +COMMIT TRANSACTION; +BEGIN TRANSACTION; +INSERT INTO num_data VALUES (0, 
'0'); +INSERT INTO num_data VALUES (1, '85243.39540024977626076239847863600785982737155858270959890014613035727868293618673807776733416230953723818527101593495895350807775607346277892835514324320448949370623441059033804864158715021903312693889518990256881059434042443507529601095150710777634743301398926463888783847290873199395304998050753365215426971278237920063435565949203678024225270616295573678510929020831006146661747271783837653203039829647102027431761129518881525935216608429897041525858540380754759125150233053469999022855035'); +INSERT INTO num_data VALUES (2, '-994877526002806872754342148749241.04353023451866590387054959174736129501310680280823383331007646306243540953499740615246583399296334239109936336446284803020643582102868247857009494139535009572740621288230740389545481395'); +INSERT INTO num_data VALUES (3, '-60302029489319384367663884408085757480.2322712404088283093870869198708849258097125725036189625900174440196153781995220721511009855207486224837798752903681993777275846325950111427710563453217985216966456785944859989497422927661683538629473170704026975786513125842675604577233871570629808699803522400038975396500769162308448069085909755023233588510630417065084295051270219462289785473643946404281422516357503746700705970360169619852905053433235726497292406142332833'); +INSERT INTO num_data VALUES (4, '5329378275943663322215245.24931765987630429629836382184742348353920297283690739124220773955591340709935970062776650204659187764581615597720798385015942389765692769739983054442503547211560297249686289665792078548480268091496050883021187158502798880896590227542729659940394038802461081290690995869705131152889309663639310553909874081663091069118126221594338242710530718836025225507189149221049928936955230868771875644038572888630664890573507822342998964954667474300944699078658989010257103569231493090050659723450626338923049035040974032671138430612839043269997482582763267536489504794826476836323549796385028155416935072959933315468068930689064483178204550825728947252440604703474049780550458442808479096492346910001692358508618202898514895453589357'); +INSERT INTO num_data VALUES (5, '-652755630.43456071828048833552593835051449845484289562110789582081210403487973096161149072377955192388469356112505543620695003436531392789029513380101663750625024853263344909355177280161504414335005574882649025508632900995595004153086358670541462762210415346958050909878501048483523600711486406055424807840429541335391538322886495085448421556770991545781035298449067051916630343957356635391594362639819978677032855590055900561501350354631803808000307050416047072513406855040715556454205065332997338225626635780147287003130754254277103928406089109802521803537038957372612837169223905290912251006321930223154562110264217937'); +INSERT INTO num_data VALUES (6, '0.0469370721950711508944806393077762204079964905145503836835397203739563036579760026190241480514409364'); +INSERT INTO num_data VALUES (7, '-818934540071845742'); +INSERT INTO num_data VALUES (8, '8496986223.64063724353165506167988570717591150432282238795309964705925283285060558038824227595710139960766584401003765241409149793494330798800'); +INSERT INTO num_data VALUES (9, '054863480.34685027005508022756223282084742813020271603840941647350440860843570182437301045468670059279379903480024743452620396345637401505220786389930600883087012615993343976556472498552535317826554614696684732913955544753638726438705858481670766245958647367500212800073774509075408148134050353551558174813940258910304990570172170811882520915334358633'); +COMMIT TRANSACTION; +-- 
****************************** +-- * Create indices for faster checks +-- ****************************** +CREATE UNIQUE INDEX num_exp_add_idx ON num_exp_add (id1, id2); +CREATE UNIQUE INDEX num_exp_sub_idx ON num_exp_sub (id1, id2); +CREATE UNIQUE INDEX num_exp_div_idx ON num_exp_div (id1, id2); +CREATE UNIQUE INDEX num_exp_mul_idx ON num_exp_mul (id1, id2); +CREATE UNIQUE INDEX num_exp_sqrt_idx ON num_exp_sqrt (id); +CREATE UNIQUE INDEX num_exp_ln_idx ON num_exp_ln (id); +CREATE UNIQUE INDEX num_exp_log10_idx ON num_exp_log10 (id); +CREATE UNIQUE INDEX num_exp_power_10_ln_idx ON num_exp_power_10_ln (id); +VACUUM ANALYZE num_exp_add; +VACUUM ANALYZE num_exp_sub; +VACUUM ANALYZE num_exp_div; +VACUUM ANALYZE num_exp_mul; +VACUUM ANALYZE num_exp_sqrt; +VACUUM ANALYZE num_exp_ln; +VACUUM ANALYZE num_exp_log10; +VACUUM ANALYZE num_exp_power_10_ln; +-- ****************************** +-- * Now check the behaviour of the NUMERIC type +-- ****************************** +-- ****************************** +-- * Addition check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val + t2.val + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_add t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val + t2.val, 10) + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 10) as expected + FROM num_result t1, num_exp_add t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 10); + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Subtraction check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val - t2.val + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_sub t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val - t2.val, 40) + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 40) + FROM num_result t1, num_exp_sub t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 40); + id1 | id2 | result | round +-----+-----+--------+------- +(0 rows) + +-- ****************************** +-- * Multiply check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val * t2.val + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_mul t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val * t2.val, 30) + FROM num_data t1, num_data t2; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 30) as expected + FROM num_result t1, num_exp_mul t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 30); + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Division 
check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, t1.val / t2.val + FROM num_data t1, num_data t2 + WHERE t2.val != '0.0'; +SELECT t1.id1, t1.id2, t1.result, t2.expected + FROM num_result t1, num_exp_div t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != t2.expected; + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +DELETE FROM num_result; +INSERT INTO num_result SELECT t1.id, t2.id, round(t1.val / t2.val, 80) + FROM num_data t1, num_data t2 + WHERE t2.val != '0.0'; +SELECT t1.id1, t1.id2, t1.result, round(t2.expected, 80) as expected + FROM num_result t1, num_exp_div t2 + WHERE t1.id1 = t2.id1 AND t1.id2 = t2.id2 + AND t1.result != round(t2.expected, 80); + id1 | id2 | result | expected +-----+-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Square root check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, SQRT(ABS(val)) + FROM num_data; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_sqrt t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Natural logarithm check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, LN(ABS(val)) + FROM num_data + WHERE val != '0.0'; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_ln t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * Logarithm base 10 check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, LOG('10'::numeric, ABS(val)) + FROM num_data + WHERE val != '0.0'; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_log10 t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- ****************************** +-- * POW(10, LN(value)) check +-- ****************************** +DELETE FROM num_result; +INSERT INTO num_result SELECT id, 0, POW(numeric '10', LN(ABS(round(val,1000)))) + FROM num_data + WHERE val != '0.0'; +SELECT t1.id1, t1.result, t2.expected + FROM num_result t1, num_exp_power_10_ln t2 + WHERE t1.id1 = t2.id + AND t1.result != t2.expected; + id1 | result | expected +-----+--------+---------- +(0 rows) + +-- +-- Test code path for raising to integer powers +-- +-- base less than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of POW(): +-- +-- for p in {-20..20} +-- do +-- b="0.084738" +-- r=$(bc -ql <<< "scale=500 ; $b^$p" | head -n 1) +-- echo "($b, $p, $r)," +-- done +WITH t(b, p, bc_result) AS (VALUES +(0.084738, -20, 2744326694304960114888.785913), +(0.084738, -19, 232548755422013710215.445941), +(0.084738, -18, 19705716436950597776.236458), +(0.084738, -17, 1669822999434319754.362725), +(0.084738, -16, 141497461326065387.345189), +(0.084738, -15, 11990211877848128.792857), +(0.084738, -14, 1016026574105094.737649), +(0.084738, -13, 86096059836517.517879), +(0.084738, -12, 7295607918426.821430), +(0.084738, -11, 618215223791.651994), +(0.084738, -10, 52386321633.657007), +(0.084738, -9, 4439112122.5928274), +(0.084738, -8, 376161483.04427101), +(0.084738, -7, 31875171.750205437), +(0.084738, -6, 2701038.3037689083), 
+(0.084738, -5, 228880.58378476975), +(0.084738, -4, 19394.882908753819), +(0.084738, -3, 1643.4835879219811), +(0.084738, -2, 139.26551227333284), +(0.084738, -1, 11.801080979017678), +(0.084738, 0, 1), +(0.084738, 1, .084738), +(0.084738, 2, .007180528644), +(0.084738, 3, .000608463636235272), +(0.084738, 4, .00005155999160730448), +(0.084738, 5, .000004369090568819767), +(0.084738, 6, .0000003702279966206494), +(0.084738, 7, .00000003137237997764059), +(0.084738, 8, .000000002658432734545308), +(0.084738, 9, .0000000002252702730599003), +(0.084738, 10, .00000000001908895239854983), +(0.084738, 11, .000000000001617559648348316), +(0.084738, 12, .0000000000001370687694817396), +(0.084738, 13, .00000000000001161493338834365), +(0.084738, 14, .0000000000000009842262254614642), +(0.084738, 15, .00000000000000008340136189315355), +(0.084738, 16, .000000000000000007067264604102046), +(0.084738, 17, .0000000000000000005988658680223991), +(0.084738, 18, .00000000000000000005074669592448206), +(0.084738, 19, .000000000000000000004300173519248761), +(0.084738, 20, .0000000000000000000003643881036741015)) +SELECT b, p, bc_result, b^p AS power, b^p - bc_result AS diff FROM t; + b | p | bc_result | power | diff +----------+-----+-----------------------------------------+-----------------------------------------+----------------------------------------- + 0.084738 | -20 | 2744326694304960114888.785913 | 2744326694304960114888.785913 | 0.000000 + 0.084738 | -19 | 232548755422013710215.445941 | 232548755422013710215.445941 | 0.000000 + 0.084738 | -18 | 19705716436950597776.236458 | 19705716436950597776.236458 | 0.000000 + 0.084738 | -17 | 1669822999434319754.362725 | 1669822999434319754.362725 | 0.000000 + 0.084738 | -16 | 141497461326065387.345189 | 141497461326065387.345189 | 0.000000 + 0.084738 | -15 | 11990211877848128.792857 | 11990211877848128.792857 | 0.000000 + 0.084738 | -14 | 1016026574105094.737649 | 1016026574105094.737649 | 0.000000 + 0.084738 | -13 | 86096059836517.517879 | 86096059836517.517879 | 0.000000 + 0.084738 | -12 | 7295607918426.821430 | 7295607918426.821430 | 0.000000 + 0.084738 | -11 | 618215223791.651994 | 618215223791.651994 | 0.000000 + 0.084738 | -10 | 52386321633.657007 | 52386321633.657007 | 0.000000 + 0.084738 | -9 | 4439112122.5928274 | 4439112122.5928274 | 0.0000000 + 0.084738 | -8 | 376161483.04427101 | 376161483.04427101 | 0.00000000 + 0.084738 | -7 | 31875171.750205437 | 31875171.750205437 | 0.000000000 + 0.084738 | -6 | 2701038.3037689083 | 2701038.3037689083 | 0.0000000000 + 0.084738 | -5 | 228880.58378476975 | 228880.58378476975 | 0.00000000000 + 0.084738 | -4 | 19394.882908753819 | 19394.882908753819 | 0.000000000000 + 0.084738 | -3 | 1643.4835879219811 | 1643.4835879219811 | 0.0000000000000 + 0.084738 | -2 | 139.26551227333284 | 139.26551227333284 | 0.00000000000000 + 0.084738 | -1 | 11.801080979017678 | 11.801080979017678 | 0.000000000000000 + 0.084738 | 0 | 1 | 1.0000000000000000 | 0.0000000000000000 + 0.084738 | 1 | 0.084738 | 0.08473800000000000 | 0.00000000000000000 + 0.084738 | 2 | 0.007180528644 | 0.007180528644000000 | 0.000000000000000000 + 0.084738 | 3 | 0.000608463636235272 | 0.0006084636362352720 | 0.0000000000000000000 + 0.084738 | 4 | 0.00005155999160730448 | 0.00005155999160730448 | 0.00000000000000000000 + 0.084738 | 5 | 0.000004369090568819767 | 0.000004369090568819767 | 0.000000000000000000000 + 0.084738 | 6 | 0.0000003702279966206494 | 0.0000003702279966206494 | 0.0000000000000000000000 + 0.084738 | 7 | 0.00000003137237997764059 | 
0.00000003137237997764059 | 0.00000000000000000000000 + 0.084738 | 8 | 0.000000002658432734545308 | 0.000000002658432734545308 | 0.000000000000000000000000 + 0.084738 | 9 | 0.0000000002252702730599003 | 0.0000000002252702730599003 | 0.0000000000000000000000000 + 0.084738 | 10 | 0.00000000001908895239854983 | 0.00000000001908895239854983 | 0.00000000000000000000000000 + 0.084738 | 11 | 0.000000000001617559648348316 | 0.000000000001617559648348316 | 0.000000000000000000000000000 + 0.084738 | 12 | 0.0000000000001370687694817396 | 0.0000000000001370687694817396 | 0.0000000000000000000000000000 + 0.084738 | 13 | 0.00000000000001161493338834365 | 0.00000000000001161493338834365 | 0.00000000000000000000000000000 + 0.084738 | 14 | 0.0000000000000009842262254614642 | 0.0000000000000009842262254614642 | 0.0000000000000000000000000000000 + 0.084738 | 15 | 0.00000000000000008340136189315355 | 0.00000000000000008340136189315355 | 0.00000000000000000000000000000000 + 0.084738 | 16 | 0.000000000000000007067264604102046 | 0.000000000000000007067264604102046 | 0.000000000000000000000000000000000 + 0.084738 | 17 | 0.0000000000000000005988658680223991 | 0.0000000000000000005988658680223991 | 0.0000000000000000000000000000000000 + 0.084738 | 18 | 0.00000000000000000005074669592448206 | 0.00000000000000000005074669592448206 | 0.00000000000000000000000000000000000 + 0.084738 | 19 | 0.000000000000000000004300173519248761 | 0.000000000000000000004300173519248761 | 0.000000000000000000000000000000000000 + 0.084738 | 20 | 0.0000000000000000000003643881036741015 | 0.0000000000000000000003643881036741015 | 0.0000000000000000000000000000000000000 +(41 rows) + +-- base greater than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of POW(): +-- +-- for p in {-20..20} +-- do +-- b="37.821637" +-- r=$(bc -ql <<< "scale=500 ; $b^$p" | head -n 1) +-- echo "($b, $p, $r)," +-- done +WITH t(b, p, bc_result) AS (VALUES +(37.821637, -20, .00000000000000000000000000000002787363175065101), +(37.821637, -19, .000000000000000000000000000001054226381944797), +(37.821637, -18, .00000000000000000000000000003987256753373947), +(37.821637, -17, .000000000000000000000000001508045775519079), +(37.821637, -16, .00000000000000000000000005703675990106610), +(37.821637, -15, .000000000000000000000002157223628634278), +(37.821637, -14, .00000000000000000000008158972901002847), +(37.821637, -13, .000000000000000000003085857113545666), +(37.821637, -12, .0000000000000000001167121675823920), +(37.821637, -11, .000000000000000004414245235784397), +(37.821637, -10, .0000000000000001669539809368169), +(37.821637, -9, .000000000000006314472862697207), +(37.821637, -8, .0000000000002388237004592846), +(37.821637, -7, .000000000009032703305767796), +(37.821637, -6, .0000000003416316255594496), +(37.821637, -5, .00000001292106732962942), +(37.821637, -4, .0000004886959181938034), +(37.821637, -3, .00001848327962130773), +(37.821637, -2, .0006990678924065984), +(37.821637, -1, .02643989206495742), +(37.821637, 0, 1), +(37.821637, 1, 37.821637), +(37.821637, 2, 1430.476225359769), +(37.821637, 3, 54102.952532687378), +(37.821637, 4, 2046262.2313195326), +(37.821637, 5, 77392987.319777394), +(37.821637, 6, 2927129472.7542235), +(37.821637, 7, 110708828370.511632), +(37.821637, 8, 4187189119324.792454), +(37.821637, 9, 158366346921451.985294), +(37.821637, 10, 5989674486279224.500736), +(37.821637, 11, 226539294168214309.708325), +(37.821637, 12, 
8568086950266418559.993831), +(37.821637, 13, 324059074417413536066.149409), +(37.821637, 14, 12256444679171401239980.310926), +(37.821637, 15, 463558801566202198479885.206986), +(37.821637, 16, 17532552720991931019508170.100286), +(37.821637, 17, 663109844696719094948877928.067252), +(37.821637, 18, 25079899837245684700124994552.671731), +(37.821637, 19, 948562867640665366544581398598.127577), +(37.821637, 20, 35876200451584291931921101974730.690104)) +SELECT b, p, bc_result, b^p AS power, b^p - bc_result AS diff FROM t; + b | p | bc_result | power | diff +-----------+-----+---------------------------------------------------+---------------------------------------------------+--------------------------------------------------- + 37.821637 | -20 | 0.00000000000000000000000000000002787363175065101 | 0.00000000000000000000000000000002787363175065101 | 0.00000000000000000000000000000000000000000000000 + 37.821637 | -19 | 0.000000000000000000000000000001054226381944797 | 0.000000000000000000000000000001054226381944797 | 0.000000000000000000000000000000000000000000000 + 37.821637 | -18 | 0.00000000000000000000000000003987256753373947 | 0.00000000000000000000000000003987256753373947 | 0.00000000000000000000000000000000000000000000 + 37.821637 | -17 | 0.000000000000000000000000001508045775519079 | 0.000000000000000000000000001508045775519079 | 0.000000000000000000000000000000000000000000 + 37.821637 | -16 | 0.00000000000000000000000005703675990106610 | 0.00000000000000000000000005703675990106610 | 0.00000000000000000000000000000000000000000 + 37.821637 | -15 | 0.000000000000000000000002157223628634278 | 0.000000000000000000000002157223628634278 | 0.000000000000000000000000000000000000000 + 37.821637 | -14 | 0.00000000000000000000008158972901002847 | 0.00000000000000000000008158972901002847 | 0.00000000000000000000000000000000000000 + 37.821637 | -13 | 0.000000000000000000003085857113545666 | 0.000000000000000000003085857113545666 | 0.000000000000000000000000000000000000 + 37.821637 | -12 | 0.0000000000000000001167121675823920 | 0.0000000000000000001167121675823920 | 0.0000000000000000000000000000000000 + 37.821637 | -11 | 0.000000000000000004414245235784397 | 0.000000000000000004414245235784397 | 0.000000000000000000000000000000000 + 37.821637 | -10 | 0.0000000000000001669539809368169 | 0.0000000000000001669539809368169 | 0.0000000000000000000000000000000 + 37.821637 | -9 | 0.000000000000006314472862697207 | 0.000000000000006314472862697207 | 0.000000000000000000000000000000 + 37.821637 | -8 | 0.0000000000002388237004592846 | 0.0000000000002388237004592846 | 0.0000000000000000000000000000 + 37.821637 | -7 | 0.000000000009032703305767796 | 0.000000000009032703305767796 | 0.000000000000000000000000000 + 37.821637 | -6 | 0.0000000003416316255594496 | 0.0000000003416316255594496 | 0.0000000000000000000000000 + 37.821637 | -5 | 0.00000001292106732962942 | 0.00000001292106732962942 | 0.00000000000000000000000 + 37.821637 | -4 | 0.0000004886959181938034 | 0.0000004886959181938034 | 0.0000000000000000000000 + 37.821637 | -3 | 0.00001848327962130773 | 0.00001848327962130773 | 0.00000000000000000000 + 37.821637 | -2 | 0.0006990678924065984 | 0.0006990678924065984 | 0.0000000000000000000 + 37.821637 | -1 | 0.02643989206495742 | 0.02643989206495742 | 0.00000000000000000 + 37.821637 | 0 | 1 | 1.0000000000000000 | 0.0000000000000000 + 37.821637 | 1 | 37.821637 | 37.821637000000000 | 0.000000000000000 + 37.821637 | 2 | 1430.476225359769 | 1430.4762253597690 | 0.0000000000000 + 37.821637 | 3 | 54102.952532687378 
| 54102.952532687378 | 0.000000000000 + 37.821637 | 4 | 2046262.2313195326 | 2046262.2313195326 | 0.0000000000 + 37.821637 | 5 | 77392987.319777394 | 77392987.319777394 | 0.000000000 + 37.821637 | 6 | 2927129472.7542235 | 2927129472.7542235 | 0.0000000 + 37.821637 | 7 | 110708828370.511632 | 110708828370.511632 | 0.000000 + 37.821637 | 8 | 4187189119324.792454 | 4187189119324.792454 | 0.000000 + 37.821637 | 9 | 158366346921451.985294 | 158366346921451.985294 | 0.000000 + 37.821637 | 10 | 5989674486279224.500736 | 5989674486279224.500736 | 0.000000 + 37.821637 | 11 | 226539294168214309.708325 | 226539294168214309.708325 | 0.000000 + 37.821637 | 12 | 8568086950266418559.993831 | 8568086950266418559.993831 | 0.000000 + 37.821637 | 13 | 324059074417413536066.149409 | 324059074417413536066.149409 | 0.000000 + 37.821637 | 14 | 12256444679171401239980.310926 | 12256444679171401239980.310926 | 0.000000 + 37.821637 | 15 | 463558801566202198479885.206986 | 463558801566202198479885.206986 | 0.000000 + 37.821637 | 16 | 17532552720991931019508170.100286 | 17532552720991931019508170.100286 | 0.000000 + 37.821637 | 17 | 663109844696719094948877928.067252 | 663109844696719094948877928.067252 | 0.000000 + 37.821637 | 18 | 25079899837245684700124994552.671731 | 25079899837245684700124994552.671731 | 0.000000 + 37.821637 | 19 | 948562867640665366544581398598.127577 | 948562867640665366544581398598.127577 | 0.000000 + 37.821637 | 20 | 35876200451584291931921101974730.690104 | 35876200451584291931921101974730.690104 | 0.000000 +(41 rows) + +-- +-- Tests for raising to non-integer powers +-- +-- base less than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of POW(): +-- +-- for n in {-20..20} +-- do +-- b="0.06933247" +-- p="$n.342987" +-- r=$(bc -ql <<< "scale=500 ; e($p*l($b))" | head -n 1) +-- echo "($b, $p, $r)," +-- done +WITH t(b, p, bc_result) AS (VALUES +(0.06933247, -20.342987, 379149253615977128356318.39406340), +(0.06933247, -19.342987, 26287354251852125772450.59436685), +(0.06933247, -18.342987, 1822567200045909954554.65766042), +(0.06933247, -17.342987, 126363085720167050546.86216560), +(0.06933247, -16.342987, 8761064849800910427.02880469), +(0.06933247, -15.342987, 607426265866876128.15466179), +(0.06933247, -14.342987, 42114363355427213.14899924), +(0.06933247, -13.342987, 2919892833909256.59283660), +(0.06933247, -12.342987, 202443382310228.51544515), +(0.06933247, -11.342987, 14035899730722.44924025), +(0.06933247, -10.342987, 973143597003.32229028), +(0.06933247, -9.342987, 67470449244.92493259), +(0.06933247, -8.342987, 4677892898.16028054), +(0.06933247, -7.342987, 324329869.02491071), +(0.06933247, -6.342987, 22486590.914273551), +(0.06933247, -5.342987, 1559050.8899661435), +(0.06933247, -4.342987, 108092.84905705095), +(0.06933247, -3.342987, 7494.3442144625131), +(0.06933247, -2.342987, 519.60139541889576), +(0.06933247, -1.342987, 36.025248159838727), +(0.06933247, 0.342987, .40036522320023350), +(0.06933247, 1.342987, .02775830982657349), +(0.06933247, 2.342987, .001924552183301612), +(0.06933247, 3.342987, .0001334339565121935), +(0.06933247, 4.342987, .000009251305786862961), +(0.06933247, 5.342987, .0000006414158809285026), +(0.06933247, 6.342987, .00000004447094732199898), +(0.06933247, 7.342987, .000000003083280621074075), +(0.06933247, 8.342987, .0000000002137714611621997), +(0.06933247, 9.342987, .00000000001482130341788437), +(0.06933247, 10.342987, .000000000001027597574581366), 
+(0.06933247, 11.342987, .00000000000007124587801173530), +(0.06933247, 12.342987, .000000000000004939652699872298), +(0.06933247, 13.342987, .0000000000000003424783226243151), +(0.06933247, 14.342987, .00000000000000002374486802900065), +(0.06933247, 15.342987, .000000000000000001646290350274646), +(0.06933247, 16.342987, .0000000000000000001141413763217064), +(0.06933247, 17.342987, .000000000000000000007913703549583420), +(0.06933247, 18.342987, .0000000000000000000005486766139403860), +(0.06933247, 19.342987, .00000000000000000000003804110487572339), +(0.06933247, 20.342987, .000000000000000000000002637483762562946)) +SELECT b, p, bc_result, b^p AS power, b^p - bc_result AS diff FROM t; + b | p | bc_result | power | diff +------------+------------+-------------------------------------------+-------------------------------------------+------------------------------------------- + 0.06933247 | -20.342987 | 379149253615977128356318.39406340 | 379149253615977128356318.39406340 | 0.00000000 + 0.06933247 | -19.342987 | 26287354251852125772450.59436685 | 26287354251852125772450.59436685 | 0.00000000 + 0.06933247 | -18.342987 | 1822567200045909954554.65766042 | 1822567200045909954554.65766042 | 0.00000000 + 0.06933247 | -17.342987 | 126363085720167050546.86216560 | 126363085720167050546.86216560 | 0.00000000 + 0.06933247 | -16.342987 | 8761064849800910427.02880469 | 8761064849800910427.02880469 | 0.00000000 + 0.06933247 | -15.342987 | 607426265866876128.15466179 | 607426265866876128.15466179 | 0.00000000 + 0.06933247 | -14.342987 | 42114363355427213.14899924 | 42114363355427213.14899924 | 0.00000000 + 0.06933247 | -13.342987 | 2919892833909256.59283660 | 2919892833909256.59283660 | 0.00000000 + 0.06933247 | -12.342987 | 202443382310228.51544515 | 202443382310228.51544515 | 0.00000000 + 0.06933247 | -11.342987 | 14035899730722.44924025 | 14035899730722.44924025 | 0.00000000 + 0.06933247 | -10.342987 | 973143597003.32229028 | 973143597003.32229028 | 0.00000000 + 0.06933247 | -9.342987 | 67470449244.92493259 | 67470449244.92493259 | 0.00000000 + 0.06933247 | -8.342987 | 4677892898.16028054 | 4677892898.16028054 | 0.00000000 + 0.06933247 | -7.342987 | 324329869.02491071 | 324329869.02491071 | 0.00000000 + 0.06933247 | -6.342987 | 22486590.914273551 | 22486590.914273551 | 0.000000000 + 0.06933247 | -5.342987 | 1559050.8899661435 | 1559050.8899661435 | 0.0000000000 + 0.06933247 | -4.342987 | 108092.84905705095 | 108092.84905705095 | 0.00000000000 + 0.06933247 | -3.342987 | 7494.3442144625131 | 7494.3442144625131 | 0.0000000000000 + 0.06933247 | -2.342987 | 519.60139541889576 | 519.60139541889576 | 0.00000000000000 + 0.06933247 | -1.342987 | 36.025248159838727 | 36.025248159838727 | 0.000000000000000 + 0.06933247 | 0.342987 | 0.40036522320023350 | 0.4003652232002335 | 0.00000000000000000 + 0.06933247 | 1.342987 | 0.02775830982657349 | 0.02775830982657349 | 0.00000000000000000 + 0.06933247 | 2.342987 | 0.001924552183301612 | 0.001924552183301612 | 0.000000000000000000 + 0.06933247 | 3.342987 | 0.0001334339565121935 | 0.0001334339565121935 | 0.0000000000000000000 + 0.06933247 | 4.342987 | 0.000009251305786862961 | 0.000009251305786862961 | 0.000000000000000000000 + 0.06933247 | 5.342987 | 0.0000006414158809285026 | 0.0000006414158809285026 | 0.0000000000000000000000 + 0.06933247 | 6.342987 | 0.00000004447094732199898 | 0.00000004447094732199898 | 0.00000000000000000000000 + 0.06933247 | 7.342987 | 0.000000003083280621074075 | 0.000000003083280621074075 | 0.000000000000000000000000 + 0.06933247 | 
8.342987 | 0.0000000002137714611621997 | 0.0000000002137714611621997 | 0.0000000000000000000000000 + 0.06933247 | 9.342987 | 0.00000000001482130341788437 | 0.00000000001482130341788437 | 0.00000000000000000000000000 + 0.06933247 | 10.342987 | 0.000000000001027597574581366 | 0.000000000001027597574581366 | 0.000000000000000000000000000 + 0.06933247 | 11.342987 | 0.00000000000007124587801173530 | 0.00000000000007124587801173530 | 0.00000000000000000000000000000 + 0.06933247 | 12.342987 | 0.000000000000004939652699872298 | 0.000000000000004939652699872298 | 0.000000000000000000000000000000 + 0.06933247 | 13.342987 | 0.0000000000000003424783226243151 | 0.0000000000000003424783226243151 | 0.0000000000000000000000000000000 + 0.06933247 | 14.342987 | 0.00000000000000002374486802900065 | 0.00000000000000002374486802900065 | 0.00000000000000000000000000000000 + 0.06933247 | 15.342987 | 0.000000000000000001646290350274646 | 0.000000000000000001646290350274646 | 0.000000000000000000000000000000000 + 0.06933247 | 16.342987 | 0.0000000000000000001141413763217064 | 0.0000000000000000001141413763217064 | 0.0000000000000000000000000000000000 + 0.06933247 | 17.342987 | 0.000000000000000000007913703549583420 | 0.000000000000000000007913703549583420 | 0.000000000000000000000000000000000000 + 0.06933247 | 18.342987 | 0.0000000000000000000005486766139403860 | 0.0000000000000000000005486766139403860 | 0.0000000000000000000000000000000000000 + 0.06933247 | 19.342987 | 0.00000000000000000000003804110487572339 | 0.00000000000000000000003804110487572339 | 0.00000000000000000000000000000000000000 + 0.06933247 | 20.342987 | 0.000000000000000000000002637483762562946 | 0.000000000000000000000002637483762562946 | 0.000000000000000000000000000000000000000 +(41 rows) + +-- base greater than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of POW(): +-- +-- for n in {-20..20} +-- do +-- b="27.234987" +-- p="$n.230957" +-- r=$(bc -ql <<< "scale=500 ; e($p*l($b))" | head -n 1) +-- echo "($b, $p, $r)," +-- done +WITH t(b, p, bc_result) AS (VALUES +(27.234987, -20.230957, .000000000000000000000000000009247064512095633), +(27.234987, -19.230957, .0000000000000000000000000002518436817750859), +(27.234987, -18.230957, .000000000000000000000000006858959399176602), +(27.234987, -17.230957, .0000000000000000000000001868036700701026), +(27.234987, -16.230957, .000000000000000000000005087595525911532), +(27.234987, -15.230957, .0000000000000000000001385605980094587), +(27.234987, -14.230957, .000000000000000000003773696085499835), +(27.234987, -13.230957, .0000000000000000001027765638305389), +(27.234987, -12.230957, .000000000000000002799118379829397), +(27.234987, -11.230957, .00000000000000007623395268611469), +(27.234987, -10.230957, .000000000000002076230710364949), +(27.234987, -9.230957, .00000000000005654611640579014), +(27.234987, -8.230957, .000000000001540032745212181), +(27.234987, -7.230957, .00000000004194277179542807), +(27.234987, -6.230957, .000000001142310844592450), +(27.234987, -5.230957, .00000003111082100243440), +(27.234987, -4.230957, .0000008473028055606278), +(27.234987, -3.230957, .00002307628089450723), +(27.234987, -2.230957, .0006284822101702527), +(27.234987, -1.230957, .01711670482371810), +(27.234987, 0.230957, 2.1451253063142300), +(27.234987, 1.230957, 58.422459830839071), +(27.234987, 2.230957, 1591.1349340009243), +(27.234987, 3.230957, 43334.539242761031), +(27.234987, 4.230957, 1180215.6129275865), 
+(27.234987, 5.230957, 32143156.875279851), +(27.234987, 6.230957, 875418459.63720737), +(27.234987, 7.230957, 23842010367.779367), +(27.234987, 8.230957, 649336842420.336290), +(27.234987, 9.230957, 17684680461938.907402), +(27.234987, 10.230957, 481642042480060.137900), +(27.234987, 11.230957, 13117514765597885.614921), +(27.234987, 12.230957, 357255344113366461.949871), +(27.234987, 13.230957, 9729844652608062117.440722), +(27.234987, 14.230957, 264992192625800087863.690528), +(27.234987, 15.230957, 7217058921265161257566.469315), +(27.234987, 16.230957, 196556505898890690402726.443417), +(27.234987, 17.230957, 5353213882921711267539279.451015), +(27.234987, 18.230957, 145794710509592328389185797.837767), +(27.234987, 19.230957, 3970717045397510438979206144.696206), +(27.234987, 20.230957, 108142427112079606637962972621.121293)) +SELECT b, p, bc_result, b^p AS power, b^p - bc_result AS diff FROM t; + b | p | bc_result | power | diff +-----------+------------+-------------------------------------------------+-------------------------------------------------+------------------------------------------------- + 27.234987 | -20.230957 | 0.000000000000000000000000000009247064512095633 | 0.000000000000000000000000000009247064512095633 | 0.000000000000000000000000000000000000000000000 + 27.234987 | -19.230957 | 0.0000000000000000000000000002518436817750859 | 0.0000000000000000000000000002518436817750859 | 0.0000000000000000000000000000000000000000000 + 27.234987 | -18.230957 | 0.000000000000000000000000006858959399176602 | 0.000000000000000000000000006858959399176602 | 0.000000000000000000000000000000000000000000 + 27.234987 | -17.230957 | 0.0000000000000000000000001868036700701026 | 0.0000000000000000000000001868036700701026 | 0.0000000000000000000000000000000000000000 + 27.234987 | -16.230957 | 0.000000000000000000000005087595525911532 | 0.000000000000000000000005087595525911532 | 0.000000000000000000000000000000000000000 + 27.234987 | -15.230957 | 0.0000000000000000000001385605980094587 | 0.0000000000000000000001385605980094587 | 0.0000000000000000000000000000000000000 + 27.234987 | -14.230957 | 0.000000000000000000003773696085499835 | 0.000000000000000000003773696085499835 | 0.000000000000000000000000000000000000 + 27.234987 | -13.230957 | 0.0000000000000000001027765638305389 | 0.0000000000000000001027765638305389 | 0.0000000000000000000000000000000000 + 27.234987 | -12.230957 | 0.000000000000000002799118379829397 | 0.000000000000000002799118379829397 | 0.000000000000000000000000000000000 + 27.234987 | -11.230957 | 0.00000000000000007623395268611469 | 0.00000000000000007623395268611469 | 0.00000000000000000000000000000000 + 27.234987 | -10.230957 | 0.000000000000002076230710364949 | 0.000000000000002076230710364949 | 0.000000000000000000000000000000 + 27.234987 | -9.230957 | 0.00000000000005654611640579014 | 0.00000000000005654611640579014 | 0.00000000000000000000000000000 + 27.234987 | -8.230957 | 0.000000000001540032745212181 | 0.000000000001540032745212181 | 0.000000000000000000000000000 + 27.234987 | -7.230957 | 0.00000000004194277179542807 | 0.00000000004194277179542807 | 0.00000000000000000000000000 + 27.234987 | -6.230957 | 0.000000001142310844592450 | 0.000000001142310844592450 | 0.000000000000000000000000 + 27.234987 | -5.230957 | 0.00000003111082100243440 | 0.00000003111082100243440 | 0.00000000000000000000000 + 27.234987 | -4.230957 | 0.0000008473028055606278 | 0.0000008473028055606278 | 0.0000000000000000000000 + 27.234987 | -3.230957 | 0.00002307628089450723 | 
0.00002307628089450723 | 0.00000000000000000000 + 27.234987 | -2.230957 | 0.0006284822101702527 | 0.0006284822101702527 | 0.0000000000000000000 + 27.234987 | -1.230957 | 0.01711670482371810 | 0.01711670482371810 | 0.00000000000000000 + 27.234987 | 0.230957 | 2.1451253063142300 | 2.1451253063142300 | 0.0000000000000000 + 27.234987 | 1.230957 | 58.422459830839071 | 58.422459830839071 | 0.000000000000000 + 27.234987 | 2.230957 | 1591.1349340009243 | 1591.1349340009243 | 0.0000000000000 + 27.234987 | 3.230957 | 43334.539242761031 | 43334.539242761031 | 0.000000000000 + 27.234987 | 4.230957 | 1180215.6129275865 | 1180215.6129275865 | 0.0000000000 + 27.234987 | 5.230957 | 32143156.875279851 | 32143156.875279851 | 0.000000000 + 27.234987 | 6.230957 | 875418459.63720737 | 875418459.63720737 | 0.00000000 + 27.234987 | 7.230957 | 23842010367.779367 | 23842010367.779367 | 0.000000 + 27.234987 | 8.230957 | 649336842420.336290 | 649336842420.336290 | 0.000000 + 27.234987 | 9.230957 | 17684680461938.907402 | 17684680461938.907402 | 0.000000 + 27.234987 | 10.230957 | 481642042480060.137900 | 481642042480060.137900 | 0.000000 + 27.234987 | 11.230957 | 13117514765597885.614921 | 13117514765597885.614921 | 0.000000 + 27.234987 | 12.230957 | 357255344113366461.949871 | 357255344113366461.949871 | 0.000000 + 27.234987 | 13.230957 | 9729844652608062117.440722 | 9729844652608062117.440722 | 0.000000 + 27.234987 | 14.230957 | 264992192625800087863.690528 | 264992192625800087863.690528 | 0.000000 + 27.234987 | 15.230957 | 7217058921265161257566.469315 | 7217058921265161257566.469315 | 0.000000 + 27.234987 | 16.230957 | 196556505898890690402726.443417 | 196556505898890690402726.443417 | 0.000000 + 27.234987 | 17.230957 | 5353213882921711267539279.451015 | 5353213882921711267539279.451015 | 0.000000 + 27.234987 | 18.230957 | 145794710509592328389185797.837767 | 145794710509592328389185797.837767 | 0.000000 + 27.234987 | 19.230957 | 3970717045397510438979206144.696206 | 3970717045397510438979206144.696206 | 0.000000 + 27.234987 | 20.230957 | 108142427112079606637962972621.121293 | 108142427112079606637962972621.121293 | 0.000000 +(41 rows) + +-- Inputs close to overflow +-- +-- bc(1) results computed with a scale of 2700 and truncated to 4 decimal +-- places. 
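+--
+-- A sketch of how reference values of this form can be recomputed (assuming
+-- GNU bc, whose -l math library provides l() and e() for the non-integer
+-- exponents):
+--
+-- bc -ql <<< "scale=2700 ; e(-2829.8369 * l(0.12))"
+-- bc -ql <<< "scale=2700 ; e(32908.8896 * l(1.2))"
+--
+-- with the output then truncated to 4 decimal places.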
+WITH t(b, p, bc_result) AS (VALUES +(0.12, -2829.8369, 58463948950011752465280493160293790845494328939320966633018493248607815580903065923369555885857984675501574162389726507612128133630191173383130639968378879506624785786843501848666498440326970769604109017960864573408272864266102690849952650095786874354625921641729880352858506454246180842452983243549491658464046163869265572232996388827878976066830374513768599285647145439771472435206769249126377164951470622827631950210853282324510655982757098065657709137845327135766013147354253426364240746381620690117663724329288646510198895137275207992825719846135857839292915100523542874885080351683587865157015032404901182924720371819942957083390475846809517968191151435281268695782594904484795360890092607679215675240583291240729468370895035823777914792823688291214492607109455017754453939895630226174304357121900605689015734289765672740769194115142607443713769825894380064727556869268488695795705030158832909348803019429370973064732712469794182891757241046263341655894972953512257981661670321890336672832647028099324621932563236459127918144141230217523147304565594514812518826936144181257723061181656522095236928347413997136815409159361412494284201481609684892562646522086577634100783077813105675590737823924220663206479031113753135119759722725207724879578900186075841393115040465401462266086907464970054073340036852442184414587772177753008511913377364966775792477387717262694468450099866775550614257191941835797445874557362115814601886902749237439492398087966544817154173072811937702110580330775581851211123491341435883319798273456296794954514173820352334127081705706502510709179711510240917772628308487366740741280043704807717608366220401933596364641284631036907635403895053036499618723044314773148779735006542501244942039455169872946018271985844759209768927953340447524637670938413827595013338859796135512187473850161303598087634723542727044978083220970836296653305188470017342167913572166172051819741354902582606590658382067039498769674611071582171914886494269818475850690414812481252963932223686078322390396586222238852602472958831686564971334200490182175112490433364675164900946902818404704835106260174052265784055642968397240262737313737007322288203637798365320295080314524864099419556398713380156353062937736280885716820226469419928595465390700629307079710611273715705695938635644841913194091407807776191951797748706106000922803167645881087385311847268311361092838264814899353459146959869764278464187826798546290981492648723002412475976344071283321798061003719251864595518596639432393032991023409676558943539937377229130132816883146259468718344018277257037013406135980469482324577407154032999045733141275895.3432), +(1.2, 32908.8896, 
58463467728170833376633133695001863276259293590926929026251227859007891876739460057725441400966420577009060860805883032969522911803372870882799865787473726926215148161529632590083389287080925059682489116446754279752928005457087175157581627230586554364417068189211136840990661174760199073702207450133797324318403866058202372178813998850887986769280847189341565507156189065295823921162851958925352114220880236114784962150135485415106748467247897246441194126125699204912883449386043559785865023459356275014504597646990160571664166410683323036984805434677654413174177920726210827006973855410386789516533036723888687725436216478665958434776205940192130053647653715221076841771578099896259902368829351569726536927952661429685419815305418450230567773264738536471211804481206474781470237730069753206249915908804615495060673071058534441654604668770343616386612119048579369195201590008082689834456232255266932976831478404670192731621439902738547169253818323045451045749609624500171633897705543164388470746657118050314064066768449450440405619135824055131398727045420324382226572368236570500391463795989258779677208133531636928003546809249007993065200108076924439703799231711400266122025052209803513232429907231051873161206025860851056337427740362763618748092029386371493898291580557004812947013231371383576580415676519066503391905962989205397824064923920045371823949776899815750413244195402085917098964452866825666226141169411712884994564949174271056284898570445214367063763956186792886147126466387576513166370247576466566827375268334148320298849218878848928271566491769458471357076035396330179659440244425914213309776100351793665960978678576150833311810944729586040624059867137538839913141142139636023129691775489034134511666020819676247950267220131499463010350308195762769192775344260909521732256844149916046793599150786757764962585268686580124987490115873389726527572428003433405659445349155536369077209682951123806333170190998931670309088422483075609203671527331975811507450670132060984691061148836994322505371265263690017938762760088575875666254883673433331627055180154954694693433502522592907190906966067656027637884202418119121728966267936832338377284832958974299187166554160783467156478554899314000348357280306042140481751668215838656488457943830180819301102535170705017482946779698265096226184239631924271857062033454725540956591929965181603262502135610768915716020374362368495244256420143645126927013882334008435586481691725030031204304273292938132599127402133470745819213047706793887965197191137237066440328777206799072470374264316425913530947082957300047105685634407092811630672103242089966046839626911122.7149)) +SELECT b, p, bc_result, b^p AS power, b^p - bc_result AS diff FROM t; + b | p | bc_result | power | diff 
+------+------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-------- + 0.12 | -2829.8369 | 584639489500117524652804931602937908454943289393209666330184932486078155809030659233695558858579846755015741623897265076121281336301911733831306399683788795066247857868435018486664984403269707696041090179608645734082728642661026908499526500957868743546259216417298803528585064542461808424529832435494916584640461638692655722329963888278789760668303745137685992856471454397714724352067692491263771649514706228276319502108532823245106559827570980656577091378453271357660131473542534263642407463816206901176637243292886465101988951372752079928257198461358578392929151005235428748850803516835878651570150324049011829247203718199429570833904758468095179681911514352812686957825949044847953608900926076792156752405832912407294683708950358237779147928236882912144926071094550177544539398956302261743043571219006056890157342897656727407691941151426074437137698258943800647275568692684886957957050301588329093488030194293709730647327124697941828917572410462633416558949729535122579816616703218903366728326470280993246219325632364591279181441412302175231473045655945148125188269361441812577230611816565220952369283474139971368154091593614124942842014816096848925626465220865776341007830778131056755907378239242206632064790311137531351197597227252077248795789001860758413931150404654014622660869074649700540733400368524421844145877721777530085119133773649667757924773877172626944684500998667755506142571919418357974458745573621158146018869027492374394923980879665448171541730728119377021105803307755818512111234913414358833197982734562967949545141738203523341270817057065025107091797115102409177726283084873667407412800437048077176083662204019335963646412846310369076354038950530364996187230443147731487797350065425012449420394551698729460182719858447592097689279533404475246376709384138275950133388597961355121874738501613035980876347235427
27044978083220970836296653305188470017342167913572166172051819741354902582606590658382067039498769674611071582171914886494269818475850690414812481252963932223686078322390396586222238852602472958831686564971334200490182175112490433364675164900946902818404704835106260174052265784055642968397240262737313737007322288203637798365320295080314524864099419556398713380156353062937736280885716820226469419928595465390700629307079710611273715705695938635644841913194091407807776191951797748706106000922803167645881087385311847268311361092838264814899353459146959869764278464187826798546290981492648723002412475976344071283321798061003719251864595518596639432393032991023409676558943539937377229130132816883146259468718344018277257037013406135980469482324577407154032999045733141275895.3432 | 58463948950011752465280493160293790845494328939320966633018493248607815580903065923369555885857984675501574162389726507612128133630191173383130639968378879506624785786843501848666498440326970769604109017960864573408272864266102690849952650095786874354625921641729880352858506454246180842452983243549491658464046163869265572232996388827878976066830374513768599285647145439771472435206769249126377164951470622827631950210853282324510655982757098065657709137845327135766013147354253426364240746381620690117663724329288646510198895137275207992825719846135857839292915100523542874885080351683587865157015032404901182924720371819942957083390475846809517968191151435281268695782594904484795360890092607679215675240583291240729468370895035823777914792823688291214492607109455017754453939895630226174304357121900605689015734289765672740769194115142607443713769825894380064727556869268488695795705030158832909348803019429370973064732712469794182891757241046263341655894972953512257981661670321890336672832647028099324621932563236459127918144141230217523147304565594514812518826936144181257723061181656522095236928347413997136815409159361412494284201481609684892562646522086577634100783077813105675590737823924220663206479031113753135119759722725207724879578900186075841393115040465401462266086907464970054073340036852442184414587772177753008511913377364966775792477387717262694468450099866775550614257191941835797445874557362115814601886902749237439492398087966544817154173072811937702110580330775581851211123491341435883319798273456296794954514173820352334127081705706502510709179711510240917772628308487366740741280043704807717608366220401933596364641284631036907635403895053036499618723044314773148779735006542501244942039455169872946018271985844759209768927953340447524637670938413827595013338859796135512187473850161303598087634723542727044978083220970836296653305188470017342167913572166172051819741354902582606590658382067039498769674611071582171914886494269818475850690414812481252963932223686078322390396586222238852602472958831686564971334200490182175112490433364675164900946902818404704835106260174052265784055642968397240262737313737007322288203637798365320295080314524864099419556398713380156353062937736280885716820226469419928595465390700629307079710611273715705695938635644841913194091407807776191951797748706106000922803167645881087385311847268311361092838264814899353459146959869764278464187826798546290981492648723002412475976344071283321798061003719251864595518596639432393032991023409676558943539937377229130132816883146259468718344018277257037013406135980469482324577407154032999045733141275895.3432 | 0.0000 + 1.2 | 32908.8896 | 
58463467728170833376633133695001863276259293590926929026251227859007891876739460057725441400966420577009060860805883032969522911803372870882799865787473726926215148161529632590083389287080925059682489116446754279752928005457087175157581627230586554364417068189211136840990661174760199073702207450133797324318403866058202372178813998850887986769280847189341565507156189065295823921162851958925352114220880236114784962150135485415106748467247897246441194126125699204912883449386043559785865023459356275014504597646990160571664166410683323036984805434677654413174177920726210827006973855410386789516533036723888687725436216478665958434776205940192130053647653715221076841771578099896259902368829351569726536927952661429685419815305418450230567773264738536471211804481206474781470237730069753206249915908804615495060673071058534441654604668770343616386612119048579369195201590008082689834456232255266932976831478404670192731621439902738547169253818323045451045749609624500171633897705543164388470746657118050314064066768449450440405619135824055131398727045420324382226572368236570500391463795989258779677208133531636928003546809249007993065200108076924439703799231711400266122025052209803513232429907231051873161206025860851056337427740362763618748092029386371493898291580557004812947013231371383576580415676519066503391905962989205397824064923920045371823949776899815750413244195402085917098964452866825666226141169411712884994564949174271056284898570445214367063763956186792886147126466387576513166370247576466566827375268334148320298849218878848928271566491769458471357076035396330179659440244425914213309776100351793665960978678576150833311810944729586040624059867137538839913141142139636023129691775489034134511666020819676247950267220131499463010350308195762769192775344260909521732256844149916046793599150786757764962585268686580124987490115873389726527572428003433405659445349155536369077209682951123806333170190998931670309088422483075609203671527331975811507450670132060984691061148836994322505371265263690017938762760088575875666254883673433331627055180154954694693433502522592907190906966067656027637884202418119121728966267936832338377284832958974299187166554160783467156478554899314000348357280306042140481751668215838656488457943830180819301102535170705017482946779698265096226184239631924271857062033454725540956591929965181603262502135610768915716020374362368495244256420143645126927013882334008435586481691725030031204304273292938132599127402133470745819213047706793887965197191137237066440328777206799072470374264316425913530947082957300047105685634407092811630672103242089966046839626911122.7149 | 
58463467728170833376633133695001863276259293590926929026251227859007891876739460057725441400966420577009060860805883032969522911803372870882799865787473726926215148161529632590083389287080925059682489116446754279752928005457087175157581627230586554364417068189211136840990661174760199073702207450133797324318403866058202372178813998850887986769280847189341565507156189065295823921162851958925352114220880236114784962150135485415106748467247897246441194126125699204912883449386043559785865023459356275014504597646990160571664166410683323036984805434677654413174177920726210827006973855410386789516533036723888687725436216478665958434776205940192130053647653715221076841771578099896259902368829351569726536927952661429685419815305418450230567773264738536471211804481206474781470237730069753206249915908804615495060673071058534441654604668770343616386612119048579369195201590008082689834456232255266932976831478404670192731621439902738547169253818323045451045749609624500171633897705543164388470746657118050314064066768449450440405619135824055131398727045420324382226572368236570500391463795989258779677208133531636928003546809249007993065200108076924439703799231711400266122025052209803513232429907231051873161206025860851056337427740362763618748092029386371493898291580557004812947013231371383576580415676519066503391905962989205397824064923920045371823949776899815750413244195402085917098964452866825666226141169411712884994564949174271056284898570445214367063763956186792886147126466387576513166370247576466566827375268334148320298849218878848928271566491769458471357076035396330179659440244425914213309776100351793665960978678576150833311810944729586040624059867137538839913141142139636023129691775489034134511666020819676247950267220131499463010350308195762769192775344260909521732256844149916046793599150786757764962585268686580124987490115873389726527572428003433405659445349155536369077209682951123806333170190998931670309088422483075609203671527331975811507450670132060984691061148836994322505371265263690017938762760088575875666254883673433331627055180154954694693433502522592907190906966067656027637884202418119121728966267936832338377284832958974299187166554160783467156478554899314000348357280306042140481751668215838656488457943830180819301102535170705017482946779698265096226184239631924271857062033454725540956591929965181603262502135610768915716020374362368495244256420143645126927013882334008435586481691725030031204304273292938132599127402133470745819213047706793887965197191137237066440328777206799072470374264316425913530947082957300047105685634407092811630672103242089966046839626911122.7149 | 0.0000 +(2 rows) + +-- +-- Tests for EXP() +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of EXP(): +-- +-- for n in {-20..20} +-- do +-- x="$n.29837" +-- r=$(bc -ql <<< "scale=500 ; e($x)" | head -n 1) +-- echo "($x, $r)," +-- done +WITH t(x, bc_result) AS (VALUES +(-20.29837, .000000001529431101152222), +(-19.29837, .000000004157424770142192), +(-18.29837, .00000001130105220586304), +(-17.29837, .00000003071944485366452), +(-16.29837, .00000008350410872606600), +(-15.29837, .0000002269877013517336), +(-14.29837, .0000006170165438681061), +(-13.29837, .000001677224859055276), +(-12.29837, .000004559169856609741), +(-11.29837, .00001239310857408049), +(-10.29837, .00003368796183504298), +(-9.29837, .00009157337449401917), +(-8.29837, .0002489222398577673), +(-7.29837, .0006766408013046928), +(-6.29837, .001839300394580514), 
+(-5.29837, .004999736839665763), +(-4.29837, .01359069379834070), +(-3.29837, .03694333598818056), +(-2.29837, .1004223988993283), +(-1.29837, .2729763820983097), +(0.29837, 1.3476603299656679), +(1.29837, 3.6633205858807959), +(2.29837, 9.9579377804197108), +(3.29837, 27.068481317440698), +(4.29837, 73.579760889182206), +(5.29837, 200.01052696742555), +(6.29837, 543.68498095607070), +(7.29837, 1477.8890041389891), +(8.29837, 4017.3188244304487), +(9.29837, 10920.204759575742), +(10.29837, 29684.194161006717), +(11.29837, 80690.005580314652), +(12.29837, 219338.17590722828), +(13.29837, 596222.97785597218), +(14.29837, 1620702.0864156289), +(15.29837, 4405525.0308492653), +(16.29837, 11975458.636179032), +(17.29837, 32552671.598188404), +(18.29837, 88487335.673150406), +(19.29837, 240533516.60908059), +(20.29837, 653837887.33381570)) +SELECT x, bc_result, exp(x), exp(x)-bc_result AS diff FROM t; + x | bc_result | exp | diff +-----------+----------------------------+----------------------------+---------------------------- + -20.29837 | 0.000000001529431101152222 | 0.000000001529431101152222 | 0.000000000000000000000000 + -19.29837 | 0.000000004157424770142192 | 0.000000004157424770142192 | 0.000000000000000000000000 + -18.29837 | 0.00000001130105220586304 | 0.00000001130105220586304 | 0.00000000000000000000000 + -17.29837 | 0.00000003071944485366452 | 0.00000003071944485366452 | 0.00000000000000000000000 + -16.29837 | 0.00000008350410872606600 | 0.00000008350410872606600 | 0.00000000000000000000000 + -15.29837 | 0.0000002269877013517336 | 0.0000002269877013517336 | 0.0000000000000000000000 + -14.29837 | 0.0000006170165438681061 | 0.0000006170165438681061 | 0.0000000000000000000000 + -13.29837 | 0.000001677224859055276 | 0.000001677224859055276 | 0.000000000000000000000 + -12.29837 | 0.000004559169856609741 | 0.000004559169856609741 | 0.000000000000000000000 + -11.29837 | 0.00001239310857408049 | 0.00001239310857408049 | 0.00000000000000000000 + -10.29837 | 0.00003368796183504298 | 0.00003368796183504298 | 0.00000000000000000000 + -9.29837 | 0.00009157337449401917 | 0.00009157337449401917 | 0.00000000000000000000 + -8.29837 | 0.0002489222398577673 | 0.0002489222398577673 | 0.0000000000000000000 + -7.29837 | 0.0006766408013046928 | 0.0006766408013046928 | 0.0000000000000000000 + -6.29837 | 0.001839300394580514 | 0.001839300394580514 | 0.000000000000000000 + -5.29837 | 0.004999736839665763 | 0.004999736839665763 | 0.000000000000000000 + -4.29837 | 0.01359069379834070 | 0.01359069379834070 | 0.00000000000000000 + -3.29837 | 0.03694333598818056 | 0.03694333598818056 | 0.00000000000000000 + -2.29837 | 0.1004223988993283 | 0.1004223988993283 | 0.0000000000000000 + -1.29837 | 0.2729763820983097 | 0.2729763820983097 | 0.0000000000000000 + 0.29837 | 1.3476603299656679 | 1.3476603299656679 | 0.0000000000000000 + 1.29837 | 3.6633205858807959 | 3.6633205858807959 | 0.0000000000000000 + 2.29837 | 9.9579377804197108 | 9.9579377804197108 | 0.0000000000000000 + 3.29837 | 27.068481317440698 | 27.068481317440698 | 0.000000000000000 + 4.29837 | 73.579760889182206 | 73.579760889182206 | 0.000000000000000 + 5.29837 | 200.01052696742555 | 200.01052696742555 | 0.00000000000000 + 6.29837 | 543.68498095607070 | 543.68498095607070 | 0.00000000000000 + 7.29837 | 1477.8890041389891 | 1477.8890041389891 | 0.0000000000000 + 8.29837 | 4017.3188244304487 | 4017.3188244304487 | 0.0000000000000 + 9.29837 | 10920.204759575742 | 10920.204759575742 | 0.000000000000 + 10.29837 | 29684.194161006717 | 29684.194161006717 | 
0.000000000000 + 11.29837 | 80690.005580314652 | 80690.005580314652 | 0.000000000000 + 12.29837 | 219338.17590722828 | 219338.17590722828 | 0.00000000000 + 13.29837 | 596222.97785597218 | 596222.97785597218 | 0.00000000000 + 14.29837 | 1620702.0864156289 | 1620702.0864156289 | 0.0000000000 + 15.29837 | 4405525.0308492653 | 4405525.0308492653 | 0.0000000000 + 16.29837 | 11975458.636179032 | 11975458.636179032 | 0.000000000 + 17.29837 | 32552671.598188404 | 32552671.598188404 | 0.000000000 + 18.29837 | 88487335.673150406 | 88487335.673150406 | 0.000000000 + 19.29837 | 240533516.60908059 | 240533516.60908059 | 0.00000000 + 20.29837 | 653837887.33381570 | 653837887.33381570 | 0.00000000 +(41 rows) + +-- +-- Tests for LN() +-- +-- input very small +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..40} +-- do +-- l=$(bc -ql <<< "scale=500 ; l(10^-$p)" | head -n 1) +-- echo "('1.0e-$p', $l)," +-- done +WITH t(x, bc_result) AS (VALUES +('1.0e-1', -2.3025850929940457), +('1.0e-2', -4.6051701859880914), +('1.0e-3', -6.9077552789821371), +('1.0e-4', -9.2103403719761827), +('1.0e-5', -11.512925464970228), +('1.0e-6', -13.815510557964274), +('1.0e-7', -16.118095650958320), +('1.0e-8', -18.420680743952365), +('1.0e-9', -20.723265836946411), +('1.0e-10', -23.025850929940457), +('1.0e-11', -25.328436022934503), +('1.0e-12', -27.631021115928548), +('1.0e-13', -29.933606208922594), +('1.0e-14', -32.236191301916640), +('1.0e-15', -34.5387763949106853), +('1.0e-16', -36.84136148790473094), +('1.0e-17', -39.143946580898776628), +('1.0e-18', -41.4465316738928223123), +('1.0e-19', -43.74911676688686799634), +('1.0e-20', -46.051701859880913680360), +('1.0e-21', -48.3542869528749593643778), +('1.0e-22', -50.65687204586900504839581), +('1.0e-23', -52.959457138863050732413803), +('1.0e-24', -55.2620422318570964164317949), +('1.0e-25', -57.56462732485114210044978637), +('1.0e-26', -59.867212417845187784467777822), +('1.0e-27', -62.1697975108392334684857692765), +('1.0e-28', -64.47238260383327915250376073116), +('1.0e-29', -66.774967696827324836521752185847), +('1.0e-30', -69.0775527898213705205397436405309), +('1.0e-31', -71.38013788281541620455773509521529), +('1.0e-32', -73.682722975809461888575726549899655), +('1.0e-33', -75.9853080688035075725937180045840189), +('1.0e-34', -78.28789316179755325661170945926838306), +('1.0e-35', -80.590478254791598940629700913952747266), +('1.0e-36', -82.8930633477856446246476923686371114736), +('1.0e-37', -85.19564844077969030866568382332147568124), +('1.0e-38', -87.498233533773735992683675278005839888842), +('1.0e-39', -89.8008186267677816767016667326902040964430), +('1.0e-40', -92.10340371976182736071965818737456830404406)) +SELECT x, bc_result, ln(x::numeric), ln(x::numeric)-bc_result AS diff FROM t; + x | bc_result | ln | diff +---------+-----------------------------------------------+-----------------------------------------------+--------------------------------------------- + 1.0e-1 | -2.3025850929940457 | -2.3025850929940457 | 0.0000000000000000 + 1.0e-2 | -4.6051701859880914 | -4.6051701859880914 | 0.0000000000000000 + 1.0e-3 | -6.9077552789821371 | -6.9077552789821371 | 0.0000000000000000 + 1.0e-4 | -9.2103403719761827 | -9.2103403719761827 | 0.0000000000000000 + 1.0e-5 | -11.512925464970228 | -11.512925464970228 | 0.000000000000000 + 1.0e-6 | -13.815510557964274 | -13.815510557964274 | 0.000000000000000 + 1.0e-7 | -16.118095650958320 | 
-16.118095650958320 | 0.000000000000000 + 1.0e-8 | -18.420680743952365 | -18.420680743952365 | 0.000000000000000 + 1.0e-9 | -20.723265836946411 | -20.723265836946411 | 0.000000000000000 + 1.0e-10 | -23.025850929940457 | -23.025850929940457 | 0.000000000000000 + 1.0e-11 | -25.328436022934503 | -25.328436022934503 | 0.000000000000000 + 1.0e-12 | -27.631021115928548 | -27.631021115928548 | 0.000000000000000 + 1.0e-13 | -29.933606208922594 | -29.933606208922594 | 0.000000000000000 + 1.0e-14 | -32.236191301916640 | -32.236191301916640 | 0.000000000000000 + 1.0e-15 | -34.5387763949106853 | -34.5387763949106853 | 0.0000000000000000 + 1.0e-16 | -36.84136148790473094 | -36.84136148790473094 | 0.00000000000000000 + 1.0e-17 | -39.143946580898776628 | -39.143946580898776628 | 0.000000000000000000 + 1.0e-18 | -41.4465316738928223123 | -41.4465316738928223123 | 0.0000000000000000000 + 1.0e-19 | -43.74911676688686799634 | -43.74911676688686799634 | 0.00000000000000000000 + 1.0e-20 | -46.051701859880913680360 | -46.051701859880913680360 | 0.000000000000000000000 + 1.0e-21 | -48.3542869528749593643778 | -48.3542869528749593643778 | 0.0000000000000000000000 + 1.0e-22 | -50.65687204586900504839581 | -50.65687204586900504839581 | 0.00000000000000000000000 + 1.0e-23 | -52.959457138863050732413803 | -52.959457138863050732413803 | 0.000000000000000000000000 + 1.0e-24 | -55.2620422318570964164317949 | -55.2620422318570964164317949 | 0.0000000000000000000000000 + 1.0e-25 | -57.56462732485114210044978637 | -57.56462732485114210044978637 | 0.00000000000000000000000000 + 1.0e-26 | -59.867212417845187784467777822 | -59.867212417845187784467777822 | 0.000000000000000000000000000 + 1.0e-27 | -62.1697975108392334684857692765 | -62.1697975108392334684857692765 | 0.0000000000000000000000000000 + 1.0e-28 | -64.47238260383327915250376073116 | -64.47238260383327915250376073116 | 0.00000000000000000000000000000 + 1.0e-29 | -66.774967696827324836521752185847 | -66.774967696827324836521752185847 | 0.000000000000000000000000000000 + 1.0e-30 | -69.0775527898213705205397436405309 | -69.0775527898213705205397436405309 | 0.0000000000000000000000000000000 + 1.0e-31 | -71.38013788281541620455773509521529 | -71.38013788281541620455773509521529 | 0.00000000000000000000000000000000 + 1.0e-32 | -73.682722975809461888575726549899655 | -73.682722975809461888575726549899655 | 0.000000000000000000000000000000000 + 1.0e-33 | -75.9853080688035075725937180045840189 | -75.9853080688035075725937180045840189 | 0.0000000000000000000000000000000000 + 1.0e-34 | -78.28789316179755325661170945926838306 | -78.28789316179755325661170945926838306 | 0.00000000000000000000000000000000000 + 1.0e-35 | -80.590478254791598940629700913952747266 | -80.590478254791598940629700913952747266 | 0.000000000000000000000000000000000000 + 1.0e-36 | -82.8930633477856446246476923686371114736 | -82.8930633477856446246476923686371114736 | 0.0000000000000000000000000000000000000 + 1.0e-37 | -85.19564844077969030866568382332147568124 | -85.19564844077969030866568382332147568124 | 0.00000000000000000000000000000000000000 + 1.0e-38 | -87.498233533773735992683675278005839888842 | -87.498233533773735992683675278005839888842 | 0.000000000000000000000000000000000000000 + 1.0e-39 | -89.8008186267677816767016667326902040964430 | -89.8008186267677816767016667326902040964430 | 0.0000000000000000000000000000000000000000 + 1.0e-40 | -92.10340371976182736071965818737456830404406 | -92.10340371976182736071965818737456830404406 | 0.00000000000000000000000000000000000000000 +(40 rows) + +-- 
input very close to but smaller than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..40} +-- do +-- l=$(bc -ql <<< "scale=500 ; l(1-10^-$p)" | head -n 1) +-- echo "('1.0e-$p', $l)," +-- done +WITH t(x, bc_result) AS (VALUES +('1.0e-1', -.10536051565782630), +('1.0e-2', -.010050335853501441), +('1.0e-3', -.0010005003335835335), +('1.0e-4', -.00010000500033335834), +('1.0e-5', -.000010000050000333336), +('1.0e-6', -.0000010000005000003333), +('1.0e-7', -.00000010000000500000033), +('1.0e-8', -.000000010000000050000000), +('1.0e-9', -.0000000010000000005000000), +('1.0e-10', -.00000000010000000000500000), +('1.0e-11', -.000000000010000000000050000), +('1.0e-12', -.0000000000010000000000005000), +('1.0e-13', -.00000000000010000000000000500), +('1.0e-14', -.000000000000010000000000000050), +('1.0e-15', -.0000000000000010000000000000005), +('1.0e-16', -.00000000000000010000000000000001), +('1.0e-17', -.000000000000000010000000000000000), +('1.0e-18', -.0000000000000000010000000000000000), +('1.0e-19', -.00000000000000000010000000000000000), +('1.0e-20', -.000000000000000000010000000000000000), +('1.0e-21', -.0000000000000000000010000000000000000), +('1.0e-22', -.00000000000000000000010000000000000000), +('1.0e-23', -.000000000000000000000010000000000000000), +('1.0e-24', -.0000000000000000000000010000000000000000), +('1.0e-25', -.00000000000000000000000010000000000000000), +('1.0e-26', -.000000000000000000000000010000000000000000), +('1.0e-27', -.0000000000000000000000000010000000000000000), +('1.0e-28', -.00000000000000000000000000010000000000000000), +('1.0e-29', -.000000000000000000000000000010000000000000000), +('1.0e-30', -.0000000000000000000000000000010000000000000000), +('1.0e-31', -.00000000000000000000000000000010000000000000000), +('1.0e-32', -.000000000000000000000000000000010000000000000000), +('1.0e-33', -.0000000000000000000000000000000010000000000000000), +('1.0e-34', -.00000000000000000000000000000000010000000000000000), +('1.0e-35', -.000000000000000000000000000000000010000000000000000), +('1.0e-36', -.0000000000000000000000000000000000010000000000000000), +('1.0e-37', -.00000000000000000000000000000000000010000000000000000), +('1.0e-38', -.000000000000000000000000000000000000010000000000000000), +('1.0e-39', -.0000000000000000000000000000000000000010000000000000000), +('1.0e-40', -.00000000000000000000000000000000000000010000000000000000)) +SELECT '1-'||x, bc_result, ln(1.0-x::numeric), ln(1.0-x::numeric)-bc_result AS diff FROM t; + ?column? 
| bc_result | ln | diff +-----------+-------------------------------------------------------------+-------------------------------------------------------------+------------------------------------------------------------ + 1-1.0e-1 | -0.10536051565782630 | -0.10536051565782630 | 0.00000000000000000 + 1-1.0e-2 | -0.010050335853501441 | -0.010050335853501441 | 0.000000000000000000 + 1-1.0e-3 | -0.0010005003335835335 | -0.0010005003335835335 | 0.0000000000000000000 + 1-1.0e-4 | -0.00010000500033335834 | -0.00010000500033335834 | 0.00000000000000000000 + 1-1.0e-5 | -0.000010000050000333336 | -0.000010000050000333336 | 0.000000000000000000000 + 1-1.0e-6 | -0.0000010000005000003333 | -0.0000010000005000003333 | 0.0000000000000000000000 + 1-1.0e-7 | -0.00000010000000500000033 | -0.00000010000000500000033 | 0.00000000000000000000000 + 1-1.0e-8 | -0.000000010000000050000000 | -0.000000010000000050000000 | 0.000000000000000000000000 + 1-1.0e-9 | -0.0000000010000000005000000 | -0.0000000010000000005000000 | 0.0000000000000000000000000 + 1-1.0e-10 | -0.00000000010000000000500000 | -0.00000000010000000000500000 | 0.00000000000000000000000000 + 1-1.0e-11 | -0.000000000010000000000050000 | -0.000000000010000000000050000 | 0.000000000000000000000000000 + 1-1.0e-12 | -0.0000000000010000000000005000 | -0.0000000000010000000000005000 | 0.0000000000000000000000000000 + 1-1.0e-13 | -0.00000000000010000000000000500 | -0.00000000000010000000000000500 | 0.00000000000000000000000000000 + 1-1.0e-14 | -0.000000000000010000000000000050 | -0.000000000000010000000000000050 | 0.000000000000000000000000000000 + 1-1.0e-15 | -0.0000000000000010000000000000005 | -0.0000000000000010000000000000005 | 0.0000000000000000000000000000000 + 1-1.0e-16 | -0.00000000000000010000000000000001 | -0.00000000000000010000000000000001 | 0.00000000000000000000000000000000 + 1-1.0e-17 | -0.000000000000000010000000000000000 | -0.000000000000000010000000000000000 | 0.000000000000000000000000000000000 + 1-1.0e-18 | -0.0000000000000000010000000000000000 | -0.0000000000000000010000000000000000 | 0.0000000000000000000000000000000000 + 1-1.0e-19 | -0.00000000000000000010000000000000000 | -0.00000000000000000010000000000000000 | 0.00000000000000000000000000000000000 + 1-1.0e-20 | -0.000000000000000000010000000000000000 | -0.000000000000000000010000000000000000 | 0.000000000000000000000000000000000000 + 1-1.0e-21 | -0.0000000000000000000010000000000000000 | -0.0000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000 + 1-1.0e-22 | -0.00000000000000000000010000000000000000 | -0.00000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000 + 1-1.0e-23 | -0.000000000000000000000010000000000000000 | -0.000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000 + 1-1.0e-24 | -0.0000000000000000000000010000000000000000 | -0.0000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000 + 1-1.0e-25 | -0.00000000000000000000000010000000000000000 | -0.00000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000 + 1-1.0e-26 | -0.000000000000000000000000010000000000000000 | -0.000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000 + 1-1.0e-27 | -0.0000000000000000000000000010000000000000000 | -0.0000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000 + 1-1.0e-28 | -0.00000000000000000000000000010000000000000000 | -0.00000000000000000000000000010000000000000000 | 
0.00000000000000000000000000000000000000000000 + 1-1.0e-29 | -0.000000000000000000000000000010000000000000000 | -0.000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000 + 1-1.0e-30 | -0.0000000000000000000000000000010000000000000000 | -0.0000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000 + 1-1.0e-31 | -0.00000000000000000000000000000010000000000000000 | -0.00000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000 + 1-1.0e-32 | -0.000000000000000000000000000000010000000000000000 | -0.000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000000 + 1-1.0e-33 | -0.0000000000000000000000000000000010000000000000000 | -0.0000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000000 + 1-1.0e-34 | -0.00000000000000000000000000000000010000000000000000 | -0.00000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000000 + 1-1.0e-35 | -0.000000000000000000000000000000000010000000000000000 | -0.000000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000000000 + 1-1.0e-36 | -0.0000000000000000000000000000000000010000000000000000 | -0.0000000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000000000 + 1-1.0e-37 | -0.00000000000000000000000000000000000010000000000000000 | -0.00000000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000000000 + 1-1.0e-38 | -0.000000000000000000000000000000000000010000000000000000 | -0.000000000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000000000000 + 1-1.0e-39 | -0.0000000000000000000000000000000000000010000000000000000 | -0.0000000000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000000000000 + 1-1.0e-40 | -0.00000000000000000000000000000000000000010000000000000000 | -0.00000000000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000000000000 +(40 rows) + +-- input very close to but larger than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..40} +-- do +-- l=$(bc -ql <<< "scale=500 ; l(1+10^-$p)" | head -n 1) +-- echo "('1.0e-$p', $l)," +-- done +WITH t(x, bc_result) AS (VALUES +('1.0e-1', .09531017980432486), +('1.0e-2', .009950330853168083), +('1.0e-3', .0009995003330835332), +('1.0e-4', .00009999500033330834), +('1.0e-5', .000009999950000333331), +('1.0e-6', .0000009999995000003333), +('1.0e-7', .00000009999999500000033), +('1.0e-8', .000000009999999950000000), +('1.0e-9', .0000000009999999995000000), +('1.0e-10', .00000000009999999999500000), +('1.0e-11', .000000000009999999999950000), +('1.0e-12', .0000000000009999999999995000), +('1.0e-13', .00000000000009999999999999500), +('1.0e-14', .000000000000009999999999999950), +('1.0e-15', .0000000000000009999999999999995), +('1.0e-16', .00000000000000010000000000000000), +('1.0e-17', .000000000000000010000000000000000), +('1.0e-18', .0000000000000000010000000000000000), +('1.0e-19', .00000000000000000010000000000000000), +('1.0e-20', .000000000000000000010000000000000000), +('1.0e-21', .0000000000000000000010000000000000000), +('1.0e-22', .00000000000000000000010000000000000000), 
+('1.0e-23', .000000000000000000000010000000000000000), +('1.0e-24', .0000000000000000000000010000000000000000), +('1.0e-25', .00000000000000000000000010000000000000000), +('1.0e-26', .000000000000000000000000010000000000000000), +('1.0e-27', .0000000000000000000000000010000000000000000), +('1.0e-28', .00000000000000000000000000010000000000000000), +('1.0e-29', .000000000000000000000000000010000000000000000), +('1.0e-30', .0000000000000000000000000000010000000000000000), +('1.0e-31', .00000000000000000000000000000010000000000000000), +('1.0e-32', .000000000000000000000000000000010000000000000000), +('1.0e-33', .0000000000000000000000000000000010000000000000000), +('1.0e-34', .00000000000000000000000000000000010000000000000000), +('1.0e-35', .000000000000000000000000000000000010000000000000000), +('1.0e-36', .0000000000000000000000000000000000010000000000000000), +('1.0e-37', .00000000000000000000000000000000000010000000000000000), +('1.0e-38', .000000000000000000000000000000000000010000000000000000), +('1.0e-39', .0000000000000000000000000000000000000010000000000000000), +('1.0e-40', .00000000000000000000000000000000000000010000000000000000)) +SELECT '1+'||x, bc_result, ln(1.0+x::numeric), ln(1.0+x::numeric)-bc_result AS diff FROM t; + ?column? | bc_result | ln | diff +-----------+------------------------------------------------------------+------------------------------------------------------------+------------------------------------------------------------ + 1+1.0e-1 | 0.09531017980432486 | 0.09531017980432486 | 0.00000000000000000 + 1+1.0e-2 | 0.009950330853168083 | 0.009950330853168083 | 0.000000000000000000 + 1+1.0e-3 | 0.0009995003330835332 | 0.0009995003330835332 | 0.0000000000000000000 + 1+1.0e-4 | 0.00009999500033330834 | 0.00009999500033330834 | 0.00000000000000000000 + 1+1.0e-5 | 0.000009999950000333331 | 0.000009999950000333331 | 0.000000000000000000000 + 1+1.0e-6 | 0.0000009999995000003333 | 0.0000009999995000003333 | 0.0000000000000000000000 + 1+1.0e-7 | 0.00000009999999500000033 | 0.00000009999999500000033 | 0.00000000000000000000000 + 1+1.0e-8 | 0.000000009999999950000000 | 0.000000009999999950000000 | 0.000000000000000000000000 + 1+1.0e-9 | 0.0000000009999999995000000 | 0.0000000009999999995000000 | 0.0000000000000000000000000 + 1+1.0e-10 | 0.00000000009999999999500000 | 0.00000000009999999999500000 | 0.00000000000000000000000000 + 1+1.0e-11 | 0.000000000009999999999950000 | 0.000000000009999999999950000 | 0.000000000000000000000000000 + 1+1.0e-12 | 0.0000000000009999999999995000 | 0.0000000000009999999999995000 | 0.0000000000000000000000000000 + 1+1.0e-13 | 0.00000000000009999999999999500 | 0.00000000000009999999999999500 | 0.00000000000000000000000000000 + 1+1.0e-14 | 0.000000000000009999999999999950 | 0.000000000000009999999999999950 | 0.000000000000000000000000000000 + 1+1.0e-15 | 0.0000000000000009999999999999995 | 0.0000000000000009999999999999995 | 0.0000000000000000000000000000000 + 1+1.0e-16 | 0.00000000000000010000000000000000 | 0.00000000000000010000000000000000 | 0.00000000000000000000000000000000 + 1+1.0e-17 | 0.000000000000000010000000000000000 | 0.000000000000000010000000000000000 | 0.000000000000000000000000000000000 + 1+1.0e-18 | 0.0000000000000000010000000000000000 | 0.0000000000000000010000000000000000 | 0.0000000000000000000000000000000000 + 1+1.0e-19 | 0.00000000000000000010000000000000000 | 0.00000000000000000010000000000000000 | 0.00000000000000000000000000000000000 + 1+1.0e-20 | 0.000000000000000000010000000000000000 | 
0.000000000000000000010000000000000000 | 0.000000000000000000000000000000000000 + 1+1.0e-21 | 0.0000000000000000000010000000000000000 | 0.0000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000 + 1+1.0e-22 | 0.00000000000000000000010000000000000000 | 0.00000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000 + 1+1.0e-23 | 0.000000000000000000000010000000000000000 | 0.000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000 + 1+1.0e-24 | 0.0000000000000000000000010000000000000000 | 0.0000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000 + 1+1.0e-25 | 0.00000000000000000000000010000000000000000 | 0.00000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000 + 1+1.0e-26 | 0.000000000000000000000000010000000000000000 | 0.000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000 + 1+1.0e-27 | 0.0000000000000000000000000010000000000000000 | 0.0000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000 + 1+1.0e-28 | 0.00000000000000000000000000010000000000000000 | 0.00000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000 + 1+1.0e-29 | 0.000000000000000000000000000010000000000000000 | 0.000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000 + 1+1.0e-30 | 0.0000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000 + 1+1.0e-31 | 0.00000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000 + 1+1.0e-32 | 0.000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000000 + 1+1.0e-33 | 0.0000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000000 + 1+1.0e-34 | 0.00000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000000 + 1+1.0e-35 | 0.000000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000000000 + 1+1.0e-36 | 0.0000000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000000000 + 1+1.0e-37 | 0.00000000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000000000 + 1+1.0e-38 | 0.000000000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000010000000000000000 | 0.000000000000000000000000000000000000000000000000000000 + 1+1.0e-39 | 0.0000000000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000010000000000000000 | 0.0000000000000000000000000000000000000000000000000000000 + 1+1.0e-40 | 0.00000000000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000010000000000000000 | 0.00000000000000000000000000000000000000000000000000000000 +(40 rows) + +-- input very large +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded 
by hand to match the precision of LN(): +-- +-- for p in {1..40} +-- do +-- l=$(bc -ql <<< "scale=500 ; l(10^$p)" | head -n 1) +-- echo "('1.0e$p', $l)," +-- done +WITH t(x, bc_result) AS (VALUES +('1.0e1', 2.3025850929940457), +('1.0e2', 4.6051701859880914), +('1.0e3', 6.9077552789821371), +('1.0e4', 9.2103403719761827), +('1.0e5', 11.512925464970228), +('1.0e6', 13.815510557964274), +('1.0e7', 16.118095650958320), +('1.0e8', 18.420680743952365), +('1.0e9', 20.723265836946411), +('1.0e10', 23.025850929940457), +('1.0e11', 25.328436022934503), +('1.0e12', 27.631021115928548), +('1.0e13', 29.933606208922594), +('1.0e14', 32.236191301916640), +('1.0e15', 34.538776394910685), +('1.0e16', 36.841361487904731), +('1.0e17', 39.143946580898777), +('1.0e18', 41.446531673892822), +('1.0e19', 43.749116766886868), +('1.0e20', 46.051701859880914), +('1.0e21', 48.354286952874959), +('1.0e22', 50.656872045869005), +('1.0e23', 52.959457138863051), +('1.0e24', 55.262042231857096), +('1.0e25', 57.564627324851142), +('1.0e26', 59.867212417845188), +('1.0e27', 62.169797510839233), +('1.0e28', 64.472382603833279), +('1.0e29', 66.774967696827325), +('1.0e30', 69.077552789821371), +('1.0e31', 71.380137882815416), +('1.0e32', 73.682722975809462), +('1.0e33', 75.985308068803508), +('1.0e34', 78.287893161797553), +('1.0e35', 80.590478254791599), +('1.0e36', 82.893063347785645), +('1.0e37', 85.195648440779690), +('1.0e38', 87.498233533773736), +('1.0e39', 89.800818626767782), +('1.0e40', 92.103403719761827)) +SELECT x, bc_result, ln(x::numeric), ln(x::numeric)-bc_result AS diff FROM t; + x | bc_result | ln | diff +--------+--------------------+--------------------+-------------------- + 1.0e1 | 2.3025850929940457 | 2.3025850929940457 | 0.0000000000000000 + 1.0e2 | 4.6051701859880914 | 4.6051701859880914 | 0.0000000000000000 + 1.0e3 | 6.9077552789821371 | 6.9077552789821371 | 0.0000000000000000 + 1.0e4 | 9.2103403719761827 | 9.2103403719761827 | 0.0000000000000000 + 1.0e5 | 11.512925464970228 | 11.512925464970228 | 0.000000000000000 + 1.0e6 | 13.815510557964274 | 13.815510557964274 | 0.000000000000000 + 1.0e7 | 16.118095650958320 | 16.118095650958320 | 0.000000000000000 + 1.0e8 | 18.420680743952365 | 18.420680743952365 | 0.000000000000000 + 1.0e9 | 20.723265836946411 | 20.723265836946411 | 0.000000000000000 + 1.0e10 | 23.025850929940457 | 23.025850929940457 | 0.000000000000000 + 1.0e11 | 25.328436022934503 | 25.328436022934503 | 0.000000000000000 + 1.0e12 | 27.631021115928548 | 27.631021115928548 | 0.000000000000000 + 1.0e13 | 29.933606208922594 | 29.933606208922594 | 0.000000000000000 + 1.0e14 | 32.236191301916640 | 32.236191301916640 | 0.000000000000000 + 1.0e15 | 34.538776394910685 | 34.538776394910685 | 0.000000000000000 + 1.0e16 | 36.841361487904731 | 36.841361487904731 | 0.000000000000000 + 1.0e17 | 39.143946580898777 | 39.143946580898777 | 0.000000000000000 + 1.0e18 | 41.446531673892822 | 41.446531673892822 | 0.000000000000000 + 1.0e19 | 43.749116766886868 | 43.749116766886868 | 0.000000000000000 + 1.0e20 | 46.051701859880914 | 46.051701859880914 | 0.000000000000000 + 1.0e21 | 48.354286952874959 | 48.354286952874959 | 0.000000000000000 + 1.0e22 | 50.656872045869005 | 50.656872045869005 | 0.000000000000000 + 1.0e23 | 52.959457138863051 | 52.959457138863051 | 0.000000000000000 + 1.0e24 | 55.262042231857096 | 55.262042231857096 | 0.000000000000000 + 1.0e25 | 57.564627324851142 | 57.564627324851142 | 0.000000000000000 + 1.0e26 | 59.867212417845188 | 59.867212417845188 | 0.000000000000000 + 1.0e27 | 
62.169797510839233 | 62.169797510839233 | 0.000000000000000 + 1.0e28 | 64.472382603833279 | 64.472382603833279 | 0.000000000000000 + 1.0e29 | 66.774967696827325 | 66.774967696827325 | 0.000000000000000 + 1.0e30 | 69.077552789821371 | 69.077552789821371 | 0.000000000000000 + 1.0e31 | 71.380137882815416 | 71.380137882815416 | 0.000000000000000 + 1.0e32 | 73.682722975809462 | 73.682722975809462 | 0.000000000000000 + 1.0e33 | 75.985308068803508 | 75.985308068803508 | 0.000000000000000 + 1.0e34 | 78.287893161797553 | 78.287893161797553 | 0.000000000000000 + 1.0e35 | 80.590478254791599 | 80.590478254791599 | 0.000000000000000 + 1.0e36 | 82.893063347785645 | 82.893063347785645 | 0.000000000000000 + 1.0e37 | 85.195648440779690 | 85.195648440779690 | 0.000000000000000 + 1.0e38 | 87.498233533773736 | 87.498233533773736 | 0.000000000000000 + 1.0e39 | 89.800818626767782 | 89.800818626767782 | 0.000000000000000 + 1.0e40 | 92.103403719761827 | 92.103403719761827 | 0.000000000000000 +(40 rows) + +-- input huge +-- +-- bc(1) results computed with a scale of 1000 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..10} +-- do +-- l=$(bc -ql <<< "scale=1000 ; l(10^${p}00)" | head -n 1) +-- echo "('1.0e${p}00', $l)," +-- done +WITH t(x, bc_result) AS (VALUES +('1.0e100', 230.25850929940457), +('1.0e200', 460.51701859880914), +('1.0e300', 690.77552789821371), +('1.0e400', 921.03403719761827), +('1.0e500', 1151.2925464970228), +('1.0e600', 1381.5510557964274), +('1.0e700', 1611.8095650958320), +('1.0e800', 1842.0680743952365), +('1.0e900', 2072.3265836946411), +('1.0e1000', 2302.5850929940457)) +SELECT x, bc_result, ln(x::numeric), ln(x::numeric)-bc_result AS diff FROM t; + x | bc_result | ln | diff +----------+--------------------+--------------------+------------------ + 1.0e100 | 230.25850929940457 | 230.25850929940457 | 0.00000000000000 + 1.0e200 | 460.51701859880914 | 460.51701859880914 | 0.00000000000000 + 1.0e300 | 690.77552789821371 | 690.77552789821371 | 0.00000000000000 + 1.0e400 | 921.03403719761827 | 921.03403719761827 | 0.00000000000000 + 1.0e500 | 1151.2925464970228 | 1151.2925464970228 | 0.0000000000000 + 1.0e600 | 1381.5510557964274 | 1381.5510557964274 | 0.0000000000000 + 1.0e700 | 1611.8095650958320 | 1611.8095650958320 | 0.0000000000000 + 1.0e800 | 1842.0680743952365 | 1842.0680743952365 | 0.0000000000000 + 1.0e900 | 2072.3265836946411 | 2072.3265836946411 | 0.0000000000000 + 1.0e1000 | 2302.5850929940457 | 2302.5850929940457 | 0.0000000000000 +(10 rows) + +-- inputs with 1000 decimal places +-- +-- bc(1) results computed with a scale of 2000 and rounded to 1000 decimal +-- places +WITH t(x, bc_result) AS (VALUES 
+(484990182159328900690402236933516249572671683638747490717351807610531884491845416923860371219625151551889257298200816555016472471293780254009492949585031653913930735918829139712249577547959394351523545901788627247613322896296041868431769047433229466634098452564756860190085118463828382895145244362033728480588969626012192733802377468089120757046364393407262957242230928854711898925295251902007136232994524624903257456111389508582206404271734668422903183500589303866613158037169610592539145461637447957948521714058034772237111009429638870236361143304703683377693378577075353794118557951847394763531830696578809001981568860219578880229402696449243344235099860421846016326538272155937175661905904288335499593232232926636205909086901191153907183842087577811871344870731324067822883041265129394268082883745408414994.8967939438561591657171240282983703914075472645212002662497023142663831371447287624846942598424990784971781730103682951722370983277124599054059027055336437808366784501932987082321905202623642371063626378290734289114618092750984153422293450048717769065428713836637664433167768445609659527458911187829232316677137895259433038764404970599325009178297626038331436654541552998098529141205301472138026818453893127265938030066392881979113522757891639646670670272542401773230506961559808927249585675430838495658225557294666522469887436551840596777627408780618586500922973500018513068499587683746133637919751545157547095670767246977244726331271787622126889459658539988980096764323712767863722912919120929339399753431689512753214200090670880647731689804555417871258907716687575767185444541243606329768784843125926070743277339790277626515824924290352180761378846035233155198504033292692893297993698953705472933411199778880561376633444249703838589180474329586470353212010427945060694794274109764269805332803290229, + 1864.3702986939570026328504202935192533137907736189919154633800554877738455118081651650863235106905871352085850240570561347180517240105510505203972860921397909573687877993477806728098306202020229409548306695695574102950949468160529713610952021974630774784174851619325758380143625473386495586347322798415543385655090746985183329114860118551572428921774322172798724455202876781611633419444058398798142214904998877857425038669920064728855823072107227506485770367799671977282350083029452784747350395161797215115525867416898416360638482342253129160308632504217096916335590470843180746834864303790913372081974355613359678634194879425862536147988835528973291020680020540866655622823550861337486588647231688134992810403147262346312159819432914207194632564009749236609081399504118359354620598232725290537215007867979331582119891661859015726276335168158288396939655310210558566592649049602925182137256134162660116182293851038854455437841571331011002023088829768308520393956515509475418031437505751407687618234418262), 
+(87190145885430429849953615409019208993240447426362428988181639909267773304254748257120061524000254226856815085523676417146197197996896030672521334101413071112068202429835905642444187493717977611730127126387257253646790849384975208460867137315507010888782632024640844766297185244443116696943912406389670302370461137850160539373600494054874979342373255280815156048999900951842673141766630630919020492255966628630634124452614590400422133958133100159154995520080124736657520969784129924799670552560034302960877087853678350801769339861812435411200669026902417951572668727488315537985378304242438181615160041688723201917323705450185975141141262578884689500612295576288125956289035673242989906973367691922065122033180281670221390667818909912035903387888639331486823729897326624516015340.0330856710565117793999512551468220085711713631167607285185762046751452975325645379302403715842570486302993296501788672462090620871511446272026693318239212657949496275318383141403236705902077406660768573015707706831878445598837931116223956945944726162551477136715847593742032488181481888084716920605114101902724395659898621880016853548602514706686907951229872573180602614761229992106144727082722940736406782659562775289407005631298246624198606031298081220736931229256511054595028182057216042683060059115371651410352645266000330509331097811566633211452233019461903115970558624057877018778178814946285827512359903934291318219271464841957435711594154280905473802599888081783098187210283997106131616471807951265003903143099667366508222327805543948921694362089860577380749774036318574113007382111997454202845559941557812813566442364810680529092880773126707073967537693927177460459341763934709686530005721141046645111784404932103241501569571235364365556796422998363930810983452790309019295181282099408260156, + 1793.5767085750017553306932533574391150814202249805881581227430032600579405884415934520704053351781361105595296647510475380766428668443641914861849764330704062323054023252886955844207807229267936432730818329225450152491146839618683772020068682795388746108876393249306737841247788224204701299467519965182171772253974884845661168860422489046657965359832930382114760565628765599962013955588754803194908990025689040598990346417563277021386852342928910383706995866844541160576254266641602065102228267316550706943783591722246885978355472097314691737807509436806788803362444745551013400341861820755594413819894154786253014501454443272120342005711761286524843010157182464200556865694401941794983935172457481497909987740544409272349152397774548604845897687504977786762391359552407068124283290504752932824699865504970420939586707791994870941813718246825616335675307740641350673558328821461530563823677144691877374809441673507467507447891562257806191361453045937798278733402269265623588493124129181374135958668436774), 
+(93936642222690597390233191619858485419795942047468396309991947772747208870873993801669373075421461116465960407843923269693395211616591453397070258466704654943689268224479477016161636938138334729982904232438440955361656138189836032891825113139184685132178764873033678116450665758561650355252211196676137179184043639278410827092182700922151290703747496962700158844772453483316974221113826173404445159281421213715669245417896170368554410830320000019029956317336703559699859949692222685614036912057150632902650913831404804982509990655560731349634628713944739168096272097122388116038119844786988276635032016787352796502360718569977397214936366251320294621522016.6483354941025384161536675750898007896744690911429670830432784905421638721478353275821072200938900938046264210604940707974410950770029535636602548377806284157951164875821446035013896786653932045182167021839184824627082391478016195098055107001433336586881395912782883663046617432598969149948351689103230162742769845955320418573803127107923535948653168889411316007796459064267436246637115946581149511513369842911210359447262641996566147462977170742544980481275049898092152042927981394239266559286915303786701737610786594006685748456635797125029722684151298695274097006242412384086302106763844070230264910503179385988626477852818174114043927841085089058972074427820150462261941575665882880501074676800316585217150509780489224388148722603385921057007086785238310735038314861960410473809826927329368597558806004392175746233568789445929554890241140656324160187253042639339549705859147930476532359840809944163908006480881926041259363654863689570520534301207043189181147254153307163555433328278834311658232337, + 1510.4332713542154696529645934345554302578243896764921637693542962119938599884313210100957753316832762996428481801312323020427109678979117469716796746760060470871840325255146954580681101106876674367471955788143763250819168311353856748872452260808797135108102729064040463343792765872545182299889360257515315869180266759715933989413256377582681707188367254513700731642913479683031478361835565783219287780434673712341147656477670848734998849030451414278832848680301511646182446524915091598080243532068451726548537866633622180283865668708517173065893429240665300584705585310049892047293928733753369421499719516009692095913169665213597158441636480707309244604139865130782756488091268094213446272360006907802989573582755585110277620911226015342778471352130366770729972784317323917141031824334355639769512749560550167491709646539950725523461943580211843652293561678342656010571108219244870234329176123205423872844099992204896411752620881541000940129833754169391528449211839693800724450201835161044717173715867437)) +SELECT trim_scale(ln(x::numeric)-bc_result) AS diff FROM t; + diff +------ + 0 + 0 + 0 +(3 rows) + +-- +-- Tests for LOG() (base 10) +-- +-- input very small, exact result known +WITH t(x) AS (SELECT '1e-'||n FROM generate_series(1, 100) g(n)) +SELECT x, log(x::numeric) FROM t; + x | log +--------+----------------------------------------------------------------------------------------------------------- + 1e-1 | -1.0000000000000000 + 1e-2 | -2.0000000000000000 + 1e-3 | -3.0000000000000000 + 1e-4 | -4.0000000000000000 + 1e-5 | -5.000000000000000 + 1e-6 | -6.000000000000000 + 1e-7 | -7.000000000000000 + 1e-8 | -8.000000000000000 + 1e-9 | -9.000000000000000 + 1e-10 | -10.000000000000000 + 1e-11 | -11.000000000000000 + 1e-12 | -12.000000000000000 + 1e-13 | -13.000000000000000 + 1e-14 | -14.000000000000000 + 1e-15 | -15.000000000000000 + 1e-16 | -16.0000000000000000 + 1e-17 
| -17.00000000000000000 + 1e-18 | -18.000000000000000000 + 1e-19 | -19.0000000000000000000 + 1e-20 | -20.00000000000000000000 + 1e-21 | -21.000000000000000000000 + 1e-22 | -22.0000000000000000000000 + 1e-23 | -23.00000000000000000000000 + 1e-24 | -24.000000000000000000000000 + 1e-25 | -25.0000000000000000000000000 + 1e-26 | -26.00000000000000000000000000 + 1e-27 | -27.000000000000000000000000000 + 1e-28 | -28.0000000000000000000000000000 + 1e-29 | -29.00000000000000000000000000000 + 1e-30 | -30.000000000000000000000000000000 + 1e-31 | -31.0000000000000000000000000000000 + 1e-32 | -32.00000000000000000000000000000000 + 1e-33 | -33.000000000000000000000000000000000 + 1e-34 | -34.0000000000000000000000000000000000 + 1e-35 | -35.00000000000000000000000000000000000 + 1e-36 | -36.000000000000000000000000000000000000 + 1e-37 | -37.0000000000000000000000000000000000000 + 1e-38 | -38.00000000000000000000000000000000000000 + 1e-39 | -39.000000000000000000000000000000000000000 + 1e-40 | -40.0000000000000000000000000000000000000000 + 1e-41 | -41.00000000000000000000000000000000000000000 + 1e-42 | -42.000000000000000000000000000000000000000000 + 1e-43 | -43.0000000000000000000000000000000000000000000 + 1e-44 | -44.00000000000000000000000000000000000000000000 + 1e-45 | -45.000000000000000000000000000000000000000000000 + 1e-46 | -46.0000000000000000000000000000000000000000000000 + 1e-47 | -47.00000000000000000000000000000000000000000000000 + 1e-48 | -48.000000000000000000000000000000000000000000000000 + 1e-49 | -49.0000000000000000000000000000000000000000000000000 + 1e-50 | -50.00000000000000000000000000000000000000000000000000 + 1e-51 | -51.000000000000000000000000000000000000000000000000000 + 1e-52 | -52.0000000000000000000000000000000000000000000000000000 + 1e-53 | -53.00000000000000000000000000000000000000000000000000000 + 1e-54 | -54.000000000000000000000000000000000000000000000000000000 + 1e-55 | -55.0000000000000000000000000000000000000000000000000000000 + 1e-56 | -56.00000000000000000000000000000000000000000000000000000000 + 1e-57 | -57.000000000000000000000000000000000000000000000000000000000 + 1e-58 | -58.0000000000000000000000000000000000000000000000000000000000 + 1e-59 | -59.00000000000000000000000000000000000000000000000000000000000 + 1e-60 | -60.000000000000000000000000000000000000000000000000000000000000 + 1e-61 | -61.0000000000000000000000000000000000000000000000000000000000000 + 1e-62 | -62.00000000000000000000000000000000000000000000000000000000000000 + 1e-63 | -63.000000000000000000000000000000000000000000000000000000000000000 + 1e-64 | -64.0000000000000000000000000000000000000000000000000000000000000000 + 1e-65 | -65.00000000000000000000000000000000000000000000000000000000000000000 + 1e-66 | -66.000000000000000000000000000000000000000000000000000000000000000000 + 1e-67 | -67.0000000000000000000000000000000000000000000000000000000000000000000 + 1e-68 | -68.00000000000000000000000000000000000000000000000000000000000000000000 + 1e-69 | -69.000000000000000000000000000000000000000000000000000000000000000000000 + 1e-70 | -70.0000000000000000000000000000000000000000000000000000000000000000000000 + 1e-71 | -71.00000000000000000000000000000000000000000000000000000000000000000000000 + 1e-72 | -72.000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-73 | -73.0000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-74 | -74.00000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-75 | 
-75.000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-76 | -76.0000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-77 | -77.00000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-78 | -78.000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-79 | -79.0000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-80 | -80.00000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-81 | -81.000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-82 | -82.0000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-83 | -83.00000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-84 | -84.000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-85 | -85.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-86 | -86.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-87 | -87.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-88 | -88.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-89 | -89.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-90 | -90.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-91 | -91.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-92 | -92.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-93 | -93.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-94 | -94.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-95 | -95.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-96 | -96.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-97 | -97.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-98 | -98.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-99 | -99.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 1e-100 | -100.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +(100 rows) + +-- input very small, non-exact results +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..50..7} +-- do +-- for d in {9..1..3} +-- do +-- l=$(bc -ql <<< "scale=500 ; l($d*10^-$p) / l(10)" | head -n 1) +-- echo "('${d}.0e-$p', $l)," +-- done +-- done +WITH t(x, bc_result) AS (VALUES +('9.0e-1', -.04575749056067513), +('6.0e-1', -.2218487496163564), +('3.0e-1', -.5228787452803376), +('9.0e-8', -7.045757490560675), +('6.0e-8', -7.221848749616356), +('3.0e-8', -7.522878745280338), +('9.0e-15', -14.0457574905606751), +('6.0e-15', -14.2218487496163564), +('3.0e-15', -14.5228787452803376), +('9.0e-22', -21.04575749056067512540994), +('6.0e-22', -21.22184874961635636749123), +('3.0e-22', -21.52287874528033756270497), +('9.0e-29', 
-28.045757490560675125409944193490), +('6.0e-29', -28.221848749616356367491233202020), +('3.0e-29', -28.522878745280337562704972096745), +('9.0e-36', -35.0457574905606751254099441934897693816), +('6.0e-36', -35.2218487496163563674912332020203916640), +('3.0e-36', -35.5228787452803375627049720967448846908), +('9.0e-43', -42.04575749056067512540994419348976938159974227), +('6.0e-43', -42.22184874961635636749123320202039166403168125), +('3.0e-43', -42.52287874528033756270497209674488469079987114), +('9.0e-50', -49.045757490560675125409944193489769381599742271618608), +('6.0e-50', -49.221848749616356367491233202020391664031681254347196), +('3.0e-50', -49.522878745280337562704972096744884690799871135809304)) +SELECT x, bc_result, log(x::numeric), log(x::numeric)-bc_result AS diff FROM t; + x | bc_result | log | diff +---------+---------------------------------------------------------+---------------------------------------------------------+------------------------------------------------------- + 9.0e-1 | -0.04575749056067513 | -0.04575749056067513 | 0.00000000000000000 + 6.0e-1 | -0.2218487496163564 | -0.2218487496163564 | 0.0000000000000000 + 3.0e-1 | -0.5228787452803376 | -0.5228787452803376 | 0.0000000000000000 + 9.0e-8 | -7.045757490560675 | -7.045757490560675 | 0.000000000000000 + 6.0e-8 | -7.221848749616356 | -7.221848749616356 | 0.000000000000000 + 3.0e-8 | -7.522878745280338 | -7.522878745280338 | 0.000000000000000 + 9.0e-15 | -14.0457574905606751 | -14.0457574905606751 | 0.0000000000000000 + 6.0e-15 | -14.2218487496163564 | -14.2218487496163564 | 0.0000000000000000 + 3.0e-15 | -14.5228787452803376 | -14.5228787452803376 | 0.0000000000000000 + 9.0e-22 | -21.04575749056067512540994 | -21.04575749056067512540994 | 0.00000000000000000000000 + 6.0e-22 | -21.22184874961635636749123 | -21.22184874961635636749123 | 0.00000000000000000000000 + 3.0e-22 | -21.52287874528033756270497 | -21.52287874528033756270497 | 0.00000000000000000000000 + 9.0e-29 | -28.045757490560675125409944193490 | -28.045757490560675125409944193490 | 0.000000000000000000000000000000 + 6.0e-29 | -28.221848749616356367491233202020 | -28.221848749616356367491233202020 | 0.000000000000000000000000000000 + 3.0e-29 | -28.522878745280337562704972096745 | -28.522878745280337562704972096745 | 0.000000000000000000000000000000 + 9.0e-36 | -35.0457574905606751254099441934897693816 | -35.0457574905606751254099441934897693816 | 0.0000000000000000000000000000000000000 + 6.0e-36 | -35.2218487496163563674912332020203916640 | -35.2218487496163563674912332020203916640 | 0.0000000000000000000000000000000000000 + 3.0e-36 | -35.5228787452803375627049720967448846908 | -35.5228787452803375627049720967448846908 | 0.0000000000000000000000000000000000000 + 9.0e-43 | -42.04575749056067512540994419348976938159974227 | -42.04575749056067512540994419348976938159974227 | 0.00000000000000000000000000000000000000000000 + 6.0e-43 | -42.22184874961635636749123320202039166403168125 | -42.22184874961635636749123320202039166403168125 | 0.00000000000000000000000000000000000000000000 + 3.0e-43 | -42.52287874528033756270497209674488469079987114 | -42.52287874528033756270497209674488469079987114 | 0.00000000000000000000000000000000000000000000 + 9.0e-50 | -49.045757490560675125409944193489769381599742271618608 | -49.045757490560675125409944193489769381599742271618608 | 0.000000000000000000000000000000000000000000000000000 + 6.0e-50 | -49.221848749616356367491233202020391664031681254347196 | -49.221848749616356367491233202020391664031681254347196 | 
0.000000000000000000000000000000000000000000000000000 + 3.0e-50 | -49.522878745280337562704972096744884690799871135809304 | -49.522878745280337562704972096744884690799871135809304 | 0.000000000000000000000000000000000000000000000000000 +(24 rows) + +-- input very close to but smaller than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..40..7} +-- do +-- for d in {9..1..3} +-- do +-- l=$(bc -ql <<< "scale=500 ; l(1-$d*10^-$p) / l(10)" | head -n 1) +-- echo "('${d}.0e-$p', $l)," +-- done +-- done +WITH t(x, bc_result) AS (VALUES +('9.0e-1', -1.0000000000000000), +('6.0e-1', -.3979400086720376), +('3.0e-1', -.1549019599857432), +('9.0e-8', -.000000039086505130185422), +('6.0e-8', -.000000026057669695925208), +('3.0e-8', -.000000013028834652530076), +('9.0e-15', -.0000000000000039086503371292840), +('6.0e-15', -.0000000000000026057668914195188), +('3.0e-15', -.0000000000000013028834457097574), +('9.0e-22', -.00000000000000000000039086503371292664), +('6.0e-22', -.00000000000000000000026057668914195110), +('3.0e-22', -.00000000000000000000013028834457097555), +('9.0e-29', -.000000000000000000000000000039086503371292664), +('6.0e-29', -.000000000000000000000000000026057668914195110), +('3.0e-29', -.000000000000000000000000000013028834457097555), +('9.0e-36', -.0000000000000000000000000000000000039086503371292664), +('6.0e-36', -.0000000000000000000000000000000000026057668914195110), +('3.0e-36', -.0000000000000000000000000000000000013028834457097555)) +SELECT '1-'||x, bc_result, log(1.0-x::numeric), log(1.0-x::numeric)-bc_result AS diff FROM t; + ?column? | bc_result | log | diff +-----------+---------------------------------------------------------+---------------------------------------------------------+-------------------------------------------------------- + 1-9.0e-1 | -1.0000000000000000 | -1.0000000000000000 | 0.0000000000000000 + 1-6.0e-1 | -0.3979400086720376 | -0.3979400086720376 | 0.0000000000000000 + 1-3.0e-1 | -0.1549019599857432 | -0.1549019599857432 | 0.0000000000000000 + 1-9.0e-8 | -0.000000039086505130185422 | -0.000000039086505130185422 | 0.000000000000000000000000 + 1-6.0e-8 | -0.000000026057669695925208 | -0.000000026057669695925208 | 0.000000000000000000000000 + 1-3.0e-8 | -0.000000013028834652530076 | -0.000000013028834652530076 | 0.000000000000000000000000 + 1-9.0e-15 | -0.0000000000000039086503371292840 | -0.0000000000000039086503371292840 | 0.0000000000000000000000000000000 + 1-6.0e-15 | -0.0000000000000026057668914195188 | -0.0000000000000026057668914195188 | 0.0000000000000000000000000000000 + 1-3.0e-15 | -0.0000000000000013028834457097574 | -0.0000000000000013028834457097574 | 0.0000000000000000000000000000000 + 1-9.0e-22 | -0.00000000000000000000039086503371292664 | -0.00000000000000000000039086503371292664 | 0.00000000000000000000000000000000000000 + 1-6.0e-22 | -0.00000000000000000000026057668914195110 | -0.00000000000000000000026057668914195110 | 0.00000000000000000000000000000000000000 + 1-3.0e-22 | -0.00000000000000000000013028834457097555 | -0.00000000000000000000013028834457097555 | 0.00000000000000000000000000000000000000 + 1-9.0e-29 | -0.000000000000000000000000000039086503371292664 | -0.000000000000000000000000000039086503371292664 | 0.000000000000000000000000000000000000000000000 + 1-6.0e-29 | -0.000000000000000000000000000026057668914195110 | -0.000000000000000000000000000026057668914195110 | 
0.000000000000000000000000000000000000000000000 + 1-3.0e-29 | -0.000000000000000000000000000013028834457097555 | -0.000000000000000000000000000013028834457097555 | 0.000000000000000000000000000000000000000000000 + 1-9.0e-36 | -0.0000000000000000000000000000000000039086503371292664 | -0.0000000000000000000000000000000000039086503371292664 | 0.0000000000000000000000000000000000000000000000000000 + 1-6.0e-36 | -0.0000000000000000000000000000000000026057668914195110 | -0.0000000000000000000000000000000000026057668914195110 | 0.0000000000000000000000000000000000000000000000000000 + 1-3.0e-36 | -0.0000000000000000000000000000000000013028834457097555 | -0.0000000000000000000000000000000000013028834457097555 | 0.0000000000000000000000000000000000000000000000000000 +(18 rows) + +-- input very close to but larger than 1 +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {1..40..7} +-- do +-- for d in {9..1..3} +-- do +-- l=$(bc -ql <<< "scale=500 ; l(1+$d*10^-$p) / l(10)" | head -n 1) +-- echo "('${d}.0e-$p', $l)," +-- done +-- done +WITH t(x, bc_result) AS (VALUES +('9.0e-1', .2787536009528290), +('6.0e-1', .2041199826559248), +('3.0e-1', .1139433523068368), +('9.0e-8', .000000039086501612400118), +('6.0e-8', .000000026057668132465074), +('3.0e-8', .000000013028834261665042), +('9.0e-15', .0000000000000039086503371292489), +('6.0e-15', .0000000000000026057668914195031), +('3.0e-15', .0000000000000013028834457097535), +('9.0e-22', .00000000000000000000039086503371292664), +('6.0e-22', .00000000000000000000026057668914195110), +('3.0e-22', .00000000000000000000013028834457097555), +('9.0e-29', .000000000000000000000000000039086503371292664), +('6.0e-29', .000000000000000000000000000026057668914195110), +('3.0e-29', .000000000000000000000000000013028834457097555), +('9.0e-36', .0000000000000000000000000000000000039086503371292664), +('6.0e-36', .0000000000000000000000000000000000026057668914195110), +('3.0e-36', .0000000000000000000000000000000000013028834457097555)) +SELECT '1+'||x, bc_result, log(1.0+x::numeric), log(1.0+x::numeric)-bc_result AS diff FROM t; + ?column? 
| bc_result | log | diff +-----------+--------------------------------------------------------+--------------------------------------------------------+-------------------------------------------------------- + 1+9.0e-1 | 0.2787536009528290 | 0.2787536009528290 | 0.0000000000000000 + 1+6.0e-1 | 0.2041199826559248 | 0.2041199826559248 | 0.0000000000000000 + 1+3.0e-1 | 0.1139433523068368 | 0.1139433523068368 | 0.0000000000000000 + 1+9.0e-8 | 0.000000039086501612400118 | 0.000000039086501612400118 | 0.000000000000000000000000 + 1+6.0e-8 | 0.000000026057668132465074 | 0.000000026057668132465074 | 0.000000000000000000000000 + 1+3.0e-8 | 0.000000013028834261665042 | 0.000000013028834261665042 | 0.000000000000000000000000 + 1+9.0e-15 | 0.0000000000000039086503371292489 | 0.0000000000000039086503371292489 | 0.0000000000000000000000000000000 + 1+6.0e-15 | 0.0000000000000026057668914195031 | 0.0000000000000026057668914195031 | 0.0000000000000000000000000000000 + 1+3.0e-15 | 0.0000000000000013028834457097535 | 0.0000000000000013028834457097535 | 0.0000000000000000000000000000000 + 1+9.0e-22 | 0.00000000000000000000039086503371292664 | 0.00000000000000000000039086503371292664 | 0.00000000000000000000000000000000000000 + 1+6.0e-22 | 0.00000000000000000000026057668914195110 | 0.00000000000000000000026057668914195110 | 0.00000000000000000000000000000000000000 + 1+3.0e-22 | 0.00000000000000000000013028834457097555 | 0.00000000000000000000013028834457097555 | 0.00000000000000000000000000000000000000 + 1+9.0e-29 | 0.000000000000000000000000000039086503371292664 | 0.000000000000000000000000000039086503371292664 | 0.000000000000000000000000000000000000000000000 + 1+6.0e-29 | 0.000000000000000000000000000026057668914195110 | 0.000000000000000000000000000026057668914195110 | 0.000000000000000000000000000000000000000000000 + 1+3.0e-29 | 0.000000000000000000000000000013028834457097555 | 0.000000000000000000000000000013028834457097555 | 0.000000000000000000000000000000000000000000000 + 1+9.0e-36 | 0.0000000000000000000000000000000000039086503371292664 | 0.0000000000000000000000000000000000039086503371292664 | 0.0000000000000000000000000000000000000000000000000000 + 1+6.0e-36 | 0.0000000000000000000000000000000000026057668914195110 | 0.0000000000000000000000000000000000026057668914195110 | 0.0000000000000000000000000000000000000000000000000000 + 1+3.0e-36 | 0.0000000000000000000000000000000000013028834457097555 | 0.0000000000000000000000000000000000013028834457097555 | 0.0000000000000000000000000000000000000000000000000000 +(18 rows) + +-- input very large, exact result known +WITH t(x) AS (SELECT '1e'||n FROM generate_series(1, 100) g(n)) +SELECT x, log(x::numeric) FROM t; + x | log +-------+-------------------- + 1e1 | 1.0000000000000000 + 1e2 | 2.0000000000000000 + 1e3 | 3.0000000000000000 + 1e4 | 4.0000000000000000 + 1e5 | 5.000000000000000 + 1e6 | 6.000000000000000 + 1e7 | 7.000000000000000 + 1e8 | 8.000000000000000 + 1e9 | 9.000000000000000 + 1e10 | 10.000000000000000 + 1e11 | 11.000000000000000 + 1e12 | 12.000000000000000 + 1e13 | 13.000000000000000 + 1e14 | 14.000000000000000 + 1e15 | 15.000000000000000 + 1e16 | 16.000000000000000 + 1e17 | 17.000000000000000 + 1e18 | 18.000000000000000 + 1e19 | 19.000000000000000 + 1e20 | 20.000000000000000 + 1e21 | 21.000000000000000 + 1e22 | 22.000000000000000 + 1e23 | 23.000000000000000 + 1e24 | 24.000000000000000 + 1e25 | 25.000000000000000 + 1e26 | 26.000000000000000 + 1e27 | 27.000000000000000 + 1e28 | 28.000000000000000 + 1e29 | 29.000000000000000 + 1e30 | 
30.000000000000000 + 1e31 | 31.000000000000000 + 1e32 | 32.000000000000000 + 1e33 | 33.000000000000000 + 1e34 | 34.000000000000000 + 1e35 | 35.000000000000000 + 1e36 | 36.000000000000000 + 1e37 | 37.000000000000000 + 1e38 | 38.000000000000000 + 1e39 | 39.000000000000000 + 1e40 | 40.000000000000000 + 1e41 | 41.000000000000000 + 1e42 | 42.000000000000000 + 1e43 | 43.000000000000000 + 1e44 | 44.00000000000000 + 1e45 | 45.00000000000000 + 1e46 | 46.00000000000000 + 1e47 | 47.00000000000000 + 1e48 | 48.00000000000000 + 1e49 | 49.00000000000000 + 1e50 | 50.00000000000000 + 1e51 | 51.00000000000000 + 1e52 | 52.00000000000000 + 1e53 | 53.00000000000000 + 1e54 | 54.00000000000000 + 1e55 | 55.00000000000000 + 1e56 | 56.00000000000000 + 1e57 | 57.00000000000000 + 1e58 | 58.00000000000000 + 1e59 | 59.00000000000000 + 1e60 | 60.00000000000000 + 1e61 | 61.00000000000000 + 1e62 | 62.00000000000000 + 1e63 | 63.00000000000000 + 1e64 | 64.00000000000000 + 1e65 | 65.00000000000000 + 1e66 | 66.00000000000000 + 1e67 | 67.00000000000000 + 1e68 | 68.00000000000000 + 1e69 | 69.00000000000000 + 1e70 | 70.00000000000000 + 1e71 | 71.00000000000000 + 1e72 | 72.00000000000000 + 1e73 | 73.00000000000000 + 1e74 | 74.00000000000000 + 1e75 | 75.00000000000000 + 1e76 | 76.00000000000000 + 1e77 | 77.00000000000000 + 1e78 | 78.00000000000000 + 1e79 | 79.00000000000000 + 1e80 | 80.00000000000000 + 1e81 | 81.00000000000000 + 1e82 | 82.00000000000000 + 1e83 | 83.00000000000000 + 1e84 | 84.00000000000000 + 1e85 | 85.00000000000000 + 1e86 | 86.00000000000000 + 1e87 | 87.00000000000000 + 1e88 | 88.00000000000000 + 1e89 | 89.00000000000000 + 1e90 | 90.00000000000000 + 1e91 | 91.00000000000000 + 1e92 | 92.00000000000000 + 1e93 | 93.00000000000000 + 1e94 | 94.00000000000000 + 1e95 | 95.00000000000000 + 1e96 | 96.00000000000000 + 1e97 | 97.00000000000000 + 1e98 | 98.00000000000000 + 1e99 | 99.00000000000000 + 1e100 | 100.00000000000000 +(100 rows) + +-- input very large, non-exact results +-- +-- bc(1) results computed with a scale of 500 and truncated using the script +-- below, and then rounded by hand to match the precision of LN(): +-- +-- for p in {10..50..7} +-- do +-- for d in {2..9..3} +-- do +-- l=$(bc -ql <<< "scale=500 ; l($d*10^$p) / l(10)" | head -n 1) +-- echo "('${d}.0e$p', $l)," +-- done +-- done +WITH t(x, bc_result) AS (VALUES +('2.0e10', 10.301029995663981), +('5.0e10', 10.698970004336019), +('8.0e10', 10.903089986991944), +('2.0e17', 17.301029995663981), +('5.0e17', 17.698970004336019), +('8.0e17', 17.903089986991944), +('2.0e24', 24.301029995663981), +('5.0e24', 24.698970004336019), +('8.0e24', 24.903089986991944), +('2.0e31', 31.301029995663981), +('5.0e31', 31.698970004336019), +('8.0e31', 31.903089986991944), +('2.0e38', 38.301029995663981), +('5.0e38', 38.698970004336019), +('8.0e38', 38.903089986991944), +('2.0e45', 45.30102999566398), +('5.0e45', 45.69897000433602), +('8.0e45', 45.90308998699194)) +SELECT x, bc_result, log(x::numeric), log(x::numeric)-bc_result AS diff FROM t; + x | bc_result | log | diff +--------+--------------------+--------------------+------------------- + 2.0e10 | 10.301029995663981 | 10.301029995663981 | 0.000000000000000 + 5.0e10 | 10.698970004336019 | 10.698970004336019 | 0.000000000000000 + 8.0e10 | 10.903089986991944 | 10.903089986991944 | 0.000000000000000 + 2.0e17 | 17.301029995663981 | 17.301029995663981 | 0.000000000000000 + 5.0e17 | 17.698970004336019 | 17.698970004336019 | 0.000000000000000 + 8.0e17 | 17.903089986991944 | 17.903089986991944 | 0.000000000000000 + 2.0e24 | 
24.301029995663981 | 24.301029995663981 | 0.000000000000000 + 5.0e24 | 24.698970004336019 | 24.698970004336019 | 0.000000000000000 + 8.0e24 | 24.903089986991944 | 24.903089986991944 | 0.000000000000000 + 2.0e31 | 31.301029995663981 | 31.301029995663981 | 0.000000000000000 + 5.0e31 | 31.698970004336019 | 31.698970004336019 | 0.000000000000000 + 8.0e31 | 31.903089986991944 | 31.903089986991944 | 0.000000000000000 + 2.0e38 | 38.301029995663981 | 38.301029995663981 | 0.000000000000000 + 5.0e38 | 38.698970004336019 | 38.698970004336019 | 0.000000000000000 + 8.0e38 | 38.903089986991944 | 38.903089986991944 | 0.000000000000000 + 2.0e45 | 45.30102999566398 | 45.30102999566398 | 0.00000000000000 + 5.0e45 | 45.69897000433602 | 45.69897000433602 | 0.00000000000000 + 8.0e45 | 45.90308998699194 | 45.90308998699194 | 0.00000000000000 +(18 rows) + diff --git a/src/test/regress/expected/numerology.out b/src/test/regress/expected/numerology.out new file mode 100644 index 0000000..f662a50 --- /dev/null +++ b/src/test/regress/expected/numerology.out @@ -0,0 +1,464 @@ +-- +-- NUMEROLOGY +-- Test various combinations of numeric types and functions. +-- +-- +-- numeric literals +-- +SELECT 0b100101; + ?column? +---------- + 37 +(1 row) + +SELECT 0o273; + ?column? +---------- + 187 +(1 row) + +SELECT 0x42F; + ?column? +---------- + 1071 +(1 row) + +-- cases near int4 overflow +SELECT 0b1111111111111111111111111111111; + ?column? +------------ + 2147483647 +(1 row) + +SELECT 0b10000000000000000000000000000000; + ?column? +------------ + 2147483648 +(1 row) + +SELECT 0o17777777777; + ?column? +------------ + 2147483647 +(1 row) + +SELECT 0o20000000000; + ?column? +------------ + 2147483648 +(1 row) + +SELECT 0x7FFFFFFF; + ?column? +------------ + 2147483647 +(1 row) + +SELECT 0x80000000; + ?column? +------------ + 2147483648 +(1 row) + +SELECT -0b10000000000000000000000000000000; + ?column? +------------- + -2147483648 +(1 row) + +SELECT -0b10000000000000000000000000000001; + ?column? +------------- + -2147483649 +(1 row) + +SELECT -0o20000000000; + ?column? +------------- + -2147483648 +(1 row) + +SELECT -0o20000000001; + ?column? +------------- + -2147483649 +(1 row) + +SELECT -0x80000000; + ?column? +------------- + -2147483648 +(1 row) + +SELECT -0x80000001; + ?column? +------------- + -2147483649 +(1 row) + +-- cases near int8 overflow +SELECT 0b111111111111111111111111111111111111111111111111111111111111111; + ?column? +--------------------- + 9223372036854775807 +(1 row) + +SELECT 0b1000000000000000000000000000000000000000000000000000000000000000; + ?column? +--------------------- + 9223372036854775808 +(1 row) + +SELECT 0o777777777777777777777; + ?column? +--------------------- + 9223372036854775807 +(1 row) + +SELECT 0o1000000000000000000000; + ?column? +--------------------- + 9223372036854775808 +(1 row) + +SELECT 0x7FFFFFFFFFFFFFFF; + ?column? +--------------------- + 9223372036854775807 +(1 row) + +SELECT 0x8000000000000000; + ?column? +--------------------- + 9223372036854775808 +(1 row) + +SELECT -0b1000000000000000000000000000000000000000000000000000000000000000; + ?column? +---------------------- + -9223372036854775808 +(1 row) + +SELECT -0b1000000000000000000000000000000000000000000000000000000000000001; + ?column? +---------------------- + -9223372036854775809 +(1 row) + +SELECT -0o1000000000000000000000; + ?column? +---------------------- + -9223372036854775808 +(1 row) + +SELECT -0o1000000000000000000001; + ?column? 
+---------------------- + -9223372036854775809 +(1 row) + +SELECT -0x8000000000000000; + ?column? +---------------------- + -9223372036854775808 +(1 row) + +SELECT -0x8000000000000001; + ?column? +---------------------- + -9223372036854775809 +(1 row) + +-- error cases +SELECT 123abc; +ERROR: trailing junk after numeric literal at or near "123a" +LINE 1: SELECT 123abc; + ^ +SELECT 0x0o; +ERROR: trailing junk after numeric literal at or near "0x0o" +LINE 1: SELECT 0x0o; + ^ +SELECT 0.a; +ERROR: trailing junk after numeric literal at or near "0.a" +LINE 1: SELECT 0.a; + ^ +SELECT 0.0a; +ERROR: trailing junk after numeric literal at or near "0.0a" +LINE 1: SELECT 0.0a; + ^ +SELECT .0a; +ERROR: trailing junk after numeric literal at or near ".0a" +LINE 1: SELECT .0a; + ^ +SELECT 0.0e1a; +ERROR: trailing junk after numeric literal at or near "0.0e1a" +LINE 1: SELECT 0.0e1a; + ^ +SELECT 0.0e; +ERROR: trailing junk after numeric literal at or near "0.0e" +LINE 1: SELECT 0.0e; + ^ +SELECT 0.0e+a; +ERROR: trailing junk after numeric literal at or near "0.0e+" +LINE 1: SELECT 0.0e+a; + ^ +PREPARE p1 AS SELECT $1a; +ERROR: trailing junk after parameter at or near "$1a" +LINE 1: PREPARE p1 AS SELECT $1a; + ^ +SELECT 0b; +ERROR: invalid binary integer at or near "0b" +LINE 1: SELECT 0b; + ^ +SELECT 1b; +ERROR: trailing junk after numeric literal at or near "1b" +LINE 1: SELECT 1b; + ^ +SELECT 0b0x; +ERROR: trailing junk after numeric literal at or near "0b0x" +LINE 1: SELECT 0b0x; + ^ +SELECT 0o; +ERROR: invalid octal integer at or near "0o" +LINE 1: SELECT 0o; + ^ +SELECT 1o; +ERROR: trailing junk after numeric literal at or near "1o" +LINE 1: SELECT 1o; + ^ +SELECT 0o0x; +ERROR: trailing junk after numeric literal at or near "0o0x" +LINE 1: SELECT 0o0x; + ^ +SELECT 0x; +ERROR: invalid hexadecimal integer at or near "0x" +LINE 1: SELECT 0x; + ^ +SELECT 1x; +ERROR: trailing junk after numeric literal at or near "1x" +LINE 1: SELECT 1x; + ^ +SELECT 0x0y; +ERROR: trailing junk after numeric literal at or near "0x0y" +LINE 1: SELECT 0x0y; + ^ +-- underscores +SELECT 1_000_000; + ?column? +---------- + 1000000 +(1 row) + +SELECT 1_2_3; + ?column? +---------- + 123 +(1 row) + +SELECT 0x1EEE_FFFF; + ?column? +----------- + 518979583 +(1 row) + +SELECT 0o2_73; + ?column? +---------- + 187 +(1 row) + +SELECT 0b_10_0101; + ?column? +---------- + 37 +(1 row) + +SELECT 1_000.000_005; + ?column? +------------- + 1000.000005 +(1 row) + +SELECT 1_000.; + ?column? +---------- + 1000 +(1 row) + +SELECT .000_005; + ?column? +---------- + 0.000005 +(1 row) + +SELECT 1_000.5e0_1; + ?column? 
+---------- + 10005 +(1 row) + +-- error cases +SELECT _100; +ERROR: column "_100" does not exist +LINE 1: SELECT _100; + ^ +SELECT 100_; +ERROR: trailing junk after numeric literal at or near "100_" +LINE 1: SELECT 100_; + ^ +SELECT 100__000; +ERROR: trailing junk after numeric literal at or near "100_" +LINE 1: SELECT 100__000; + ^ +SELECT _1_000.5; +ERROR: syntax error at or near ".5" +LINE 1: SELECT _1_000.5; + ^ +SELECT 1_000_.5; +ERROR: trailing junk after numeric literal at or near "1_000_" +LINE 1: SELECT 1_000_.5; + ^ +SELECT 1_000._5; +ERROR: trailing junk after numeric literal at or near "1_000._" +LINE 1: SELECT 1_000._5; + ^ +SELECT 1_000.5_; +ERROR: trailing junk after numeric literal at or near "1_000.5_" +LINE 1: SELECT 1_000.5_; + ^ +SELECT 1_000.5e_1; +ERROR: trailing junk after numeric literal at or near "1_000.5e" +LINE 1: SELECT 1_000.5e_1; + ^ +-- +-- Test implicit type conversions +-- This fails for Postgres v6.1 (and earlier?) +-- so let's try explicit conversions for now - tgl 97/05/07 +-- +CREATE TABLE TEMP_FLOAT (f1 FLOAT8); +INSERT INTO TEMP_FLOAT (f1) + SELECT float8(f1) FROM INT4_TBL; +INSERT INTO TEMP_FLOAT (f1) + SELECT float8(f1) FROM INT2_TBL; +SELECT f1 FROM TEMP_FLOAT + ORDER BY f1; + f1 +------------- + -2147483647 + -123456 + -32767 + -1234 + 0 + 0 + 1234 + 32767 + 123456 + 2147483647 +(10 rows) + +-- int4 +CREATE TABLE TEMP_INT4 (f1 INT4); +INSERT INTO TEMP_INT4 (f1) + SELECT int4(f1) FROM FLOAT8_TBL + WHERE (f1 > -2147483647) AND (f1 < 2147483647); +INSERT INTO TEMP_INT4 (f1) + SELECT int4(f1) FROM INT2_TBL; +SELECT f1 FROM TEMP_INT4 + ORDER BY f1; + f1 +-------- + -32767 + -1234 + -1004 + -35 + 0 + 0 + 0 + 1234 + 32767 +(9 rows) + +-- int2 +CREATE TABLE TEMP_INT2 (f1 INT2); +INSERT INTO TEMP_INT2 (f1) + SELECT int2(f1) FROM FLOAT8_TBL + WHERE (f1 >= -32767) AND (f1 <= 32767); +INSERT INTO TEMP_INT2 (f1) + SELECT int2(f1) FROM INT4_TBL + WHERE (f1 >= -32767) AND (f1 <= 32767); +SELECT f1 FROM TEMP_INT2 + ORDER BY f1; + f1 +------- + -1004 + -35 + 0 + 0 + 0 +(5 rows) + +-- +-- Group-by combinations +-- +CREATE TABLE TEMP_GROUP (f1 INT4, f2 INT4, f3 FLOAT8); +INSERT INTO TEMP_GROUP + SELECT 1, (- i.f1), (- f.f1) + FROM INT4_TBL i, FLOAT8_TBL f; +INSERT INTO TEMP_GROUP + SELECT 2, i.f1, f.f1 + FROM INT4_TBL i, FLOAT8_TBL f; +SELECT DISTINCT f1 AS two FROM TEMP_GROUP ORDER BY 1; + two +----- + 1 + 2 +(2 rows) + +SELECT f1 AS two, max(f3) AS max_float, min(f3) as min_float + FROM TEMP_GROUP + GROUP BY f1 + ORDER BY two, max_float, min_float; + two | max_float | min_float +-----+----------------------+----------------------- + 1 | 1.2345678901234e+200 | -0 + 2 | 0 | -1.2345678901234e+200 +(2 rows) + +-- GROUP BY a result column name is not legal per SQL92, but we accept it +-- anyway (if the name is not the name of any column exposed by FROM). 
+SELECT f1 AS two, max(f3) AS max_float, min(f3) AS min_float + FROM TEMP_GROUP + GROUP BY two + ORDER BY two, max_float, min_float; + two | max_float | min_float +-----+----------------------+----------------------- + 1 | 1.2345678901234e+200 | -0 + 2 | 0 | -1.2345678901234e+200 +(2 rows) + +SELECT f1 AS two, (max(f3) + 1) AS max_plus_1, (min(f3) - 1) AS min_minus_1 + FROM TEMP_GROUP + GROUP BY f1 + ORDER BY two, min_minus_1; + two | max_plus_1 | min_minus_1 +-----+----------------------+----------------------- + 1 | 1.2345678901234e+200 | -1 + 2 | 1 | -1.2345678901234e+200 +(2 rows) + +SELECT f1 AS two, + max(f2) + min(f2) AS max_plus_min, + min(f3) - 1 AS min_minus_1 + FROM TEMP_GROUP + GROUP BY f1 + ORDER BY two, min_minus_1; + two | max_plus_min | min_minus_1 +-----+--------------+----------------------- + 1 | 0 | -1 + 2 | 0 | -1.2345678901234e+200 +(2 rows) + +DROP TABLE TEMP_INT2; +DROP TABLE TEMP_INT4; +DROP TABLE TEMP_FLOAT; +DROP TABLE TEMP_GROUP; diff --git a/src/test/regress/expected/object_address.out b/src/test/regress/expected/object_address.out new file mode 100644 index 0000000..fc42d41 --- /dev/null +++ b/src/test/regress/expected/object_address.out @@ -0,0 +1,638 @@ +-- +-- Test for pg_get_object_address +-- +-- Clean up in case a prior regression run failed +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_addr_user; +RESET client_min_messages; +CREATE USER regress_addr_user; +-- Test generic object addressing/identification functions +CREATE SCHEMA addr_nsp; +SET search_path TO 'addr_nsp'; +CREATE FOREIGN DATA WRAPPER addr_fdw; +CREATE SERVER addr_fserv FOREIGN DATA WRAPPER addr_fdw; +CREATE TEXT SEARCH DICTIONARY addr_ts_dict (template=simple); +CREATE TEXT SEARCH CONFIGURATION addr_ts_conf (copy=english); +CREATE TEXT SEARCH TEMPLATE addr_ts_temp (lexize=dsimple_lexize); +CREATE TEXT SEARCH PARSER addr_ts_prs + (start = prsd_start, gettoken = prsd_nexttoken, end = prsd_end, lextypes = prsd_lextype); +CREATE TABLE addr_nsp.gentable ( + a serial primary key CONSTRAINT a_chk CHECK (a > 0), + b text DEFAULT 'hello' +); +CREATE TABLE addr_nsp.parttable ( + a int PRIMARY KEY +) PARTITION BY RANGE (a); +CREATE VIEW addr_nsp.genview AS SELECT * from addr_nsp.gentable; +CREATE MATERIALIZED VIEW addr_nsp.genmatview AS SELECT * FROM addr_nsp.gentable; +CREATE TYPE addr_nsp.gencomptype AS (a int); +CREATE TYPE addr_nsp.genenum AS ENUM ('one', 'two'); +CREATE FOREIGN TABLE addr_nsp.genftable (a int) SERVER addr_fserv; +CREATE AGGREGATE addr_nsp.genaggr(int4) (sfunc = int4pl, stype = int4); +CREATE DOMAIN addr_nsp.gendomain AS int4 CONSTRAINT domconstr CHECK (value > 0); +CREATE FUNCTION addr_nsp.trig() RETURNS TRIGGER LANGUAGE plpgsql AS $$ BEGIN END; $$; +CREATE TRIGGER t BEFORE INSERT ON addr_nsp.gentable FOR EACH ROW EXECUTE PROCEDURE addr_nsp.trig(); +CREATE POLICY genpol ON addr_nsp.gentable; +CREATE PROCEDURE addr_nsp.proc(int4) LANGUAGE SQL AS $$ $$; +CREATE SERVER "integer" FOREIGN DATA WRAPPER addr_fdw; +CREATE USER MAPPING FOR regress_addr_user SERVER "integer"; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_addr_user IN SCHEMA public GRANT ALL ON TABLES TO regress_addr_user; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_addr_user REVOKE DELETE ON TABLES FROM regress_addr_user; +-- this transform would be quite unsafe to leave lying around, +-- except that the SQL language pays no attention to transforms: +CREATE TRANSFORM FOR int LANGUAGE SQL ( + FROM SQL WITH FUNCTION prsd_lextype(internal), + TO SQL WITH FUNCTION int4recv(internal)); +-- suppress warning 
that depends on wal_level +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION addr_pub FOR TABLE addr_nsp.gentable; +CREATE PUBLICATION addr_pub_schema FOR TABLES IN SCHEMA addr_nsp; +RESET client_min_messages; +CREATE SUBSCRIPTION regress_addr_sub CONNECTION '' PUBLICATION bar WITH (connect = false, slot_name = NONE); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +CREATE STATISTICS addr_nsp.gentable_stat ON a, b FROM addr_nsp.gentable; +-- test some error cases +SELECT pg_get_object_address('stone', '{}', '{}'); +ERROR: unrecognized object type "stone" +SELECT pg_get_object_address('table', '{}', '{}'); +ERROR: name list length must be at least 1 +SELECT pg_get_object_address('table', '{NULL}', '{}'); +ERROR: name or argument lists may not contain nulls +-- unrecognized object types +DO $$ +DECLARE + objtype text; +BEGIN + FOR objtype IN VALUES ('toast table'), ('index column'), ('sequence column'), + ('toast table column'), ('view column'), ('materialized view column') + LOOP + BEGIN + PERFORM pg_get_object_address(objtype, '{one}', '{}'); + EXCEPTION WHEN invalid_parameter_value THEN + RAISE WARNING 'error for %: %', objtype, sqlerrm; + END; + END LOOP; +END; +$$; +WARNING: error for toast table: unsupported object type "toast table" +WARNING: error for index column: unsupported object type "index column" +WARNING: error for sequence column: unsupported object type "sequence column" +WARNING: error for toast table column: unsupported object type "toast table column" +WARNING: error for view column: unsupported object type "view column" +WARNING: error for materialized view column: unsupported object type "materialized view column" +-- miscellaneous other errors +select * from pg_get_object_address('operator of access method', '{btree,integer_ops,1}', '{int4,bool}'); +ERROR: operator 1 (int4, bool) of operator family integer_ops for access method btree does not exist +select * from pg_get_object_address('operator of access method', '{btree,integer_ops,99}', '{int4,int4}'); +ERROR: operator 99 (int4, int4) of operator family integer_ops for access method btree does not exist +select * from pg_get_object_address('function of access method', '{btree,integer_ops,1}', '{int4,bool}'); +ERROR: function 1 (int4, bool) of operator family integer_ops for access method btree does not exist +select * from pg_get_object_address('function of access method', '{btree,integer_ops,99}', '{int4,int4}'); +ERROR: function 99 (int4, int4) of operator family integer_ops for access method btree does not exist +DO $$ +DECLARE + objtype text; + names text[]; + args text[]; +BEGIN + FOR objtype IN VALUES + ('table'), ('index'), ('sequence'), ('view'), + ('materialized view'), ('foreign table'), + ('table column'), ('foreign table column'), + ('aggregate'), ('function'), ('procedure'), ('type'), ('cast'), + ('table constraint'), ('domain constraint'), ('conversion'), ('default value'), + ('operator'), ('operator class'), ('operator family'), ('rule'), ('trigger'), + ('text search parser'), ('text search dictionary'), + ('text search template'), ('text search configuration'), + ('policy'), ('user mapping'), ('default acl'), ('transform'), + ('operator of access method'), ('function of access method'), + ('publication namespace'), ('publication relation') + LOOP + FOR names IN VALUES ('{eins}'), ('{addr_nsp, zwei}'), ('{eins, zwei, drei}') + LOOP + FOR args IN VALUES 
('{}'), ('{integer}') + LOOP + BEGIN + PERFORM pg_get_object_address(objtype, names, args); + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'error for %,%,%: %', objtype, names, args, sqlerrm; + END; + END LOOP; + END LOOP; + END LOOP; +END; +$$; +WARNING: error for table,{eins},{}: relation "eins" does not exist +WARNING: error for table,{eins},{integer}: relation "eins" does not exist +WARNING: error for table,{addr_nsp,zwei},{}: relation "addr_nsp.zwei" does not exist +WARNING: error for table,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for table,{eins,zwei,drei},{}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for table,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for index,{eins},{}: relation "eins" does not exist +WARNING: error for index,{eins},{integer}: relation "eins" does not exist +WARNING: error for index,{addr_nsp,zwei},{}: relation "addr_nsp.zwei" does not exist +WARNING: error for index,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for index,{eins,zwei,drei},{}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for index,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for sequence,{eins},{}: relation "eins" does not exist +WARNING: error for sequence,{eins},{integer}: relation "eins" does not exist +WARNING: error for sequence,{addr_nsp,zwei},{}: relation "addr_nsp.zwei" does not exist +WARNING: error for sequence,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for sequence,{eins,zwei,drei},{}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for sequence,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for view,{eins},{}: relation "eins" does not exist +WARNING: error for view,{eins},{integer}: relation "eins" does not exist +WARNING: error for view,{addr_nsp,zwei},{}: relation "addr_nsp.zwei" does not exist +WARNING: error for view,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for view,{eins,zwei,drei},{}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for view,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for materialized view,{eins},{}: relation "eins" does not exist +WARNING: error for materialized view,{eins},{integer}: relation "eins" does not exist +WARNING: error for materialized view,{addr_nsp,zwei},{}: relation "addr_nsp.zwei" does not exist +WARNING: error for materialized view,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for materialized view,{eins,zwei,drei},{}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for materialized view,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for foreign table,{eins},{}: relation "eins" does not exist +WARNING: error for foreign table,{eins},{integer}: relation "eins" does not exist +WARNING: error for foreign table,{addr_nsp,zwei},{}: relation "addr_nsp.zwei" does not exist +WARNING: error for foreign table,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for foreign table,{eins,zwei,drei},{}: cross-database references are not implemented: 
"eins.zwei.drei" +WARNING: error for foreign table,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +WARNING: error for table column,{eins},{}: column name must be qualified +WARNING: error for table column,{eins},{integer}: column name must be qualified +WARNING: error for table column,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for table column,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for table column,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for table column,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for foreign table column,{eins},{}: column name must be qualified +WARNING: error for foreign table column,{eins},{integer}: column name must be qualified +WARNING: error for foreign table column,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for foreign table column,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for foreign table column,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for foreign table column,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for aggregate,{eins},{}: aggregate eins(*) does not exist +WARNING: error for aggregate,{eins},{integer}: aggregate eins(integer) does not exist +WARNING: error for aggregate,{addr_nsp,zwei},{}: aggregate addr_nsp.zwei(*) does not exist +WARNING: error for aggregate,{addr_nsp,zwei},{integer}: aggregate addr_nsp.zwei(integer) does not exist +WARNING: error for aggregate,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for aggregate,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for function,{eins},{}: function eins() does not exist +WARNING: error for function,{eins},{integer}: function eins(integer) does not exist +WARNING: error for function,{addr_nsp,zwei},{}: function addr_nsp.zwei() does not exist +WARNING: error for function,{addr_nsp,zwei},{integer}: function addr_nsp.zwei(integer) does not exist +WARNING: error for function,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for function,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for procedure,{eins},{}: procedure eins() does not exist +WARNING: error for procedure,{eins},{integer}: procedure eins(integer) does not exist +WARNING: error for procedure,{addr_nsp,zwei},{}: procedure addr_nsp.zwei() does not exist +WARNING: error for procedure,{addr_nsp,zwei},{integer}: procedure addr_nsp.zwei(integer) does not exist +WARNING: error for procedure,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for procedure,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for type,{eins},{}: type "eins" does not exist +WARNING: error for type,{eins},{integer}: type "eins" does not exist +WARNING: error for type,{addr_nsp,zwei},{}: name list length must be exactly 1 +WARNING: error for type,{addr_nsp,zwei},{integer}: name list length must be exactly 1 +WARNING: error for type,{eins,zwei,drei},{}: name list length must be exactly 1 +WARNING: error for type,{eins,zwei,drei},{integer}: name list length must be exactly 1 +WARNING: error for cast,{eins},{}: argument list length must be exactly 1 +WARNING: error for 
cast,{eins},{integer}: type "eins" does not exist +WARNING: error for cast,{addr_nsp,zwei},{}: name list length must be exactly 1 +WARNING: error for cast,{addr_nsp,zwei},{integer}: name list length must be exactly 1 +WARNING: error for cast,{eins,zwei,drei},{}: name list length must be exactly 1 +WARNING: error for cast,{eins,zwei,drei},{integer}: name list length must be exactly 1 +WARNING: error for table constraint,{eins},{}: must specify relation and object name +WARNING: error for table constraint,{eins},{integer}: must specify relation and object name +WARNING: error for table constraint,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for table constraint,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for table constraint,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for table constraint,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for domain constraint,{eins},{}: argument list length must be exactly 1 +WARNING: error for domain constraint,{eins},{integer}: type "eins" does not exist +WARNING: error for domain constraint,{addr_nsp,zwei},{}: name list length must be exactly 1 +WARNING: error for domain constraint,{addr_nsp,zwei},{integer}: name list length must be exactly 1 +WARNING: error for domain constraint,{eins,zwei,drei},{}: name list length must be exactly 1 +WARNING: error for domain constraint,{eins,zwei,drei},{integer}: name list length must be exactly 1 +WARNING: error for conversion,{eins},{}: conversion "eins" does not exist +WARNING: error for conversion,{eins},{integer}: conversion "eins" does not exist +WARNING: error for conversion,{addr_nsp,zwei},{}: conversion "addr_nsp.zwei" does not exist +WARNING: error for conversion,{addr_nsp,zwei},{integer}: conversion "addr_nsp.zwei" does not exist +WARNING: error for conversion,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for conversion,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for default value,{eins},{}: column name must be qualified +WARNING: error for default value,{eins},{integer}: column name must be qualified +WARNING: error for default value,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for default value,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for default value,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for default value,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for operator,{eins},{}: argument list length must be exactly 2 +WARNING: error for operator,{eins},{integer}: argument list length must be exactly 2 +WARNING: error for operator,{addr_nsp,zwei},{}: argument list length must be exactly 2 +WARNING: error for operator,{addr_nsp,zwei},{integer}: argument list length must be exactly 2 +WARNING: error for operator,{eins,zwei,drei},{}: argument list length must be exactly 2 +WARNING: error for operator,{eins,zwei,drei},{integer}: argument list length must be exactly 2 +WARNING: error for operator class,{eins},{}: name list length must be at least 2 +WARNING: error for operator class,{eins},{integer}: name list length must be at least 2 +WARNING: error for operator class,{addr_nsp,zwei},{}: access method "addr_nsp" does not exist +WARNING: error for operator class,{addr_nsp,zwei},{integer}: access method "addr_nsp" does not exist +WARNING: error for operator class,{eins,zwei,drei},{}: 
access method "eins" does not exist +WARNING: error for operator class,{eins,zwei,drei},{integer}: access method "eins" does not exist +WARNING: error for operator family,{eins},{}: name list length must be at least 2 +WARNING: error for operator family,{eins},{integer}: name list length must be at least 2 +WARNING: error for operator family,{addr_nsp,zwei},{}: access method "addr_nsp" does not exist +WARNING: error for operator family,{addr_nsp,zwei},{integer}: access method "addr_nsp" does not exist +WARNING: error for operator family,{eins,zwei,drei},{}: access method "eins" does not exist +WARNING: error for operator family,{eins,zwei,drei},{integer}: access method "eins" does not exist +WARNING: error for rule,{eins},{}: must specify relation and object name +WARNING: error for rule,{eins},{integer}: must specify relation and object name +WARNING: error for rule,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for rule,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for rule,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for rule,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for trigger,{eins},{}: must specify relation and object name +WARNING: error for trigger,{eins},{integer}: must specify relation and object name +WARNING: error for trigger,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for trigger,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for trigger,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for trigger,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for text search parser,{eins},{}: text search parser "eins" does not exist +WARNING: error for text search parser,{eins},{integer}: text search parser "eins" does not exist +WARNING: error for text search parser,{addr_nsp,zwei},{}: text search parser "addr_nsp.zwei" does not exist +WARNING: error for text search parser,{addr_nsp,zwei},{integer}: text search parser "addr_nsp.zwei" does not exist +WARNING: error for text search parser,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for text search parser,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for text search dictionary,{eins},{}: text search dictionary "eins" does not exist +WARNING: error for text search dictionary,{eins},{integer}: text search dictionary "eins" does not exist +WARNING: error for text search dictionary,{addr_nsp,zwei},{}: text search dictionary "addr_nsp.zwei" does not exist +WARNING: error for text search dictionary,{addr_nsp,zwei},{integer}: text search dictionary "addr_nsp.zwei" does not exist +WARNING: error for text search dictionary,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for text search dictionary,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for text search template,{eins},{}: text search template "eins" does not exist +WARNING: error for text search template,{eins},{integer}: text search template "eins" does not exist +WARNING: error for text search template,{addr_nsp,zwei},{}: text search template "addr_nsp.zwei" does not exist +WARNING: error for text search template,{addr_nsp,zwei},{integer}: text search template "addr_nsp.zwei" does not exist +WARNING: error for text search template,{eins,zwei,drei},{}: cross-database 
references are not implemented: eins.zwei.drei +WARNING: error for text search template,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for text search configuration,{eins},{}: text search configuration "eins" does not exist +WARNING: error for text search configuration,{eins},{integer}: text search configuration "eins" does not exist +WARNING: error for text search configuration,{addr_nsp,zwei},{}: text search configuration "addr_nsp.zwei" does not exist +WARNING: error for text search configuration,{addr_nsp,zwei},{integer}: text search configuration "addr_nsp.zwei" does not exist +WARNING: error for text search configuration,{eins,zwei,drei},{}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for text search configuration,{eins,zwei,drei},{integer}: cross-database references are not implemented: eins.zwei.drei +WARNING: error for policy,{eins},{}: must specify relation and object name +WARNING: error for policy,{eins},{integer}: must specify relation and object name +WARNING: error for policy,{addr_nsp,zwei},{}: relation "addr_nsp" does not exist +WARNING: error for policy,{addr_nsp,zwei},{integer}: relation "addr_nsp" does not exist +WARNING: error for policy,{eins,zwei,drei},{}: schema "eins" does not exist +WARNING: error for policy,{eins,zwei,drei},{integer}: schema "eins" does not exist +WARNING: error for user mapping,{eins},{}: argument list length must be exactly 1 +WARNING: error for user mapping,{eins},{integer}: user mapping for user "eins" on server "integer" does not exist +WARNING: error for user mapping,{addr_nsp,zwei},{}: name list length must be exactly 1 +WARNING: error for user mapping,{addr_nsp,zwei},{integer}: name list length must be exactly 1 +WARNING: error for user mapping,{eins,zwei,drei},{}: name list length must be exactly 1 +WARNING: error for user mapping,{eins,zwei,drei},{integer}: name list length must be exactly 1 +WARNING: error for default acl,{eins},{}: argument list length must be exactly 1 +WARNING: error for default acl,{eins},{integer}: unrecognized default ACL object type "i" +WARNING: error for default acl,{addr_nsp,zwei},{}: argument list length must be exactly 1 +WARNING: error for default acl,{addr_nsp,zwei},{integer}: unrecognized default ACL object type "i" +WARNING: error for default acl,{eins,zwei,drei},{}: argument list length must be exactly 1 +WARNING: error for default acl,{eins,zwei,drei},{integer}: unrecognized default ACL object type "i" +WARNING: error for transform,{eins},{}: argument list length must be exactly 1 +WARNING: error for transform,{eins},{integer}: type "eins" does not exist +WARNING: error for transform,{addr_nsp,zwei},{}: name list length must be exactly 1 +WARNING: error for transform,{addr_nsp,zwei},{integer}: name list length must be exactly 1 +WARNING: error for transform,{eins,zwei,drei},{}: name list length must be exactly 1 +WARNING: error for transform,{eins,zwei,drei},{integer}: name list length must be exactly 1 +WARNING: error for operator of access method,{eins},{}: name list length must be at least 3 +WARNING: error for operator of access method,{eins},{integer}: name list length must be at least 3 +WARNING: error for operator of access method,{addr_nsp,zwei},{}: name list length must be at least 3 +WARNING: error for operator of access method,{addr_nsp,zwei},{integer}: name list length must be at least 3 +WARNING: error for operator of access method,{eins,zwei,drei},{}: argument list length must be exactly 2 
+WARNING: error for operator of access method,{eins,zwei,drei},{integer}: argument list length must be exactly 2 +WARNING: error for function of access method,{eins},{}: name list length must be at least 3 +WARNING: error for function of access method,{eins},{integer}: name list length must be at least 3 +WARNING: error for function of access method,{addr_nsp,zwei},{}: name list length must be at least 3 +WARNING: error for function of access method,{addr_nsp,zwei},{integer}: name list length must be at least 3 +WARNING: error for function of access method,{eins,zwei,drei},{}: argument list length must be exactly 2 +WARNING: error for function of access method,{eins,zwei,drei},{integer}: argument list length must be exactly 2 +WARNING: error for publication namespace,{eins},{}: argument list length must be exactly 1 +WARNING: error for publication namespace,{eins},{integer}: schema "eins" does not exist +WARNING: error for publication namespace,{addr_nsp,zwei},{}: name list length must be exactly 1 +WARNING: error for publication namespace,{addr_nsp,zwei},{integer}: name list length must be exactly 1 +WARNING: error for publication namespace,{eins,zwei,drei},{}: name list length must be exactly 1 +WARNING: error for publication namespace,{eins,zwei,drei},{integer}: name list length must be exactly 1 +WARNING: error for publication relation,{eins},{}: argument list length must be exactly 1 +WARNING: error for publication relation,{eins},{integer}: relation "eins" does not exist +WARNING: error for publication relation,{addr_nsp,zwei},{}: argument list length must be exactly 1 +WARNING: error for publication relation,{addr_nsp,zwei},{integer}: relation "addr_nsp.zwei" does not exist +WARNING: error for publication relation,{eins,zwei,drei},{}: argument list length must be exactly 1 +WARNING: error for publication relation,{eins,zwei,drei},{integer}: cross-database references are not implemented: "eins.zwei.drei" +-- these object types cannot be qualified names +SELECT pg_get_object_address('language', '{one}', '{}'); +ERROR: language "one" does not exist +SELECT pg_get_object_address('language', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('large object', '{123}', '{}'); +ERROR: large object 123 does not exist +SELECT pg_get_object_address('large object', '{123,456}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('large object', '{blargh}', '{}'); +ERROR: invalid input syntax for type oid: "blargh" +SELECT pg_get_object_address('schema', '{one}', '{}'); +ERROR: schema "one" does not exist +SELECT pg_get_object_address('schema', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('role', '{one}', '{}'); +ERROR: role "one" does not exist +SELECT pg_get_object_address('role', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('database', '{one}', '{}'); +ERROR: database "one" does not exist +SELECT pg_get_object_address('database', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('tablespace', '{one}', '{}'); +ERROR: tablespace "one" does not exist +SELECT pg_get_object_address('tablespace', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('foreign-data wrapper', '{one}', '{}'); +ERROR: foreign-data wrapper "one" does not exist +SELECT pg_get_object_address('foreign-data wrapper', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 
+SELECT pg_get_object_address('server', '{one}', '{}'); +ERROR: server "one" does not exist +SELECT pg_get_object_address('server', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('extension', '{one}', '{}'); +ERROR: extension "one" does not exist +SELECT pg_get_object_address('extension', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('event trigger', '{one}', '{}'); +ERROR: event trigger "one" does not exist +SELECT pg_get_object_address('event trigger', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('access method', '{one}', '{}'); +ERROR: access method "one" does not exist +SELECT pg_get_object_address('access method', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('publication', '{one}', '{}'); +ERROR: publication "one" does not exist +SELECT pg_get_object_address('publication', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +SELECT pg_get_object_address('subscription', '{one}', '{}'); +ERROR: subscription "one" does not exist +SELECT pg_get_object_address('subscription', '{one,two}', '{}'); +ERROR: name list length must be exactly 1 +-- Make sure that NULL handling is correct. +\pset null 'NULL' +-- Temporarily disable fancy output, so as future additions never create +-- a large amount of diffs. +\a\t +-- test successful cases +WITH objects (type, name, args) AS (VALUES + ('table', '{addr_nsp, gentable}'::text[], '{}'::text[]), + ('table', '{addr_nsp, parttable}'::text[], '{}'::text[]), + ('index', '{addr_nsp, gentable_pkey}', '{}'), + ('index', '{addr_nsp, parttable_pkey}', '{}'), + ('sequence', '{addr_nsp, gentable_a_seq}', '{}'), + -- toast table + ('view', '{addr_nsp, genview}', '{}'), + ('materialized view', '{addr_nsp, genmatview}', '{}'), + ('foreign table', '{addr_nsp, genftable}', '{}'), + ('table column', '{addr_nsp, gentable, b}', '{}'), + ('foreign table column', '{addr_nsp, genftable, a}', '{}'), + ('aggregate', '{addr_nsp, genaggr}', '{int4}'), + ('function', '{pg_catalog, pg_identify_object}', '{pg_catalog.oid, pg_catalog.oid, int4}'), + ('procedure', '{addr_nsp, proc}', '{int4}'), + ('type', '{pg_catalog._int4}', '{}'), + ('type', '{addr_nsp.gendomain}', '{}'), + ('type', '{addr_nsp.gencomptype}', '{}'), + ('type', '{addr_nsp.genenum}', '{}'), + ('cast', '{int8}', '{int4}'), + ('collation', '{default}', '{}'), + ('table constraint', '{addr_nsp, gentable, a_chk}', '{}'), + ('domain constraint', '{addr_nsp.gendomain}', '{domconstr}'), + ('conversion', '{pg_catalog, koi8_r_to_mic}', '{}'), + ('default value', '{addr_nsp, gentable, b}', '{}'), + ('language', '{plpgsql}', '{}'), + -- large object + ('operator', '{+}', '{int4, int4}'), + ('operator class', '{btree, int4_ops}', '{}'), + ('operator family', '{btree, integer_ops}', '{}'), + ('operator of access method', '{btree,integer_ops,1}', '{integer,integer}'), + ('function of access method', '{btree,integer_ops,2}', '{integer,integer}'), + ('rule', '{addr_nsp, genview, _RETURN}', '{}'), + ('trigger', '{addr_nsp, gentable, t}', '{}'), + ('schema', '{addr_nsp}', '{}'), + ('text search parser', '{addr_ts_prs}', '{}'), + ('text search dictionary', '{addr_ts_dict}', '{}'), + ('text search template', '{addr_ts_temp}', '{}'), + ('text search configuration', '{addr_ts_conf}', '{}'), + ('role', '{regress_addr_user}', '{}'), + -- database + -- tablespace + ('foreign-data wrapper', '{addr_fdw}', '{}'), + ('server', '{addr_fserv}', 
'{}'), + ('user mapping', '{regress_addr_user}', '{integer}'), + ('default acl', '{regress_addr_user,public}', '{r}'), + ('default acl', '{regress_addr_user}', '{r}'), + -- extension + -- event trigger + ('policy', '{addr_nsp, gentable, genpol}', '{}'), + ('transform', '{int}', '{sql}'), + ('access method', '{btree}', '{}'), + ('publication', '{addr_pub}', '{}'), + ('publication namespace', '{addr_nsp}', '{addr_pub_schema}'), + ('publication relation', '{addr_nsp, gentable}', '{addr_pub}'), + ('subscription', '{regress_addr_sub}', '{}'), + ('statistics object', '{addr_nsp, gentable_stat}', '{}') + ) +SELECT (pg_identify_object(addr1.classid, addr1.objid, addr1.objsubid)).*, + -- test roundtrip through pg_identify_object_as_address + ROW(pg_identify_object(addr1.classid, addr1.objid, addr1.objsubid)) = + ROW(pg_identify_object(addr2.classid, addr2.objid, addr2.objsubid)) AS roundtrip +FROM objects, + pg_get_object_address(type, name, args) AS addr1, + pg_identify_object_as_address(classid, objid, objsubid) AS ioa (typ, nms, args), + pg_get_object_address(typ, nms, ioa.args) AS addr2 +ORDER BY addr1.classid, addr1.objid, addr1.objsubid; +default acl|NULL|NULL|for role regress_addr_user in schema public on tables|t +default acl|NULL|NULL|for role regress_addr_user on tables|t +type|pg_catalog|_int4|integer[]|t +type|addr_nsp|gencomptype|addr_nsp.gencomptype|t +type|addr_nsp|genenum|addr_nsp.genenum|t +type|addr_nsp|gendomain|addr_nsp.gendomain|t +function|pg_catalog|NULL|pg_catalog.pg_identify_object(pg_catalog.oid,pg_catalog.oid,integer)|t +aggregate|addr_nsp|NULL|addr_nsp.genaggr(integer)|t +procedure|addr_nsp|NULL|addr_nsp.proc(integer)|t +sequence|addr_nsp|gentable_a_seq|addr_nsp.gentable_a_seq|t +table|addr_nsp|gentable|addr_nsp.gentable|t +table column|addr_nsp|gentable|addr_nsp.gentable.b|t +index|addr_nsp|gentable_pkey|addr_nsp.gentable_pkey|t +table|addr_nsp|parttable|addr_nsp.parttable|t +index|addr_nsp|parttable_pkey|addr_nsp.parttable_pkey|t +view|addr_nsp|genview|addr_nsp.genview|t +materialized view|addr_nsp|genmatview|addr_nsp.genmatview|t +foreign table|addr_nsp|genftable|addr_nsp.genftable|t +foreign table column|addr_nsp|genftable|addr_nsp.genftable.a|t +role|NULL|regress_addr_user|regress_addr_user|t +server|NULL|addr_fserv|addr_fserv|t +user mapping|NULL|NULL|regress_addr_user on server integer|t +foreign-data wrapper|NULL|addr_fdw|addr_fdw|t +access method|NULL|btree|btree|t +operator of access method|NULL|NULL|operator 1 (integer, integer) of pg_catalog.integer_ops USING btree|t +function of access method|NULL|NULL|function 2 (integer, integer) of pg_catalog.integer_ops USING btree|t +default value|NULL|NULL|for addr_nsp.gentable.b|t +cast|NULL|NULL|(bigint AS integer)|t +table constraint|addr_nsp|NULL|a_chk on addr_nsp.gentable|t +domain constraint|addr_nsp|NULL|domconstr on addr_nsp.gendomain|t +conversion|pg_catalog|koi8_r_to_mic|pg_catalog.koi8_r_to_mic|t +language|NULL|plpgsql|plpgsql|t +schema|NULL|addr_nsp|addr_nsp|t +operator class|pg_catalog|int4_ops|pg_catalog.int4_ops USING btree|t +operator|pg_catalog|NULL|pg_catalog.+(integer,integer)|t +rule|NULL|NULL|"_RETURN" on addr_nsp.genview|t +trigger|NULL|NULL|t on addr_nsp.gentable|t +operator family|pg_catalog|integer_ops|pg_catalog.integer_ops USING btree|t +policy|NULL|NULL|genpol on addr_nsp.gentable|t +statistics object|addr_nsp|gentable_stat|addr_nsp.gentable_stat|t +collation|pg_catalog|"default"|pg_catalog."default"|t +transform|NULL|NULL|for integer language sql|t +text search 
dictionary|addr_nsp|addr_ts_dict|addr_nsp.addr_ts_dict|t +text search parser|addr_nsp|addr_ts_prs|addr_nsp.addr_ts_prs|t +text search configuration|addr_nsp|addr_ts_conf|addr_nsp.addr_ts_conf|t +text search template|addr_nsp|addr_ts_temp|addr_nsp.addr_ts_temp|t +subscription|NULL|regress_addr_sub|regress_addr_sub|t +publication|NULL|addr_pub|addr_pub|t +publication relation|NULL|NULL|addr_nsp.gentable in publication addr_pub|t +publication namespace|NULL|NULL|addr_nsp in publication addr_pub_schema|t +--- +--- Cleanup resources +--- +DROP FOREIGN DATA WRAPPER addr_fdw CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to server addr_fserv +drop cascades to foreign table genftable +drop cascades to server integer +drop cascades to user mapping for regress_addr_user on server integer +DROP PUBLICATION addr_pub; +DROP PUBLICATION addr_pub_schema; +DROP SUBSCRIPTION regress_addr_sub; +DROP SCHEMA addr_nsp CASCADE; +NOTICE: drop cascades to 14 other objects +DETAIL: drop cascades to text search dictionary addr_ts_dict +drop cascades to text search configuration addr_ts_conf +drop cascades to text search template addr_ts_temp +drop cascades to text search parser addr_ts_prs +drop cascades to table gentable +drop cascades to table parttable +drop cascades to view genview +drop cascades to materialized view genmatview +drop cascades to type gencomptype +drop cascades to type genenum +drop cascades to function genaggr(integer) +drop cascades to type gendomain +drop cascades to function trig() +drop cascades to function proc(integer) +DROP OWNED BY regress_addr_user; +DROP USER regress_addr_user; +-- +-- Checks for invalid objects +-- +-- Keep this list in the same order as getObjectIdentityParts() +-- in objectaddress.c. +WITH objects (classid, objid, objsubid) AS (VALUES + ('pg_class'::regclass, 0, 0), -- no relation + ('pg_class'::regclass, 'pg_class'::regclass, 100), -- no column for relation + ('pg_proc'::regclass, 0, 0), -- no function + ('pg_type'::regclass, 0, 0), -- no type + ('pg_cast'::regclass, 0, 0), -- no cast + ('pg_collation'::regclass, 0, 0), -- no collation + ('pg_constraint'::regclass, 0, 0), -- no constraint + ('pg_conversion'::regclass, 0, 0), -- no conversion + ('pg_attrdef'::regclass, 0, 0), -- no default attribute + ('pg_language'::regclass, 0, 0), -- no language + ('pg_largeobject'::regclass, 0, 0), -- no large object, no error + ('pg_operator'::regclass, 0, 0), -- no operator + ('pg_opclass'::regclass, 0, 0), -- no opclass, no need to check for no access method + ('pg_opfamily'::regclass, 0, 0), -- no opfamily + ('pg_am'::regclass, 0, 0), -- no access method + ('pg_amop'::regclass, 0, 0), -- no AM operator + ('pg_amproc'::regclass, 0, 0), -- no AM proc + ('pg_rewrite'::regclass, 0, 0), -- no rewrite + ('pg_trigger'::regclass, 0, 0), -- no trigger + ('pg_namespace'::regclass, 0, 0), -- no schema + ('pg_statistic_ext'::regclass, 0, 0), -- no statistics + ('pg_ts_parser'::regclass, 0, 0), -- no TS parser + ('pg_ts_dict'::regclass, 0, 0), -- no TS dictionary + ('pg_ts_template'::regclass, 0, 0), -- no TS template + ('pg_ts_config'::regclass, 0, 0), -- no TS configuration + ('pg_authid'::regclass, 0, 0), -- no role + ('pg_auth_members'::regclass, 0, 0), -- no role membership + ('pg_database'::regclass, 0, 0), -- no database + ('pg_tablespace'::regclass, 0, 0), -- no tablespace + ('pg_foreign_data_wrapper'::regclass, 0, 0), -- no FDW + ('pg_foreign_server'::regclass, 0, 0), -- no server + ('pg_user_mapping'::regclass, 0, 0), -- no user mapping + 
('pg_default_acl'::regclass, 0, 0), -- no default ACL + ('pg_extension'::regclass, 0, 0), -- no extension + ('pg_event_trigger'::regclass, 0, 0), -- no event trigger + ('pg_parameter_acl'::regclass, 0, 0), -- no parameter ACL + ('pg_policy'::regclass, 0, 0), -- no policy + ('pg_publication'::regclass, 0, 0), -- no publication + ('pg_publication_namespace'::regclass, 0, 0), -- no publication namespace + ('pg_publication_rel'::regclass, 0, 0), -- no publication relation + ('pg_subscription'::regclass, 0, 0), -- no subscription + ('pg_transform'::regclass, 0, 0) -- no transformation + ) +SELECT ROW(pg_identify_object(objects.classid, objects.objid, objects.objsubid)) + AS ident, + ROW(pg_identify_object_as_address(objects.classid, objects.objid, objects.objsubid)) + AS addr, + pg_describe_object(objects.classid, objects.objid, objects.objsubid) + AS descr +FROM objects +ORDER BY objects.classid, objects.objid, objects.objsubid; +("(""default acl"",,,)")|("(""default acl"",,)")|NULL +("(tablespace,,,)")|("(tablespace,,)")|NULL +("(type,,,)")|("(type,,)")|NULL +("(routine,,,)")|("(routine,,)")|NULL +("(relation,,,)")|("(relation,,)")|NULL +("(""table column"",,,)")|("(""table column"",,)")|NULL +("(role,,,)")|("(role,,)")|NULL +("(""role membership"",,,)")|("(""role membership"",,)")|NULL +("(database,,,)")|("(database,,)")|NULL +("(server,,,)")|("(server,,)")|NULL +("(""user mapping"",,,)")|("(""user mapping"",,)")|NULL +("(""foreign-data wrapper"",,,)")|("(""foreign-data wrapper"",,)")|NULL +("(""access method"",,,)")|("(""access method"",,)")|NULL +("(""operator of access method"",,,)")|("(""operator of access method"",,)")|NULL +("(""function of access method"",,,)")|("(""function of access method"",,)")|NULL +("(""default value"",,,)")|("(""default value"",,)")|NULL +("(cast,,,)")|("(cast,,)")|NULL +("(constraint,,,)")|("(constraint,,)")|NULL +("(conversion,,,)")|("(conversion,,)")|NULL +("(language,,,)")|("(language,,)")|NULL +("(""large object"",,,)")|("(""large object"",,)")|NULL +("(schema,,,)")|("(schema,,)")|NULL +("(""operator class"",,,)")|("(""operator class"",,)")|NULL +("(operator,,,)")|("(operator,,)")|NULL +("(rule,,,)")|("(rule,,)")|NULL +("(trigger,,,)")|("(trigger,,)")|NULL +("(""operator family"",,,)")|("(""operator family"",,)")|NULL +("(extension,,,)")|("(extension,,)")|NULL +("(policy,,,)")|("(policy,,)")|NULL +("(""statistics object"",,,)")|("(""statistics object"",,)")|NULL +("(collation,,,)")|("(collation,,)")|NULL +("(""event trigger"",,,)")|("(""event trigger"",,)")|NULL +("(transform,,,)")|("(transform,,)")|NULL +("(""text search dictionary"",,,)")|("(""text search dictionary"",,)")|NULL +("(""text search parser"",,,)")|("(""text search parser"",,)")|NULL +("(""text search configuration"",,,)")|("(""text search configuration"",,)")|NULL +("(""text search template"",,,)")|("(""text search template"",,)")|NULL +("(subscription,,,)")|("(subscription,,)")|NULL +("(publication,,,)")|("(publication,,)")|NULL +("(""publication relation"",,,)")|("(""publication relation"",,)")|NULL +("(""publication namespace"",,,)")|("(""publication namespace"",,)")|NULL +("(""parameter ACL"",,,)")|("(""parameter ACL"",,)")|NULL +-- restore normal output mode +\a\t diff --git a/src/test/regress/expected/oid.out b/src/test/regress/expected/oid.out new file mode 100644 index 0000000..b80cb47 --- /dev/null +++ b/src/test/regress/expected/oid.out @@ -0,0 +1,184 @@ +-- +-- OID +-- +CREATE TABLE OID_TBL(f1 oid); +INSERT INTO OID_TBL(f1) VALUES ('1234'); +INSERT INTO OID_TBL(f1) VALUES 
('1235'); +INSERT INTO OID_TBL(f1) VALUES ('987'); +INSERT INTO OID_TBL(f1) VALUES ('-1040'); +INSERT INTO OID_TBL(f1) VALUES ('99999999'); +INSERT INTO OID_TBL(f1) VALUES ('5 '); +INSERT INTO OID_TBL(f1) VALUES (' 10 '); +-- leading/trailing hard tab is also allowed +INSERT INTO OID_TBL(f1) VALUES (' 15 '); +-- bad inputs +INSERT INTO OID_TBL(f1) VALUES (''); +ERROR: invalid input syntax for type oid: "" +LINE 1: INSERT INTO OID_TBL(f1) VALUES (''); + ^ +INSERT INTO OID_TBL(f1) VALUES (' '); +ERROR: invalid input syntax for type oid: " " +LINE 1: INSERT INTO OID_TBL(f1) VALUES (' '); + ^ +INSERT INTO OID_TBL(f1) VALUES ('asdfasd'); +ERROR: invalid input syntax for type oid: "asdfasd" +LINE 1: INSERT INTO OID_TBL(f1) VALUES ('asdfasd'); + ^ +INSERT INTO OID_TBL(f1) VALUES ('99asdfasd'); +ERROR: invalid input syntax for type oid: "99asdfasd" +LINE 1: INSERT INTO OID_TBL(f1) VALUES ('99asdfasd'); + ^ +INSERT INTO OID_TBL(f1) VALUES ('5 d'); +ERROR: invalid input syntax for type oid: "5 d" +LINE 1: INSERT INTO OID_TBL(f1) VALUES ('5 d'); + ^ +INSERT INTO OID_TBL(f1) VALUES (' 5d'); +ERROR: invalid input syntax for type oid: " 5d" +LINE 1: INSERT INTO OID_TBL(f1) VALUES (' 5d'); + ^ +INSERT INTO OID_TBL(f1) VALUES ('5 5'); +ERROR: invalid input syntax for type oid: "5 5" +LINE 1: INSERT INTO OID_TBL(f1) VALUES ('5 5'); + ^ +INSERT INTO OID_TBL(f1) VALUES (' - 500'); +ERROR: invalid input syntax for type oid: " - 500" +LINE 1: INSERT INTO OID_TBL(f1) VALUES (' - 500'); + ^ +INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935'); +ERROR: value "32958209582039852935" is out of range for type oid +LINE 1: INSERT INTO OID_TBL(f1) VALUES ('32958209582039852935'); + ^ +INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385'); +ERROR: value "-23582358720398502385" is out of range for type oid +LINE 1: INSERT INTO OID_TBL(f1) VALUES ('-23582358720398502385'); + ^ +SELECT * FROM OID_TBL; + f1 +------------ + 1234 + 1235 + 987 + 4294966256 + 99999999 + 5 + 10 + 15 +(8 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('1234', 'oid'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('01XYZ', 'oid'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('01XYZ', 'oid'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + invalid input syntax for type oid: "01XYZ" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('9999999999', 'oid'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('9999999999', 'oid'); + message | detail | hint | sql_error_code +-------------------------------------------------+--------+------+---------------- + value "9999999999" is out of range for type oid | | | 22003 +(1 row) + +-- While we're here, check oidvector as well +SELECT pg_input_is_valid(' 1 2 4 ', 'oidvector'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('01 01XYZ', 'oidvector'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('01 01XYZ', 'oidvector'); + message | detail | hint | sql_error_code +------------------------------------------+--------+------+---------------- + invalid input syntax for type oid: "XYZ" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('01 9999999999', 'oidvector'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('01 9999999999', 'oidvector'); + message | detail | hint 
| sql_error_code +-------------------------------------------------+--------+------+---------------- + value "9999999999" is out of range for type oid | | | 22003 +(1 row) + +SELECT o.* FROM OID_TBL o WHERE o.f1 = 1234; + f1 +------ + 1234 +(1 row) + +SELECT o.* FROM OID_TBL o WHERE o.f1 <> '1234'; + f1 +------------ + 1235 + 987 + 4294966256 + 99999999 + 5 + 10 + 15 +(7 rows) + +SELECT o.* FROM OID_TBL o WHERE o.f1 <= '1234'; + f1 +------ + 1234 + 987 + 5 + 10 + 15 +(5 rows) + +SELECT o.* FROM OID_TBL o WHERE o.f1 < '1234'; + f1 +----- + 987 + 5 + 10 + 15 +(4 rows) + +SELECT o.* FROM OID_TBL o WHERE o.f1 >= '1234'; + f1 +------------ + 1234 + 1235 + 4294966256 + 99999999 +(4 rows) + +SELECT o.* FROM OID_TBL o WHERE o.f1 > '1234'; + f1 +------------ + 1235 + 4294966256 + 99999999 +(3 rows) + +DROP TABLE OID_TBL; diff --git a/src/test/regress/expected/oidjoins.out b/src/test/regress/expected/oidjoins.out new file mode 100644 index 0000000..215eb89 --- /dev/null +++ b/src/test/regress/expected/oidjoins.out @@ -0,0 +1,268 @@ +-- +-- Verify system catalog foreign key relationships +-- +DO $doblock$ +declare + fk record; + nkeys integer; + cmd text; + err record; +begin + for fk in select * from pg_get_catalog_foreign_keys() + loop + raise notice 'checking % % => % %', + fk.fktable, fk.fkcols, fk.pktable, fk.pkcols; + nkeys := array_length(fk.fkcols, 1); + cmd := 'SELECT ctid'; + for i in 1 .. nkeys loop + cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); + end loop; + if fk.is_array then + cmd := cmd || ' FROM (SELECT ctid'; + for i in 1 .. nkeys-1 loop + cmd := cmd || ', ' || quote_ident(fk.fkcols[i]); + end loop; + cmd := cmd || ', unnest(' || quote_ident(fk.fkcols[nkeys]); + cmd := cmd || ') as ' || quote_ident(fk.fkcols[nkeys]); + cmd := cmd || ' FROM ' || fk.fktable::text || ') fk WHERE '; + else + cmd := cmd || ' FROM ' || fk.fktable::text || ' fk WHERE '; + end if; + if fk.is_opt then + for i in 1 .. nkeys loop + cmd := cmd || quote_ident(fk.fkcols[i]) || ' != 0 AND '; + end loop; + end if; + cmd := cmd || 'NOT EXISTS(SELECT 1 FROM ' || fk.pktable::text || ' pk WHERE '; + for i in 1 .. nkeys loop + if i > 1 then cmd := cmd || ' AND '; end if; + cmd := cmd || 'pk.' || quote_ident(fk.pkcols[i]); + cmd := cmd || ' = fk.' 
|| quote_ident(fk.fkcols[i]); + end loop; + cmd := cmd || ')'; + -- raise notice 'cmd = %', cmd; + for err in execute cmd loop + raise warning 'FK VIOLATION IN %(%): %', fk.fktable, fk.fkcols, err; + end loop; + end loop; +end +$doblock$; +NOTICE: checking pg_proc {pronamespace} => pg_namespace {oid} +NOTICE: checking pg_proc {proowner} => pg_authid {oid} +NOTICE: checking pg_proc {prolang} => pg_language {oid} +NOTICE: checking pg_proc {provariadic} => pg_type {oid} +NOTICE: checking pg_proc {prosupport} => pg_proc {oid} +NOTICE: checking pg_proc {prorettype} => pg_type {oid} +NOTICE: checking pg_proc {proargtypes} => pg_type {oid} +NOTICE: checking pg_proc {proallargtypes} => pg_type {oid} +NOTICE: checking pg_proc {protrftypes} => pg_type {oid} +NOTICE: checking pg_type {typnamespace} => pg_namespace {oid} +NOTICE: checking pg_type {typowner} => pg_authid {oid} +NOTICE: checking pg_type {typrelid} => pg_class {oid} +NOTICE: checking pg_type {typsubscript} => pg_proc {oid} +NOTICE: checking pg_type {typelem} => pg_type {oid} +NOTICE: checking pg_type {typarray} => pg_type {oid} +NOTICE: checking pg_type {typinput} => pg_proc {oid} +NOTICE: checking pg_type {typoutput} => pg_proc {oid} +NOTICE: checking pg_type {typreceive} => pg_proc {oid} +NOTICE: checking pg_type {typsend} => pg_proc {oid} +NOTICE: checking pg_type {typmodin} => pg_proc {oid} +NOTICE: checking pg_type {typmodout} => pg_proc {oid} +NOTICE: checking pg_type {typanalyze} => pg_proc {oid} +NOTICE: checking pg_type {typbasetype} => pg_type {oid} +NOTICE: checking pg_type {typcollation} => pg_collation {oid} +NOTICE: checking pg_attribute {attrelid} => pg_class {oid} +NOTICE: checking pg_attribute {atttypid} => pg_type {oid} +NOTICE: checking pg_attribute {attcollation} => pg_collation {oid} +NOTICE: checking pg_class {relnamespace} => pg_namespace {oid} +NOTICE: checking pg_class {reltype} => pg_type {oid} +NOTICE: checking pg_class {reloftype} => pg_type {oid} +NOTICE: checking pg_class {relowner} => pg_authid {oid} +NOTICE: checking pg_class {relam} => pg_am {oid} +NOTICE: checking pg_class {reltablespace} => pg_tablespace {oid} +NOTICE: checking pg_class {reltoastrelid} => pg_class {oid} +NOTICE: checking pg_class {relrewrite} => pg_class {oid} +NOTICE: checking pg_attrdef {adrelid} => pg_class {oid} +NOTICE: checking pg_attrdef {adrelid,adnum} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_constraint {connamespace} => pg_namespace {oid} +NOTICE: checking pg_constraint {conrelid} => pg_class {oid} +NOTICE: checking pg_constraint {contypid} => pg_type {oid} +NOTICE: checking pg_constraint {conindid} => pg_class {oid} +NOTICE: checking pg_constraint {conparentid} => pg_constraint {oid} +NOTICE: checking pg_constraint {confrelid} => pg_class {oid} +NOTICE: checking pg_constraint {conpfeqop} => pg_operator {oid} +NOTICE: checking pg_constraint {conppeqop} => pg_operator {oid} +NOTICE: checking pg_constraint {conffeqop} => pg_operator {oid} +NOTICE: checking pg_constraint {conexclop} => pg_operator {oid} +NOTICE: checking pg_constraint {conrelid,conkey} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_constraint {confrelid,confkey} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_inherits {inhrelid} => pg_class {oid} +NOTICE: checking pg_inherits {inhparent} => pg_class {oid} +NOTICE: checking pg_index {indexrelid} => pg_class {oid} +NOTICE: checking pg_index {indrelid} => pg_class {oid} +NOTICE: checking pg_index {indcollation} => pg_collation {oid} +NOTICE: checking pg_index {indclass} => pg_opclass 
{oid} +NOTICE: checking pg_index {indrelid,indkey} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_operator {oprnamespace} => pg_namespace {oid} +NOTICE: checking pg_operator {oprowner} => pg_authid {oid} +NOTICE: checking pg_operator {oprleft} => pg_type {oid} +NOTICE: checking pg_operator {oprright} => pg_type {oid} +NOTICE: checking pg_operator {oprresult} => pg_type {oid} +NOTICE: checking pg_operator {oprcom} => pg_operator {oid} +NOTICE: checking pg_operator {oprnegate} => pg_operator {oid} +NOTICE: checking pg_operator {oprcode} => pg_proc {oid} +NOTICE: checking pg_operator {oprrest} => pg_proc {oid} +NOTICE: checking pg_operator {oprjoin} => pg_proc {oid} +NOTICE: checking pg_opfamily {opfmethod} => pg_am {oid} +NOTICE: checking pg_opfamily {opfnamespace} => pg_namespace {oid} +NOTICE: checking pg_opfamily {opfowner} => pg_authid {oid} +NOTICE: checking pg_opclass {opcmethod} => pg_am {oid} +NOTICE: checking pg_opclass {opcnamespace} => pg_namespace {oid} +NOTICE: checking pg_opclass {opcowner} => pg_authid {oid} +NOTICE: checking pg_opclass {opcfamily} => pg_opfamily {oid} +NOTICE: checking pg_opclass {opcintype} => pg_type {oid} +NOTICE: checking pg_opclass {opckeytype} => pg_type {oid} +NOTICE: checking pg_am {amhandler} => pg_proc {oid} +NOTICE: checking pg_amop {amopfamily} => pg_opfamily {oid} +NOTICE: checking pg_amop {amoplefttype} => pg_type {oid} +NOTICE: checking pg_amop {amoprighttype} => pg_type {oid} +NOTICE: checking pg_amop {amopopr} => pg_operator {oid} +NOTICE: checking pg_amop {amopmethod} => pg_am {oid} +NOTICE: checking pg_amop {amopsortfamily} => pg_opfamily {oid} +NOTICE: checking pg_amproc {amprocfamily} => pg_opfamily {oid} +NOTICE: checking pg_amproc {amproclefttype} => pg_type {oid} +NOTICE: checking pg_amproc {amprocrighttype} => pg_type {oid} +NOTICE: checking pg_amproc {amproc} => pg_proc {oid} +NOTICE: checking pg_language {lanowner} => pg_authid {oid} +NOTICE: checking pg_language {lanplcallfoid} => pg_proc {oid} +NOTICE: checking pg_language {laninline} => pg_proc {oid} +NOTICE: checking pg_language {lanvalidator} => pg_proc {oid} +NOTICE: checking pg_largeobject_metadata {lomowner} => pg_authid {oid} +NOTICE: checking pg_largeobject {loid} => pg_largeobject_metadata {oid} +NOTICE: checking pg_aggregate {aggfnoid} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggtransfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggfinalfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggcombinefn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggserialfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggdeserialfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggmtransfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggminvtransfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggmfinalfn} => pg_proc {oid} +NOTICE: checking pg_aggregate {aggsortop} => pg_operator {oid} +NOTICE: checking pg_aggregate {aggtranstype} => pg_type {oid} +NOTICE: checking pg_aggregate {aggmtranstype} => pg_type {oid} +NOTICE: checking pg_statistic {starelid} => pg_class {oid} +NOTICE: checking pg_statistic {staop1} => pg_operator {oid} +NOTICE: checking pg_statistic {staop2} => pg_operator {oid} +NOTICE: checking pg_statistic {staop3} => pg_operator {oid} +NOTICE: checking pg_statistic {staop4} => pg_operator {oid} +NOTICE: checking pg_statistic {staop5} => pg_operator {oid} +NOTICE: checking pg_statistic {stacoll1} => pg_collation {oid} +NOTICE: checking pg_statistic {stacoll2} => pg_collation {oid} +NOTICE: checking pg_statistic {stacoll3} => 
pg_collation {oid} +NOTICE: checking pg_statistic {stacoll4} => pg_collation {oid} +NOTICE: checking pg_statistic {stacoll5} => pg_collation {oid} +NOTICE: checking pg_statistic {starelid,staattnum} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_statistic_ext {stxrelid} => pg_class {oid} +NOTICE: checking pg_statistic_ext {stxnamespace} => pg_namespace {oid} +NOTICE: checking pg_statistic_ext {stxowner} => pg_authid {oid} +NOTICE: checking pg_statistic_ext {stxrelid,stxkeys} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_statistic_ext_data {stxoid} => pg_statistic_ext {oid} +NOTICE: checking pg_rewrite {ev_class} => pg_class {oid} +NOTICE: checking pg_trigger {tgrelid} => pg_class {oid} +NOTICE: checking pg_trigger {tgparentid} => pg_trigger {oid} +NOTICE: checking pg_trigger {tgfoid} => pg_proc {oid} +NOTICE: checking pg_trigger {tgconstrrelid} => pg_class {oid} +NOTICE: checking pg_trigger {tgconstrindid} => pg_class {oid} +NOTICE: checking pg_trigger {tgconstraint} => pg_constraint {oid} +NOTICE: checking pg_trigger {tgrelid,tgattr} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_event_trigger {evtowner} => pg_authid {oid} +NOTICE: checking pg_event_trigger {evtfoid} => pg_proc {oid} +NOTICE: checking pg_description {classoid} => pg_class {oid} +NOTICE: checking pg_cast {castsource} => pg_type {oid} +NOTICE: checking pg_cast {casttarget} => pg_type {oid} +NOTICE: checking pg_cast {castfunc} => pg_proc {oid} +NOTICE: checking pg_enum {enumtypid} => pg_type {oid} +NOTICE: checking pg_namespace {nspowner} => pg_authid {oid} +NOTICE: checking pg_conversion {connamespace} => pg_namespace {oid} +NOTICE: checking pg_conversion {conowner} => pg_authid {oid} +NOTICE: checking pg_conversion {conproc} => pg_proc {oid} +NOTICE: checking pg_depend {classid} => pg_class {oid} +NOTICE: checking pg_depend {refclassid} => pg_class {oid} +NOTICE: checking pg_database {datdba} => pg_authid {oid} +NOTICE: checking pg_database {dattablespace} => pg_tablespace {oid} +NOTICE: checking pg_db_role_setting {setdatabase} => pg_database {oid} +NOTICE: checking pg_db_role_setting {setrole} => pg_authid {oid} +NOTICE: checking pg_tablespace {spcowner} => pg_authid {oid} +NOTICE: checking pg_auth_members {roleid} => pg_authid {oid} +NOTICE: checking pg_auth_members {member} => pg_authid {oid} +NOTICE: checking pg_auth_members {grantor} => pg_authid {oid} +NOTICE: checking pg_shdepend {dbid} => pg_database {oid} +NOTICE: checking pg_shdepend {classid} => pg_class {oid} +NOTICE: checking pg_shdepend {refclassid} => pg_class {oid} +NOTICE: checking pg_shdescription {classoid} => pg_class {oid} +NOTICE: checking pg_ts_config {cfgnamespace} => pg_namespace {oid} +NOTICE: checking pg_ts_config {cfgowner} => pg_authid {oid} +NOTICE: checking pg_ts_config {cfgparser} => pg_ts_parser {oid} +NOTICE: checking pg_ts_config_map {mapcfg} => pg_ts_config {oid} +NOTICE: checking pg_ts_config_map {mapdict} => pg_ts_dict {oid} +NOTICE: checking pg_ts_dict {dictnamespace} => pg_namespace {oid} +NOTICE: checking pg_ts_dict {dictowner} => pg_authid {oid} +NOTICE: checking pg_ts_dict {dicttemplate} => pg_ts_template {oid} +NOTICE: checking pg_ts_parser {prsnamespace} => pg_namespace {oid} +NOTICE: checking pg_ts_parser {prsstart} => pg_proc {oid} +NOTICE: checking pg_ts_parser {prstoken} => pg_proc {oid} +NOTICE: checking pg_ts_parser {prsend} => pg_proc {oid} +NOTICE: checking pg_ts_parser {prsheadline} => pg_proc {oid} +NOTICE: checking pg_ts_parser {prslextype} => pg_proc {oid} +NOTICE: checking pg_ts_template 
{tmplnamespace} => pg_namespace {oid} +NOTICE: checking pg_ts_template {tmplinit} => pg_proc {oid} +NOTICE: checking pg_ts_template {tmpllexize} => pg_proc {oid} +NOTICE: checking pg_extension {extowner} => pg_authid {oid} +NOTICE: checking pg_extension {extnamespace} => pg_namespace {oid} +NOTICE: checking pg_extension {extconfig} => pg_class {oid} +NOTICE: checking pg_foreign_data_wrapper {fdwowner} => pg_authid {oid} +NOTICE: checking pg_foreign_data_wrapper {fdwhandler} => pg_proc {oid} +NOTICE: checking pg_foreign_data_wrapper {fdwvalidator} => pg_proc {oid} +NOTICE: checking pg_foreign_server {srvowner} => pg_authid {oid} +NOTICE: checking pg_foreign_server {srvfdw} => pg_foreign_data_wrapper {oid} +NOTICE: checking pg_user_mapping {umuser} => pg_authid {oid} +NOTICE: checking pg_user_mapping {umserver} => pg_foreign_server {oid} +NOTICE: checking pg_foreign_table {ftrelid} => pg_class {oid} +NOTICE: checking pg_foreign_table {ftserver} => pg_foreign_server {oid} +NOTICE: checking pg_policy {polrelid} => pg_class {oid} +NOTICE: checking pg_policy {polroles} => pg_authid {oid} +NOTICE: checking pg_default_acl {defaclrole} => pg_authid {oid} +NOTICE: checking pg_default_acl {defaclnamespace} => pg_namespace {oid} +NOTICE: checking pg_init_privs {classoid} => pg_class {oid} +NOTICE: checking pg_seclabel {classoid} => pg_class {oid} +NOTICE: checking pg_shseclabel {classoid} => pg_class {oid} +NOTICE: checking pg_collation {collnamespace} => pg_namespace {oid} +NOTICE: checking pg_collation {collowner} => pg_authid {oid} +NOTICE: checking pg_partitioned_table {partrelid} => pg_class {oid} +NOTICE: checking pg_partitioned_table {partdefid} => pg_class {oid} +NOTICE: checking pg_partitioned_table {partclass} => pg_opclass {oid} +NOTICE: checking pg_partitioned_table {partcollation} => pg_collation {oid} +NOTICE: checking pg_partitioned_table {partrelid,partattrs} => pg_attribute {attrelid,attnum} +NOTICE: checking pg_range {rngtypid} => pg_type {oid} +NOTICE: checking pg_range {rngsubtype} => pg_type {oid} +NOTICE: checking pg_range {rngmultitypid} => pg_type {oid} +NOTICE: checking pg_range {rngcollation} => pg_collation {oid} +NOTICE: checking pg_range {rngsubopc} => pg_opclass {oid} +NOTICE: checking pg_range {rngcanonical} => pg_proc {oid} +NOTICE: checking pg_range {rngsubdiff} => pg_proc {oid} +NOTICE: checking pg_transform {trftype} => pg_type {oid} +NOTICE: checking pg_transform {trflang} => pg_language {oid} +NOTICE: checking pg_transform {trffromsql} => pg_proc {oid} +NOTICE: checking pg_transform {trftosql} => pg_proc {oid} +NOTICE: checking pg_sequence {seqrelid} => pg_class {oid} +NOTICE: checking pg_sequence {seqtypid} => pg_type {oid} +NOTICE: checking pg_publication {pubowner} => pg_authid {oid} +NOTICE: checking pg_publication_namespace {pnpubid} => pg_publication {oid} +NOTICE: checking pg_publication_namespace {pnnspid} => pg_namespace {oid} +NOTICE: checking pg_publication_rel {prpubid} => pg_publication {oid} +NOTICE: checking pg_publication_rel {prrelid} => pg_class {oid} +NOTICE: checking pg_subscription {subdbid} => pg_database {oid} +NOTICE: checking pg_subscription {subowner} => pg_authid {oid} +NOTICE: checking pg_subscription_rel {srsubid} => pg_subscription {oid} +NOTICE: checking pg_subscription_rel {srrelid} => pg_class {oid} diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out new file mode 100644 index 0000000..7610b01 --- /dev/null +++ b/src/test/regress/expected/opr_sanity.out @@ -0,0 +1,2301 @@ +-- +-- 
OPR_SANITY +-- Sanity checks for common errors in making operator/procedure system tables: +-- pg_operator, pg_proc, pg_cast, pg_conversion, pg_aggregate, pg_am, +-- pg_amop, pg_amproc, pg_opclass, pg_opfamily, pg_index. +-- +-- Every test failure in this file should be closely inspected. +-- The description of the failing test should be read carefully before +-- adjusting the expected output. In most cases, the queries should +-- not find *any* matching entries. +-- +-- NB: we assume the oidjoins test will have caught any dangling links, +-- that is OID or REGPROC fields that are not zero and do not match some +-- row in the linked-to table. However, if we want to enforce that a link +-- field can't be 0, we have to check it here. +-- +-- NB: run this test earlier than the create_operator test, because +-- that test creates some bogus operators... +-- **************** pg_proc **************** +-- Look for illegal values in pg_proc fields. +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prolang = 0 OR p1.prorettype = 0 OR + p1.pronargs < 0 OR + p1.pronargdefaults < 0 OR + p1.pronargdefaults > p1.pronargs OR + array_lower(p1.proargtypes, 1) != 0 OR + array_upper(p1.proargtypes, 1) != p1.pronargs-1 OR + 0::oid = ANY (p1.proargtypes) OR + procost <= 0 OR + CASE WHEN proretset THEN prorows <= 0 ELSE prorows != 0 END OR + prokind NOT IN ('f', 'a', 'w', 'p') OR + provolatile NOT IN ('i', 's', 'v') OR + proparallel NOT IN ('s', 'r', 'u'); + oid | proname +-----+--------- +(0 rows) + +-- prosrc should never be null; it can be empty only if prosqlbody isn't null +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE prosrc IS NULL; + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE (prosrc = '' OR prosrc = '-') AND prosqlbody IS NULL; + oid | proname +-----+--------- +(0 rows) + +-- proretset should only be set for normal functions +SELECT p1.oid, p1.proname +FROM pg_proc AS p1 +WHERE proretset AND prokind != 'f'; + oid | proname +-----+--------- +(0 rows) + +-- currently, no built-in functions should be SECURITY DEFINER; +-- this might change in future, but there will probably never be many. +SELECT p1.oid, p1.proname +FROM pg_proc AS p1 +WHERE prosecdef +ORDER BY 1; + oid | proname +-----+--------- +(0 rows) + +-- pronargdefaults should be 0 iff proargdefaults is null +SELECT p1.oid, p1.proname +FROM pg_proc AS p1 +WHERE (pronargdefaults <> 0) != (proargdefaults IS NOT NULL); + oid | proname +-----+--------- +(0 rows) + +-- probin should be non-empty for C functions, null everywhere else +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE prolang = 13 AND (probin IS NULL OR probin = '' OR probin = '-'); + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE prolang != 13 AND probin IS NOT NULL; + oid | proname +-----+--------- +(0 rows) + +-- Look for conflicting proc definitions (same names and input datatypes). +-- (This test should be dead code now that we have the unique index +-- pg_proc_proname_args_nsp_index, but I'll leave it in anyway.) +SELECT p1.oid, p1.proname, p2.oid, p2.proname +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.proname = p2.proname AND + p1.pronargs = p2.pronargs AND + p1.proargtypes = p2.proargtypes; + oid | proname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Considering only built-in procs (prolang = 12), look for multiple uses +-- of the same internal function (ie, matching prosrc fields). 
It's OK to +-- have several entries with different pronames for the same internal function, +-- but conflicts in the number of arguments and other critical items should +-- be complained of. (We don't check data types here; see next query.) +-- Note: ignore aggregate functions here, since they all point to the same +-- dummy built-in function. +SELECT p1.oid, p1.proname, p2.oid, p2.proname +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid < p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + (p1.prokind != 'a' OR p2.prokind != 'a') AND + (p1.prolang != p2.prolang OR + p1.prokind != p2.prokind OR + p1.prosecdef != p2.prosecdef OR + p1.proleakproof != p2.proleakproof OR + p1.proisstrict != p2.proisstrict OR + p1.proretset != p2.proretset OR + p1.provolatile != p2.provolatile OR + p1.pronargs != p2.pronargs); + oid | proname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Look for uses of different type OIDs in the argument/result type fields +-- for different aliases of the same built-in function. +-- This indicates that the types are being presumed to be binary-equivalent, +-- or that the built-in function is prepared to deal with different types. +-- That's not wrong, necessarily, but we make lists of all the types being +-- so treated. Note that the expected output of this part of the test will +-- need to be modified whenever new pairs of types are made binary-equivalent, +-- or when new polymorphic built-in functions are added! +-- Note: ignore aggregate functions here, since they all point to the same +-- dummy built-in function. Likewise, ignore range and multirange constructor +-- functions. +SELECT DISTINCT p1.prorettype::regtype, p2.prorettype::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + p1.prosrc NOT LIKE E'range\\_constructor_' AND + p2.prosrc NOT LIKE E'range\\_constructor_' AND + p1.prosrc NOT LIKE E'multirange\\_constructor_' AND + p2.prosrc NOT LIKE E'multirange\\_constructor_' AND + (p1.prorettype < p2.prorettype) +ORDER BY 1, 2; + prorettype | prorettype +-----------------------------+-------------------------- + bigint | xid8 + text | character varying + timestamp without time zone | timestamp with time zone + txid_snapshot | pg_snapshot +(4 rows) + +SELECT DISTINCT p1.proargtypes[0]::regtype, p2.proargtypes[0]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + p1.prosrc NOT LIKE E'range\\_constructor_' AND + p2.prosrc NOT LIKE E'range\\_constructor_' AND + p1.prosrc NOT LIKE E'multirange\\_constructor_' AND + p2.prosrc NOT LIKE E'multirange\\_constructor_' AND + (p1.proargtypes[0] < p2.proargtypes[0]) +ORDER BY 1, 2; + proargtypes | proargtypes +-----------------------------+-------------------------- + bigint | xid8 + text | character + text | character varying + timestamp without time zone | timestamp with time zone + bit | bit varying + txid_snapshot | pg_snapshot +(6 rows) + +SELECT DISTINCT p1.proargtypes[1]::regtype, p2.proargtypes[1]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + p1.prosrc NOT LIKE E'range\\_constructor_' AND + p2.prosrc NOT LIKE E'range\\_constructor_' AND + p1.prosrc NOT LIKE 
E'multirange\\_constructor_' AND + p2.prosrc NOT LIKE E'multirange\\_constructor_' AND + (p1.proargtypes[1] < p2.proargtypes[1]) +ORDER BY 1, 2; + proargtypes | proargtypes +-----------------------------+-------------------------- + integer | xid + timestamp without time zone | timestamp with time zone + bit | bit varying + txid_snapshot | pg_snapshot + anyrange | anymultirange +(5 rows) + +SELECT DISTINCT p1.proargtypes[2]::regtype, p2.proargtypes[2]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + (p1.proargtypes[2] < p2.proargtypes[2]) +ORDER BY 1, 2; + proargtypes | proargtypes +-----------------------------+-------------------------- + timestamp without time zone | timestamp with time zone +(1 row) + +SELECT DISTINCT p1.proargtypes[3]::regtype, p2.proargtypes[3]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + (p1.proargtypes[3] < p2.proargtypes[3]) +ORDER BY 1, 2; + proargtypes | proargtypes +-----------------------------+-------------------------- + timestamp without time zone | timestamp with time zone +(1 row) + +SELECT DISTINCT p1.proargtypes[4]::regtype, p2.proargtypes[4]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + (p1.proargtypes[4] < p2.proargtypes[4]) +ORDER BY 1, 2; + proargtypes | proargtypes +-------------+------------- +(0 rows) + +SELECT DISTINCT p1.proargtypes[5]::regtype, p2.proargtypes[5]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + (p1.proargtypes[5] < p2.proargtypes[5]) +ORDER BY 1, 2; + proargtypes | proargtypes +-------------+------------- +(0 rows) + +SELECT DISTINCT p1.proargtypes[6]::regtype, p2.proargtypes[6]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + (p1.proargtypes[6] < p2.proargtypes[6]) +ORDER BY 1, 2; + proargtypes | proargtypes +-------------+------------- +(0 rows) + +SELECT DISTINCT p1.proargtypes[7]::regtype, p2.proargtypes[7]::regtype +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid != p2.oid AND + p1.prosrc = p2.prosrc AND + p1.prolang = 12 AND p2.prolang = 12 AND + p1.prokind != 'a' AND p2.prokind != 'a' AND + (p1.proargtypes[7] < p2.proargtypes[7]) +ORDER BY 1, 2; + proargtypes | proargtypes +-------------+------------- +(0 rows) + +-- Look for functions that return type "internal" and do not have any +-- "internal" argument. Such a function would be a security hole since +-- it might be used to call an internal function from an SQL command. +-- As of 7.3 this query should find only internal_in, which is safe because +-- it always throws an error when called. +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prorettype = 'internal'::regtype AND NOT + 'internal'::regtype = ANY (p1.proargtypes); + oid | proname +------+------------- + 2304 | internal_in +(1 row) + +-- Look for functions that return a polymorphic type and do not have any +-- polymorphic argument. Calls of such functions would be unresolvable +-- at parse time. 
As of 9.6 this query should find only some input functions +-- and GiST support functions associated with these pseudotypes. +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prorettype IN + ('anyelement'::regtype, 'anyarray'::regtype, 'anynonarray'::regtype, + 'anyenum'::regtype) + AND NOT + ('anyelement'::regtype = ANY (p1.proargtypes) OR + 'anyarray'::regtype = ANY (p1.proargtypes) OR + 'anynonarray'::regtype = ANY (p1.proargtypes) OR + 'anyenum'::regtype = ANY (p1.proargtypes) OR + 'anyrange'::regtype = ANY (p1.proargtypes) OR + 'anymultirange'::regtype = ANY (p1.proargtypes)) +ORDER BY 2; + oid | proname +------+---------------- + 2296 | anyarray_in + 2502 | anyarray_recv + 2312 | anyelement_in + 3504 | anyenum_in + 2777 | anynonarray_in + 750 | array_in + 2400 | array_recv + 3506 | enum_in + 3532 | enum_recv +(9 rows) + +-- anyrange and anymultirange are tighter than the rest, can only resolve +-- from each other +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prorettype IN ('anyrange'::regtype, 'anymultirange'::regtype) + AND NOT + ('anyrange'::regtype = ANY (p1.proargtypes) OR + 'anymultirange'::regtype = ANY (p1.proargtypes)) +ORDER BY 2; + oid | proname +------+------------------ + 4229 | anymultirange_in + 3832 | anyrange_in + 4231 | multirange_in + 4233 | multirange_recv + 3876 | range_gist_union + 3834 | range_in + 3836 | range_recv +(7 rows) + +-- similarly for the anycompatible family +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prorettype IN + ('anycompatible'::regtype, 'anycompatiblearray'::regtype, + 'anycompatiblenonarray'::regtype) + AND NOT + ('anycompatible'::regtype = ANY (p1.proargtypes) OR + 'anycompatiblearray'::regtype = ANY (p1.proargtypes) OR + 'anycompatiblenonarray'::regtype = ANY (p1.proargtypes) OR + 'anycompatiblerange'::regtype = ANY (p1.proargtypes)) +ORDER BY 2; + oid | proname +------+-------------------------- + 5086 | anycompatible_in + 5088 | anycompatiblearray_in + 5090 | anycompatiblearray_recv + 5092 | anycompatiblenonarray_in +(4 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prorettype = 'anycompatiblerange'::regtype + AND NOT + 'anycompatiblerange'::regtype = ANY (p1.proargtypes) +ORDER BY 2; + oid | proname +------+----------------------- + 5094 | anycompatiblerange_in +(1 row) + +-- Look for functions that accept cstring and are neither datatype input +-- functions nor encoding conversion functions. It's almost never a good +-- idea to use cstring input for a function meant to be called from SQL; +-- text should be used instead, because cstring lacks suitable casts. +-- As of 9.6 this query should find only cstring_out and cstring_send. +-- However, we must manually exclude shell_in, which might or might not be +-- rejected by the EXISTS clause depending on whether there are currently +-- any shell types. +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE 'cstring'::regtype = ANY (p1.proargtypes) + AND NOT EXISTS(SELECT 1 FROM pg_type WHERE typinput = p1.oid) + AND NOT EXISTS(SELECT 1 FROM pg_conversion WHERE conproc = p1.oid) + AND p1.oid != 'shell_in(cstring)'::regprocedure +ORDER BY 1; + oid | proname +------+-------------- + 2293 | cstring_out + 2501 | cstring_send +(2 rows) + +-- Likewise, look for functions that return cstring and aren't datatype output +-- functions nor typmod output functions. +-- As of 9.6 this query should find only cstring_in and cstring_recv. +-- However, we must manually exclude shell_out. 
+SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE p1.prorettype = 'cstring'::regtype + AND NOT EXISTS(SELECT 1 FROM pg_type WHERE typoutput = p1.oid) + AND NOT EXISTS(SELECT 1 FROM pg_type WHERE typmodout = p1.oid) + AND p1.oid != 'shell_out(void)'::regprocedure +ORDER BY 1; + oid | proname +------+-------------- + 2292 | cstring_in + 2500 | cstring_recv +(2 rows) + +-- Check for length inconsistencies between the various argument-info arrays. +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE proallargtypes IS NOT NULL AND + array_length(proallargtypes,1) < array_length(proargtypes,1); + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE proargmodes IS NOT NULL AND + array_length(proargmodes,1) < array_length(proargtypes,1); + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE proargnames IS NOT NULL AND + array_length(proargnames,1) < array_length(proargtypes,1); + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE proallargtypes IS NOT NULL AND proargmodes IS NOT NULL AND + array_length(proallargtypes,1) <> array_length(proargmodes,1); + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE proallargtypes IS NOT NULL AND proargnames IS NOT NULL AND + array_length(proallargtypes,1) <> array_length(proargnames,1); + oid | proname +-----+--------- +(0 rows) + +SELECT p1.oid, p1.proname +FROM pg_proc as p1 +WHERE proargmodes IS NOT NULL AND proargnames IS NOT NULL AND + array_length(proargmodes,1) <> array_length(proargnames,1); + oid | proname +-----+--------- +(0 rows) + +-- Check that proallargtypes matches proargtypes +SELECT p1.oid, p1.proname, p1.proargtypes, p1.proallargtypes, p1.proargmodes +FROM pg_proc as p1 +WHERE proallargtypes IS NOT NULL AND + ARRAY(SELECT unnest(proargtypes)) <> + ARRAY(SELECT proallargtypes[i] + FROM generate_series(1, array_length(proallargtypes, 1)) g(i) + WHERE proargmodes IS NULL OR proargmodes[i] IN ('i', 'b', 'v')); + oid | proname | proargtypes | proallargtypes | proargmodes +-----+---------+-------------+----------------+------------- +(0 rows) + +-- Check for type of the variadic array parameter's elements. +-- provariadic should be ANYOID if the type of the last element is ANYOID, +-- ANYELEMENTOID if the type of the last element is ANYARRAYOID, +-- ANYCOMPATIBLEOID if the type of the last element is ANYCOMPATIBLEARRAYOID, +-- and otherwise the element type corresponding to the array type. +SELECT oid::regprocedure, provariadic::regtype, proargtypes::regtype[] +FROM pg_proc +WHERE provariadic != 0 +AND case proargtypes[array_length(proargtypes, 1)-1] + WHEN '"any"'::regtype THEN '"any"'::regtype + WHEN 'anyarray'::regtype THEN 'anyelement'::regtype + WHEN 'anycompatiblearray'::regtype THEN 'anycompatible'::regtype + ELSE (SELECT t.oid + FROM pg_type t + WHERE t.typarray = proargtypes[array_length(proargtypes, 1)-1]) + END != provariadic; + oid | provariadic | proargtypes +-----+-------------+------------- +(0 rows) + +-- Check that all and only those functions with a variadic type have +-- a variadic argument. 
+SELECT oid::regprocedure, proargmodes, provariadic +FROM pg_proc +WHERE (proargmodes IS NOT NULL AND 'v' = any(proargmodes)) + IS DISTINCT FROM + (provariadic != 0); + oid | proargmodes | provariadic +-----+-------------+------------- +(0 rows) + +-- Check for prosupport functions with the wrong signature +SELECT p1.oid, p1.proname, p2.oid, p2.proname +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p2.oid = p1.prosupport AND + (p2.prorettype != 'internal'::regtype OR p2.proretset OR p2.pronargs != 1 + OR p2.proargtypes[0] != 'internal'::regtype); + oid | proname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Insist that all built-in pg_proc entries have descriptions +SELECT p1.oid, p1.proname +FROM pg_proc as p1 LEFT JOIN pg_description as d + ON p1.tableoid = d.classoid and p1.oid = d.objoid and d.objsubid = 0 +WHERE d.classoid IS NULL AND p1.oid <= 9999; + oid | proname +-----+--------- +(0 rows) + +-- List of built-in leakproof functions +-- +-- Leakproof functions should only be added after carefully +-- scrutinizing all possibly executed codepaths for possible +-- information leaks. Don't add functions here unless you know what a +-- leakproof function is. If unsure, don't mark it as such. +-- temporarily disable fancy output, so catalog changes create less diff noise +\a\t +SELECT p1.oid::regprocedure +FROM pg_proc p1 JOIN pg_namespace pn + ON pronamespace = pn.oid +WHERE nspname = 'pg_catalog' AND proleakproof +ORDER BY 1; +boollt(boolean,boolean) +boolgt(boolean,boolean) +booleq(boolean,boolean) +chareq("char","char") +nameeq(name,name) +int2eq(smallint,smallint) +int2lt(smallint,smallint) +int4eq(integer,integer) +int4lt(integer,integer) +texteq(text,text) +xideq(xid,xid) +cideq(cid,cid) +charne("char","char") +charle("char","char") +chargt("char","char") +charge("char","char") +boolne(boolean,boolean) +int4ne(integer,integer) +int2ne(smallint,smallint) +int2gt(smallint,smallint) +int4gt(integer,integer) +int2le(smallint,smallint) +int4le(integer,integer) +int4ge(integer,integer) +int2ge(smallint,smallint) +textne(text,text) +int24eq(smallint,integer) +int42eq(integer,smallint) +int24lt(smallint,integer) +int42lt(integer,smallint) +int24gt(smallint,integer) +int42gt(integer,smallint) +int24ne(smallint,integer) +int42ne(integer,smallint) +int24le(smallint,integer) +int42le(integer,smallint) +int24ge(smallint,integer) +int42ge(integer,smallint) +oideq(oid,oid) +oidne(oid,oid) +float8(smallint) +float4(smallint) +nameeqtext(name,text) +namelttext(name,text) +nameletext(name,text) +namegetext(name,text) +namegttext(name,text) +namenetext(name,text) +btnametextcmp(name,text) +texteqname(text,name) +textltname(text,name) +textlename(text,name) +textgename(text,name) +textgtname(text,name) +textnename(text,name) +bttextnamecmp(text,name) +float4eq(real,real) +float4ne(real,real) +float4lt(real,real) +float4le(real,real) +float4gt(real,real) +float4ge(real,real) +float8eq(double precision,double precision) +float8ne(double precision,double precision) +float8lt(double precision,double precision) +float8le(double precision,double precision) +float8gt(double precision,double precision) +float8ge(double precision,double precision) +float48eq(real,double precision) +float48ne(real,double precision) +float48lt(real,double precision) +float48le(real,double precision) +float48gt(real,double precision) +float48ge(real,double precision) +float84eq(double precision,real) +float84ne(double precision,real) +float84lt(double precision,real) +float84le(double precision,real) 
+float84gt(double precision,real) +float84ge(double precision,real) +float8(real) +int4(smallint) +float8(integer) +float4(integer) +btint2cmp(smallint,smallint) +btint4cmp(integer,integer) +btfloat4cmp(real,real) +btfloat8cmp(double precision,double precision) +btoidcmp(oid,oid) +btcharcmp("char","char") +btnamecmp(name,name) +bttextcmp(text,text) +cash_cmp(money,money) +btoidvectorcmp(oidvector,oidvector) +text(name) +name(text) +name(character) +text_larger(text,text) +text_smaller(text,text) +int8eq(bigint,bigint) +int8ne(bigint,bigint) +int8lt(bigint,bigint) +int8gt(bigint,bigint) +int8le(bigint,bigint) +int8ge(bigint,bigint) +int84eq(bigint,integer) +int84ne(bigint,integer) +int84lt(bigint,integer) +int84gt(bigint,integer) +int84le(bigint,integer) +int84ge(bigint,integer) +int8(integer) +float8(bigint) +oidvectorne(oidvector,oidvector) +float4(bigint) +namelt(name,name) +namele(name,name) +namegt(name,name) +namege(name,name) +namene(name,name) +oidvectorlt(oidvector,oidvector) +oidvectorle(oidvector,oidvector) +oidvectoreq(oidvector,oidvector) +oidvectorge(oidvector,oidvector) +oidvectorgt(oidvector,oidvector) +oidlt(oid,oid) +oidle(oid,oid) +text_lt(text,text) +text_le(text,text) +text_gt(text,text) +text_ge(text,text) +int8(smallint) +macaddr_eq(macaddr,macaddr) +macaddr_lt(macaddr,macaddr) +macaddr_le(macaddr,macaddr) +macaddr_gt(macaddr,macaddr) +macaddr_ge(macaddr,macaddr) +macaddr_ne(macaddr,macaddr) +macaddr_cmp(macaddr,macaddr) +btint8cmp(bigint,bigint) +int48eq(integer,bigint) +int48ne(integer,bigint) +int48lt(integer,bigint) +int48gt(integer,bigint) +int48le(integer,bigint) +int48ge(integer,bigint) +cash_eq(money,money) +cash_ne(money,money) +cash_lt(money,money) +cash_le(money,money) +cash_gt(money,money) +cash_ge(money,money) +network_eq(inet,inet) +network_lt(inet,inet) +network_le(inet,inet) +network_gt(inet,inet) +network_ge(inet,inet) +network_ne(inet,inet) +network_cmp(inet,inet) +lseg_eq(lseg,lseg) +bpchareq(character,character) +bpcharlt(character,character) +bpcharle(character,character) +bpchargt(character,character) +bpcharge(character,character) +bpcharne(character,character) +bpchar_larger(character,character) +bpchar_smaller(character,character) +bpcharcmp(character,character) +date_eq(date,date) +date_lt(date,date) +date_le(date,date) +date_gt(date,date) +date_ge(date,date) +date_ne(date,date) +date_cmp(date,date) +time_lt(time without time zone,time without time zone) +time_le(time without time zone,time without time zone) +time_gt(time without time zone,time without time zone) +time_ge(time without time zone,time without time zone) +time_ne(time without time zone,time without time zone) +time_cmp(time without time zone,time without time zone) +time_eq(time without time zone,time without time zone) +timestamptz_eq(timestamp with time zone,timestamp with time zone) +timestamptz_ne(timestamp with time zone,timestamp with time zone) +timestamptz_lt(timestamp with time zone,timestamp with time zone) +timestamptz_le(timestamp with time zone,timestamp with time zone) +timestamptz_ge(timestamp with time zone,timestamp with time zone) +timestamptz_gt(timestamp with time zone,timestamp with time zone) +interval_eq(interval,interval) +interval_ne(interval,interval) +interval_lt(interval,interval) +interval_le(interval,interval) +interval_ge(interval,interval) +interval_gt(interval,interval) +charlt("char","char") +tidne(tid,tid) +int8(oid) +tideq(tid,tid) +timestamptz_cmp(timestamp with time zone,timestamp with time zone) +interval_cmp(interval,interval) 
+xideqint4(xid,integer) +timetz_eq(time with time zone,time with time zone) +timetz_ne(time with time zone,time with time zone) +timetz_lt(time with time zone,time with time zone) +timetz_le(time with time zone,time with time zone) +timetz_ge(time with time zone,time with time zone) +timetz_gt(time with time zone,time with time zone) +timetz_cmp(time with time zone,time with time zone) +"interval"(time without time zone) +name(character varying) +"varchar"(name) +circle_eq(circle,circle) +circle_ne(circle,circle) +circle_lt(circle,circle) +circle_gt(circle,circle) +circle_le(circle,circle) +circle_ge(circle,circle) +lseg_ne(lseg,lseg) +lseg_lt(lseg,lseg) +lseg_le(lseg,lseg) +lseg_gt(lseg,lseg) +lseg_ge(lseg,lseg) +biteq(bit,bit) +bitne(bit,bit) +bitge(bit,bit) +bitgt(bit,bit) +bitle(bit,bit) +bitlt(bit,bit) +bitcmp(bit,bit) +oidgt(oid,oid) +oidge(oid,oid) +varbiteq(bit varying,bit varying) +varbitne(bit varying,bit varying) +varbitge(bit varying,bit varying) +varbitgt(bit varying,bit varying) +varbitle(bit varying,bit varying) +varbitlt(bit varying,bit varying) +varbitcmp(bit varying,bit varying) +boolle(boolean,boolean) +boolge(boolean,boolean) +btboolcmp(boolean,boolean) +"numeric"(integer) +"numeric"(real) +"numeric"(double precision) +"numeric"(bigint) +"numeric"(smallint) +int28eq(smallint,bigint) +int28ne(smallint,bigint) +int28lt(smallint,bigint) +int28gt(smallint,bigint) +int28le(smallint,bigint) +int28ge(smallint,bigint) +int82eq(bigint,smallint) +int82ne(bigint,smallint) +int82lt(bigint,smallint) +int82gt(bigint,smallint) +int82le(bigint,smallint) +int82ge(bigint,smallint) +byteaeq(bytea,bytea) +bytealt(bytea,bytea) +byteale(bytea,bytea) +byteagt(bytea,bytea) +byteage(bytea,bytea) +byteane(bytea,bytea) +byteacmp(bytea,bytea) +timestamp_cmp(timestamp without time zone,timestamp without time zone) +timestamp_eq(timestamp without time zone,timestamp without time zone) +timestamp_ne(timestamp without time zone,timestamp without time zone) +timestamp_lt(timestamp without time zone,timestamp without time zone) +timestamp_le(timestamp without time zone,timestamp without time zone) +timestamp_ge(timestamp without time zone,timestamp without time zone) +timestamp_gt(timestamp without time zone,timestamp without time zone) +text_pattern_lt(text,text) +text_pattern_le(text,text) +text_pattern_ge(text,text) +text_pattern_gt(text,text) +bttext_pattern_cmp(text,text) +bpchar_pattern_lt(character,character) +bpchar_pattern_le(character,character) +bpchar_pattern_ge(character,character) +bpchar_pattern_gt(character,character) +btbpchar_pattern_cmp(character,character) +btint48cmp(integer,bigint) +btint84cmp(bigint,integer) +btint24cmp(smallint,integer) +btint42cmp(integer,smallint) +btint28cmp(smallint,bigint) +btint82cmp(bigint,smallint) +btfloat48cmp(real,double precision) +btfloat84cmp(double precision,real) +md5(text) +md5(bytea) +bool(integer) +int4(boolean) +tidgt(tid,tid) +tidlt(tid,tid) +tidge(tid,tid) +tidle(tid,tid) +bttidcmp(tid,tid) +uuid_lt(uuid,uuid) +uuid_le(uuid,uuid) +uuid_eq(uuid,uuid) +uuid_ge(uuid,uuid) +uuid_gt(uuid,uuid) +uuid_ne(uuid,uuid) +uuid_cmp(uuid,uuid) +pg_lsn_lt(pg_lsn,pg_lsn) +pg_lsn_le(pg_lsn,pg_lsn) +pg_lsn_eq(pg_lsn,pg_lsn) +pg_lsn_ge(pg_lsn,pg_lsn) +pg_lsn_gt(pg_lsn,pg_lsn) +pg_lsn_ne(pg_lsn,pg_lsn) +pg_lsn_cmp(pg_lsn,pg_lsn) +xidneq(xid,xid) +xidneqint4(xid,integer) +sha224(bytea) +sha256(bytea) +sha384(bytea) +sha512(bytea) +gen_random_uuid() +starts_with(text,text) +macaddr8_eq(macaddr8,macaddr8) +macaddr8_lt(macaddr8,macaddr8) 
+macaddr8_le(macaddr8,macaddr8) +macaddr8_gt(macaddr8,macaddr8) +macaddr8_ge(macaddr8,macaddr8) +macaddr8_ne(macaddr8,macaddr8) +macaddr8_cmp(macaddr8,macaddr8) +macaddr8(macaddr) +xid8lt(xid8,xid8) +xid8gt(xid8,xid8) +xid8le(xid8,xid8) +xid8ge(xid8,xid8) +xid8eq(xid8,xid8) +xid8ne(xid8,xid8) +xid8cmp(xid8,xid8) +-- restore normal output mode +\a\t +-- List of functions used by libpq's fe-lobj.c +-- +-- If the output of this query changes, you probably broke libpq. +-- lo_initialize() assumes that there will be at most one match for +-- each listed name. +select proname, oid from pg_catalog.pg_proc +where proname in ( + 'lo_open', + 'lo_close', + 'lo_creat', + 'lo_create', + 'lo_unlink', + 'lo_lseek', + 'lo_lseek64', + 'lo_tell', + 'lo_tell64', + 'lo_truncate', + 'lo_truncate64', + 'loread', + 'lowrite') +and pronamespace = (select oid from pg_catalog.pg_namespace + where nspname = 'pg_catalog') +order by 1; + proname | oid +---------------+------ + lo_close | 953 + lo_creat | 957 + lo_create | 715 + lo_lseek | 956 + lo_lseek64 | 3170 + lo_open | 952 + lo_tell | 958 + lo_tell64 | 3171 + lo_truncate | 1004 + lo_truncate64 | 3172 + lo_unlink | 964 + loread | 954 + lowrite | 955 +(13 rows) + +-- Check that all immutable functions are marked parallel safe +SELECT p1.oid, p1.proname +FROM pg_proc AS p1 +WHERE provolatile = 'i' AND proparallel = 'u'; + oid | proname +-----+--------- +(0 rows) + +-- **************** pg_cast **************** +-- Catch bogus values in pg_cast columns (other than cases detected by +-- oidjoins test). +SELECT * +FROM pg_cast c +WHERE castsource = 0 OR casttarget = 0 OR castcontext NOT IN ('e', 'a', 'i') + OR castmethod NOT IN ('f', 'b' ,'i'); + oid | castsource | casttarget | castfunc | castcontext | castmethod +-----+------------+------------+----------+-------------+------------ +(0 rows) + +-- Check that castfunc is nonzero only for cast methods that need a function, +-- and zero otherwise +SELECT * +FROM pg_cast c +WHERE (castmethod = 'f' AND castfunc = 0) + OR (castmethod IN ('b', 'i') AND castfunc <> 0); + oid | castsource | casttarget | castfunc | castcontext | castmethod +-----+------------+------------+----------+-------------+------------ +(0 rows) + +-- Look for casts to/from the same type that aren't length coercion functions. +-- (We assume they are length coercions if they take multiple arguments.) +-- Such entries are not necessarily harmful, but they are useless. +SELECT * +FROM pg_cast c +WHERE castsource = casttarget AND castfunc = 0; + oid | castsource | casttarget | castfunc | castcontext | castmethod +-----+------------+------------+----------+-------------+------------ +(0 rows) + +SELECT c.* +FROM pg_cast c, pg_proc p +WHERE c.castfunc = p.oid AND p.pronargs < 2 AND castsource = casttarget; + oid | castsource | casttarget | castfunc | castcontext | castmethod +-----+------------+------------+----------+-------------+------------ +(0 rows) + +-- Look for cast functions that don't have the right signature. The +-- argument and result types in pg_proc must be the same as, or binary +-- compatible with, what it says in pg_cast. +-- As a special case, we allow casts from CHAR(n) that use functions +-- declared to take TEXT. This does not pass the binary-coercibility test +-- because CHAR(n)-to-TEXT normally invokes rtrim(). However, the results +-- are the same, so long as the function is one that ignores trailing blanks. 
+SELECT c.* +FROM pg_cast c, pg_proc p +WHERE c.castfunc = p.oid AND + (p.pronargs < 1 OR p.pronargs > 3 + OR NOT (binary_coercible(c.castsource, p.proargtypes[0]) + OR (c.castsource = 'character'::regtype AND + p.proargtypes[0] = 'text'::regtype)) + OR NOT binary_coercible(p.prorettype, c.casttarget)); + oid | castsource | casttarget | castfunc | castcontext | castmethod +-----+------------+------------+----------+-------------+------------ +(0 rows) + +SELECT c.* +FROM pg_cast c, pg_proc p +WHERE c.castfunc = p.oid AND + ((p.pronargs > 1 AND p.proargtypes[1] != 'int4'::regtype) OR + (p.pronargs > 2 AND p.proargtypes[2] != 'bool'::regtype)); + oid | castsource | casttarget | castfunc | castcontext | castmethod +-----+------------+------------+----------+-------------+------------ +(0 rows) + +-- Look for binary compatible casts that do not have the reverse +-- direction registered as well, or where the reverse direction is not +-- also binary compatible. This is legal, but usually not intended. +-- As of 7.4, this finds the casts from text and varchar to bpchar, because +-- those are binary-compatible while the reverse way goes through rtrim(). +-- As of 8.2, this finds the cast from cidr to inet, because that is a +-- trivial binary coercion while the other way goes through inet_to_cidr(). +-- As of 8.3, this finds the casts from xml to text, varchar, and bpchar, +-- because those are binary-compatible while the reverse goes through +-- texttoxml(), which does an XML syntax check. +-- As of 9.1, this finds the cast from pg_node_tree to text, which we +-- intentionally do not provide a reverse pathway for. +SELECT castsource::regtype, casttarget::regtype, castfunc, castcontext +FROM pg_cast c +WHERE c.castmethod = 'b' AND + NOT EXISTS (SELECT 1 FROM pg_cast k + WHERE k.castmethod = 'b' AND + k.castsource = c.casttarget AND + k.casttarget = c.castsource); + castsource | casttarget | castfunc | castcontext +-------------------+-------------------+----------+------------- + text | character | 0 | i + character varying | character | 0 | i + pg_node_tree | text | 0 | i + pg_ndistinct | bytea | 0 | i + pg_dependencies | bytea | 0 | i + pg_mcv_list | bytea | 0 | i + cidr | inet | 0 | i + xml | text | 0 | a + xml | character varying | 0 | a + xml | character | 0 | a +(10 rows) + +-- **************** pg_conversion **************** +-- Look for illegal values in pg_conversion fields. +SELECT c.oid, c.conname +FROM pg_conversion as c +WHERE c.conproc = 0 OR + pg_encoding_to_char(conforencoding) = '' OR + pg_encoding_to_char(contoencoding) = ''; + oid | conname +-----+--------- +(0 rows) + +-- Look for conprocs that don't have the expected signature. +SELECT p.oid, p.proname, c.oid, c.conname +FROM pg_proc p, pg_conversion c +WHERE p.oid = c.conproc AND + (p.prorettype != 'int4'::regtype OR p.proretset OR + p.pronargs != 6 OR + p.proargtypes[0] != 'int4'::regtype OR + p.proargtypes[1] != 'int4'::regtype OR + p.proargtypes[2] != 'cstring'::regtype OR + p.proargtypes[3] != 'internal'::regtype OR + p.proargtypes[4] != 'int4'::regtype OR + p.proargtypes[5] != 'bool'::regtype); + oid | proname | oid | conname +-----+---------+-----+--------- +(0 rows) + +-- Check for conprocs that don't perform the specific conversion that +-- pg_conversion alleges they do, by trying to invoke each conversion +-- on some simple ASCII data. (The conproc should throw an error if +-- it doesn't accept the encodings that are passed to it.) 
+-- Unfortunately, we can't test non-default conprocs this way, because +-- there is no way to ask convert() to invoke them, and we cannot call +-- them directly from SQL. But there are no non-default built-in +-- conversions anyway. +-- (Similarly, this doesn't cope with any search path issues.) +SELECT c.oid, c.conname +FROM pg_conversion as c +WHERE condefault AND + convert('ABC'::bytea, pg_encoding_to_char(conforencoding), + pg_encoding_to_char(contoencoding)) != 'ABC'; + oid | conname +-----+--------- +(0 rows) + +-- **************** pg_operator **************** +-- Look for illegal values in pg_operator fields. +SELECT o1.oid, o1.oprname +FROM pg_operator as o1 +WHERE (o1.oprkind != 'b' AND o1.oprkind != 'l') OR + o1.oprresult = 0 OR o1.oprcode = 0; + oid | oprname +-----+--------- +(0 rows) + +-- Look for missing or unwanted operand types +SELECT o1.oid, o1.oprname +FROM pg_operator as o1 +WHERE (o1.oprleft = 0 and o1.oprkind != 'l') OR + (o1.oprleft != 0 and o1.oprkind = 'l') OR + o1.oprright = 0; + oid | oprname +-----+--------- +(0 rows) + +-- Look for conflicting operator definitions (same names and input datatypes). +SELECT o1.oid, o1.oprcode, o2.oid, o2.oprcode +FROM pg_operator AS o1, pg_operator AS o2 +WHERE o1.oid != o2.oid AND + o1.oprname = o2.oprname AND + o1.oprkind = o2.oprkind AND + o1.oprleft = o2.oprleft AND + o1.oprright = o2.oprright; + oid | oprcode | oid | oprcode +-----+---------+-----+--------- +(0 rows) + +-- Look for commutative operators that don't commute. +-- DEFINITIONAL NOTE: If A.oprcom = B, then x A y has the same result as y B x. +-- We expect that B will always say that B.oprcom = A as well; that's not +-- inherently essential, but it would be inefficient not to mark it so. +SELECT o1.oid, o1.oprcode, o2.oid, o2.oprcode +FROM pg_operator AS o1, pg_operator AS o2 +WHERE o1.oprcom = o2.oid AND + (o1.oprkind != 'b' OR + o1.oprleft != o2.oprright OR + o1.oprright != o2.oprleft OR + o1.oprresult != o2.oprresult OR + o1.oid != o2.oprcom); + oid | oprcode | oid | oprcode +-----+---------+-----+--------- +(0 rows) + +-- Look for negatory operators that don't agree. +-- DEFINITIONAL NOTE: If A.oprnegate = B, then both A and B must yield +-- boolean results, and (x A y) == ! (x B y), or the equivalent for +-- single-operand operators. +-- We expect that B will always say that B.oprnegate = A as well; that's not +-- inherently essential, but it would be inefficient not to mark it so. +-- Also, A and B had better not be the same operator. +SELECT o1.oid, o1.oprcode, o2.oid, o2.oprcode +FROM pg_operator AS o1, pg_operator AS o2 +WHERE o1.oprnegate = o2.oid AND + (o1.oprkind != o2.oprkind OR + o1.oprleft != o2.oprleft OR + o1.oprright != o2.oprright OR + o1.oprresult != 'bool'::regtype OR + o2.oprresult != 'bool'::regtype OR + o1.oid != o2.oprnegate OR + o1.oid = o2.oid); + oid | oprcode | oid | oprcode +-----+---------+-----+--------- +(0 rows) + +-- Make a list of the names of operators that are claimed to be commutator +-- pairs. This list will grow over time, but before accepting a new entry +-- make sure you didn't link the wrong operators. 
+SELECT DISTINCT o1.oprname AS op1, o2.oprname AS op2 +FROM pg_operator o1, pg_operator o2 +WHERE o1.oprcom = o2.oid AND o1.oprname <= o2.oprname +ORDER BY 1, 2; + op1 | op2 +------+------ + # | # + & | & + && | && + * | * + *< | *> + *<= | *>= + *<> | *<> + *= | *= + + | + + -|- | -|- + < | > + <-> | <-> + << | >> + <<= | >>= + <= | >= + <> | <> + <@ | @> + = | = + ?# | ?# + ?- | ?- + ?-| | ?-| + ?| | ?| + ?|| | ?|| + @@ | @@ + @@@ | @@@ + | | | + ~<=~ | ~>=~ + ~<~ | ~>~ + ~= | ~= +(29 rows) + +-- Likewise for negator pairs. +SELECT DISTINCT o1.oprname AS op1, o2.oprname AS op2 +FROM pg_operator o1, pg_operator o2 +WHERE o1.oprnegate = o2.oid AND o1.oprname <= o2.oprname +ORDER BY 1, 2; + op1 | op2 +------+------ + !~ | ~ + !~* | ~* + !~~ | ~~ + !~~* | ~~* + *< | *>= + *<= | *> + *<> | *= + < | >= + <= | > + <> | = + <> | ~= + ~<=~ | ~>~ + ~<~ | ~>=~ +(13 rows) + +-- A mergejoinable or hashjoinable operator must be binary, must return +-- boolean, and must have a commutator (itself, unless it's a cross-type +-- operator). +SELECT o1.oid, o1.oprname FROM pg_operator AS o1 +WHERE (o1.oprcanmerge OR o1.oprcanhash) AND NOT + (o1.oprkind = 'b' AND o1.oprresult = 'bool'::regtype AND o1.oprcom != 0); + oid | oprname +-----+--------- +(0 rows) + +-- What's more, the commutator had better be mergejoinable/hashjoinable too. +SELECT o1.oid, o1.oprname, o2.oid, o2.oprname +FROM pg_operator AS o1, pg_operator AS o2 +WHERE o1.oprcom = o2.oid AND + (o1.oprcanmerge != o2.oprcanmerge OR + o1.oprcanhash != o2.oprcanhash); + oid | oprname | oid | oprname +-----+---------+-----+--------- +(0 rows) + +-- Mergejoinable operators should appear as equality members of btree index +-- opfamilies. +SELECT o1.oid, o1.oprname +FROM pg_operator AS o1 +WHERE o1.oprcanmerge AND NOT EXISTS + (SELECT 1 FROM pg_amop + WHERE amopmethod = (SELECT oid FROM pg_am WHERE amname = 'btree') AND + amopopr = o1.oid AND amopstrategy = 3); + oid | oprname +-----+--------- +(0 rows) + +-- And the converse. +SELECT o1.oid, o1.oprname, p.amopfamily +FROM pg_operator AS o1, pg_amop p +WHERE amopopr = o1.oid + AND amopmethod = (SELECT oid FROM pg_am WHERE amname = 'btree') + AND amopstrategy = 3 + AND NOT o1.oprcanmerge; + oid | oprname | amopfamily +-----+---------+------------ +(0 rows) + +-- Hashable operators should appear as members of hash index opfamilies. +SELECT o1.oid, o1.oprname +FROM pg_operator AS o1 +WHERE o1.oprcanhash AND NOT EXISTS + (SELECT 1 FROM pg_amop + WHERE amopmethod = (SELECT oid FROM pg_am WHERE amname = 'hash') AND + amopopr = o1.oid AND amopstrategy = 1); + oid | oprname +-----+--------- +(0 rows) + +-- And the converse. +SELECT o1.oid, o1.oprname, p.amopfamily +FROM pg_operator AS o1, pg_amop p +WHERE amopopr = o1.oid + AND amopmethod = (SELECT oid FROM pg_am WHERE amname = 'hash') + AND NOT o1.oprcanhash; + oid | oprname | amopfamily +-----+---------+------------ +(0 rows) + +-- Check that each operator defined in pg_operator matches its oprcode entry +-- in pg_proc. Easiest to do this separately for each oprkind. 
+SELECT o1.oid, o1.oprname, p1.oid, p1.proname +FROM pg_operator AS o1, pg_proc AS p1 +WHERE o1.oprcode = p1.oid AND + o1.oprkind = 'b' AND + (p1.pronargs != 2 + OR NOT binary_coercible(p1.prorettype, o1.oprresult) + OR NOT binary_coercible(o1.oprleft, p1.proargtypes[0]) + OR NOT binary_coercible(o1.oprright, p1.proargtypes[1])); + oid | oprname | oid | proname +-----+---------+-----+--------- +(0 rows) + +SELECT o1.oid, o1.oprname, p1.oid, p1.proname +FROM pg_operator AS o1, pg_proc AS p1 +WHERE o1.oprcode = p1.oid AND + o1.oprkind = 'l' AND + (p1.pronargs != 1 + OR NOT binary_coercible(p1.prorettype, o1.oprresult) + OR NOT binary_coercible(o1.oprright, p1.proargtypes[0]) + OR o1.oprleft != 0); + oid | oprname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- If the operator is mergejoinable or hashjoinable, its underlying function +-- should not be volatile. +SELECT o1.oid, o1.oprname, p1.oid, p1.proname +FROM pg_operator AS o1, pg_proc AS p1 +WHERE o1.oprcode = p1.oid AND + (o1.oprcanmerge OR o1.oprcanhash) AND + p1.provolatile = 'v'; + oid | oprname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- If oprrest is set, the operator must return boolean, +-- and it must link to a proc with the right signature +-- to be a restriction selectivity estimator. +-- The proc signature we want is: float8 proc(internal, oid, internal, int4) +SELECT o1.oid, o1.oprname, p2.oid, p2.proname +FROM pg_operator AS o1, pg_proc AS p2 +WHERE o1.oprrest = p2.oid AND + (o1.oprresult != 'bool'::regtype OR + p2.prorettype != 'float8'::regtype OR p2.proretset OR + p2.pronargs != 4 OR + p2.proargtypes[0] != 'internal'::regtype OR + p2.proargtypes[1] != 'oid'::regtype OR + p2.proargtypes[2] != 'internal'::regtype OR + p2.proargtypes[3] != 'int4'::regtype); + oid | oprname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- If oprjoin is set, the operator must be a binary boolean op, +-- and it must link to a proc with the right signature +-- to be a join selectivity estimator. +-- The proc signature we want is: float8 proc(internal, oid, internal, int2, internal) +-- (Note: the old signature with only 4 args is still allowed, but no core +-- estimator should be using it.) +SELECT o1.oid, o1.oprname, p2.oid, p2.proname +FROM pg_operator AS o1, pg_proc AS p2 +WHERE o1.oprjoin = p2.oid AND + (o1.oprkind != 'b' OR o1.oprresult != 'bool'::regtype OR + p2.prorettype != 'float8'::regtype OR p2.proretset OR + p2.pronargs != 5 OR + p2.proargtypes[0] != 'internal'::regtype OR + p2.proargtypes[1] != 'oid'::regtype OR + p2.proargtypes[2] != 'internal'::regtype OR + p2.proargtypes[3] != 'int2'::regtype OR + p2.proargtypes[4] != 'internal'::regtype); + oid | oprname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Insist that all built-in pg_operator entries have descriptions +SELECT o1.oid, o1.oprname +FROM pg_operator as o1 LEFT JOIN pg_description as d + ON o1.tableoid = d.classoid and o1.oid = d.objoid and d.objsubid = 0 +WHERE d.classoid IS NULL AND o1.oid <= 9999; + oid | oprname +-----+--------- +(0 rows) + +-- Check that operators' underlying functions have suitable comments, +-- namely 'implementation of XXX operator'. (Note: it's not necessary to +-- put such comments into pg_proc.dat; initdb will generate them as needed.) +-- In some cases involving legacy names for operators, there are multiple +-- operators referencing the same pg_proc entry, so ignore operators whose +-- comments say they are deprecated. 
+-- We also have a few functions that are both operator support and meant to +-- be called directly; those should have comments matching their operator. +WITH funcdescs AS ( + SELECT p.oid as p_oid, proname, o.oid as o_oid, + pd.description as prodesc, + 'implementation of ' || oprname || ' operator' as expecteddesc, + od.description as oprdesc + FROM pg_proc p JOIN pg_operator o ON oprcode = p.oid + LEFT JOIN pg_description pd ON + (pd.objoid = p.oid and pd.classoid = p.tableoid and pd.objsubid = 0) + LEFT JOIN pg_description od ON + (od.objoid = o.oid and od.classoid = o.tableoid and od.objsubid = 0) + WHERE o.oid <= 9999 +) +SELECT * FROM funcdescs + WHERE prodesc IS DISTINCT FROM expecteddesc + AND oprdesc NOT LIKE 'deprecated%' + AND prodesc IS DISTINCT FROM oprdesc; + p_oid | proname | o_oid | prodesc | expecteddesc | oprdesc +-------+---------+-------+---------+--------------+--------- +(0 rows) + +-- Show all the operator-implementation functions that have their own +-- comments. This should happen only in cases where the function and +-- operator syntaxes are both documented at the user level. +-- This should be a pretty short list; it's mostly legacy cases. +WITH funcdescs AS ( + SELECT p.oid as p_oid, proname, o.oid as o_oid, + pd.description as prodesc, + 'implementation of ' || oprname || ' operator' as expecteddesc, + od.description as oprdesc + FROM pg_proc p JOIN pg_operator o ON oprcode = p.oid + LEFT JOIN pg_description pd ON + (pd.objoid = p.oid and pd.classoid = p.tableoid and pd.objsubid = 0) + LEFT JOIN pg_description od ON + (od.objoid = o.oid and od.classoid = o.tableoid and od.objsubid = 0) + WHERE o.oid <= 9999 +) +SELECT p_oid, proname, prodesc FROM funcdescs + WHERE prodesc IS DISTINCT FROM expecteddesc + AND oprdesc NOT LIKE 'deprecated%' +ORDER BY 1; + p_oid | proname | prodesc +-------+-------------------------+------------------------------------------------- + 378 | array_append | append element onto end of array + 379 | array_prepend | prepend element onto front of array + 1035 | aclinsert | add/update ACL item + 1036 | aclremove | remove ACL item + 1037 | aclcontains | contains + 3217 | jsonb_extract_path | get value from jsonb with path elements + 3940 | jsonb_extract_path_text | get value from jsonb as text with path elements + 3951 | json_extract_path | get value from json with path elements + 3953 | json_extract_path_text | get value from json as text with path elements +(9 rows) + +-- Operators that are commutator pairs should have identical volatility +-- and leakproofness markings on their implementation functions. +SELECT o1.oid, o1.oprcode, o2.oid, o2.oprcode +FROM pg_operator AS o1, pg_operator AS o2, pg_proc AS p1, pg_proc AS p2 +WHERE o1.oprcom = o2.oid AND p1.oid = o1.oprcode AND p2.oid = o2.oprcode AND + (p1.provolatile != p2.provolatile OR + p1.proleakproof != p2.proleakproof); + oid | oprcode | oid | oprcode +-----+---------+-----+--------- +(0 rows) + +-- Likewise for negator pairs. +SELECT o1.oid, o1.oprcode, o2.oid, o2.oprcode +FROM pg_operator AS o1, pg_operator AS o2, pg_proc AS p1, pg_proc AS p2 +WHERE o1.oprnegate = o2.oid AND p1.oid = o1.oprcode AND p2.oid = o2.oprcode AND + (p1.provolatile != p2.provolatile OR + p1.proleakproof != p2.proleakproof); + oid | oprcode | oid | oprcode +-----+---------+-----+--------- +(0 rows) + +-- Btree comparison operators' functions should have the same volatility +-- and leakproofness markings as the associated comparison support function. 
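-- For orientation (illustrative example only; output not captured here): the
-- "comparison support function" meant above is the opfamily's btree support
-- procedure number 1 (e.g. btint4cmp for integers).  A standalone lookup of
-- that procedure and its volatility/leakproofness markings might look like:
SELECT ap.amproc::regproc AS cmp_proc,
       p.provolatile,
       p.proleakproof
FROM pg_amproc ap
JOIN pg_opfamily opf ON opf.oid = ap.amprocfamily
JOIN pg_am am ON am.oid = opf.opfmethod
JOIN pg_proc p ON p.oid = ap.amproc
WHERE am.amname = 'btree'
  AND opf.opfname = 'integer_ops'
  AND ap.amproclefttype = 'int4'::regtype
  AND ap.amprocrighttype = 'int4'::regtype
  AND ap.amprocnum = 1;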
+SELECT pp.oid::regprocedure as proc, pp.provolatile as vp, pp.proleakproof as lp, + po.oid::regprocedure as opr, po.provolatile as vo, po.proleakproof as lo +FROM pg_proc pp, pg_proc po, pg_operator o, pg_amproc ap, pg_amop ao +WHERE pp.oid = ap.amproc AND po.oid = o.oprcode AND o.oid = ao.amopopr AND + ao.amopmethod = (SELECT oid FROM pg_am WHERE amname = 'btree') AND + ao.amopfamily = ap.amprocfamily AND + ao.amoplefttype = ap.amproclefttype AND + ao.amoprighttype = ap.amprocrighttype AND + ap.amprocnum = 1 AND + (pp.provolatile != po.provolatile OR + pp.proleakproof != po.proleakproof) +ORDER BY 1; + proc | vp | lp | opr | vo | lo +------+----+----+-----+----+---- +(0 rows) + +-- **************** pg_aggregate **************** +-- Look for illegal values in pg_aggregate fields. +SELECT ctid, aggfnoid::oid +FROM pg_aggregate as a +WHERE aggfnoid = 0 OR aggtransfn = 0 OR + aggkind NOT IN ('n', 'o', 'h') OR + aggnumdirectargs < 0 OR + (aggkind = 'n' AND aggnumdirectargs > 0) OR + aggfinalmodify NOT IN ('r', 's', 'w') OR + aggmfinalmodify NOT IN ('r', 's', 'w') OR + aggtranstype = 0 OR aggtransspace < 0 OR aggmtransspace < 0; + ctid | aggfnoid +------+---------- +(0 rows) + +-- Make sure the matching pg_proc entry is sensible, too. +SELECT a.aggfnoid::oid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggfnoid = p.oid AND + (p.prokind != 'a' OR p.proretset OR p.pronargs < a.aggnumdirectargs); + aggfnoid | proname +----------+--------- +(0 rows) + +-- Make sure there are no prokind = PROKIND_AGGREGATE pg_proc entries without matches. +SELECT oid, proname +FROM pg_proc as p +WHERE p.prokind = 'a' AND + NOT EXISTS (SELECT 1 FROM pg_aggregate a WHERE a.aggfnoid = p.oid); + oid | proname +-----+--------- +(0 rows) + +-- If there is no finalfn then the output type must be the transtype. +SELECT a.aggfnoid::oid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggfnoid = p.oid AND + a.aggfinalfn = 0 AND p.prorettype != a.aggtranstype; + aggfnoid | proname +----------+--------- +(0 rows) + +-- Cross-check transfn against its entry in pg_proc. +SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr +WHERE a.aggfnoid = p.oid AND + a.aggtransfn = ptr.oid AND + (ptr.proretset + OR NOT (ptr.pronargs = + CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 + ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) + OR NOT binary_coercible(ptr.prorettype, a.aggtranstype) + OR NOT binary_coercible(a.aggtranstype, ptr.proargtypes[0]) + OR (p.pronargs > 0 AND + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) + OR (p.pronargs > 1 AND + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) + OR (p.pronargs > 2 AND + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) + OR (p.pronargs > 3 AND + NOT binary_coercible(p.proargtypes[3], ptr.proargtypes[4])) + -- we could carry the check further, but 4 args is enough for now + OR (p.pronargs > 4) + ); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- Cross-check finalfn (if present) against its entry in pg_proc. 
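-- For orientation (illustrative example only; output not captured here): each
-- pg_aggregate row names a state (transition) type plus the functions that
-- advance and, optionally, finalize that state, which is what the transfn and
-- finalfn cross-checks validate.  A quick lookup for avg(int4):
SELECT a.aggfnoid              AS aggregate,
       a.aggtranstype::regtype AS state_type,
       a.aggtransfn            AS transfn,
       a.aggfinalfn            AS finalfn
FROM pg_aggregate a
WHERE a.aggfnoid = 'avg(int4)'::regprocedure;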
+SELECT a.aggfnoid::oid, p.proname, pfn.oid, pfn.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS pfn +WHERE a.aggfnoid = p.oid AND + a.aggfinalfn = pfn.oid AND + (pfn.proretset OR + NOT binary_coercible(pfn.prorettype, p.prorettype) OR + NOT binary_coercible(a.aggtranstype, pfn.proargtypes[0]) OR + CASE WHEN a.aggfinalextra THEN pfn.pronargs != p.pronargs + 1 + ELSE pfn.pronargs != a.aggnumdirectargs + 1 END + OR (pfn.pronargs > 1 AND + NOT binary_coercible(p.proargtypes[0], pfn.proargtypes[1])) + OR (pfn.pronargs > 2 AND + NOT binary_coercible(p.proargtypes[1], pfn.proargtypes[2])) + OR (pfn.pronargs > 3 AND + NOT binary_coercible(p.proargtypes[2], pfn.proargtypes[3])) + -- we could carry the check further, but 4 args is enough for now + OR (pfn.pronargs > 4) + ); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- If transfn is strict then either initval should be non-NULL, or +-- input type should match transtype so that the first non-null input +-- can be assigned as the state value. +SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr +WHERE a.aggfnoid = p.oid AND + a.aggtransfn = ptr.oid AND ptr.proisstrict AND + a.agginitval IS NULL AND + NOT binary_coercible(p.proargtypes[0], a.aggtranstype); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- Check for inconsistent specifications of moving-aggregate columns. +SELECT ctid, aggfnoid::oid +FROM pg_aggregate as a +WHERE aggmtranstype != 0 AND + (aggmtransfn = 0 OR aggminvtransfn = 0); + ctid | aggfnoid +------+---------- +(0 rows) + +SELECT ctid, aggfnoid::oid +FROM pg_aggregate as a +WHERE aggmtranstype = 0 AND + (aggmtransfn != 0 OR aggminvtransfn != 0 OR aggmfinalfn != 0 OR + aggmtransspace != 0 OR aggminitval IS NOT NULL); + ctid | aggfnoid +------+---------- +(0 rows) + +-- If there is no mfinalfn then the output type must be the mtranstype. +SELECT a.aggfnoid::oid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggfnoid = p.oid AND + a.aggmtransfn != 0 AND + a.aggmfinalfn = 0 AND p.prorettype != a.aggmtranstype; + aggfnoid | proname +----------+--------- +(0 rows) + +-- Cross-check mtransfn (if present) against its entry in pg_proc. +SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr +WHERE a.aggfnoid = p.oid AND + a.aggmtransfn = ptr.oid AND + (ptr.proretset + OR NOT (ptr.pronargs = + CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 + ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) + OR NOT binary_coercible(ptr.prorettype, a.aggmtranstype) + OR NOT binary_coercible(a.aggmtranstype, ptr.proargtypes[0]) + OR (p.pronargs > 0 AND + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) + OR (p.pronargs > 1 AND + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) + OR (p.pronargs > 2 AND + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) + -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) + ); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- Cross-check minvtransfn (if present) against its entry in pg_proc. 
+SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr +WHERE a.aggfnoid = p.oid AND + a.aggminvtransfn = ptr.oid AND + (ptr.proretset + OR NOT (ptr.pronargs = + CASE WHEN a.aggkind = 'n' THEN p.pronargs + 1 + ELSE greatest(p.pronargs - a.aggnumdirectargs, 1) + 1 END) + OR NOT binary_coercible(ptr.prorettype, a.aggmtranstype) + OR NOT binary_coercible(a.aggmtranstype, ptr.proargtypes[0]) + OR (p.pronargs > 0 AND + NOT binary_coercible(p.proargtypes[0], ptr.proargtypes[1])) + OR (p.pronargs > 1 AND + NOT binary_coercible(p.proargtypes[1], ptr.proargtypes[2])) + OR (p.pronargs > 2 AND + NOT binary_coercible(p.proargtypes[2], ptr.proargtypes[3])) + -- we could carry the check further, but 3 args is enough for now + OR (p.pronargs > 3) + ); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- Cross-check mfinalfn (if present) against its entry in pg_proc. +SELECT a.aggfnoid::oid, p.proname, pfn.oid, pfn.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS pfn +WHERE a.aggfnoid = p.oid AND + a.aggmfinalfn = pfn.oid AND + (pfn.proretset OR + NOT binary_coercible(pfn.prorettype, p.prorettype) OR + NOT binary_coercible(a.aggmtranstype, pfn.proargtypes[0]) OR + CASE WHEN a.aggmfinalextra THEN pfn.pronargs != p.pronargs + 1 + ELSE pfn.pronargs != a.aggnumdirectargs + 1 END + OR (pfn.pronargs > 1 AND + NOT binary_coercible(p.proargtypes[0], pfn.proargtypes[1])) + OR (pfn.pronargs > 2 AND + NOT binary_coercible(p.proargtypes[1], pfn.proargtypes[2])) + OR (pfn.pronargs > 3 AND + NOT binary_coercible(p.proargtypes[2], pfn.proargtypes[3])) + -- we could carry the check further, but 4 args is enough for now + OR (pfn.pronargs > 4) + ); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- If mtransfn is strict then either minitval should be non-NULL, or +-- input type should match mtranstype so that the first non-null input +-- can be assigned as the state value. +SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr +WHERE a.aggfnoid = p.oid AND + a.aggmtransfn = ptr.oid AND ptr.proisstrict AND + a.aggminitval IS NULL AND + NOT binary_coercible(p.proargtypes[0], a.aggmtranstype); + aggfnoid | proname | oid | proname +----------+---------+-----+--------- +(0 rows) + +-- mtransfn and minvtransfn should have same strictness setting. +SELECT a.aggfnoid::oid, p.proname, ptr.oid, ptr.proname, iptr.oid, iptr.proname +FROM pg_aggregate AS a, pg_proc AS p, pg_proc AS ptr, pg_proc AS iptr +WHERE a.aggfnoid = p.oid AND + a.aggmtransfn = ptr.oid AND + a.aggminvtransfn = iptr.oid AND + ptr.proisstrict != iptr.proisstrict; + aggfnoid | proname | oid | proname | oid | proname +----------+---------+-----+---------+-----+--------- +(0 rows) + +-- Check that all combine functions have signature +-- combine(transtype, transtype) returns transtype +SELECT a.aggfnoid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggcombinefn = p.oid AND + (p.pronargs != 2 OR + p.prorettype != p.proargtypes[0] OR + p.prorettype != p.proargtypes[1] OR + NOT binary_coercible(a.aggtranstype, p.proargtypes[0])); + aggfnoid | proname +----------+--------- +(0 rows) + +-- Check that no combine function for an INTERNAL transtype is strict. 
+SELECT a.aggfnoid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggcombinefn = p.oid AND + a.aggtranstype = 'internal'::regtype AND p.proisstrict; + aggfnoid | proname +----------+--------- +(0 rows) + +-- serialize/deserialize functions should be specified only for aggregates +-- with transtype internal and a combine function, and we should have both +-- or neither of them. +SELECT aggfnoid, aggtranstype, aggserialfn, aggdeserialfn +FROM pg_aggregate +WHERE (aggserialfn != 0 OR aggdeserialfn != 0) + AND (aggtranstype != 'internal'::regtype OR aggcombinefn = 0 OR + aggserialfn = 0 OR aggdeserialfn = 0); + aggfnoid | aggtranstype | aggserialfn | aggdeserialfn +----------+--------------+-------------+--------------- +(0 rows) + +-- Check that all serialization functions have signature +-- serialize(internal) returns bytea +-- Also insist that they be strict; it's wasteful to run them on NULLs. +SELECT a.aggfnoid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggserialfn = p.oid AND + (p.prorettype != 'bytea'::regtype OR p.pronargs != 1 OR + p.proargtypes[0] != 'internal'::regtype OR + NOT p.proisstrict); + aggfnoid | proname +----------+--------- +(0 rows) + +-- Check that all deserialization functions have signature +-- deserialize(bytea, internal) returns internal +-- Also insist that they be strict; it's wasteful to run them on NULLs. +SELECT a.aggfnoid, p.proname +FROM pg_aggregate as a, pg_proc as p +WHERE a.aggdeserialfn = p.oid AND + (p.prorettype != 'internal'::regtype OR p.pronargs != 2 OR + p.proargtypes[0] != 'bytea'::regtype OR + p.proargtypes[1] != 'internal'::regtype OR + NOT p.proisstrict); + aggfnoid | proname +----------+--------- +(0 rows) + +-- Check that aggregates which have the same transition function also have +-- the same combine, serialization, and deserialization functions. +-- While that isn't strictly necessary, it's fishy if they don't. +SELECT a.aggfnoid, a.aggcombinefn, a.aggserialfn, a.aggdeserialfn, + b.aggfnoid, b.aggcombinefn, b.aggserialfn, b.aggdeserialfn +FROM + pg_aggregate a, pg_aggregate b +WHERE + a.aggfnoid < b.aggfnoid AND a.aggtransfn = b.aggtransfn AND + (a.aggcombinefn != b.aggcombinefn OR a.aggserialfn != b.aggserialfn + OR a.aggdeserialfn != b.aggdeserialfn); + aggfnoid | aggcombinefn | aggserialfn | aggdeserialfn | aggfnoid | aggcombinefn | aggserialfn | aggdeserialfn +----------+--------------+-------------+---------------+----------+--------------+-------------+--------------- +(0 rows) + +-- Cross-check aggsortop (if present) against pg_operator. +-- We expect to find entries for bool_and, bool_or, every, max, and min. 
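-- For orientation (illustrative example only; output not captured here):
-- aggsortop records the btree ordering operator that lets MIN/MAX-style
-- aggregates be answered from an index endpoint, as checked below.
-- For example, for max(int4):
SELECT a.aggfnoid               AS aggregate,
       a.aggsortop::regoperator AS sort_operator
FROM pg_aggregate a
WHERE a.aggfnoid = 'max(int4)'::regprocedure;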
+SELECT DISTINCT proname, oprname +FROM pg_operator AS o, pg_aggregate AS a, pg_proc AS p +WHERE a.aggfnoid = p.oid AND a.aggsortop = o.oid +ORDER BY 1, 2; + proname | oprname +----------+--------- + bool_and | < + bool_or | > + every | < + max | > + min | < +(5 rows) + +-- Check datatypes match +SELECT a.aggfnoid::oid, o.oid +FROM pg_operator AS o, pg_aggregate AS a, pg_proc AS p +WHERE a.aggfnoid = p.oid AND a.aggsortop = o.oid AND + (oprkind != 'b' OR oprresult != 'boolean'::regtype + OR oprleft != p.proargtypes[0] OR oprright != p.proargtypes[0]); + aggfnoid | oid +----------+----- +(0 rows) + +-- Check operator is a suitable btree opfamily member +SELECT a.aggfnoid::oid, o.oid +FROM pg_operator AS o, pg_aggregate AS a, pg_proc AS p +WHERE a.aggfnoid = p.oid AND a.aggsortop = o.oid AND + NOT EXISTS(SELECT 1 FROM pg_amop + WHERE amopmethod = (SELECT oid FROM pg_am WHERE amname = 'btree') + AND amopopr = o.oid + AND amoplefttype = o.oprleft + AND amoprighttype = o.oprright); + aggfnoid | oid +----------+----- +(0 rows) + +-- Check correspondence of btree strategies and names +SELECT DISTINCT proname, oprname, amopstrategy +FROM pg_operator AS o, pg_aggregate AS a, pg_proc AS p, + pg_amop as ao +WHERE a.aggfnoid = p.oid AND a.aggsortop = o.oid AND + amopopr = o.oid AND + amopmethod = (SELECT oid FROM pg_am WHERE amname = 'btree') +ORDER BY 1, 2; + proname | oprname | amopstrategy +----------+---------+-------------- + bool_and | < | 1 + bool_or | > | 5 + every | < | 1 + max | > | 5 + min | < | 1 +(5 rows) + +-- Check that there are not aggregates with the same name and different +-- numbers of arguments. While not technically wrong, we have a project policy +-- to avoid this because it opens the door for confusion in connection with +-- ORDER BY: novices frequently put the ORDER BY in the wrong place. +-- See the fate of the single-argument form of string_agg() for history. +-- (Note: we don't forbid users from creating such aggregates; the policy is +-- just to think twice before creating built-in aggregates like this.) +-- The only aggregates that should show up here are count(x) and count(*). +SELECT p1.oid::regprocedure, p2.oid::regprocedure +FROM pg_proc AS p1, pg_proc AS p2 +WHERE p1.oid < p2.oid AND p1.proname = p2.proname AND + p1.prokind = 'a' AND p2.prokind = 'a' AND + array_dims(p1.proargtypes) != array_dims(p2.proargtypes) +ORDER BY 1; + oid | oid +--------------+--------- + count("any") | count() +(1 row) + +-- For the same reason, built-in aggregates with default arguments are no good. +SELECT oid, proname +FROM pg_proc AS p +WHERE prokind = 'a' AND proargdefaults IS NOT NULL; + oid | proname +-----+--------- +(0 rows) + +-- For the same reason, we avoid creating built-in variadic aggregates, except +-- that variadic ordered-set aggregates are OK (since they have special syntax +-- that is not subject to the misplaced ORDER BY issue). +SELECT p.oid, proname +FROM pg_proc AS p JOIN pg_aggregate AS a ON a.aggfnoid = p.oid +WHERE prokind = 'a' AND provariadic != 0 AND a.aggkind = 'n'; + oid | proname +-----+--------- +(0 rows) + +-- **************** pg_opfamily **************** +-- Look for illegal values in pg_opfamily fields +SELECT f.oid +FROM pg_opfamily as f +WHERE f.opfmethod = 0 OR f.opfnamespace = 0; + oid +----- +(0 rows) + +-- Look for opfamilies having no opclasses. While most validation of +-- opfamilies is now handled by AM-specific amvalidate functions, that's +-- driven from pg_opclass entries below, so an empty opfamily would not +-- get noticed. 
+SELECT oid, opfname FROM pg_opfamily f +WHERE NOT EXISTS (SELECT 1 FROM pg_opclass WHERE opcfamily = f.oid); + oid | opfname +-----+--------- +(0 rows) + +-- **************** pg_opclass **************** +-- Look for illegal values in pg_opclass fields +SELECT c1.oid +FROM pg_opclass AS c1 +WHERE c1.opcmethod = 0 OR c1.opcnamespace = 0 OR c1.opcfamily = 0 + OR c1.opcintype = 0; + oid +----- +(0 rows) + +-- opcmethod must match owning opfamily's opfmethod +SELECT c1.oid, f1.oid +FROM pg_opclass AS c1, pg_opfamily AS f1 +WHERE c1.opcfamily = f1.oid AND c1.opcmethod != f1.opfmethod; + oid | oid +-----+----- +(0 rows) + +-- There should not be multiple entries in pg_opclass with opcdefault true +-- and the same opcmethod/opcintype combination. +SELECT c1.oid, c2.oid +FROM pg_opclass AS c1, pg_opclass AS c2 +WHERE c1.oid != c2.oid AND + c1.opcmethod = c2.opcmethod AND c1.opcintype = c2.opcintype AND + c1.opcdefault AND c2.opcdefault; + oid | oid +-----+----- +(0 rows) + +-- Ask access methods to validate opclasses +-- (this replaces a lot of SQL-level checks that used to be done in this file) +SELECT oid, opcname FROM pg_opclass WHERE NOT amvalidate(oid); + oid | opcname +-----+--------- +(0 rows) + +-- **************** pg_am **************** +-- Look for illegal values in pg_am fields +SELECT a1.oid, a1.amname +FROM pg_am AS a1 +WHERE a1.amhandler = 0; + oid | amname +-----+-------- +(0 rows) + +-- Check for index amhandler functions with the wrong signature +SELECT a1.oid, a1.amname, p1.oid, p1.proname +FROM pg_am AS a1, pg_proc AS p1 +WHERE p1.oid = a1.amhandler AND a1.amtype = 'i' AND + (p1.prorettype != 'index_am_handler'::regtype + OR p1.proretset + OR p1.pronargs != 1 + OR p1.proargtypes[0] != 'internal'::regtype); + oid | amname | oid | proname +-----+--------+-----+--------- +(0 rows) + +-- Check for table amhandler functions with the wrong signature +SELECT a1.oid, a1.amname, p1.oid, p1.proname +FROM pg_am AS a1, pg_proc AS p1 +WHERE p1.oid = a1.amhandler AND a1.amtype = 't' AND + (p1.prorettype != 'table_am_handler'::regtype + OR p1.proretset + OR p1.pronargs != 1 + OR p1.proargtypes[0] != 'internal'::regtype); + oid | amname | oid | proname +-----+--------+-----+--------- +(0 rows) + +-- **************** pg_amop **************** +-- Look for illegal values in pg_amop fields +SELECT a1.amopfamily, a1.amopstrategy +FROM pg_amop as a1 +WHERE a1.amopfamily = 0 OR a1.amoplefttype = 0 OR a1.amoprighttype = 0 + OR a1.amopopr = 0 OR a1.amopmethod = 0 OR a1.amopstrategy < 1; + amopfamily | amopstrategy +------------+-------------- +(0 rows) + +SELECT a1.amopfamily, a1.amopstrategy +FROM pg_amop as a1 +WHERE NOT ((a1.amoppurpose = 's' AND a1.amopsortfamily = 0) OR + (a1.amoppurpose = 'o' AND a1.amopsortfamily <> 0)); + amopfamily | amopstrategy +------------+-------------- +(0 rows) + +-- amopmethod must match owning opfamily's opfmethod +SELECT a1.oid, f1.oid +FROM pg_amop AS a1, pg_opfamily AS f1 +WHERE a1.amopfamily = f1.oid AND a1.amopmethod != f1.opfmethod; + oid | oid +-----+----- +(0 rows) + +-- Make a list of all the distinct operator names being used in particular +-- strategy slots. This is a bit hokey, since the list might need to change +-- in future releases, but it's an effective way of spotting mistakes such as +-- swapping two operators within a family. 
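-- For orientation (illustrative example only; output not captured here): the
-- strategy numbers in the list below are defined per access method; for btree,
-- strategies 1 through 5 mean <, <=, =, >=, > respectively.  Restricting the
-- same catalogs to a single opfamily makes that mapping easy to see:
SELECT ao.amopstrategy,
       ao.amopopr::regoperator
FROM pg_amop ao
JOIN pg_opfamily opf ON opf.oid = ao.amopfamily
JOIN pg_am am ON am.oid = opf.opfmethod
WHERE am.amname = 'btree'
  AND opf.opfname = 'integer_ops'
  AND ao.amoplefttype = 'int4'::regtype
  AND ao.amoprighttype = 'int4'::regtype
ORDER BY ao.amopstrategy;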
+SELECT DISTINCT amopmethod, amopstrategy, oprname +FROM pg_amop a1 LEFT JOIN pg_operator o1 ON amopopr = o1.oid +ORDER BY 1, 2, 3; + amopmethod | amopstrategy | oprname +------------+--------------+--------- + 403 | 1 | *< + 403 | 1 | < + 403 | 1 | ~<~ + 403 | 2 | *<= + 403 | 2 | <= + 403 | 2 | ~<=~ + 403 | 3 | *= + 403 | 3 | = + 403 | 4 | *>= + 403 | 4 | >= + 403 | 4 | ~>=~ + 403 | 5 | *> + 403 | 5 | > + 403 | 5 | ~>~ + 405 | 1 | = + 783 | 1 | << + 783 | 1 | @@ + 783 | 2 | &< + 783 | 3 | && + 783 | 4 | &> + 783 | 5 | >> + 783 | 6 | -|- + 783 | 6 | ~= + 783 | 7 | @> + 783 | 8 | <@ + 783 | 9 | &<| + 783 | 10 | <<| + 783 | 11 | |>> + 783 | 12 | |&> + 783 | 15 | <-> + 783 | 16 | @> + 783 | 18 | = + 783 | 19 | <> + 783 | 20 | < + 783 | 21 | <= + 783 | 22 | > + 783 | 23 | >= + 783 | 24 | << + 783 | 25 | <<= + 783 | 26 | >> + 783 | 27 | >>= + 783 | 28 | <@ + 783 | 29 | <^ + 783 | 30 | >^ + 783 | 48 | <@ + 783 | 68 | <@ + 2742 | 1 | && + 2742 | 1 | @@ + 2742 | 2 | @> + 2742 | 2 | @@@ + 2742 | 3 | <@ + 2742 | 4 | = + 2742 | 7 | @> + 2742 | 9 | ? + 2742 | 10 | ?| + 2742 | 11 | ?& + 2742 | 15 | @? + 2742 | 16 | @@ + 3580 | 1 | < + 3580 | 1 | << + 3580 | 1 | = + 3580 | 2 | &< + 3580 | 2 | <= + 3580 | 3 | && + 3580 | 3 | = + 3580 | 4 | &> + 3580 | 4 | >= + 3580 | 5 | > + 3580 | 5 | >> + 3580 | 6 | ~= + 3580 | 7 | >>= + 3580 | 7 | @> + 3580 | 8 | <<= + 3580 | 8 | <@ + 3580 | 9 | &<| + 3580 | 10 | <<| + 3580 | 11 | |>> + 3580 | 12 | |&> + 3580 | 16 | @> + 3580 | 17 | -|- + 3580 | 18 | = + 3580 | 20 | < + 3580 | 21 | <= + 3580 | 22 | > + 3580 | 23 | >= + 3580 | 24 | >> + 3580 | 26 | << + 4000 | 1 | << + 4000 | 1 | ~<~ + 4000 | 2 | &< + 4000 | 2 | ~<=~ + 4000 | 3 | && + 4000 | 3 | = + 4000 | 4 | &> + 4000 | 4 | ~>=~ + 4000 | 5 | >> + 4000 | 5 | ~>~ + 4000 | 6 | -|- + 4000 | 6 | ~= + 4000 | 7 | @> + 4000 | 8 | <@ + 4000 | 9 | &<| + 4000 | 10 | <<| + 4000 | 11 | < + 4000 | 11 | |>> + 4000 | 12 | <= + 4000 | 12 | |&> + 4000 | 14 | >= + 4000 | 15 | <-> + 4000 | 15 | > + 4000 | 16 | @> + 4000 | 18 | = + 4000 | 19 | <> + 4000 | 20 | < + 4000 | 21 | <= + 4000 | 22 | > + 4000 | 23 | >= + 4000 | 24 | << + 4000 | 25 | <<= + 4000 | 26 | >> + 4000 | 27 | >>= + 4000 | 28 | ^@ + 4000 | 29 | <^ + 4000 | 30 | >^ +(124 rows) + +-- Check that all opclass search operators have selectivity estimators. +-- This is not absolutely required, but it seems a reasonable thing +-- to insist on for all standard datatypes. +SELECT a1.amopfamily, a1.amopopr, o1.oid, o1.oprname +FROM pg_amop AS a1, pg_operator AS o1 +WHERE a1.amopopr = o1.oid AND a1.amoppurpose = 's' AND + (o1.oprrest = 0 OR o1.oprjoin = 0); + amopfamily | amopopr | oid | oprname +------------+---------+-----+--------- +(0 rows) + +-- Check that each opclass in an opfamily has associated operators, that is +-- ones whose oprleft matches opcintype (possibly by coercion). +SELECT c1.opcname, c1.opcfamily +FROM pg_opclass AS c1 +WHERE NOT EXISTS(SELECT 1 FROM pg_amop AS a1 + WHERE a1.amopfamily = c1.opcfamily + AND binary_coercible(c1.opcintype, a1.amoplefttype)); + opcname | opcfamily +---------+----------- +(0 rows) + +-- Check that each operator listed in pg_amop has an associated opclass, +-- that is one whose opcintype matches oprleft (possibly by coercion). +-- Otherwise the operator is useless because it cannot be matched to an index. +-- (In principle it could be useful to list such operators in multiple-datatype +-- btree opfamilies, but in practice you'd expect there to be an opclass for +-- every datatype the family knows about.) 
+SELECT a1.amopfamily, a1.amopstrategy, a1.amopopr +FROM pg_amop AS a1 +WHERE NOT EXISTS(SELECT 1 FROM pg_opclass AS c1 + WHERE c1.opcfamily = a1.amopfamily + AND binary_coercible(c1.opcintype, a1.amoplefttype)); + amopfamily | amopstrategy | amopopr +------------+--------------+--------- +(0 rows) + +-- Operators that are primary members of opclasses must be immutable (else +-- it suggests that the index ordering isn't fixed). Operators that are +-- cross-type members need only be stable, since they are just shorthands +-- for index probe queries. +SELECT a1.amopfamily, a1.amopopr, o1.oprname, p1.prosrc +FROM pg_amop AS a1, pg_operator AS o1, pg_proc AS p1 +WHERE a1.amopopr = o1.oid AND o1.oprcode = p1.oid AND + a1.amoplefttype = a1.amoprighttype AND + p1.provolatile != 'i'; + amopfamily | amopopr | oprname | prosrc +------------+---------+---------+-------- +(0 rows) + +SELECT a1.amopfamily, a1.amopopr, o1.oprname, p1.prosrc +FROM pg_amop AS a1, pg_operator AS o1, pg_proc AS p1 +WHERE a1.amopopr = o1.oid AND o1.oprcode = p1.oid AND + a1.amoplefttype != a1.amoprighttype AND + p1.provolatile = 'v'; + amopfamily | amopopr | oprname | prosrc +------------+---------+---------+-------- +(0 rows) + +-- **************** pg_amproc **************** +-- Look for illegal values in pg_amproc fields +SELECT a1.amprocfamily, a1.amprocnum +FROM pg_amproc as a1 +WHERE a1.amprocfamily = 0 OR a1.amproclefttype = 0 OR a1.amprocrighttype = 0 + OR a1.amprocnum < 0 OR a1.amproc = 0; + amprocfamily | amprocnum +--------------+----------- +(0 rows) + +-- Support routines that are primary members of opfamilies must be immutable +-- (else it suggests that the index ordering isn't fixed). But cross-type +-- members need only be stable, since they are just shorthands +-- for index probe queries. +SELECT a1.amprocfamily, a1.amproc, p1.prosrc +FROM pg_amproc AS a1, pg_proc AS p1 +WHERE a1.amproc = p1.oid AND + a1.amproclefttype = a1.amprocrighttype AND + p1.provolatile != 'i'; + amprocfamily | amproc | prosrc +--------------+--------+-------- +(0 rows) + +SELECT a1.amprocfamily, a1.amproc, p1.prosrc +FROM pg_amproc AS a1, pg_proc AS p1 +WHERE a1.amproc = p1.oid AND + a1.amproclefttype != a1.amprocrighttype AND + p1.provolatile = 'v'; + amprocfamily | amproc | prosrc +--------------+--------+-------- +(0 rows) + +-- Almost all of the core distribution's Btree opclasses can use one of the +-- two generic "equalimage" functions as their support function 4. Look for +-- opclasses that don't allow deduplication unconditionally here. +-- +-- Newly added Btree opclasses don't have to support deduplication. It will +-- usually be trivial to add support, though. Note that the expected output +-- of this part of the test will need to be updated when a new opclass cannot +-- support deduplication (by using btequalimage). 
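-- For orientation (illustrative example only; output not captured here):
-- "equalimage" is btree support function number 4; an opclass whose function
-- reports true (btequalimage does, unconditionally) can use B-tree
-- deduplication.  A lookup for a few familiar opclasses shows the cases the
-- check below is concerned with -- unconditional, conditional, and absent:
SELECT opc.opcname,
       ap.amproc::regproc AS equalimage_proc
FROM pg_opclass opc
JOIN pg_am am ON am.oid = opc.opcmethod
LEFT JOIN pg_amproc ap ON ap.amprocfamily = opc.opcfamily
                      AND ap.amproclefttype = opc.opcintype
                      AND ap.amprocrighttype = opc.opcintype
                      AND ap.amprocnum = 4
WHERE am.amname = 'btree'
  AND opc.opcname IN ('int4_ops', 'text_ops', 'numeric_ops')
ORDER BY 1;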
+SELECT amp.amproc::regproc AS proc, opf.opfname AS opfamily_name, + opc.opcname AS opclass_name, opc.opcintype::regtype AS opcintype +FROM pg_am AS am +JOIN pg_opclass AS opc ON opc.opcmethod = am.oid +JOIN pg_opfamily AS opf ON opc.opcfamily = opf.oid +LEFT JOIN pg_amproc AS amp ON amp.amprocfamily = opf.oid AND + amp.amproclefttype = opc.opcintype AND amp.amprocnum = 4 +WHERE am.amname = 'btree' AND + amp.amproc IS DISTINCT FROM 'btequalimage'::regproc +ORDER BY 1, 2, 3; + proc | opfamily_name | opclass_name | opcintype +--------------------+------------------+------------------+------------------ + btvarstrequalimage | bpchar_ops | bpchar_ops | character + btvarstrequalimage | text_ops | name_ops | name + btvarstrequalimage | text_ops | text_ops | text + btvarstrequalimage | text_ops | varchar_ops | text + | array_ops | array_ops | anyarray + | float_ops | float4_ops | real + | float_ops | float8_ops | double precision + | interval_ops | interval_ops | interval + | jsonb_ops | jsonb_ops | jsonb + | multirange_ops | multirange_ops | anymultirange + | numeric_ops | numeric_ops | numeric + | range_ops | range_ops | anyrange + | record_image_ops | record_image_ops | record + | record_ops | record_ops | record + | tsquery_ops | tsquery_ops | tsquery + | tsvector_ops | tsvector_ops | tsvector +(16 rows) + +-- **************** pg_index **************** +-- Look for illegal values in pg_index fields. +SELECT indexrelid, indrelid +FROM pg_index +WHERE indexrelid = 0 OR indrelid = 0 OR + indnatts <= 0 OR indnatts > 32; + indexrelid | indrelid +------------+---------- +(0 rows) + +-- oidvector and int2vector fields should be of length indnatts. +SELECT indexrelid, indrelid +FROM pg_index +WHERE array_lower(indkey, 1) != 0 OR array_upper(indkey, 1) != indnatts-1 OR + array_lower(indclass, 1) != 0 OR array_upper(indclass, 1) != indnatts-1 OR + array_lower(indcollation, 1) != 0 OR array_upper(indcollation, 1) != indnatts-1 OR + array_lower(indoption, 1) != 0 OR array_upper(indoption, 1) != indnatts-1; + indexrelid | indrelid +------------+---------- +(0 rows) + +-- Check that opclasses and collations match the underlying columns. +-- (As written, this test ignores expression indexes.) +SELECT indexrelid::regclass, indrelid::regclass, attname, atttypid::regtype, opcname +FROM (SELECT indexrelid, indrelid, unnest(indkey) as ikey, + unnest(indclass) as iclass, unnest(indcollation) as icoll + FROM pg_index) ss, + pg_attribute a, + pg_opclass opc +WHERE a.attrelid = indrelid AND a.attnum = ikey AND opc.oid = iclass AND + (NOT binary_coercible(atttypid, opcintype) OR icoll != attcollation); + indexrelid | indrelid | attname | atttypid | opcname +------------+----------+---------+----------+--------- +(0 rows) + +-- For system catalogs, be even tighter: nearly all indexes should be +-- exact type matches not binary-coercible matches. At this writing +-- the only exception is an OID index on a regproc column. 
+SELECT indexrelid::regclass, indrelid::regclass, attname, atttypid::regtype, opcname +FROM (SELECT indexrelid, indrelid, unnest(indkey) as ikey, + unnest(indclass) as iclass, unnest(indcollation) as icoll + FROM pg_index + WHERE indrelid < 16384) ss, + pg_attribute a, + pg_opclass opc +WHERE a.attrelid = indrelid AND a.attnum = ikey AND opc.oid = iclass AND + (opcintype != atttypid OR icoll != attcollation) +ORDER BY 1; + indexrelid | indrelid | attname | atttypid | opcname +--------------------------+--------------+----------+----------+--------- + pg_aggregate_fnoid_index | pg_aggregate | aggfnoid | regproc | oid_ops +(1 row) + +-- Check for system catalogs with collation-sensitive ordering. This is not +-- a representational error in pg_index, but simply wrong catalog design. +-- It's bad because we expect to be able to clone template0 and assign the +-- copy a different database collation. It would especially not work for +-- shared catalogs. +SELECT relname, attname, attcollation +FROM pg_class c, pg_attribute a +WHERE c.oid = attrelid AND c.oid < 16384 AND + c.relkind != 'v' AND -- we don't care about columns in views + attcollation != 0 AND + attcollation != (SELECT oid FROM pg_collation WHERE collname = 'C'); + relname | attname | attcollation +---------+---------+-------------- +(0 rows) + +-- Double-check that collation-sensitive indexes have "C" collation, too. +SELECT indexrelid::regclass, indrelid::regclass, iclass, icoll +FROM (SELECT indexrelid, indrelid, + unnest(indclass) as iclass, unnest(indcollation) as icoll + FROM pg_index + WHERE indrelid < 16384) ss +WHERE icoll != 0 AND + icoll != (SELECT oid FROM pg_collation WHERE collname = 'C'); + indexrelid | indrelid | iclass | icoll +------------+----------+--------+------- +(0 rows) + diff --git a/src/test/regress/expected/partition_aggregate.out b/src/test/regress/expected/partition_aggregate.out new file mode 100644 index 0000000..1b900fd --- /dev/null +++ b/src/test/regress/expected/partition_aggregate.out @@ -0,0 +1,1520 @@ +-- +-- PARTITION_AGGREGATE +-- Test partitionwise aggregation on partitioned tables +-- +-- Note: to ensure plan stability, it's a good idea to make the partitions of +-- any one partitioned table in this test all have different numbers of rows. +-- +-- Enable partitionwise aggregate, which by default is disabled. +SET enable_partitionwise_aggregate TO true; +-- Enable partitionwise join, which by default is disabled. +SET enable_partitionwise_join TO true; +-- Disable parallel plans. +SET max_parallel_workers_per_gather TO 0; +-- Disable incremental sort, which can influence selected plans due to fuzz factor. +SET enable_incremental_sort TO off; +-- +-- Tests for list partitioned tables. +-- +CREATE TABLE pagg_tab (a int, b int, c text, d int) PARTITION BY LIST(c); +CREATE TABLE pagg_tab_p1 PARTITION OF pagg_tab FOR VALUES IN ('0000', '0001', '0002', '0003', '0004'); +CREATE TABLE pagg_tab_p2 PARTITION OF pagg_tab FOR VALUES IN ('0005', '0006', '0007', '0008'); +CREATE TABLE pagg_tab_p3 PARTITION OF pagg_tab FOR VALUES IN ('0009', '0010', '0011'); +INSERT INTO pagg_tab SELECT i % 20, i % 30, to_char(i % 12, 'FM0000'), i % 30 FROM generate_series(0, 2999) i; +ANALYZE pagg_tab; +-- When GROUP BY clause matches; full aggregation is performed for each partition. 
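-- For orientation (illustrative example only; output not captured here):
-- grouping by the list-partition key c is what makes full per-partition
-- aggregation safe, because each group's rows live in exactly one partition.
-- That routing can be confirmed directly before looking at the plans below:
SELECT tableoid::regclass AS partition, c, count(*)
FROM pagg_tab
GROUP BY tableoid, c
ORDER BY 1, 2;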
+EXPLAIN (COSTS OFF) +SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: pagg_tab.c, (sum(pagg_tab.a)), (avg(pagg_tab.b)) + -> Append + -> HashAggregate + Group Key: pagg_tab.c + Filter: (avg(pagg_tab.d) < '15'::numeric) + -> Seq Scan on pagg_tab_p1 pagg_tab + -> HashAggregate + Group Key: pagg_tab_1.c + Filter: (avg(pagg_tab_1.d) < '15'::numeric) + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> HashAggregate + Group Key: pagg_tab_2.c + Filter: (avg(pagg_tab_2.d) < '15'::numeric) + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(15 rows) + +SELECT c, sum(a), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY c HAVING avg(d) < 15 ORDER BY 1, 2, 3; + c | sum | avg | count | min | max +------+------+---------------------+-------+-----+----- + 0000 | 2000 | 12.0000000000000000 | 250 | 0 | 24 + 0001 | 2250 | 13.0000000000000000 | 250 | 1 | 25 + 0002 | 2500 | 14.0000000000000000 | 250 | 2 | 26 + 0006 | 2500 | 12.0000000000000000 | 250 | 2 | 24 + 0007 | 2750 | 13.0000000000000000 | 250 | 3 | 25 + 0008 | 2000 | 14.0000000000000000 | 250 | 0 | 26 +(6 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: pagg_tab.a, (sum(pagg_tab.b)), (avg(pagg_tab.b)) + -> Finalize HashAggregate + Group Key: pagg_tab.a + Filter: (avg(pagg_tab.d) < '15'::numeric) + -> Append + -> Partial HashAggregate + Group Key: pagg_tab.a + -> Seq Scan on pagg_tab_p1 pagg_tab + -> Partial HashAggregate + Group Key: pagg_tab_1.a + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> Partial HashAggregate + Group Key: pagg_tab_2.a + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(15 rows) + +SELECT a, sum(b), avg(b), count(*), min(a), max(b) FROM pagg_tab GROUP BY a HAVING avg(d) < 15 ORDER BY 1, 2, 3; + a | sum | avg | count | min | max +----+------+---------------------+-------+-----+----- + 0 | 1500 | 10.0000000000000000 | 150 | 0 | 20 + 1 | 1650 | 11.0000000000000000 | 150 | 1 | 21 + 2 | 1800 | 12.0000000000000000 | 150 | 2 | 22 + 3 | 1950 | 13.0000000000000000 | 150 | 3 | 23 + 4 | 2100 | 14.0000000000000000 | 150 | 4 | 24 + 10 | 1500 | 10.0000000000000000 | 150 | 10 | 20 + 11 | 1650 | 11.0000000000000000 | 150 | 11 | 21 + 12 | 1800 | 12.0000000000000000 | 150 | 12 | 22 + 13 | 1950 | 13.0000000000000000 | 150 | 13 | 23 + 14 | 2100 | 14.0000000000000000 | 150 | 14 | 24 +(10 rows) + +-- Check with multiple columns in GROUP BY +EXPLAIN (COSTS OFF) +SELECT a, c, count(*) FROM pagg_tab GROUP BY a, c; + QUERY PLAN +------------------------------------------------ + Append + -> HashAggregate + Group Key: pagg_tab.a, pagg_tab.c + -> Seq Scan on pagg_tab_p1 pagg_tab + -> HashAggregate + Group Key: pagg_tab_1.a, pagg_tab_1.c + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> HashAggregate + Group Key: pagg_tab_2.a, pagg_tab_2.c + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(10 rows) + +-- Check with multiple columns in GROUP BY, order in GROUP BY is reversed +EXPLAIN (COSTS OFF) +SELECT a, c, count(*) FROM pagg_tab GROUP BY c, a; + QUERY PLAN +------------------------------------------------ + Append + -> HashAggregate + Group Key: pagg_tab.c, pagg_tab.a + -> Seq Scan on pagg_tab_p1 pagg_tab + -> HashAggregate + Group Key: pagg_tab_1.c, 
pagg_tab_1.a + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> HashAggregate + Group Key: pagg_tab_2.c, pagg_tab_2.a + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(10 rows) + +-- Check with multiple columns in GROUP BY, order in target-list is reversed +EXPLAIN (COSTS OFF) +SELECT c, a, count(*) FROM pagg_tab GROUP BY a, c; + QUERY PLAN +------------------------------------------------ + Append + -> HashAggregate + Group Key: pagg_tab.a, pagg_tab.c + -> Seq Scan on pagg_tab_p1 pagg_tab + -> HashAggregate + Group Key: pagg_tab_1.a, pagg_tab_1.c + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> HashAggregate + Group Key: pagg_tab_2.a, pagg_tab_2.c + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(10 rows) + +-- Test when input relation for grouping is dummy +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; + QUERY PLAN +-------------------------------- + HashAggregate + Group Key: c + -> Result + One-Time Filter: false +(4 rows) + +SELECT c, sum(a) FROM pagg_tab WHERE 1 = 2 GROUP BY c; + c | sum +---+----- +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; + QUERY PLAN +-------------------------------- + GroupAggregate + -> Result + One-Time Filter: false +(3 rows) + +SELECT c, sum(a) FROM pagg_tab WHERE c = 'x' GROUP BY c; + c | sum +---+----- +(0 rows) + +-- Test GroupAggregate paths by disabling hash aggregates. +SET enable_hashagg TO false; +-- When GROUP BY clause matches full aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: pagg_tab.c, (sum(pagg_tab.a)), (avg(pagg_tab.b)) + -> Append + -> GroupAggregate + Group Key: pagg_tab.c + Filter: (avg(pagg_tab.d) < '15'::numeric) + -> Sort + Sort Key: pagg_tab.c + -> Seq Scan on pagg_tab_p1 pagg_tab + -> GroupAggregate + Group Key: pagg_tab_1.c + Filter: (avg(pagg_tab_1.d) < '15'::numeric) + -> Sort + Sort Key: pagg_tab_1.c + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> GroupAggregate + Group Key: pagg_tab_2.c + Filter: (avg(pagg_tab_2.d) < '15'::numeric) + -> Sort + Sort Key: pagg_tab_2.c + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(21 rows) + +SELECT c, sum(a), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + c | sum | avg | count +------+------+---------------------+------- + 0000 | 2000 | 12.0000000000000000 | 250 + 0001 | 2250 | 13.0000000000000000 | 250 + 0002 | 2500 | 14.0000000000000000 | 250 + 0006 | 2500 | 12.0000000000000000 | 250 + 0007 | 2750 | 13.0000000000000000 | 250 + 0008 | 2000 | 14.0000000000000000 | 250 +(6 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. 
+EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: pagg_tab.a, (sum(pagg_tab.b)), (avg(pagg_tab.b)) + -> Finalize GroupAggregate + Group Key: pagg_tab.a + Filter: (avg(pagg_tab.d) < '15'::numeric) + -> Merge Append + Sort Key: pagg_tab.a + -> Partial GroupAggregate + Group Key: pagg_tab.a + -> Sort + Sort Key: pagg_tab.a + -> Seq Scan on pagg_tab_p1 pagg_tab + -> Partial GroupAggregate + Group Key: pagg_tab_1.a + -> Sort + Sort Key: pagg_tab_1.a + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> Partial GroupAggregate + Group Key: pagg_tab_2.a + -> Sort + Sort Key: pagg_tab_2.a + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(22 rows) + +SELECT a, sum(b), avg(b), count(*) FROM pagg_tab GROUP BY 1 HAVING avg(d) < 15 ORDER BY 1, 2, 3; + a | sum | avg | count +----+------+---------------------+------- + 0 | 1500 | 10.0000000000000000 | 150 + 1 | 1650 | 11.0000000000000000 | 150 + 2 | 1800 | 12.0000000000000000 | 150 + 3 | 1950 | 13.0000000000000000 | 150 + 4 | 2100 | 14.0000000000000000 | 150 + 10 | 1500 | 10.0000000000000000 | 150 + 11 | 1650 | 11.0000000000000000 | 150 + 12 | 1800 | 12.0000000000000000 | 150 + 13 | 1950 | 13.0000000000000000 | 150 + 14 | 2100 | 14.0000000000000000 | 150 +(10 rows) + +-- Test partitionwise grouping without any aggregates +EXPLAIN (COSTS OFF) +SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; + QUERY PLAN +------------------------------------------------------ + Merge Append + Sort Key: pagg_tab.c + -> Group + Group Key: pagg_tab.c + -> Sort + Sort Key: pagg_tab.c + -> Seq Scan on pagg_tab_p1 pagg_tab + -> Group + Group Key: pagg_tab_1.c + -> Sort + Sort Key: pagg_tab_1.c + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> Group + Group Key: pagg_tab_2.c + -> Sort + Sort Key: pagg_tab_2.c + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(17 rows) + +SELECT c FROM pagg_tab GROUP BY c ORDER BY 1; + c +------ + 0000 + 0001 + 0002 + 0003 + 0004 + 0005 + 0006 + 0007 + 0008 + 0009 + 0010 + 0011 +(12 rows) + +EXPLAIN (COSTS OFF) +SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; + QUERY PLAN +------------------------------------------------------------ + Group + Group Key: pagg_tab.a + -> Merge Append + Sort Key: pagg_tab.a + -> Group + Group Key: pagg_tab.a + -> Sort + Sort Key: pagg_tab.a + -> Seq Scan on pagg_tab_p1 pagg_tab + Filter: (a < 3) + -> Group + Group Key: pagg_tab_1.a + -> Sort + Sort Key: pagg_tab_1.a + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + Filter: (a < 3) + -> Group + Group Key: pagg_tab_2.a + -> Sort + Sort Key: pagg_tab_2.a + -> Seq Scan on pagg_tab_p3 pagg_tab_2 + Filter: (a < 3) +(22 rows) + +SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1; + a +--- + 0 + 1 + 2 +(3 rows) + +RESET enable_hashagg; +-- ROLLUP, partitionwise aggregation does not apply +EXPLAIN (COSTS OFF) +SELECT c, sum(a) FROM pagg_tab GROUP BY rollup(c) ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: pagg_tab.c, (sum(pagg_tab.a)) + -> MixedAggregate + Hash Key: pagg_tab.c + Group Key: () + -> Append + -> Seq Scan on pagg_tab_p1 pagg_tab_1 + -> Seq Scan on pagg_tab_p2 pagg_tab_2 + -> Seq Scan on pagg_tab_p3 pagg_tab_3 +(9 rows) + +-- ORDERED SET within the aggregate. +-- Full aggregation; since all the rows that belong to the same group come +-- from the same partition, having an ORDER BY within the aggregate doesn't +-- make any difference. 
+EXPLAIN (COSTS OFF) +SELECT c, sum(b order by a) FROM pagg_tab GROUP BY c ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------- + Sort + Sort Key: pagg_tab.c, (sum(pagg_tab.b ORDER BY pagg_tab.a)) + -> Append + -> GroupAggregate + Group Key: pagg_tab.c + -> Sort + Sort Key: pagg_tab.c, pagg_tab.a + -> Seq Scan on pagg_tab_p1 pagg_tab + -> GroupAggregate + Group Key: pagg_tab_1.c + -> Sort + Sort Key: pagg_tab_1.c, pagg_tab_1.a + -> Seq Scan on pagg_tab_p2 pagg_tab_1 + -> GroupAggregate + Group Key: pagg_tab_2.c + -> Sort + Sort Key: pagg_tab_2.c, pagg_tab_2.a + -> Seq Scan on pagg_tab_p3 pagg_tab_2 +(18 rows) + +-- Since GROUP BY clause does not match with PARTITION KEY; we need to do +-- partial aggregation. However, ORDERED SET are not partial safe and thus +-- partitionwise aggregation plan is not generated. +EXPLAIN (COSTS OFF) +SELECT a, sum(b order by a) FROM pagg_tab GROUP BY a ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------- + Sort + Sort Key: pagg_tab.a, (sum(pagg_tab.b ORDER BY pagg_tab.a)) + -> GroupAggregate + Group Key: pagg_tab.a + -> Sort + Sort Key: pagg_tab.a + -> Append + -> Seq Scan on pagg_tab_p1 pagg_tab_1 + -> Seq Scan on pagg_tab_p2 pagg_tab_2 + -> Seq Scan on pagg_tab_p3 pagg_tab_3 +(10 rows) + +-- JOIN query +CREATE TABLE pagg_tab1(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE pagg_tab1_p1 PARTITION OF pagg_tab1 FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab1_p2 PARTITION OF pagg_tab1 FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab1_p3 PARTITION OF pagg_tab1 FOR VALUES FROM (20) TO (30); +CREATE TABLE pagg_tab2(x int, y int) PARTITION BY RANGE(y); +CREATE TABLE pagg_tab2_p1 PARTITION OF pagg_tab2 FOR VALUES FROM (0) TO (10); +CREATE TABLE pagg_tab2_p2 PARTITION OF pagg_tab2 FOR VALUES FROM (10) TO (20); +CREATE TABLE pagg_tab2_p3 PARTITION OF pagg_tab2 FOR VALUES FROM (20) TO (30); +INSERT INTO pagg_tab1 SELECT i % 30, i % 20 FROM generate_series(0, 299, 2) i; +INSERT INTO pagg_tab2 SELECT i % 20, i % 30 FROM generate_series(0, 299, 3) i; +ANALYZE pagg_tab1; +ANALYZE pagg_tab2; +-- When GROUP BY clause matches; full aggregation is performed for each partition. 
+EXPLAIN (COSTS OFF) +SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t1.x, (sum(t1.y)), (count(*)) + -> Append + -> HashAggregate + Group Key: t1.x + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Seq Scan on pagg_tab1_p1 t1 + -> Hash + -> Seq Scan on pagg_tab2_p1 t2 + -> HashAggregate + Group Key: t1_1.x + -> Hash Join + Hash Cond: (t1_1.x = t2_1.y) + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 t2_1 + -> HashAggregate + Group Key: t1_2.x + -> Hash Join + Hash Cond: (t2_2.y = t1_2.x) + -> Seq Scan on pagg_tab2_p3 t2_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 t1_2 +(24 rows) + +SELECT t1.x, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + x | sum | count +----+------+------- + 0 | 500 | 100 + 6 | 1100 | 100 + 12 | 700 | 100 + 18 | 1300 | 100 + 24 | 900 | 100 +(5 rows) + +-- Check with whole-row reference; partitionwise aggregation does not apply +EXPLAIN (COSTS OFF) +SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t1.x, (sum(t1.y)), (count(((t1.*)::pagg_tab1))) + -> HashAggregate + Group Key: t1.x + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Append + -> Seq Scan on pagg_tab1_p1 t1_1 + -> Seq Scan on pagg_tab1_p2 t1_2 + -> Seq Scan on pagg_tab1_p3 t1_3 + -> Hash + -> Append + -> Seq Scan on pagg_tab2_p1 t2_1 + -> Seq Scan on pagg_tab2_p2 t2_2 + -> Seq Scan on pagg_tab2_p3 t2_3 +(15 rows) + +SELECT t1.x, sum(t1.y), count(t1) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.x ORDER BY 1, 2, 3; + x | sum | count +----+------+------- + 0 | 500 | 100 + 6 | 1100 | 100 + 12 | 700 | 100 + 18 | 1300 | 100 + 24 | 900 | 100 +(5 rows) + +-- GROUP BY having other matching key +EXPLAIN (COSTS OFF) +SELECT t2.y, sum(t1.y), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t2.y ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t2.y, (sum(t1.y)), (count(*)) + -> Append + -> HashAggregate + Group Key: t2.y + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Seq Scan on pagg_tab1_p1 t1 + -> Hash + -> Seq Scan on pagg_tab2_p1 t2 + -> HashAggregate + Group Key: t2_1.y + -> Hash Join + Hash Cond: (t1_1.x = t2_1.y) + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 t2_1 + -> HashAggregate + Group Key: t2_2.y + -> Hash Join + Hash Cond: (t2_2.y = t1_2.x) + -> Seq Scan on pagg_tab2_p3 t2_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 t1_2 +(24 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +-- Also test GroupAggregate paths by disabling hash aggregates. 
+SET enable_hashagg TO false; +EXPLAIN (COSTS OFF) +SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------- + Sort + Sort Key: t1.y, (sum(t1.x)), (count(*)) + -> Finalize GroupAggregate + Group Key: t1.y + Filter: (avg(t1.x) > '10'::numeric) + -> Merge Append + Sort Key: t1.y + -> Partial GroupAggregate + Group Key: t1.y + -> Sort + Sort Key: t1.y + -> Hash Join + Hash Cond: (t1.x = t2.y) + -> Seq Scan on pagg_tab1_p1 t1 + -> Hash + -> Seq Scan on pagg_tab2_p1 t2 + -> Partial GroupAggregate + Group Key: t1_1.y + -> Sort + Sort Key: t1_1.y + -> Hash Join + Hash Cond: (t1_1.x = t2_1.y) + -> Seq Scan on pagg_tab1_p2 t1_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 t2_1 + -> Partial GroupAggregate + Group Key: t1_2.y + -> Sort + Sort Key: t1_2.y + -> Hash Join + Hash Cond: (t2_2.y = t1_2.x) + -> Seq Scan on pagg_tab2_p3 t2_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 t1_2 +(34 rows) + +SELECT t1.y, sum(t1.x), count(*) FROM pagg_tab1 t1, pagg_tab2 t2 WHERE t1.x = t2.y GROUP BY t1.y HAVING avg(t1.x) > 10 ORDER BY 1, 2, 3; + y | sum | count +----+------+------- + 2 | 600 | 50 + 4 | 1200 | 50 + 8 | 900 | 50 + 12 | 600 | 50 + 14 | 1200 | 50 + 18 | 900 | 50 +(6 rows) + +RESET enable_hashagg; +-- Check with LEFT/RIGHT/FULL OUTER JOINs which produces NULL values for +-- aggregation +-- LEFT JOIN, should produce partial partitionwise aggregation plan as +-- GROUP BY is on nullable column +EXPLAIN (COSTS OFF) +SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + QUERY PLAN +------------------------------------------------------------------ + Finalize GroupAggregate + Group Key: b.y + -> Sort + Sort Key: b.y + -> Append + -> Partial HashAggregate + Group Key: b.y + -> Hash Left Join + Hash Cond: (a.x = b.y) + -> Seq Scan on pagg_tab1_p1 a + -> Hash + -> Seq Scan on pagg_tab2_p1 b + -> Partial HashAggregate + Group Key: b_1.y + -> Hash Left Join + Hash Cond: (a_1.x = b_1.y) + -> Seq Scan on pagg_tab1_p2 a_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 b_1 + -> Partial HashAggregate + Group Key: b_2.y + -> Hash Right Join + Hash Cond: (b_2.y = a_2.x) + -> Seq Scan on pagg_tab2_p3 b_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 a_2 +(26 rows) + +SELECT b.y, sum(a.y) FROM pagg_tab1 a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + y | sum +----+------ + 0 | 500 + 6 | 1100 + 12 | 700 + 18 | 1300 + 24 | 900 + | 900 +(6 rows) + +-- RIGHT JOIN, should produce full partitionwise aggregation plan as +-- GROUP BY is on non-nullable column +EXPLAIN (COSTS OFF) +SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + QUERY PLAN +------------------------------------------------------------ + Sort + Sort Key: b.y + -> Append + -> HashAggregate + Group Key: b.y + -> Hash Right Join + Hash Cond: (a.x = b.y) + -> Seq Scan on pagg_tab1_p1 a + -> Hash + -> Seq Scan on pagg_tab2_p1 b + -> HashAggregate + Group Key: b_1.y + -> Hash Right Join + Hash Cond: (a_1.x = b_1.y) + -> Seq Scan on pagg_tab1_p2 a_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 b_1 + -> HashAggregate + Group Key: b_2.y + -> Hash Left Join + Hash Cond: (b_2.y = a_2.x) + -> Seq Scan on pagg_tab2_p3 b_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 a_2 +(24 rows) + +SELECT b.y, sum(a.y) FROM pagg_tab1 a RIGHT JOIN pagg_tab2 b ON a.x = b.y GROUP BY b.y ORDER BY 1 NULLS LAST; + y | 
sum +----+------ + 0 | 500 + 3 | + 6 | 1100 + 9 | + 12 | 700 + 15 | + 18 | 1300 + 21 | + 24 | 900 + 27 | +(10 rows) + +-- FULL JOIN, should produce partial partitionwise aggregation plan as +-- GROUP BY is on nullable column +EXPLAIN (COSTS OFF) +SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; + QUERY PLAN +------------------------------------------------------------------ + Finalize GroupAggregate + Group Key: a.x + -> Sort + Sort Key: a.x + -> Append + -> Partial HashAggregate + Group Key: a.x + -> Hash Full Join + Hash Cond: (a.x = b.y) + -> Seq Scan on pagg_tab1_p1 a + -> Hash + -> Seq Scan on pagg_tab2_p1 b + -> Partial HashAggregate + Group Key: a_1.x + -> Hash Full Join + Hash Cond: (a_1.x = b_1.y) + -> Seq Scan on pagg_tab1_p2 a_1 + -> Hash + -> Seq Scan on pagg_tab2_p2 b_1 + -> Partial HashAggregate + Group Key: a_2.x + -> Hash Full Join + Hash Cond: (b_2.y = a_2.x) + -> Seq Scan on pagg_tab2_p3 b_2 + -> Hash + -> Seq Scan on pagg_tab1_p3 a_2 +(26 rows) + +SELECT a.x, sum(b.x) FROM pagg_tab1 a FULL OUTER JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x ORDER BY 1 NULLS LAST; + x | sum +----+------ + 0 | 500 + 2 | + 4 | + 6 | 1100 + 8 | + 10 | + 12 | 700 + 14 | + 16 | + 18 | 1300 + 20 | + 22 | + 24 | 900 + 26 | + 28 | + | 500 +(16 rows) + +-- LEFT JOIN, with dummy relation on right side, ideally +-- should produce full partitionwise aggregation plan as GROUP BY is on +-- non-nullable columns. +-- But right now we are unable to do partitionwise join in this case. +EXPLAIN (COSTS OFF) +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: pagg_tab1.x, pagg_tab2.y + -> HashAggregate + Group Key: pagg_tab1.x, pagg_tab2.y + -> Hash Left Join + Hash Cond: (pagg_tab1.x = pagg_tab2.y) + Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) + -> Append + -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 + Filter: (x < 20) + -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 + Filter: (x < 20) + -> Hash + -> Append + -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 + Filter: (y > 10) + -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 + Filter: (y > 10) +(18 rows) + +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a LEFT JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + x | y | count +----+----+------- + 6 | | 10 + 8 | | 10 + 10 | | 10 + 12 | 12 | 100 + 14 | | 10 + 16 | | 10 + 18 | 18 | 100 +(7 rows) + +-- FULL JOIN, with dummy relations on both sides, ideally +-- should produce partial partitionwise aggregation plan as GROUP BY is on +-- nullable columns. +-- But right now we are unable to do partitionwise join in this case. 
+EXPLAIN (COSTS OFF) +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: pagg_tab1.x, pagg_tab2.y + -> HashAggregate + Group Key: pagg_tab1.x, pagg_tab2.y + -> Hash Full Join + Hash Cond: (pagg_tab1.x = pagg_tab2.y) + Filter: ((pagg_tab1.x > 5) OR (pagg_tab2.y < 20)) + -> Append + -> Seq Scan on pagg_tab1_p1 pagg_tab1_1 + Filter: (x < 20) + -> Seq Scan on pagg_tab1_p2 pagg_tab1_2 + Filter: (x < 20) + -> Hash + -> Append + -> Seq Scan on pagg_tab2_p2 pagg_tab2_1 + Filter: (y > 10) + -> Seq Scan on pagg_tab2_p3 pagg_tab2_2 + Filter: (y > 10) +(18 rows) + +SELECT a.x, b.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x < 20) a FULL JOIN (SELECT * FROM pagg_tab2 WHERE y > 10) b ON a.x = b.y WHERE a.x > 5 or b.y < 20 GROUP BY a.x, b.y ORDER BY 1, 2; + x | y | count +----+----+------- + 6 | | 10 + 8 | | 10 + 10 | | 10 + 12 | 12 | 100 + 14 | | 10 + 16 | | 10 + 18 | 18 | 100 + | 15 | 10 +(8 rows) + +-- Empty join relation because of empty outer side, no partitionwise agg plan +EXPLAIN (COSTS OFF) +SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; + QUERY PLAN +-------------------------------------- + GroupAggregate + Group Key: pagg_tab1.y + -> Sort + Sort Key: pagg_tab1.y + -> Result + One-Time Filter: false +(6 rows) + +SELECT a.x, a.y, count(*) FROM (SELECT * FROM pagg_tab1 WHERE x = 1 AND x = 2) a LEFT JOIN pagg_tab2 b ON a.x = b.y GROUP BY a.x, a.y ORDER BY 1, 2; + x | y | count +---+---+------- +(0 rows) + +-- Partition by multiple columns +CREATE TABLE pagg_tab_m (a int, b int, c int) PARTITION BY RANGE(a, ((a+b)/2)); +CREATE TABLE pagg_tab_m_p1 PARTITION OF pagg_tab_m FOR VALUES FROM (0, 0) TO (12, 12); +CREATE TABLE pagg_tab_m_p2 PARTITION OF pagg_tab_m FOR VALUES FROM (12, 12) TO (22, 22); +CREATE TABLE pagg_tab_m_p3 PARTITION OF pagg_tab_m FOR VALUES FROM (22, 22) TO (30, 30); +INSERT INTO pagg_tab_m SELECT i % 30, i % 40, i % 50 FROM generate_series(0, 2999) i; +ANALYZE pagg_tab_m; +-- Partial aggregation as GROUP BY clause does not match with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) + -> Finalize HashAggregate + Group Key: pagg_tab_m.a + Filter: (avg(pagg_tab_m.c) < '22'::numeric) + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_m.a + -> Seq Scan on pagg_tab_m_p1 pagg_tab_m + -> Partial HashAggregate + Group Key: pagg_tab_m_1.a + -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 + -> Partial HashAggregate + Group Key: pagg_tab_m_2.a + -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 +(15 rows) + +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a HAVING avg(c) < 22 ORDER BY 1, 2, 3; + a | sum | avg | count +----+------+---------------------+------- + 0 | 1500 | 20.0000000000000000 | 100 + 1 | 1600 | 21.0000000000000000 | 100 + 10 | 1500 | 20.0000000000000000 | 100 + 11 | 1600 | 21.0000000000000000 | 100 + 20 | 1500 | 20.0000000000000000 | 100 + 21 | 1600 | 21.0000000000000000 | 100 +(6 rows) + +-- Full aggregation as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, 
sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_m.a, (sum(pagg_tab_m.b)), (avg(pagg_tab_m.c)) + -> Append + -> HashAggregate + Group Key: pagg_tab_m.a, ((pagg_tab_m.a + pagg_tab_m.b) / 2) + Filter: (sum(pagg_tab_m.b) < 50) + -> Seq Scan on pagg_tab_m_p1 pagg_tab_m + -> HashAggregate + Group Key: pagg_tab_m_1.a, ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2) + Filter: (sum(pagg_tab_m_1.b) < 50) + -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 + -> HashAggregate + Group Key: pagg_tab_m_2.a, ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2) + Filter: (sum(pagg_tab_m_2.b) < 50) + -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 +(15 rows) + +SELECT a, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY a, (a+b)/2 HAVING sum(b) < 50 ORDER BY 1, 2, 3; + a | sum | avg | count +----+-----+---------------------+------- + 0 | 0 | 20.0000000000000000 | 25 + 1 | 25 | 21.0000000000000000 | 25 + 10 | 0 | 20.0000000000000000 | 25 + 11 | 25 | 21.0000000000000000 | 25 + 20 | 0 | 20.0000000000000000 | 25 + 21 | 25 | 21.0000000000000000 | 25 +(6 rows) + +-- Full aggregation as PARTITION KEY is part of GROUP BY clause +EXPLAIN (COSTS OFF) +SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_m.a, pagg_tab_m.c, (sum(pagg_tab_m.b)) + -> Append + -> HashAggregate + Group Key: ((pagg_tab_m.a + pagg_tab_m.b) / 2), pagg_tab_m.c, pagg_tab_m.a + Filter: ((sum(pagg_tab_m.b) = 50) AND (avg(pagg_tab_m.c) > '25'::numeric)) + -> Seq Scan on pagg_tab_m_p1 pagg_tab_m + -> HashAggregate + Group Key: ((pagg_tab_m_1.a + pagg_tab_m_1.b) / 2), pagg_tab_m_1.c, pagg_tab_m_1.a + Filter: ((sum(pagg_tab_m_1.b) = 50) AND (avg(pagg_tab_m_1.c) > '25'::numeric)) + -> Seq Scan on pagg_tab_m_p2 pagg_tab_m_1 + -> HashAggregate + Group Key: ((pagg_tab_m_2.a + pagg_tab_m_2.b) / 2), pagg_tab_m_2.c, pagg_tab_m_2.a + Filter: ((sum(pagg_tab_m_2.b) = 50) AND (avg(pagg_tab_m_2.c) > '25'::numeric)) + -> Seq Scan on pagg_tab_m_p3 pagg_tab_m_2 +(15 rows) + +SELECT a, c, sum(b), avg(c), count(*) FROM pagg_tab_m GROUP BY (a+b)/2, 2, 1 HAVING sum(b) = 50 AND avg(c) > 25 ORDER BY 1, 2, 3; + a | c | sum | avg | count +----+----+-----+---------------------+------- + 0 | 30 | 50 | 30.0000000000000000 | 5 + 0 | 40 | 50 | 40.0000000000000000 | 5 + 10 | 30 | 50 | 30.0000000000000000 | 5 + 10 | 40 | 50 | 40.0000000000000000 | 5 + 20 | 30 | 50 | 30.0000000000000000 | 5 + 20 | 40 | 50 | 40.0000000000000000 | 5 +(6 rows) + +-- Test with multi-level partitioning scheme +CREATE TABLE pagg_tab_ml (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE pagg_tab_ml_p1 PARTITION OF pagg_tab_ml FOR VALUES FROM (0) TO (12); +CREATE TABLE pagg_tab_ml_p2 PARTITION OF pagg_tab_ml FOR VALUES FROM (12) TO (20) PARTITION BY LIST (c); +CREATE TABLE pagg_tab_ml_p2_s1 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0000', '0001', '0002'); +CREATE TABLE pagg_tab_ml_p2_s2 PARTITION OF pagg_tab_ml_p2 FOR VALUES IN ('0003'); +-- This level of partitioning has different column positions than the parent +CREATE TABLE pagg_tab_ml_p3(b int, c text, a int) PARTITION BY RANGE (b); +CREATE TABLE pagg_tab_ml_p3_s1(c text, a int, b int); +CREATE TABLE pagg_tab_ml_p3_s2 PARTITION OF pagg_tab_ml_p3 FOR VALUES FROM (7) TO (10); +ALTER TABLE 
pagg_tab_ml_p3 ATTACH PARTITION pagg_tab_ml_p3_s1 FOR VALUES FROM (0) TO (7); +ALTER TABLE pagg_tab_ml ATTACH PARTITION pagg_tab_ml_p3 FOR VALUES FROM (20) TO (30); +INSERT INTO pagg_tab_ml SELECT i % 30, i % 10, to_char(i % 4, 'FM0000') FROM generate_series(0, 29999) i; +ANALYZE pagg_tab_ml; +-- For Parallel Append +SET max_parallel_workers_per_gather TO 2; +SET parallel_setup_cost = 0; +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, but still we do not see a partial aggregation as array_agg() +-- is not partial agg safe. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (array_agg(DISTINCT pagg_tab_ml.c)) + -> Gather + Workers Planned: 2 + -> Parallel Append + -> GroupAggregate + Group Key: pagg_tab_ml.a + Filter: (avg(pagg_tab_ml.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml.a, pagg_tab_ml.c + -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> GroupAggregate + Group Key: pagg_tab_ml_5.a + Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c + -> Append + -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 + -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 + -> GroupAggregate + Group Key: pagg_tab_ml_2.a + Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c + -> Append + -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 + -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 +(27 rows) + +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + a | sum | array_agg | count +----+------+-------------+------- + 0 | 0 | {0000,0002} | 1000 + 1 | 1000 | {0001,0003} | 1000 + 2 | 2000 | {0000,0002} | 1000 + 10 | 0 | {0000,0002} | 1000 + 11 | 1000 | {0001,0003} | 1000 + 12 | 2000 | {0000,0002} | 1000 + 20 | 0 | {0000,0002} | 1000 + 21 | 1000 | {0001,0003} | 1000 + 22 | 2000 | {0000,0002} | 1000 +(9 rows) + +-- Without ORDER BY clause, to test Gather at top-most path +EXPLAIN (COSTS OFF) +SELECT a, sum(b), array_agg(distinct c), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3; + QUERY PLAN +--------------------------------------------------------------------------- + Gather + Workers Planned: 2 + -> Parallel Append + -> GroupAggregate + Group Key: pagg_tab_ml.a + Filter: (avg(pagg_tab_ml.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml.a, pagg_tab_ml.c + -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> GroupAggregate + Group Key: pagg_tab_ml_5.a + Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_5.a, pagg_tab_ml_5.c + -> Append + -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 + -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 + -> GroupAggregate + Group Key: pagg_tab_ml_2.a + Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_2.a, pagg_tab_ml_2.c + -> Append + -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 + -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 +(25 rows) + +RESET parallel_setup_cost; +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, thus we will have a partial aggregation for them. 
+EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) + -> Append + -> HashAggregate + Group Key: pagg_tab_ml.a + Filter: (avg(pagg_tab_ml.b) < '3'::numeric) + -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_2.a + Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_2.a + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_2.a + -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 + -> Partial HashAggregate + Group Key: pagg_tab_ml_3.a + -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_5.a + Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) + -> Sort + Sort Key: pagg_tab_ml_5.a + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_5.a + -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 + -> Partial HashAggregate + Group Key: pagg_tab_ml_6.a + -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 +(31 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 0 | 0 | 1000 + 1 | 1000 | 1000 + 2 | 2000 | 1000 + 10 | 0 | 1000 + 11 | 1000 | 1000 + 12 | 2000 | 1000 + 20 | 0 | 1000 + 21 | 1000 | 1000 + 22 | 2000 | 1000 +(9 rows) + +-- Partial aggregation at all levels as GROUP BY clause does not match with +-- PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; + QUERY PLAN +--------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) + -> Finalize GroupAggregate + Group Key: pagg_tab_ml.b + -> Sort + Sort Key: pagg_tab_ml.b + -> Append + -> Partial HashAggregate + Group Key: pagg_tab_ml.b + -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> Partial HashAggregate + Group Key: pagg_tab_ml_1.b + -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_2.b + -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 + -> Partial HashAggregate + Group Key: pagg_tab_ml_3.b + -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 + -> Partial HashAggregate + Group Key: pagg_tab_ml_4.b + -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 +(22 rows) + +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; + b | sum | count +---+-------+------- + 0 | 30000 | 3000 + 1 | 33000 | 3000 + 2 | 36000 | 3000 + 3 | 39000 | 3000 + 4 | 42000 | 3000 +(5 rows) + +-- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) + -> Append + -> HashAggregate + Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c + Filter: (avg(pagg_tab_ml.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> HashAggregate + Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c + Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 + -> HashAggregate + Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c + Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) + -> Seq Scan on 
pagg_tab_ml_p2_s2 pagg_tab_ml_2 + -> HashAggregate + Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c + Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 + -> HashAggregate + Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c + Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 +(23 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 8 | 4000 | 500 + 8 | 4000 | 500 + 9 | 4500 | 500 + 9 | 4500 | 500 + 18 | 4000 | 500 + 18 | 4000 | 500 + 19 | 4500 | 500 + 19 | 4500 | 500 + 28 | 4000 | 500 + 28 | 4000 | 500 + 29 | 4500 | 500 + 29 | 4500 | 500 +(12 rows) + +-- Parallelism within partitionwise aggregates +SET min_parallel_table_scan_size TO '8kB'; +SET parallel_setup_cost TO 0; +-- Full aggregation at level 1 as GROUP BY clause matches with PARTITION KEY +-- for level 1 only. For subpartitions, GROUP BY clause does not match with +-- PARTITION KEY, thus we will have a partial aggregation for them. +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Sort + Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) + -> Append + -> Finalize GroupAggregate + Group Key: pagg_tab_ml.a + Filter: (avg(pagg_tab_ml.b) < '3'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml.a + -> Partial HashAggregate + Group Key: pagg_tab_ml.a + -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_2.a + Filter: (avg(pagg_tab_ml_2.b) < '3'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_2.a + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_2.a + -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_2 + -> Partial HashAggregate + Group Key: pagg_tab_ml_3.a + -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_3 + -> Finalize GroupAggregate + Group Key: pagg_tab_ml_5.a + Filter: (avg(pagg_tab_ml_5.b) < '3'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml_5.a + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_ml_5.a + -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_5 + -> Partial HashAggregate + Group Key: pagg_tab_ml_6.a + -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_6 +(41 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a HAVING avg(b) < 3 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 0 | 0 | 1000 + 1 | 1000 | 1000 + 2 | 2000 | 1000 + 10 | 0 | 1000 + 11 | 1000 | 1000 + 12 | 2000 | 1000 + 20 | 0 | 1000 + 21 | 1000 | 1000 + 22 | 2000 | 1000 +(9 rows) + +-- Partial aggregation at all levels as GROUP BY clause does not match with +-- PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Sort Key: pagg_tab_ml.b, (sum(pagg_tab_ml.a)), (count(*)) + -> Finalize GroupAggregate + Group Key: pagg_tab_ml.b + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml.b + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_ml.b + -> Parallel Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> Partial HashAggregate + Group Key: pagg_tab_ml_3.b 
+ -> Parallel Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 + -> Partial HashAggregate + Group Key: pagg_tab_ml_1.b + -> Parallel Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 + -> Partial HashAggregate + Group Key: pagg_tab_ml_4.b + -> Parallel Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 + -> Partial HashAggregate + Group Key: pagg_tab_ml_2.b + -> Parallel Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 +(24 rows) + +SELECT b, sum(a), count(*) FROM pagg_tab_ml GROUP BY b HAVING avg(a) < 15 ORDER BY 1, 2, 3; + b | sum | count +---+-------+------- + 0 | 30000 | 3000 + 1 | 33000 | 3000 + 2 | 36000 | 3000 + 3 | 39000 | 3000 + 4 | 42000 | 3000 +(5 rows) + +-- Full aggregation at all levels as GROUP BY clause matches with PARTITION KEY +EXPLAIN (COSTS OFF) +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------------- + Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_ml.a, (sum(pagg_tab_ml.b)), (count(*)) + -> Parallel Append + -> HashAggregate + Group Key: pagg_tab_ml.a, pagg_tab_ml.b, pagg_tab_ml.c + Filter: (avg(pagg_tab_ml.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p1 pagg_tab_ml + -> HashAggregate + Group Key: pagg_tab_ml_3.a, pagg_tab_ml_3.b, pagg_tab_ml_3.c + Filter: (avg(pagg_tab_ml_3.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s1 pagg_tab_ml_3 + -> HashAggregate + Group Key: pagg_tab_ml_1.a, pagg_tab_ml_1.b, pagg_tab_ml_1.c + Filter: (avg(pagg_tab_ml_1.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s1 pagg_tab_ml_1 + -> HashAggregate + Group Key: pagg_tab_ml_4.a, pagg_tab_ml_4.b, pagg_tab_ml_4.c + Filter: (avg(pagg_tab_ml_4.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p3_s2 pagg_tab_ml_4 + -> HashAggregate + Group Key: pagg_tab_ml_2.a, pagg_tab_ml_2.b, pagg_tab_ml_2.c + Filter: (avg(pagg_tab_ml_2.b) > '7'::numeric) + -> Seq Scan on pagg_tab_ml_p2_s2 pagg_tab_ml_2 +(25 rows) + +SELECT a, sum(b), count(*) FROM pagg_tab_ml GROUP BY a, b, c HAVING avg(b) > 7 ORDER BY 1, 2, 3; + a | sum | count +----+------+------- + 8 | 4000 | 500 + 8 | 4000 | 500 + 9 | 4500 | 500 + 9 | 4500 | 500 + 18 | 4000 | 500 + 18 | 4000 | 500 + 19 | 4500 | 500 + 19 | 4500 | 500 + 28 | 4000 | 500 + 28 | 4000 | 500 + 29 | 4500 | 500 + 29 | 4500 | 500 +(12 rows) + +-- Parallelism within partitionwise aggregates (single level) +-- Add few parallel setup cost, so that we will see a plan which gathers +-- partially created paths even for full aggregation and sticks a single Gather +-- followed by finalization step. +-- Without this, the cost of doing partial aggregation + Gather + finalization +-- for each partition and then Append over it turns out to be same and this +-- wins as we add it first. This parallel_setup_cost plays a vital role in +-- costing such plans. +SET parallel_setup_cost TO 10; +CREATE TABLE pagg_tab_para(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE pagg_tab_para_p1 PARTITION OF pagg_tab_para FOR VALUES FROM (0) TO (12); +CREATE TABLE pagg_tab_para_p2 PARTITION OF pagg_tab_para FOR VALUES FROM (12) TO (22); +CREATE TABLE pagg_tab_para_p3 PARTITION OF pagg_tab_para FOR VALUES FROM (22) TO (30); +INSERT INTO pagg_tab_para SELECT i % 30, i % 20 FROM generate_series(0, 29999) i; +ANALYZE pagg_tab_para; +-- When GROUP BY clause matches; full aggregation is performed for each partition. 
+EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para.x + Filter: (avg(pagg_tab_para.y) < '7'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para.x + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_para.x + -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para + -> Partial HashAggregate + Group Key: pagg_tab_para_1.x + -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 + -> Partial HashAggregate + Group Key: pagg_tab_para_2.x + -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 +(19 rows) + +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | count +----+------+--------------------+------- + 0 | 5000 | 5.0000000000000000 | 1000 + 1 | 6000 | 6.0000000000000000 | 1000 + 10 | 5000 | 5.0000000000000000 | 1000 + 11 | 6000 | 6.0000000000000000 | 1000 + 20 | 5000 | 5.0000000000000000 | 1000 + 21 | 6000 | 6.0000000000000000 | 1000 +(6 rows) + +-- When GROUP BY clause does not match; partial aggregation is performed for each partition. +EXPLAIN (COSTS OFF) +SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para.y, (sum(pagg_tab_para.x)), (avg(pagg_tab_para.x)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para.y + Filter: (avg(pagg_tab_para.x) < '12'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para.y + -> Parallel Append + -> Partial HashAggregate + Group Key: pagg_tab_para.y + -> Parallel Seq Scan on pagg_tab_para_p1 pagg_tab_para + -> Partial HashAggregate + Group Key: pagg_tab_para_1.y + -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 + -> Partial HashAggregate + Group Key: pagg_tab_para_2.y + -> Parallel Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 +(19 rows) + +SELECT y, sum(x), avg(x), count(*) FROM pagg_tab_para GROUP BY y HAVING avg(x) < 12 ORDER BY 1, 2, 3; + y | sum | avg | count +----+-------+---------------------+------- + 0 | 15000 | 10.0000000000000000 | 1500 + 1 | 16500 | 11.0000000000000000 | 1500 + 10 | 15000 | 10.0000000000000000 | 1500 + 11 | 16500 | 11.0000000000000000 | 1500 +(4 rows) + +-- Test when parent can produce parallel paths but not any (or some) of its children +-- (Use one more aggregate to tilt the cost estimates for the plan we want) +ALTER TABLE pagg_tab_para_p1 SET (parallel_workers = 0); +ALTER TABLE pagg_tab_para_p3 SET (parallel_workers = 0); +ANALYZE pagg_tab_para; +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +------------------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para.x + Filter: (avg(pagg_tab_para.y) < '7'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para.x + -> Partial HashAggregate + Group Key: pagg_tab_para.x + -> Parallel Append + -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 + -> 
Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 + -> Parallel Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 +(15 rows) + +SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | sum | count +----+------+--------------------+-------+------- + 0 | 5000 | 5.0000000000000000 | 5000 | 1000 + 1 | 6000 | 6.0000000000000000 | 7000 | 1000 + 10 | 5000 | 5.0000000000000000 | 15000 | 1000 + 11 | 6000 | 6.0000000000000000 | 17000 | 1000 + 20 | 5000 | 5.0000000000000000 | 25000 | 1000 + 21 | 6000 | 6.0000000000000000 | 27000 | 1000 +(6 rows) + +ALTER TABLE pagg_tab_para_p2 SET (parallel_workers = 0); +ANALYZE pagg_tab_para; +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +---------------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) + -> Finalize GroupAggregate + Group Key: pagg_tab_para.x + Filter: (avg(pagg_tab_para.y) < '7'::numeric) + -> Gather Merge + Workers Planned: 2 + -> Sort + Sort Key: pagg_tab_para.x + -> Partial HashAggregate + Group Key: pagg_tab_para.x + -> Parallel Append + -> Seq Scan on pagg_tab_para_p1 pagg_tab_para_1 + -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_2 + -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_3 +(15 rows) + +SELECT x, sum(y), avg(y), sum(x+y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | sum | count +----+------+--------------------+-------+------- + 0 | 5000 | 5.0000000000000000 | 5000 | 1000 + 1 | 6000 | 6.0000000000000000 | 7000 | 1000 + 10 | 5000 | 5.0000000000000000 | 15000 | 1000 + 11 | 6000 | 6.0000000000000000 | 17000 | 1000 + 20 | 5000 | 5.0000000000000000 | 25000 | 1000 + 21 | 6000 | 6.0000000000000000 | 27000 | 1000 +(6 rows) + +-- Reset parallelism parameters to get partitionwise aggregation plan. 
+RESET min_parallel_table_scan_size; +RESET parallel_setup_cost; +EXPLAIN (COSTS OFF) +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + QUERY PLAN +----------------------------------------------------------------------------- + Sort + Sort Key: pagg_tab_para.x, (sum(pagg_tab_para.y)), (avg(pagg_tab_para.y)) + -> Append + -> HashAggregate + Group Key: pagg_tab_para.x + Filter: (avg(pagg_tab_para.y) < '7'::numeric) + -> Seq Scan on pagg_tab_para_p1 pagg_tab_para + -> HashAggregate + Group Key: pagg_tab_para_1.x + Filter: (avg(pagg_tab_para_1.y) < '7'::numeric) + -> Seq Scan on pagg_tab_para_p2 pagg_tab_para_1 + -> HashAggregate + Group Key: pagg_tab_para_2.x + Filter: (avg(pagg_tab_para_2.y) < '7'::numeric) + -> Seq Scan on pagg_tab_para_p3 pagg_tab_para_2 +(15 rows) + +SELECT x, sum(y), avg(y), count(*) FROM pagg_tab_para GROUP BY x HAVING avg(y) < 7 ORDER BY 1, 2, 3; + x | sum | avg | count +----+------+--------------------+------- + 0 | 5000 | 5.0000000000000000 | 1000 + 1 | 6000 | 6.0000000000000000 | 1000 + 10 | 5000 | 5.0000000000000000 | 1000 + 11 | 6000 | 6.0000000000000000 | 1000 + 20 | 5000 | 5.0000000000000000 | 1000 + 21 | 6000 | 6.0000000000000000 | 1000 +(6 rows) + diff --git a/src/test/regress/expected/partition_info.out b/src/test/regress/expected/partition_info.out new file mode 100644 index 0000000..42b6bc7 --- /dev/null +++ b/src/test/regress/expected/partition_info.out @@ -0,0 +1,351 @@ +-- +-- Tests for functions providing information about partitions +-- +SELECT * FROM pg_partition_tree(NULL); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +SELECT * FROM pg_partition_tree(0); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +SELECT * FROM pg_partition_ancestors(NULL); + relid +------- +(0 rows) + +SELECT * FROM pg_partition_ancestors(0); + relid +------- +(0 rows) + +SELECT pg_partition_root(NULL); + pg_partition_root +------------------- + +(1 row) + +SELECT pg_partition_root(0); + pg_partition_root +------------------- + +(1 row) + +-- Test table partition trees +CREATE TABLE ptif_test (a int, b int) PARTITION BY range (a); +CREATE TABLE ptif_test0 PARTITION OF ptif_test + FOR VALUES FROM (minvalue) TO (0) PARTITION BY list (b); +CREATE TABLE ptif_test01 PARTITION OF ptif_test0 FOR VALUES IN (1); +CREATE TABLE ptif_test1 PARTITION OF ptif_test + FOR VALUES FROM (0) TO (100) PARTITION BY list (b); +CREATE TABLE ptif_test11 PARTITION OF ptif_test1 FOR VALUES IN (1); +CREATE TABLE ptif_test2 PARTITION OF ptif_test + FOR VALUES FROM (100) TO (200); +-- This partitioned table should remain with no partitions. 
+CREATE TABLE ptif_test3 PARTITION OF ptif_test + FOR VALUES FROM (200) TO (maxvalue) PARTITION BY list (b); +-- Test pg_partition_root for tables +SELECT pg_partition_root('ptif_test'); + pg_partition_root +------------------- + ptif_test +(1 row) + +SELECT pg_partition_root('ptif_test0'); + pg_partition_root +------------------- + ptif_test +(1 row) + +SELECT pg_partition_root('ptif_test01'); + pg_partition_root +------------------- + ptif_test +(1 row) + +SELECT pg_partition_root('ptif_test3'); + pg_partition_root +------------------- + ptif_test +(1 row) + +-- Test index partition tree +CREATE INDEX ptif_test_index ON ONLY ptif_test (a); +CREATE INDEX ptif_test0_index ON ONLY ptif_test0 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test0_index; +CREATE INDEX ptif_test01_index ON ptif_test01 (a); +ALTER INDEX ptif_test0_index ATTACH PARTITION ptif_test01_index; +CREATE INDEX ptif_test1_index ON ONLY ptif_test1 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test1_index; +CREATE INDEX ptif_test11_index ON ptif_test11 (a); +ALTER INDEX ptif_test1_index ATTACH PARTITION ptif_test11_index; +CREATE INDEX ptif_test2_index ON ptif_test2 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test2_index; +CREATE INDEX ptif_test3_index ON ptif_test3 (a); +ALTER INDEX ptif_test_index ATTACH PARTITION ptif_test3_index; +-- Test pg_partition_root for indexes +SELECT pg_partition_root('ptif_test_index'); + pg_partition_root +------------------- + ptif_test_index +(1 row) + +SELECT pg_partition_root('ptif_test0_index'); + pg_partition_root +------------------- + ptif_test_index +(1 row) + +SELECT pg_partition_root('ptif_test01_index'); + pg_partition_root +------------------- + ptif_test_index +(1 row) + +SELECT pg_partition_root('ptif_test3_index'); + pg_partition_root +------------------- + ptif_test_index +(1 row) + +-- List all tables members of the tree +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test'); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test | | 0 | f + ptif_test0 | ptif_test | 1 | f + ptif_test1 | ptif_test | 1 | f + ptif_test2 | ptif_test | 1 | t + ptif_test3 | ptif_test | 1 | f + ptif_test01 | ptif_test0 | 2 | t + ptif_test11 | ptif_test1 | 2 | t +(7 rows) + +-- List tables from an intermediate level +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test0') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test0 | ptif_test | 0 | f + ptif_test01 | ptif_test0 | 1 | t +(2 rows) + +-- List from leaf table +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test01') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test01 | ptif_test0 | 0 | t +(1 row) + +-- List from partitioned table with no partitions +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test3') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +------------+-------------+-------+-------- + ptif_test3 | ptif_test | 0 | f +(1 row) + +-- List all ancestors of root and leaf tables +SELECT * FROM pg_partition_ancestors('ptif_test01'); + relid +------------- + ptif_test01 + ptif_test0 + ptif_test +(3 rows) + +SELECT * FROM pg_partition_ancestors('ptif_test'); + relid +----------- + ptif_test +(1 row) + +-- List all members using pg_partition_root with leaf table 
reference +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree(pg_partition_root('ptif_test01')) p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------+-------------+-------+-------- + ptif_test | | 0 | f + ptif_test0 | ptif_test | 1 | f + ptif_test1 | ptif_test | 1 | f + ptif_test2 | ptif_test | 1 | t + ptif_test3 | ptif_test | 1 | f + ptif_test01 | ptif_test0 | 2 | t + ptif_test11 | ptif_test1 | 2 | t +(7 rows) + +-- List all indexes members of the tree +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test_index'); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test_index | | 0 | f + ptif_test0_index | ptif_test_index | 1 | f + ptif_test1_index | ptif_test_index | 1 | f + ptif_test2_index | ptif_test_index | 1 | t + ptif_test3_index | ptif_test_index | 1 | f + ptif_test01_index | ptif_test0_index | 2 | t + ptif_test11_index | ptif_test1_index | 2 | t +(7 rows) + +-- List indexes from an intermediate level +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test0_index') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test0_index | ptif_test_index | 0 | f + ptif_test01_index | ptif_test0_index | 1 | t +(2 rows) + +-- List from leaf index +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test01_index') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test01_index | ptif_test0_index | 0 | t +(1 row) + +-- List from partitioned index with no partitions +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_test3_index') p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +------------------+-----------------+-------+-------- + ptif_test3_index | ptif_test_index | 0 | f +(1 row) + +-- List all members using pg_partition_root with leaf index reference +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree(pg_partition_root('ptif_test01_index')) p + JOIN pg_class c ON (p.relid = c.oid); + relid | parentrelid | level | isleaf +-------------------+------------------+-------+-------- + ptif_test_index | | 0 | f + ptif_test0_index | ptif_test_index | 1 | f + ptif_test1_index | ptif_test_index | 1 | f + ptif_test2_index | ptif_test_index | 1 | t + ptif_test3_index | ptif_test_index | 1 | f + ptif_test01_index | ptif_test0_index | 2 | t + ptif_test11_index | ptif_test1_index | 2 | t +(7 rows) + +-- List all ancestors of root and leaf indexes +SELECT * FROM pg_partition_ancestors('ptif_test01_index'); + relid +------------------- + ptif_test01_index + ptif_test0_index + ptif_test_index +(3 rows) + +SELECT * FROM pg_partition_ancestors('ptif_test_index'); + relid +----------------- + ptif_test_index +(1 row) + +DROP TABLE ptif_test; +-- Table that is not part of any partition tree is not listed. 
+CREATE TABLE ptif_normal_table(a int); +SELECT relid, parentrelid, level, isleaf + FROM pg_partition_tree('ptif_normal_table'); + relid | parentrelid | level | isleaf +-------+-------------+-------+-------- +(0 rows) + +SELECT * FROM pg_partition_ancestors('ptif_normal_table'); + relid +------- +(0 rows) + +SELECT pg_partition_root('ptif_normal_table'); + pg_partition_root +------------------- + +(1 row) + +DROP TABLE ptif_normal_table; +-- Various partitioning-related functions return empty/NULL if passed relations +-- of types that cannot be part of a partition tree; for example, views, +-- materialized views, legacy inheritance children or parents, etc. +CREATE VIEW ptif_test_view AS SELECT 1; +CREATE MATERIALIZED VIEW ptif_test_matview AS SELECT 1; +CREATE TABLE ptif_li_parent (); +CREATE TABLE ptif_li_child () INHERITS (ptif_li_parent); +SELECT * FROM pg_partition_tree('ptif_test_view'); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +SELECT * FROM pg_partition_tree('ptif_test_matview'); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +SELECT * FROM pg_partition_tree('ptif_li_parent'); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +SELECT * FROM pg_partition_tree('ptif_li_child'); + relid | parentrelid | isleaf | level +-------+-------------+--------+------- +(0 rows) + +SELECT * FROM pg_partition_ancestors('ptif_test_view'); + relid +------- +(0 rows) + +SELECT * FROM pg_partition_ancestors('ptif_test_matview'); + relid +------- +(0 rows) + +SELECT * FROM pg_partition_ancestors('ptif_li_parent'); + relid +------- +(0 rows) + +SELECT * FROM pg_partition_ancestors('ptif_li_child'); + relid +------- +(0 rows) + +SELECT pg_partition_root('ptif_test_view'); + pg_partition_root +------------------- + +(1 row) + +SELECT pg_partition_root('ptif_test_matview'); + pg_partition_root +------------------- + +(1 row) + +SELECT pg_partition_root('ptif_li_parent'); + pg_partition_root +------------------- + +(1 row) + +SELECT pg_partition_root('ptif_li_child'); + pg_partition_root +------------------- + +(1 row) + +DROP VIEW ptif_test_view; +DROP MATERIALIZED VIEW ptif_test_matview; +DROP TABLE ptif_li_parent, ptif_li_child; diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out new file mode 100644 index 0000000..320b727 --- /dev/null +++ b/src/test/regress/expected/partition_join.out @@ -0,0 +1,5134 @@ +-- +-- PARTITION_JOIN +-- Test partitionwise join between partitioned tables +-- +-- Enable partitionwise join, which by default is disabled. 
+SET enable_partitionwise_join to true; +-- +-- partitioned by a single column +-- +CREATE TABLE prt1 (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE prt1_p1 PARTITION OF prt1 FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_p3 PARTITION OF prt1 FOR VALUES FROM (500) TO (600); +CREATE TABLE prt1_p2 PARTITION OF prt1 FOR VALUES FROM (250) TO (500); +INSERT INTO prt1 SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 2 = 0; +CREATE INDEX iprt1_p1_a on prt1_p1(a); +CREATE INDEX iprt1_p2_a on prt1_p2(a); +CREATE INDEX iprt1_p3_a on prt1_p3(a); +ANALYZE prt1; +CREATE TABLE prt2 (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE prt2_p1 PARTITION OF prt2 FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_p2 PARTITION OF prt2 FOR VALUES FROM (250) TO (500); +CREATE TABLE prt2_p3 PARTITION OF prt2 FOR VALUES FROM (500) TO (600); +INSERT INTO prt2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(0, 599) i WHERE i % 3 = 0; +CREATE INDEX iprt2_p1_b on prt2_p1(b); +CREATE INDEX iprt2_p2_b on prt2_p2(b); +CREATE INDEX iprt2_p3_b on prt2_p3(b); +ANALYZE prt2; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p1 t2_1 + -> Hash + -> Seq Scan on prt1_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p2 t2_2 + -> Hash + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_p3 t2_3 + -> Hash + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0150 | 150 | 0150 + 300 | 0300 | 300 | 0300 + 450 | 0450 | 450 | 0450 +(4 rows) + +-- left outer join, 3-way +EXPLAIN (COSTS OFF) +SELECT COUNT(*) FROM prt1 t1 + LEFT JOIN prt1 t2 ON t1.a = t2.a + LEFT JOIN prt1 t3 ON t2.a = t3.a; + QUERY PLAN +-------------------------------------------------------- + Aggregate + -> Append + -> Hash Left Join + Hash Cond: (t2_1.a = t3_1.a) + -> Hash Left Join + Hash Cond: (t1_1.a = t2_1.a) + -> Seq Scan on prt1_p1 t1_1 + -> Hash + -> Seq Scan on prt1_p1 t2_1 + -> Hash + -> Seq Scan on prt1_p1 t3_1 + -> Hash Left Join + Hash Cond: (t2_2.a = t3_2.a) + -> Hash Left Join + Hash Cond: (t1_2.a = t2_2.a) + -> Seq Scan on prt1_p2 t1_2 + -> Hash + -> Seq Scan on prt1_p2 t2_2 + -> Hash + -> Seq Scan on prt1_p2 t3_2 + -> Hash Left Join + Hash Cond: (t2_3.a = t3_3.a) + -> Hash Left Join + Hash Cond: (t1_3.a = t2_3.a) + -> Seq Scan on prt1_p3 t1_3 + -> Hash + -> Seq Scan on prt1_p3 t2_3 + -> Hash + -> Seq Scan on prt1_p3 t3_3 +(29 rows) + +SELECT COUNT(*) FROM prt1 t1 + LEFT JOIN prt1 t2 ON t1.a = t2.a + LEFT JOIN prt1 t3 ON t2.a = t3.a; + count +------- + 300 +(1 row) + +-- left outer join, with whole-row reference; partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a, t2.b + -> Hash Right Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_p1 t2_1 + -> Seq Scan on prt2_p2 t2_2 + -> Seq Scan on prt2_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on 
prt1_p1 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) +(16 rows) + +SELECT t1, t2 FROM prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + t1 | t2 +--------------+-------------- + (0,0,0000) | (0,0,0000) + (50,0,0050) | + (100,0,0100) | + (150,0,0150) | (0,150,0150) + (200,0,0200) | + (250,0,0250) | + (300,0,0300) | (0,300,0300) + (350,0,0350) | + (400,0,0400) | + (450,0,0450) | (0,450,0450) + (500,0,0500) | + (550,0,0550) | +(12 rows) + +-- right outer join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +--------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_p1 t1_1 + -> Hash + -> Seq Scan on prt2_p1 t2_1 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_p2 t1_2 + -> Hash + -> Seq Scan on prt2_p2 t2_2 + Filter: (a = 0) + -> Nested Loop Left Join + -> Seq Scan on prt2_p3 t2_3 + Filter: (a = 0) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_3 + Index Cond: (a = t2_3.b) +(20 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0150 | 150 | 0150 + 300 | 0300 | 300 | 0300 + 450 | 0450 | 450 | 0450 + | | 75 | 0075 + | | 225 | 0225 + | | 375 | 0375 + | | 525 | 0525 +(8 rows) + +-- full outer join, with placeholder vars +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: prt1.a, prt2.b + -> Append + -> Hash Full Join + Hash Cond: (prt1_1.a = prt2_1.b) + Filter: (((50) = prt1_1.a) OR ((75) = prt2_1.b)) + -> Seq Scan on prt1_p1 prt1_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p1 prt2_1 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: (prt1_2.a = prt2_2.b) + Filter: (((50) = prt1_2.a) OR ((75) = prt2_2.b)) + -> Seq Scan on prt1_p2 prt1_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p2 prt2_2 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: (prt1_3.a = prt2_3.b) + Filter: (((50) = prt1_3.a) OR ((75) = prt2_3.b)) + -> Seq Scan on prt1_p3 prt1_3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p3 prt2_3 + Filter: (a = 0) +(27 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + a | c | b | c +----+------+----+------ + 50 | 0050 | | + | | 75 | 0075 +(2 rows) + +-- Join with pruned partitions from joining relations +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Seq Scan on prt2_p2 t2 + Filter: (b > 250) + -> Hash + -> Seq Scan on prt1_p2 t1 + Filter: ((a < 450) AND (b = 0)) +(9 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a < 450 AND t2.b > 250 AND 
t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 300 | 0300 | 300 | 0300 +(1 row) + +-- Currently we can't do partitioned join if nullable-side partitions are pruned +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: prt1.a, prt2.b + -> Hash Right Join + Hash Cond: (prt2.b = prt1.a) + -> Append + -> Seq Scan on prt2_p2 prt2_1 + Filter: (b > 250) + -> Seq Scan on prt2_p3 prt2_2 + Filter: (b > 250) + -> Hash + -> Append + -> Seq Scan on prt1_p1 prt1_1 + Filter: ((a < 450) AND (b = 0)) + -> Seq Scan on prt1_p2 prt1_2 + Filter: ((a < 450) AND (b = 0)) +(15 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | | + 50 | 0050 | | + 100 | 0100 | | + 150 | 0150 | | + 200 | 0200 | | + 250 | 0250 | | + 300 | 0300 | 300 | 0300 + 350 | 0350 | | + 400 | 0400 | | +(9 rows) + +-- Currently we can't do partitioned join if nullable-side partitions are pruned +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +---------------------------------------------------- + Sort + Sort Key: prt1.a, prt2.b + -> Hash Full Join + Hash Cond: (prt1.a = prt2.b) + Filter: ((prt1.b = 0) OR (prt2.a = 0)) + -> Append + -> Seq Scan on prt1_p1 prt1_1 + Filter: (a < 450) + -> Seq Scan on prt1_p2 prt1_2 + Filter: (a < 450) + -> Hash + -> Append + -> Seq Scan on prt2_p2 prt2_1 + Filter: (b > 250) + -> Seq Scan on prt2_p3 prt2_2 + Filter: (b > 250) +(16 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a < 450) t1 FULL JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 OR t2.a = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | | + 50 | 0050 | | + 100 | 0100 | | + 150 | 0150 | | + 200 | 0200 | | + 250 | 0250 | | + 300 | 0300 | 300 | 0300 + 350 | 0350 | | + 400 | 0400 | | + | | 375 | 0375 + | | 450 | 0450 + | | 525 | 0525 +(12 rows) + +-- Semi-join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Semi Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_p1 t1_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p1 t2_1 + Filter: (a = 0) + -> Hash Semi Join + Hash Cond: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p2 t2_2 + Filter: (a = 0) + -> Nested Loop Semi Join + Join Filter: (t1_3.a = t2_3.b) + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) + -> Materialize + -> Seq Scan on prt2_p3 t2_3 + Filter: (a = 0) +(24 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t2.b FROM prt2 t2 WHERE t2.a = 0) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +-- Anti-join with aggregates +EXPLAIN (COSTS OFF) +SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); + QUERY PLAN 
+-------------------------------------------------- + Aggregate + -> Append + -> Hash Anti Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_p1 t1_1 + -> Hash + -> Seq Scan on prt2_p1 t2_1 + -> Hash Anti Join + Hash Cond: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_p2 t1_2 + -> Hash + -> Seq Scan on prt2_p2 t2_2 + -> Hash Anti Join + Hash Cond: (t1_3.a = t2_3.b) + -> Seq Scan on prt1_p3 t1_3 + -> Hash + -> Seq Scan on prt2_p3 t2_3 +(17 rows) + +SELECT sum(t1.a), avg(t1.a), sum(t1.b), avg(t1.b) FROM prt1 t1 WHERE NOT EXISTS (SELECT 1 FROM prt2 t2 WHERE t1.a = t2.b); + sum | avg | sum | avg +-------+----------------------+------+--------------------- + 60000 | 300.0000000000000000 | 2400 | 12.0000000000000000 +(1 row) + +-- lateral reference +EXPLAIN (COSTS OFF) +SELECT * FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop Left Join + -> Seq Scan on prt1_p1 t1_1 + Filter: (b = 0) + -> Nested Loop + -> Index Only Scan using iprt1_p1_a on prt1_p1 t2_1 + Index Cond: (a = t1_1.a) + -> Index Scan using iprt2_p1_b on prt2_p1 t3_1 + Index Cond: (b = t2_1.a) + -> Nested Loop Left Join + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Nested Loop + -> Index Only Scan using iprt1_p2_a on prt1_p2 t2_2 + Index Cond: (a = t1_2.a) + -> Index Scan using iprt2_p2_b on prt2_p2 t3_2 + Index Cond: (b = t2_2.a) + -> Nested Loop Left Join + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) + -> Nested Loop + -> Index Only Scan using iprt1_p3_a on prt1_p3 t2_3 + Index Cond: (a = t1_3.a) + -> Index Scan using iprt2_p3_b on prt2_p3 t3_3 + Index Cond: (b = t2_3.a) +(27 rows) + +SELECT * FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.a = ss.t2a WHERE t1.b = 0 ORDER BY t1.a; + a | b | c | t2a | t3a | least +-----+---+------+-----+-----+------- + 0 | 0 | 0000 | 0 | 0 | 0 + 50 | 0 | 0050 | | | + 100 | 0 | 0100 | | | + 150 | 0 | 0150 | 150 | 0 | 150 + 200 | 0 | 0200 | | | + 250 | 0 | 0250 | | | + 300 | 0 | 0300 | 300 | 0 | 300 + 350 | 0 | 0350 | | | + 400 | 0 | 0400 | | | + 450 | 0 | 0450 | 450 | 0 | 450 + 500 | 0 | 0500 | | | + 550 | 0 | 0550 | | | +(12 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.b) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Left Join + Hash Cond: ((t1.c)::text = (t2.c)::text) + Filter: ((t1.b + COALESCE(t2.b, 0)) = 0) + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Hash + -> Append + -> Hash Join + Hash Cond: (t2_1.a = t3_1.b) + -> Seq Scan on prt1_p1 t2_1 + -> Hash + -> Seq Scan on prt2_p1 t3_1 + -> Hash Join + Hash Cond: (t2_2.a = t3_2.b) + -> Seq Scan on prt1_p2 t2_2 + -> Hash + -> Seq Scan on prt2_p2 t3_2 + -> Hash Join + Hash Cond: (t2_3.a = t3_3.b) + -> Seq Scan on prt1_p3 t2_3 + -> Hash + -> Seq Scan on prt2_p3 t3_3 +(26 rows) + +SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.a) 
FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss + ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; + a | t2a | t2c +-----+-----+------ + 0 | 0 | 0000 + 50 | | + 100 | | + 150 | 150 | 0150 + 200 | | + 250 | | + 300 | 300 | 0300 + 350 | | + 400 | | + 450 | 450 | 0450 + 500 | | + 550 | | +(12 rows) + +SET max_parallel_workers_per_gather = 0; +-- If there are lateral references to the other relation in sample scan, +-- we cannot generate a partitionwise join. +EXPLAIN (COSTS OFF) +SELECT * FROM prt1 t1 JOIN LATERAL + (SELECT * FROM prt1 t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s + ON t1.a = s.a; + QUERY PLAN +--------------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Append + -> Sample Scan on prt1_p1 t2_1 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: (t1.a = a) + -> Sample Scan on prt1_p2 t2_2 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: (t1.a = a) + -> Sample Scan on prt1_p3 t2_3 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: (t1.a = a) +(15 rows) + +-- If there are lateral references to the other relation in scan's restriction +-- clauses, we cannot generate a partitionwise join. +EXPLAIN (COSTS OFF) +SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s + ON t1.a = s.b WHERE s.t1b = s.a; + QUERY PLAN +--------------------------------------------------------------- + Aggregate + -> Nested Loop + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Append + -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 + Index Cond: (b = t1.a) + Filter: (t1.b = a) + -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 + Index Cond: (b = t1.a) + Filter: (t1.b = a) + -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 + Index Cond: (b = t1.a) + Filter: (t1.b = a) +(16 rows) + +SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s + ON t1.a = s.b WHERE s.t1b = s.a; + count +------- + 100 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s + ON t1.a = s.b WHERE s.t1b = s.b; + QUERY PLAN +-------------------------------------------------------------------- + Aggregate + -> Nested Loop + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Append + -> Index Only Scan using iprt2_p1_b on prt2_p1 t2_1 + Index Cond: (b = t1.a) + Filter: (b = t1.b) + -> Index Only Scan using iprt2_p2_b on prt2_p2 t2_2 + Index Cond: (b = t1.a) + Filter: (b = t1.b) + -> Index Only Scan using iprt2_p3_b on prt2_p3 t2_3 + Index Cond: (b = t1.a) + Filter: (b = t1.b) +(16 rows) + +SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL + (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s + ON t1.a = s.b WHERE s.t1b = s.b; + count +------- + 5 +(1 row) + +RESET max_parallel_workers_per_gather; +-- bug with inadequate sort key representation +SET enable_partitionwise_aggregate TO true; +SET enable_hashjoin TO false; +EXPLAIN (COSTS OFF) +SELECT a, b FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) + WHERE a BETWEEN 490 AND 510 + GROUP BY 1, 2 ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Group + Group Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) + -> Merge Append + Sort Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) + -> Group + Group Key: 
(COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) + -> Sort + Sort Key: (COALESCE(prt1.a, p2.a)), (COALESCE(prt1.b, p2.b)) + -> Merge Full Join + Merge Cond: ((prt1.a = p2.a) AND (prt1.b = p2.b)) + Filter: ((COALESCE(prt1.a, p2.a) >= 490) AND (COALESCE(prt1.a, p2.a) <= 510)) + -> Sort + Sort Key: prt1.a, prt1.b + -> Seq Scan on prt1_p1 prt1 + -> Sort + Sort Key: p2.a, p2.b + -> Seq Scan on prt2_p1 p2 + -> Group + Group Key: (COALESCE(prt1_1.a, p2_1.a)), (COALESCE(prt1_1.b, p2_1.b)) + -> Sort + Sort Key: (COALESCE(prt1_1.a, p2_1.a)), (COALESCE(prt1_1.b, p2_1.b)) + -> Merge Full Join + Merge Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b)) + Filter: ((COALESCE(prt1_1.a, p2_1.a) >= 490) AND (COALESCE(prt1_1.a, p2_1.a) <= 510)) + -> Sort + Sort Key: prt1_1.a, prt1_1.b + -> Seq Scan on prt1_p2 prt1_1 + -> Sort + Sort Key: p2_1.a, p2_1.b + -> Seq Scan on prt2_p2 p2_1 + -> Group + Group Key: (COALESCE(prt1_2.a, p2_2.a)), (COALESCE(prt1_2.b, p2_2.b)) + -> Sort + Sort Key: (COALESCE(prt1_2.a, p2_2.a)), (COALESCE(prt1_2.b, p2_2.b)) + -> Merge Full Join + Merge Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b)) + Filter: ((COALESCE(prt1_2.a, p2_2.a) >= 490) AND (COALESCE(prt1_2.a, p2_2.a) <= 510)) + -> Sort + Sort Key: prt1_2.a, prt1_2.b + -> Seq Scan on prt1_p3 prt1_2 + -> Sort + Sort Key: p2_2.a, p2_2.b + -> Seq Scan on prt2_p3 p2_2 +(43 rows) + +SELECT a, b FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) + WHERE a BETWEEN 490 AND 510 + GROUP BY 1, 2 ORDER BY 1, 2; + a | b +-----+---- + 490 | 15 + 492 | 17 + 494 | 19 + 495 | 20 + 496 | 21 + 498 | 23 + 500 | 0 + 501 | 1 + 502 | 2 + 504 | 4 + 506 | 6 + 507 | 7 + 508 | 8 + 510 | 10 +(14 rows) + +RESET enable_partitionwise_aggregate; +RESET enable_hashjoin; +-- +-- partitioned by expression +-- +CREATE TABLE prt1_e (a int, b int, c int) PARTITION BY RANGE(((a + b)/2)); +CREATE TABLE prt1_e_p1 PARTITION OF prt1_e FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_e_p2 PARTITION OF prt1_e FOR VALUES FROM (250) TO (500); +CREATE TABLE prt1_e_p3 PARTITION OF prt1_e FOR VALUES FROM (500) TO (600); +INSERT INTO prt1_e SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; +CREATE INDEX iprt1_e_p1_ab2 on prt1_e_p1(((a+b)/2)); +CREATE INDEX iprt1_e_p2_ab2 on prt1_e_p2(((a+b)/2)); +CREATE INDEX iprt1_e_p3_ab2 on prt1_e_p3(((a+b)/2)); +ANALYZE prt1_e; +CREATE TABLE prt2_e (a int, b int, c int) PARTITION BY RANGE(((b + a)/2)); +CREATE TABLE prt2_e_p1 PARTITION OF prt2_e FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_e_p2 PARTITION OF prt2_e FOR VALUES FROM (250) TO (500); +CREATE TABLE prt2_e_p3 PARTITION OF prt2_e FOR VALUES FROM (500) TO (600); +INSERT INTO prt2_e SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; +ANALYZE prt2_e; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Join + Hash Cond: (((t2_1.b + t2_1.a) / 2) = ((t1_1.a + t1_1.b) / 2)) + -> Seq Scan on prt2_e_p1 t2_1 + -> Hash + -> Seq Scan on prt1_e_p1 t1_1 + Filter: (c = 0) + -> Hash Join + Hash Cond: (((t2_2.b + t2_2.a) / 2) = ((t1_2.a + t1_2.b) / 2)) + -> Seq Scan on prt2_e_p2 t2_2 + -> Hash + -> Seq Scan on prt1_e_p2 t1_2 + Filter: (c = 0) + -> Hash Join + Hash Cond: (((t2_3.b + t2_3.a) / 2) = ((t1_3.a + t1_3.b) / 2)) + -> Seq Scan on prt2_e_p3 t2_3 + -> Hash + -> Seq Scan on prt1_e_p3 t1_3 + Filter: (c = 0) +(21 rows) + +SELECT t1.a, t1.c, 
t2.b, t2.c FROM prt1_e t1, prt2_e t2 WHERE (t1.a + t1.b)/2 = (t2.b + t2.a)/2 AND t1.c = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+---+-----+--- + 0 | 0 | 0 | 0 + 150 | 0 | 150 | 0 + 300 | 0 | 300 | 0 + 450 | 0 | 450 | 0 +(4 rows) + +-- +-- N-way join +-- +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop + Join Filter: (t1_1.a = ((t3_1.a + t3_1.b) / 2)) + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p1 t2_1 + -> Hash + -> Seq Scan on prt1_p1 t1_1 + Filter: (b = 0) + -> Index Scan using iprt1_e_p1_ab2 on prt1_e_p1 t3_1 + Index Cond: (((a + b) / 2) = t2_1.b) + -> Nested Loop + Join Filter: (t1_2.a = ((t3_2.a + t3_2.b) / 2)) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p2 t2_2 + -> Hash + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Index Scan using iprt1_e_p2_ab2 on prt1_e_p2 t3_2 + Index Cond: (((a + b) / 2) = t2_2.b) + -> Nested Loop + Join Filter: (t1_3.a = ((t3_3.a + t3_3.b) / 2)) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_p3 t2_3 + -> Hash + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) + -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t3_3 + Index Cond: (((a + b) / 2) = t2_3.b) +(33 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM prt1 t1, prt2 t2, prt1_e t3 WHERE t1.a = t2.b AND t1.a = (t3.a + t3.b)/2 AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c | ?column? | c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b, ((t3.a + t3.b)) + -> Append + -> Hash Right Join + Hash Cond: (((t3_1.a + t3_1.b) / 2) = t1_1.a) + -> Seq Scan on prt1_e_p1 t3_1 + -> Hash + -> Hash Right Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p1 t2_1 + -> Hash + -> Seq Scan on prt1_p1 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (((t3_2.a + t3_2.b) / 2) = t1_2.a) + -> Seq Scan on prt1_e_p2 t3_2 + -> Hash + -> Hash Right Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p2 t2_2 + -> Hash + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (((t3_3.a + t3_3.b) / 2) = t1_3.a) + -> Seq Scan on prt1_e_p3 t3_3 + -> Hash + -> Hash Right Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_p3 t2_3 + -> Hash + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) +(33 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) LEFT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + a | c | b | c | ?column? 
| c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 50 | 0050 | | | 100 | 0 + 100 | 0100 | | | 200 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 200 | 0200 | | | 400 | 0 + 250 | 0250 | | | 500 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 350 | 0350 | | | 700 | 0 + 400 | 0400 | | | 800 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 + 500 | 0500 | | | 1000 | 0 + 550 | 0550 | | | 1100 | 0 +(12 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b, ((t3.a + t3.b)) + -> Append + -> Nested Loop Left Join + -> Hash Right Join + Hash Cond: (t1_1.a = ((t3_1.a + t3_1.b) / 2)) + -> Seq Scan on prt1_p1 t1_1 + -> Hash + -> Seq Scan on prt1_e_p1 t3_1 + Filter: (c = 0) + -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 + Index Cond: (b = t1_1.a) + -> Nested Loop Left Join + -> Hash Right Join + Hash Cond: (t1_2.a = ((t3_2.a + t3_2.b) / 2)) + -> Seq Scan on prt1_p2 t1_2 + -> Hash + -> Seq Scan on prt1_e_p2 t3_2 + Filter: (c = 0) + -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 + Index Cond: (b = t1_2.a) + -> Nested Loop Left Join + -> Hash Right Join + Hash Cond: (t1_3.a = ((t3_3.a + t3_3.b) / 2)) + -> Seq Scan on prt1_p3 t1_3 + -> Hash + -> Seq Scan on prt1_e_p3 t3_3 + Filter: (c = 0) + -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 + Index Cond: (b = t1_3.a) +(30 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + a | c | b | c | ?column? 
| c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 50 | 0050 | | | 100 | 0 + 100 | 0100 | | | 200 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 200 | 0200 | | | 400 | 0 + 250 | 0250 | | | 500 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 350 | 0350 | | | 700 | 0 + 400 | 0400 | | | 800 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 + 500 | 0500 | | | 1000 | 0 + 550 | 0550 | | | 1100 | 0 +(12 rows) + +-- +-- 3-way full join +-- +EXPLAIN (COSTS OFF) +SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) + WHERE a BETWEEN 490 AND 510; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Append + -> Hash Full Join + Hash Cond: ((COALESCE(prt1_1.a, p2_1.a) = p3_1.a) AND (COALESCE(prt1_1.b, p2_1.b) = p3_1.b)) + Filter: ((COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a) >= 490) AND (COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a) <= 510)) + -> Hash Full Join + Hash Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b)) + -> Seq Scan on prt1_p1 prt1_1 + -> Hash + -> Seq Scan on prt2_p1 p2_1 + -> Hash + -> Seq Scan on prt2_p1 p3_1 + -> Hash Full Join + Hash Cond: ((COALESCE(prt1_2.a, p2_2.a) = p3_2.a) AND (COALESCE(prt1_2.b, p2_2.b) = p3_2.b)) + Filter: ((COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a) >= 490) AND (COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a) <= 510)) + -> Hash Full Join + Hash Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b)) + -> Seq Scan on prt1_p2 prt1_2 + -> Hash + -> Seq Scan on prt2_p2 p2_2 + -> Hash + -> Seq Scan on prt2_p2 p3_2 + -> Hash Full Join + Hash Cond: ((COALESCE(prt1_3.a, p2_3.a) = p3_3.a) AND (COALESCE(prt1_3.b, p2_3.b) = p3_3.b)) + Filter: ((COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a) >= 490) AND (COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a) <= 510)) + -> Hash Full Join + Hash Cond: ((prt1_3.a = p2_3.a) AND (prt1_3.b = p2_3.b)) + -> Seq Scan on prt1_p3 prt1_3 + -> Hash + -> Seq Scan on prt2_p3 p2_3 + -> Hash + -> Seq Scan on prt2_p3 p3_3 +(32 rows) + +SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) + WHERE a BETWEEN 490 AND 510; + count +------- + 14 +(1 row) + +-- +-- 4-way full join +-- +EXPLAIN (COSTS OFF) +SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) FULL JOIN prt1 p4 (a,b,c) USING (a, b) + WHERE a BETWEEN 490 AND 510; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + -> Append + -> Hash Full Join + Hash Cond: ((COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a) = p4_1.a) AND (COALESCE(COALESCE(prt1_1.b, p2_1.b), p3_1.b) = p4_1.b)) + Filter: ((COALESCE(COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a), p4_1.a) >= 490) AND (COALESCE(COALESCE(COALESCE(prt1_1.a, p2_1.a), p3_1.a), p4_1.a) <= 510)) + -> Hash Full Join + Hash Cond: ((COALESCE(prt1_1.a, p2_1.a) = p3_1.a) AND (COALESCE(prt1_1.b, p2_1.b) = p3_1.b)) + -> Hash Full Join + Hash Cond: ((prt1_1.a = p2_1.a) AND (prt1_1.b = p2_1.b)) + -> Seq Scan on prt1_p1 prt1_1 + -> Hash + -> Seq Scan on prt2_p1 p2_1 + -> Hash + -> Seq Scan on prt2_p1 p3_1 + -> Hash + -> Seq Scan on prt1_p1 p4_1 + -> Hash Full Join + Hash Cond: ((COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a) = p4_2.a) AND (COALESCE(COALESCE(prt1_2.b, p2_2.b), p3_2.b) = p4_2.b)) + Filter: ((COALESCE(COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a), p4_2.a) 
>= 490) AND (COALESCE(COALESCE(COALESCE(prt1_2.a, p2_2.a), p3_2.a), p4_2.a) <= 510)) + -> Hash Full Join + Hash Cond: ((COALESCE(prt1_2.a, p2_2.a) = p3_2.a) AND (COALESCE(prt1_2.b, p2_2.b) = p3_2.b)) + -> Hash Full Join + Hash Cond: ((prt1_2.a = p2_2.a) AND (prt1_2.b = p2_2.b)) + -> Seq Scan on prt1_p2 prt1_2 + -> Hash + -> Seq Scan on prt2_p2 p2_2 + -> Hash + -> Seq Scan on prt2_p2 p3_2 + -> Hash + -> Seq Scan on prt1_p2 p4_2 + -> Hash Full Join + Hash Cond: ((COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a) = p4_3.a) AND (COALESCE(COALESCE(prt1_3.b, p2_3.b), p3_3.b) = p4_3.b)) + Filter: ((COALESCE(COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a), p4_3.a) >= 490) AND (COALESCE(COALESCE(COALESCE(prt1_3.a, p2_3.a), p3_3.a), p4_3.a) <= 510)) + -> Hash Full Join + Hash Cond: ((COALESCE(prt1_3.a, p2_3.a) = p3_3.a) AND (COALESCE(prt1_3.b, p2_3.b) = p3_3.b)) + -> Hash Full Join + Hash Cond: ((prt1_3.a = p2_3.a) AND (prt1_3.b = p2_3.b)) + -> Seq Scan on prt1_p3 prt1_3 + -> Hash + -> Seq Scan on prt2_p3 p2_3 + -> Hash + -> Seq Scan on prt2_p3 p3_3 + -> Hash + -> Seq Scan on prt1_p3 p4_3 +(44 rows) + +SELECT COUNT(*) FROM prt1 FULL JOIN prt2 p2(b,a,c) USING(a,b) FULL JOIN prt2 p3(b,a,c) USING (a, b) FULL JOIN prt1 p4 (a,b,c) USING (a, b) + WHERE a BETWEEN 490 AND 510; + count +------- + 14 +(1 row) + +-- Cases with non-nullable expressions in subquery results; +-- make sure these go to null as expected +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Sort + Sort Key: prt1.a, prt2.b, ((prt1_e.a + prt1_e.b)) + -> Append + -> Hash Full Join + Hash Cond: (prt1_1.a = ((prt1_e_1.a + prt1_e_1.b) / 2)) + Filter: ((prt1_1.a = (50)) OR (prt2_1.b = (75)) OR (((prt1_e_1.a + prt1_e_1.b) / 2) = (50))) + -> Hash Full Join + Hash Cond: (prt1_1.a = prt2_1.b) + -> Seq Scan on prt1_p1 prt1_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p1 prt2_1 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_e_p1 prt1_e_1 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: (prt1_2.a = ((prt1_e_2.a + prt1_e_2.b) / 2)) + Filter: ((prt1_2.a = (50)) OR (prt2_2.b = (75)) OR (((prt1_e_2.a + prt1_e_2.b) / 2) = (50))) + -> Hash Full Join + Hash Cond: (prt1_2.a = prt2_2.b) + -> Seq Scan on prt1_p2 prt1_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p2 prt2_2 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_e_p2 prt1_e_2 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: (prt1_3.a = ((prt1_e_3.a + prt1_e_3.b) / 2)) + Filter: ((prt1_3.a = (50)) OR (prt2_3.b = (75)) OR (((prt1_e_3.a + prt1_e_3.b) / 2) = (50))) + -> Hash Full Join + Hash Cond: (prt1_3.a = prt2_3.b) + -> Seq Scan on prt1_p3 prt1_3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_p3 prt2_3 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_e_p3 prt1_e_3 + Filter: (c = 0) +(42 rows) + +SELECT t1.a, t1.phv, t2.b, t2.phv, t3.a + t3.b, t3.phv FROM ((SELECT 50 phv, * FROM prt1 WHERE prt1.b = 0) t1 FULL JOIN (SELECT 75 phv, * FROM prt2 WHERE prt2.a = 0) t2 ON (t1.a = t2.b)) FULL JOIN (SELECT 50 phv, * FROM prt1_e WHERE prt1_e.c = 0) t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t1.a = t1.phv OR t2.b = t2.phv OR (t3.a + 
t3.b)/2 = t3.phv ORDER BY t1.a, t2.b, t3.a + t3.b; + a | phv | b | phv | ?column? | phv +----+-----+----+-----+----------+----- + 50 | 50 | | | 100 | 50 + | | 75 | 75 | | +(2 rows) + +-- Semi-join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop + Join Filter: (t1_2.a = t1_5.b) + -> HashAggregate + Group Key: t1_5.b + -> Hash Join + Hash Cond: (((t2_1.a + t2_1.b) / 2) = t1_5.b) + -> Seq Scan on prt1_e_p1 t2_1 + -> Hash + -> Seq Scan on prt2_p1 t1_5 + Filter: (a = 0) + -> Index Scan using iprt1_p1_a on prt1_p1 t1_2 + Index Cond: (a = ((t2_1.a + t2_1.b) / 2)) + Filter: (b = 0) + -> Nested Loop + Join Filter: (t1_3.a = t1_6.b) + -> HashAggregate + Group Key: t1_6.b + -> Hash Join + Hash Cond: (((t2_2.a + t2_2.b) / 2) = t1_6.b) + -> Seq Scan on prt1_e_p2 t2_2 + -> Hash + -> Seq Scan on prt2_p2 t1_6 + Filter: (a = 0) + -> Index Scan using iprt1_p2_a on prt1_p2 t1_3 + Index Cond: (a = ((t2_2.a + t2_2.b) / 2)) + Filter: (b = 0) + -> Nested Loop + Join Filter: (t1_4.a = t1_7.b) + -> HashAggregate + Group Key: t1_7.b + -> Nested Loop + -> Seq Scan on prt2_p3 t1_7 + Filter: (a = 0) + -> Index Scan using iprt1_e_p3_ab2 on prt1_e_p3 t2_3 + Index Cond: (((a + b) / 2) = t1_7.b) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_4 + Index Cond: (a = ((t2_3.a + t2_3.b) / 2)) + Filter: (b = 0) +(41 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1, prt1_e t2 WHERE t1.a = 0 AND t1.b = (t2.a + t2.b)/2) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +--------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop + -> HashAggregate + Group Key: t1_6.b + -> Hash Semi Join + Hash Cond: (t1_6.b = ((t1_9.a + t1_9.b) / 2)) + -> Seq Scan on prt2_p1 t1_6 + -> Hash + -> Seq Scan on prt1_e_p1 t1_9 + Filter: (c = 0) + -> Index Scan using iprt1_p1_a on prt1_p1 t1_3 + Index Cond: (a = t1_6.b) + Filter: (b = 0) + -> Nested Loop + -> HashAggregate + Group Key: t1_7.b + -> Hash Semi Join + Hash Cond: (t1_7.b = ((t1_10.a + t1_10.b) / 2)) + -> Seq Scan on prt2_p2 t1_7 + -> Hash + -> Seq Scan on prt1_e_p2 t1_10 + Filter: (c = 0) + -> Index Scan using iprt1_p2_a on prt1_p2 t1_4 + Index Cond: (a = t1_7.b) + Filter: (b = 0) + -> Nested Loop + -> HashAggregate + Group Key: t1_8.b + -> Hash Semi Join + Hash Cond: (t1_8.b = ((t1_11.a + t1_11.b) / 2)) + -> Seq Scan on prt2_p3 t1_8 + -> Hash + -> Seq Scan on prt1_e_p3 t1_11 + Filter: (c = 0) + -> Index Scan using iprt1_p3_a on prt1_p3 t1_5 + Index Cond: (a = t1_8.b) + Filter: (b = 0) +(39 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +-- test merge joins +SET enable_hashjoin TO off; +SET enable_nestloop TO off; +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + 
t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------------------ + Merge Append + Sort Key: t1.a + -> Merge Semi Join + Merge Cond: (t1_3.a = t1_6.b) + -> Sort + Sort Key: t1_3.a + -> Seq Scan on prt1_p1 t1_3 + Filter: (b = 0) + -> Merge Semi Join + Merge Cond: (t1_6.b = (((t1_9.a + t1_9.b) / 2))) + -> Sort + Sort Key: t1_6.b + -> Seq Scan on prt2_p1 t1_6 + -> Sort + Sort Key: (((t1_9.a + t1_9.b) / 2)) + -> Seq Scan on prt1_e_p1 t1_9 + Filter: (c = 0) + -> Merge Semi Join + Merge Cond: (t1_4.a = t1_7.b) + -> Sort + Sort Key: t1_4.a + -> Seq Scan on prt1_p2 t1_4 + Filter: (b = 0) + -> Merge Semi Join + Merge Cond: (t1_7.b = (((t1_10.a + t1_10.b) / 2))) + -> Sort + Sort Key: t1_7.b + -> Seq Scan on prt2_p2 t1_7 + -> Sort + Sort Key: (((t1_10.a + t1_10.b) / 2)) + -> Seq Scan on prt1_e_p2 t1_10 + Filter: (c = 0) + -> Merge Semi Join + Merge Cond: (t1_5.a = t1_8.b) + -> Sort + Sort Key: t1_5.a + -> Seq Scan on prt1_p3 t1_5 + Filter: (b = 0) + -> Merge Semi Join + Merge Cond: (t1_8.b = (((t1_11.a + t1_11.b) / 2))) + -> Sort + Sort Key: t1_8.b + -> Seq Scan on prt2_p3 t1_8 + -> Sort + Sort Key: (((t1_11.a + t1_11.b) / 2)) + -> Seq Scan on prt1_e_p3 t1_11 + Filter: (c = 0) +(47 rows) + +SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 0 | 0 | 0000 + 150 | 0 | 0150 + 300 | 0 | 0300 + 450 | 0 | 0450 +(4 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + QUERY PLAN +---------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.b, ((t3.a + t3.b)) + -> Append + -> Merge Left Join + Merge Cond: (t1_1.a = t2_1.b) + -> Sort + Sort Key: t1_1.a + -> Merge Left Join + Merge Cond: ((((t3_1.a + t3_1.b) / 2)) = t1_1.a) + -> Sort + Sort Key: (((t3_1.a + t3_1.b) / 2)) + -> Seq Scan on prt1_e_p1 t3_1 + Filter: (c = 0) + -> Sort + Sort Key: t1_1.a + -> Seq Scan on prt1_p1 t1_1 + -> Sort + Sort Key: t2_1.b + -> Seq Scan on prt2_p1 t2_1 + -> Merge Left Join + Merge Cond: (t1_2.a = t2_2.b) + -> Sort + Sort Key: t1_2.a + -> Merge Left Join + Merge Cond: ((((t3_2.a + t3_2.b) / 2)) = t1_2.a) + -> Sort + Sort Key: (((t3_2.a + t3_2.b) / 2)) + -> Seq Scan on prt1_e_p2 t3_2 + Filter: (c = 0) + -> Sort + Sort Key: t1_2.a + -> Seq Scan on prt1_p2 t1_2 + -> Sort + Sort Key: t2_2.b + -> Seq Scan on prt2_p2 t2_2 + -> Merge Left Join + Merge Cond: (t1_3.a = t2_3.b) + -> Sort + Sort Key: t1_3.a + -> Merge Left Join + Merge Cond: ((((t3_3.a + t3_3.b) / 2)) = t1_3.a) + -> Sort + Sort Key: (((t3_3.a + t3_3.b) / 2)) + -> Seq Scan on prt1_e_p3 t3_3 + Filter: (c = 0) + -> Sort + Sort Key: t1_3.a + -> Seq Scan on prt1_p3 t1_3 + -> Sort + Sort Key: t2_3.b + -> Seq Scan on prt2_p3 t2_3 +(51 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; + a | c | b | c | ?column? 
| c +-----+------+-----+------+----------+--- + 0 | 0000 | 0 | 0000 | 0 | 0 + 50 | 0050 | | | 100 | 0 + 100 | 0100 | | | 200 | 0 + 150 | 0150 | 150 | 0150 | 300 | 0 + 200 | 0200 | | | 400 | 0 + 250 | 0250 | | | 500 | 0 + 300 | 0300 | 300 | 0300 | 600 | 0 + 350 | 0350 | | | 700 | 0 + 400 | 0400 | | | 800 | 0 + 450 | 0450 | 450 | 0450 | 900 | 0 + 500 | 0500 | | | 1000 | 0 + 550 | 0550 | | | 1100 | 0 +(12 rows) + +-- MergeAppend on nullable column +-- This should generate a partitionwise join, but currently fails to +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------------- + Incremental Sort + Sort Key: prt1.a, prt2.b + Presorted Key: prt1.a + -> Merge Left Join + Merge Cond: (prt1.a = prt2.b) + -> Sort + Sort Key: prt1.a + -> Append + -> Seq Scan on prt1_p1 prt1_1 + Filter: ((a < 450) AND (b = 0)) + -> Seq Scan on prt1_p2 prt1_2 + Filter: ((a < 450) AND (b = 0)) + -> Sort + Sort Key: prt2.b + -> Append + -> Seq Scan on prt2_p2 prt2_1 + Filter: (b > 250) + -> Seq Scan on prt2_p3 prt2_2 + Filter: (b > 250) +(19 rows) + +SELECT t1.a, t2.b FROM (SELECT * FROM prt1 WHERE a < 450) t1 LEFT JOIN (SELECT * FROM prt2 WHERE b > 250) t2 ON t1.a = t2.b WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | b +-----+----- + 0 | + 50 | + 100 | + 150 | + 200 | + 250 | + 300 | 300 + 350 | + 400 | +(9 rows) + +-- merge join when expression with whole-row reference needs to be sorted; +-- partitionwise join does not apply +EXPLAIN (COSTS OFF) +SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; + QUERY PLAN +----------------------------------------------------------------------------------------- + Merge Join + Merge Cond: ((t1.a = t2.b) AND (((((t1.*)::prt1))::text) = ((((t2.*)::prt2))::text))) + -> Sort + Sort Key: t1.a, ((((t1.*)::prt1))::text) + -> Result + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Sort + Sort Key: t2.b, ((((t2.*)::prt2))::text) + -> Result + -> Append + -> Seq Scan on prt2_p1 t2_1 + -> Seq Scan on prt2_p2 t2_2 + -> Seq Scan on prt2_p3 t2_3 +(16 rows) + +SELECT t1.a, t2.b FROM prt1 t1, prt2 t2 WHERE t1::text = t2::text AND t1.a = t2.b ORDER BY t1.a; + a | b +----+---- + 0 | 0 + 6 | 6 + 12 | 12 + 18 | 18 + 24 | 24 +(5 rows) + +RESET enable_hashjoin; +RESET enable_nestloop; +-- +-- partitioned by multiple columns +-- +CREATE TABLE prt1_m (a int, b int, c int) PARTITION BY RANGE(a, ((a + b)/2)); +CREATE TABLE prt1_m_p1 PARTITION OF prt1_m FOR VALUES FROM (0, 0) TO (250, 250); +CREATE TABLE prt1_m_p2 PARTITION OF prt1_m FOR VALUES FROM (250, 250) TO (500, 500); +CREATE TABLE prt1_m_p3 PARTITION OF prt1_m FOR VALUES FROM (500, 500) TO (600, 600); +INSERT INTO prt1_m SELECT i, i, i % 25 FROM generate_series(0, 599, 2) i; +ANALYZE prt1_m; +CREATE TABLE prt2_m (a int, b int, c int) PARTITION BY RANGE(((b + a)/2), b); +CREATE TABLE prt2_m_p1 PARTITION OF prt2_m FOR VALUES FROM (0, 0) TO (250, 250); +CREATE TABLE prt2_m_p2 PARTITION OF prt2_m FOR VALUES FROM (250, 250) TO (500, 500); +CREATE TABLE prt2_m_p3 PARTITION OF prt2_m FOR VALUES FROM (500, 500) TO (600, 600); +INSERT INTO prt2_m SELECT i, i, i % 25 FROM generate_series(0, 599, 3) i; +ANALYZE prt2_m; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 
ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Sort + Sort Key: prt1_m.a, prt2_m.b + -> Append + -> Hash Full Join + Hash Cond: ((prt1_m_1.a = ((prt2_m_1.b + prt2_m_1.a) / 2)) AND (((prt1_m_1.a + prt1_m_1.b) / 2) = prt2_m_1.b)) + -> Seq Scan on prt1_m_p1 prt1_m_1 + Filter: (c = 0) + -> Hash + -> Seq Scan on prt2_m_p1 prt2_m_1 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: ((prt1_m_2.a = ((prt2_m_2.b + prt2_m_2.a) / 2)) AND (((prt1_m_2.a + prt1_m_2.b) / 2) = prt2_m_2.b)) + -> Seq Scan on prt1_m_p2 prt1_m_2 + Filter: (c = 0) + -> Hash + -> Seq Scan on prt2_m_p2 prt2_m_2 + Filter: (c = 0) + -> Hash Full Join + Hash Cond: ((prt1_m_3.a = ((prt2_m_3.b + prt2_m_3.a) / 2)) AND (((prt1_m_3.a + prt1_m_3.b) / 2) = prt2_m_3.b)) + -> Seq Scan on prt1_m_p3 prt1_m_3 + Filter: (c = 0) + -> Hash + -> Seq Scan on prt2_m_p3 prt2_m_3 + Filter: (c = 0) +(24 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_m WHERE prt1_m.c = 0) t1 FULL JOIN (SELECT * FROM prt2_m WHERE prt2_m.c = 0) t2 ON (t1.a = (t2.b + t2.a)/2 AND t2.b = (t1.a + t1.b)/2) ORDER BY t1.a, t2.b; + a | c | b | c +-----+---+-----+--- + 0 | 0 | 0 | 0 + 50 | 0 | | + 100 | 0 | | + 150 | 0 | 150 | 0 + 200 | 0 | | + 250 | 0 | | + 300 | 0 | 300 | 0 + 350 | 0 | | + 400 | 0 | | + 450 | 0 | 450 | 0 + 500 | 0 | | + 550 | 0 | | + | | 75 | 0 + | | 225 | 0 + | | 375 | 0 + | | 525 | 0 +(16 rows) + +-- +-- tests for list partitioned tables. +-- +CREATE TABLE plt1 (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE plt1_p1 PARTITION OF plt1 FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt1_p2 PARTITION OF plt1 FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt1_p3 PARTITION OF plt1 FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE plt1; +CREATE TABLE plt2 (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE plt2_p1 PARTITION OF plt2 FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt2_p2 PARTITION OF plt2 FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt2_p3 PARTITION OF plt2 FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE plt2; +-- +-- list partitioned by expression +-- +CREATE TABLE plt1_e (a int, b int, c text) PARTITION BY LIST(ltrim(c, 'A')); +CREATE TABLE plt1_e_p1 PARTITION OF plt1_e FOR VALUES IN ('0000', '0003', '0004', '0010'); +CREATE TABLE plt1_e_p2 PARTITION OF plt1_e FOR VALUES IN ('0001', '0005', '0002', '0009'); +CREATE TABLE plt1_e_p3 PARTITION OF plt1_e FOR VALUES IN ('0006', '0007', '0008', '0011'); +INSERT INTO plt1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE plt1_e; +-- test partition matching with N-way join +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + QUERY PLAN +-------------------------------------------------------------------------------- + GroupAggregate + Group Key: t1.c, t3.c + -> Sort + Sort Key: t1.c, t3.c + -> Append + -> Hash Join + Hash Cond: (t1_1.c = ltrim(t3_1.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_1.b = t2_1.b) AND (t1_1.c = 
t2_1.c)) + -> Seq Scan on plt1_p1 t1_1 + -> Hash + -> Seq Scan on plt2_p1 t2_1 + -> Hash + -> Seq Scan on plt1_e_p1 t3_1 + -> Hash Join + Hash Cond: (t1_2.c = ltrim(t3_2.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) + -> Seq Scan on plt1_p2 t1_2 + -> Hash + -> Seq Scan on plt2_p2 t2_2 + -> Hash + -> Seq Scan on plt1_e_p2 t3_2 + -> Hash Join + Hash Cond: (t1_3.c = ltrim(t3_3.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) + -> Seq Scan on plt1_p3 t1_3 + -> Hash + -> Seq Scan on plt2_p3 t2_3 + -> Hash + -> Seq Scan on plt1_e_p3 t3_3 +(32 rows) + +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM plt1 t1, plt2 t2, plt1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + avg | avg | avg | c | c | c +----------------------+----------------------+-----------------------+------+------+------- + 24.0000000000000000 | 24.0000000000000000 | 48.0000000000000000 | 0000 | 0000 | A0000 + 75.0000000000000000 | 75.0000000000000000 | 148.0000000000000000 | 0001 | 0001 | A0001 + 123.0000000000000000 | 123.0000000000000000 | 248.0000000000000000 | 0002 | 0002 | A0002 + 174.0000000000000000 | 174.0000000000000000 | 348.0000000000000000 | 0003 | 0003 | A0003 + 225.0000000000000000 | 225.0000000000000000 | 448.0000000000000000 | 0004 | 0004 | A0004 + 273.0000000000000000 | 273.0000000000000000 | 548.0000000000000000 | 0005 | 0005 | A0005 + 324.0000000000000000 | 324.0000000000000000 | 648.0000000000000000 | 0006 | 0006 | A0006 + 375.0000000000000000 | 375.0000000000000000 | 748.0000000000000000 | 0007 | 0007 | A0007 + 423.0000000000000000 | 423.0000000000000000 | 848.0000000000000000 | 0008 | 0008 | A0008 + 474.0000000000000000 | 474.0000000000000000 | 948.0000000000000000 | 0009 | 0009 | A0009 + 525.0000000000000000 | 525.0000000000000000 | 1048.0000000000000000 | 0010 | 0010 | A0010 + 573.0000000000000000 | 573.0000000000000000 | 1148.0000000000000000 | 0011 | 0011 | A0011 +(12 rows) + +-- joins where one of the relations is proven empty +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.a = 1 AND t1.a = 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 LEFT JOIN prt2 t2 ON t1.a = t2.b; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2 t2 ON t1.a = t2.b, prt1 t3 WHERE t2.b = t3.a; + QUERY PLAN +-------------------------------------------------- + Hash Left Join + Hash Cond: (t2.b = a) + -> Append + -> Hash Join + Hash Cond: (t3_1.a = t2_1.b) + -> Seq Scan on prt1_p1 t3_1 + -> Hash + -> Seq Scan on prt2_p1 t2_1 + -> Hash Join + Hash Cond: (t3_2.a = t2_2.b) + -> Seq Scan on prt1_p2 t3_2 + -> Hash + -> Seq Scan on prt2_p2 t2_2 + -> Hash Join + Hash Cond: (t3_3.a = t2_3.b) + -> Seq Scan on prt1_p3 t3_3 + -> Hash + -> Seq Scan on prt2_p3 t2_3 + -> Hash + -> Result + One-Time Filter: false +(21 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1 WHERE a = 1 AND a = 2) t1 FULL JOIN prt2 t2 ON t1.a = t2.b WHERE t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------- + Sort + Sort Key: a, t2.b + -> Hash Left Join + Hash Cond: (t2.b = a) + -> Append + 
-> Seq Scan on prt2_p1 t2_1 + Filter: (a = 0) + -> Seq Scan on prt2_p2 t2_2 + Filter: (a = 0) + -> Seq Scan on prt2_p3 t2_3 + Filter: (a = 0) + -> Hash + -> Result + One-Time Filter: false +(14 rows) + +-- +-- tests for hash partitioned tables. +-- +CREATE TABLE pht1 (a int, b int, c text) PARTITION BY HASH(c); +CREATE TABLE pht1_p1 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht1_p2 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht1_p3 PARTITION OF pht1 FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht1 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE pht1; +CREATE TABLE pht2 (a int, b int, c text) PARTITION BY HASH(c); +CREATE TABLE pht2_p1 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht2_p2 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht2_p3 PARTITION OF pht2 FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht2 SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE pht2; +-- +-- hash partitioned by expression +-- +CREATE TABLE pht1_e (a int, b int, c text) PARTITION BY HASH(ltrim(c, 'A')); +CREATE TABLE pht1_e_p1 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 0); +CREATE TABLE pht1_e_p2 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 1); +CREATE TABLE pht1_e_p3 PARTITION OF pht1_e FOR VALUES WITH (MODULUS 3, REMAINDER 2); +INSERT INTO pht1_e SELECT i, i, 'A' || to_char(i/50, 'FM0000') FROM generate_series(0, 299, 2) i; +ANALYZE pht1_e; +-- test partition matching with N-way join +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + QUERY PLAN +-------------------------------------------------------------------------------- + GroupAggregate + Group Key: t1.c, t3.c + -> Sort + Sort Key: t1.c, t3.c + -> Append + -> Hash Join + Hash Cond: (t1_1.c = ltrim(t3_1.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) + -> Seq Scan on pht1_p1 t1_1 + -> Hash + -> Seq Scan on pht2_p1 t2_1 + -> Hash + -> Seq Scan on pht1_e_p1 t3_1 + -> Hash Join + Hash Cond: (t1_2.c = ltrim(t3_2.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) + -> Seq Scan on pht1_p2 t1_2 + -> Hash + -> Seq Scan on pht2_p2 t2_2 + -> Hash + -> Seq Scan on pht1_e_p2 t3_2 + -> Hash Join + Hash Cond: (t1_3.c = ltrim(t3_3.c, 'A'::text)) + -> Hash Join + Hash Cond: ((t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) + -> Seq Scan on pht1_p3 t1_3 + -> Hash + -> Seq Scan on pht2_p3 t2_3 + -> Hash + -> Seq Scan on pht1_e_p3 t3_3 +(32 rows) + +SELECT avg(t1.a), avg(t2.b), avg(t3.a + t3.b), t1.c, t2.c, t3.c FROM pht1 t1, pht2 t2, pht1_e t3 WHERE t1.b = t2.b AND t1.c = t2.c AND ltrim(t3.c, 'A') = t1.c GROUP BY t1.c, t2.c, t3.c ORDER BY t1.c, t2.c, t3.c; + avg | avg | avg | c | c | c +----------------------+----------------------+----------------------+------+------+------- + 24.0000000000000000 | 24.0000000000000000 | 48.0000000000000000 | 0000 | 0000 | A0000 + 75.0000000000000000 | 75.0000000000000000 | 148.0000000000000000 | 0001 | 0001 | A0001 + 123.0000000000000000 | 123.0000000000000000 | 248.0000000000000000 | 0002 | 0002 | A0002 + 174.0000000000000000 | 174.0000000000000000 | 348.0000000000000000 | 0003 | 0003 | A0003 + 225.0000000000000000 | 225.0000000000000000 | 448.0000000000000000 | 0004 
| 0004 | A0004 + 273.0000000000000000 | 273.0000000000000000 | 548.0000000000000000 | 0005 | 0005 | A0005 +(6 rows) + +-- test default partition behavior for range +ALTER TABLE prt1 DETACH PARTITION prt1_p3; +ALTER TABLE prt1 ATTACH PARTITION prt1_p3 DEFAULT; +ANALYZE prt1; +ALTER TABLE prt2 DETACH PARTITION prt2_p3; +ALTER TABLE prt2 ATTACH PARTITION prt2_p3 DEFAULT; +ANALYZE prt2; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_p1 t2_1 + -> Hash + -> Seq Scan on prt1_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_p2 t2_2 + -> Hash + -> Seq Scan on prt1_p2 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_p3 t2_3 + -> Hash + -> Seq Scan on prt1_p3 t1_3 + Filter: (b = 0) +(21 rows) + +-- test default partition behavior for list +ALTER TABLE plt1 DETACH PARTITION plt1_p3; +ALTER TABLE plt1 ATTACH PARTITION plt1_p3 DEFAULT; +ANALYZE plt1; +ALTER TABLE plt2 DETACH PARTITION plt2_p3; +ALTER TABLE plt2 ATTACH PARTITION plt2_p3 DEFAULT; +ANALYZE plt2; +EXPLAIN (COSTS OFF) +SELECT avg(t1.a), avg(t2.b), t1.c, t2.c FROM plt1 t1 RIGHT JOIN plt2 t2 ON t1.c = t2.c WHERE t1.a % 25 = 0 GROUP BY t1.c, t2.c ORDER BY t1.c, t2.c; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: t1.c + -> HashAggregate + Group Key: t1.c + -> Append + -> Hash Join + Hash Cond: (t2_1.c = t1_1.c) + -> Seq Scan on plt2_p1 t2_1 + -> Hash + -> Seq Scan on plt1_p1 t1_1 + Filter: ((a % 25) = 0) + -> Hash Join + Hash Cond: (t2_2.c = t1_2.c) + -> Seq Scan on plt2_p2 t2_2 + -> Hash + -> Seq Scan on plt1_p2 t1_2 + Filter: ((a % 25) = 0) + -> Hash Join + Hash Cond: (t2_3.c = t1_3.c) + -> Seq Scan on plt2_p3 t2_3 + -> Hash + -> Seq Scan on plt1_p3 t1_3 + Filter: ((a % 25) = 0) +(23 rows) + +-- +-- multiple levels of partitioning +-- +CREATE TABLE prt1_l (a int, b int, c varchar) PARTITION BY RANGE(a); +CREATE TABLE prt1_l_p1 PARTITION OF prt1_l FOR VALUES FROM (0) TO (250); +CREATE TABLE prt1_l_p2 PARTITION OF prt1_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); +CREATE TABLE prt1_l_p2_p1 PARTITION OF prt1_l_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE prt1_l_p2_p2 PARTITION OF prt1_l_p2 FOR VALUES IN ('0002', '0003'); +CREATE TABLE prt1_l_p3 PARTITION OF prt1_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (b); +CREATE TABLE prt1_l_p3_p1 PARTITION OF prt1_l_p3 FOR VALUES FROM (0) TO (13); +CREATE TABLE prt1_l_p3_p2 PARTITION OF prt1_l_p3 FOR VALUES FROM (13) TO (25); +INSERT INTO prt1_l SELECT i, i % 25, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt1_l; +CREATE TABLE prt2_l (a int, b int, c varchar) PARTITION BY RANGE(b); +CREATE TABLE prt2_l_p1 PARTITION OF prt2_l FOR VALUES FROM (0) TO (250); +CREATE TABLE prt2_l_p2 PARTITION OF prt2_l FOR VALUES FROM (250) TO (500) PARTITION BY LIST (c); +CREATE TABLE prt2_l_p2_p1 PARTITION OF prt2_l_p2 FOR VALUES IN ('0000', '0001'); +CREATE TABLE prt2_l_p2_p2 PARTITION OF prt2_l_p2 FOR VALUES IN ('0002', '0003'); +CREATE TABLE prt2_l_p3 PARTITION OF prt2_l FOR VALUES FROM (500) TO (600) PARTITION BY RANGE (a); +CREATE TABLE prt2_l_p3_p1 PARTITION OF prt2_l_p3 FOR VALUES FROM (0) TO (13); +CREATE TABLE prt2_l_p3_p2 PARTITION OF prt2_l_p3 FOR VALUES FROM (13) TO (25); +INSERT INTO prt2_l 
SELECT i % 25, i, to_char(i % 4, 'FM0000') FROM generate_series(0, 599, 3) i; +ANALYZE prt2_l; +-- inner join, qual covering only top-level partitions +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_l_p1 t2_1 + -> Hash + -> Seq Scan on prt1_l_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Append + -> Seq Scan on prt2_l_p2_p1 t2_3 + -> Seq Scan on prt2_l_p2_p2 t2_4 + -> Hash + -> Append + -> Seq Scan on prt1_l_p2_p1 t1_3 + Filter: (b = 0) + -> Seq Scan on prt1_l_p2_p2 t1_4 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_6.b = t1_5.a) + -> Append + -> Seq Scan on prt2_l_p3_p1 t2_6 + -> Seq Scan on prt2_l_p3_p2 t2_7 + -> Hash + -> Seq Scan on prt1_l_p3_p1 t1_5 + Filter: (b = 0) +(28 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1, prt2_l t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0002 | 150 | 0002 + 300 | 0000 | 300 | 0000 + 450 | 0002 | 450 | 0002 +(4 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: ((t2_1.b = t1_1.a) AND ((t2_1.c)::text = (t1_1.c)::text)) + -> Seq Scan on prt2_l_p1 t2_1 + -> Hash + -> Seq Scan on prt1_l_p1 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: ((t2_2.b = t1_2.a) AND ((t2_2.c)::text = (t1_2.c)::text)) + -> Seq Scan on prt2_l_p2_p1 t2_2 + -> Hash + -> Seq Scan on prt1_l_p2_p1 t1_2 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: ((t2_3.b = t1_3.a) AND ((t2_3.c)::text = (t1_3.c)::text)) + -> Seq Scan on prt2_l_p2_p2 t2_3 + -> Hash + -> Seq Scan on prt1_l_p2_p2 t1_3 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: ((t2_5.b = t1_4.a) AND ((t2_5.c)::text = (t1_4.c)::text)) + -> Append + -> Seq Scan on prt2_l_p3_p1 t2_5 + -> Seq Scan on prt2_l_p3_p2 t2_6 + -> Hash + -> Seq Scan on prt1_l_p3_p1 t1_4 + Filter: (b = 0) +(29 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 LEFT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 50 | 0002 | | + 100 | 0000 | | + 150 | 0002 | 150 | 0002 + 200 | 0000 | | + 250 | 0002 | | + 300 | 0000 | 300 | 0000 + 350 | 0002 | | + 400 | 0000 | | + 450 | 0002 | 450 | 0002 + 500 | 0000 | | + 550 | 0002 | | +(12 rows) + +-- right join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: ((t1_1.a = t2_1.b) AND ((t1_1.c)::text = (t2_1.c)::text)) + -> Seq Scan on prt1_l_p1 t1_1 + -> Hash + -> Seq Scan on prt2_l_p1 t2_1 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: ((t1_2.a = t2_2.b) AND ((t1_2.c)::text = (t2_2.c)::text)) + -> Seq Scan on prt1_l_p2_p1 t1_2 + -> Hash + -> Seq Scan on prt2_l_p2_p1 t2_2 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: ((t1_3.a = t2_3.b) AND 
((t1_3.c)::text = (t2_3.c)::text)) + -> Seq Scan on prt1_l_p2_p2 t1_3 + -> Hash + -> Seq Scan on prt2_l_p2_p2 t2_3 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: ((t1_5.a = t2_4.b) AND ((t1_5.c)::text = (t2_4.c)::text)) + -> Append + -> Seq Scan on prt1_l_p3_p1 t1_5 + -> Seq Scan on prt1_l_p3_p2 t1_6 + -> Hash + -> Seq Scan on prt2_l_p3_p1 t2_4 + Filter: (a = 0) +(29 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_l t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.c = t2.c WHERE t2.a = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 150 | 0002 | 150 | 0002 + 300 | 0000 | 300 | 0000 + 450 | 0002 | 450 | 0002 + | | 75 | 0003 + | | 225 | 0001 + | | 375 | 0003 + | | 525 | 0001 +(8 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Sort + Sort Key: prt1_l.a, prt2_l.b + -> Append + -> Hash Full Join + Hash Cond: ((prt1_l_1.a = prt2_l_1.b) AND ((prt1_l_1.c)::text = (prt2_l_1.c)::text)) + -> Seq Scan on prt1_l_p1 prt1_l_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p1 prt2_l_1 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: ((prt1_l_2.a = prt2_l_2.b) AND ((prt1_l_2.c)::text = (prt2_l_2.c)::text)) + -> Seq Scan on prt1_l_p2_p1 prt1_l_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p2_p1 prt2_l_2 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: ((prt1_l_3.a = prt2_l_3.b) AND ((prt1_l_3.c)::text = (prt2_l_3.c)::text)) + -> Seq Scan on prt1_l_p2_p2 prt1_l_3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p2_p2 prt2_l_3 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: ((prt1_l_4.a = prt2_l_4.b) AND ((prt1_l_4.c)::text = (prt2_l_4.c)::text)) + -> Seq Scan on prt1_l_p3_p1 prt1_l_4 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_l_p3_p1 prt2_l_4 + Filter: (a = 0) +(31 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE prt1_l.b = 0) t1 FULL JOIN (SELECT * FROM prt2_l WHERE prt2_l.a = 0) t2 ON (t1.a = t2.b AND t1.c = t2.c) ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 0 | 0000 | 0 | 0000 + 50 | 0002 | | + 100 | 0000 | | + 150 | 0002 | 150 | 0002 + 200 | 0000 | | + 250 | 0002 | | + 300 | 0000 | 300 | 0000 + 350 | 0002 | | + 400 | 0000 | | + 450 | 0002 | 450 | 0002 + 500 | 0000 | | + 550 | 0002 | | + | | 75 | 0003 + | | 225 | 0001 + | | 375 | 0003 + | | 525 | 0001 +(16 rows) + +-- lateral partitionwise join +EXPLAIN (COSTS OFF) +SELECT * FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss + ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_1.b = t2_1.a) AND ((t3_1.c)::text = (t2_1.c)::text)) + -> Seq Scan on prt2_l_p1 t3_1 + -> Hash + -> Seq Scan on prt1_l_p1 t2_1 + Filter: ((t1_1.a = a) AND ((t1_1.c)::text = (c)::text)) + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p2_p1 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_2.b = t2_2.a) AND ((t3_2.c)::text = (t2_2.c)::text)) + -> Seq Scan on 
prt2_l_p2_p1 t3_2 + -> Hash + -> Seq Scan on prt1_l_p2_p1 t2_2 + Filter: ((t1_2.a = a) AND ((t1_2.c)::text = (c)::text)) + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p2_p2 t1_3 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_3.b = t2_3.a) AND ((t3_3.c)::text = (t2_3.c)::text)) + -> Seq Scan on prt2_l_p2_p2 t3_3 + -> Hash + -> Seq Scan on prt1_l_p2_p2 t2_3 + Filter: ((t1_3.a = a) AND ((t1_3.c)::text = (c)::text)) + -> Nested Loop Left Join + -> Seq Scan on prt1_l_p3_p1 t1_4 + Filter: (b = 0) + -> Hash Join + Hash Cond: ((t3_5.b = t2_5.a) AND ((t3_5.c)::text = (t2_5.c)::text)) + -> Append + -> Seq Scan on prt2_l_p3_p1 t3_5 + -> Seq Scan on prt2_l_p3_p2 t3_6 + -> Hash + -> Append + -> Seq Scan on prt1_l_p3_p1 t2_5 + Filter: ((t1_4.a = a) AND ((t1_4.c)::text = (c)::text)) + -> Seq Scan on prt1_l_p3_p2 t2_6 + Filter: ((t1_4.a = a) AND ((t1_4.c)::text = (c)::text)) +(44 rows) + +SELECT * FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss + ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a; + a | b | c | t2a | t2c | t2b | t3b | least +-----+---+------+-----+------+-----+-----+------- + 0 | 0 | 0000 | 0 | 0000 | 0 | 0 | 0 + 50 | 0 | 0002 | | | | | + 100 | 0 | 0000 | | | | | + 150 | 0 | 0002 | 150 | 0002 | 0 | 150 | 150 + 200 | 0 | 0000 | | | | | + 250 | 0 | 0002 | | | | | + 300 | 0 | 0000 | 300 | 0000 | 0 | 300 | 300 + 350 | 0 | 0002 | | | | | + 400 | 0 | 0000 | | | | | + 450 | 0 | 0002 | 450 | 0002 | 0 | 450 | 450 + 500 | 0 | 0000 | | | | | + 550 | 0 | 0002 | | | | | +(12 rows) + +SET max_parallel_workers_per_gather = 0; +-- If there are lateral references to the other relation in sample scan, +-- we cannot generate a partitionwise join. +EXPLAIN (COSTS OFF) +SELECT * FROM prt1_l t1 JOIN LATERAL + (SELECT * FROM prt1_l t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s + ON t1.a = s.a AND t1.b = s.b AND t1.c = s.c; + QUERY PLAN +---------------------------------------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on prt1_l_p1 t1_1 + -> Seq Scan on prt1_l_p2_p1 t1_2 + -> Seq Scan on prt1_l_p2_p2 t1_3 + -> Seq Scan on prt1_l_p3_p1 t1_4 + -> Seq Scan on prt1_l_p3_p2 t1_5 + -> Append + -> Sample Scan on prt1_l_p1 t2_1 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text)) + -> Sample Scan on prt1_l_p2_p1 t2_2 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text)) + -> Sample Scan on prt1_l_p2_p2 t2_3 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text)) + -> Sample Scan on prt1_l_p3_p1 t2_4 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text)) + -> Sample Scan on prt1_l_p3_p2 t2_5 + Sampling: system (t1.a) REPEATABLE (t1.b) + Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text)) +(23 rows) + +-- If there are lateral references to the other relation in scan's restriction +-- clauses, we cannot generate a partitionwise join. 
+EXPLAIN (COSTS OFF) +SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s + ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c + WHERE s.t1b = s.a; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Aggregate + -> Nested Loop + -> Append + -> Seq Scan on prt1_l_p1 t1_1 + -> Seq Scan on prt1_l_p2_p1 t1_2 + -> Seq Scan on prt1_l_p2_p2 t1_3 + -> Seq Scan on prt1_l_p3_p1 t1_4 + -> Seq Scan on prt1_l_p3_p2 t1_5 + -> Append + -> Seq Scan on prt2_l_p1 t2_1 + Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text)) + -> Seq Scan on prt2_l_p2_p1 t2_2 + Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text)) + -> Seq Scan on prt2_l_p2_p2 t2_3 + Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text)) + -> Seq Scan on prt2_l_p3_p1 t2_4 + Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text)) + -> Seq Scan on prt2_l_p3_p2 t2_5 + Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text)) +(19 rows) + +SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL + (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s + ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c + WHERE s.t1b = s.a; + count +------- + 100 +(1 row) + +RESET max_parallel_workers_per_gather; +-- join with one side empty +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.b = t2.a AND t1.c = t2.c; + QUERY PLAN +------------------------------------------------------------------------- + Hash Left Join + Hash Cond: ((t2.b = a) AND (t2.a = b) AND ((t2.c)::text = (c)::text)) + -> Append + -> Seq Scan on prt2_l_p1 t2_1 + -> Seq Scan on prt2_l_p2_p1 t2_2 + -> Seq Scan on prt2_l_p2_p2 t2_3 + -> Seq Scan on prt2_l_p3_p1 t2_4 + -> Seq Scan on prt2_l_p3_p2 t2_5 + -> Hash + -> Result + One-Time Filter: false +(11 rows) + +-- Test case to verify proper handling of subqueries in a partitioned delete. +-- The weird-looking lateral join is just there to force creation of a +-- nestloop parameter within the subquery, which exposes the problem if the +-- planner fails to make multiple copies of the subquery as appropriate. 
+EXPLAIN (COSTS OFF) +DELETE FROM prt1_l +WHERE EXISTS ( + SELECT 1 + FROM int4_tbl, + LATERAL (SELECT int4_tbl.f1 FROM int8_tbl LIMIT 2) ss + WHERE prt1_l.c IS NULL); + QUERY PLAN +---------------------------------------------------------- + Delete on prt1_l + Delete on prt1_l_p1 prt1_l_1 + Delete on prt1_l_p3_p1 prt1_l_2 + Delete on prt1_l_p3_p2 prt1_l_3 + -> Nested Loop Semi Join + -> Append + -> Seq Scan on prt1_l_p1 prt1_l_1 + Filter: (c IS NULL) + -> Seq Scan on prt1_l_p3_p1 prt1_l_2 + Filter: (c IS NULL) + -> Seq Scan on prt1_l_p3_p2 prt1_l_3 + Filter: (c IS NULL) + -> Materialize + -> Nested Loop + -> Seq Scan on int4_tbl + -> Subquery Scan on ss + -> Limit + -> Seq Scan on int8_tbl +(18 rows) + +-- +-- negative testcases +-- +CREATE TABLE prt1_n (a int, b int, c varchar) PARTITION BY RANGE(c); +CREATE TABLE prt1_n_p1 PARTITION OF prt1_n FOR VALUES FROM ('0000') TO ('0250'); +CREATE TABLE prt1_n_p2 PARTITION OF prt1_n FOR VALUES FROM ('0250') TO ('0500'); +INSERT INTO prt1_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 499, 2) i; +ANALYZE prt1_n; +CREATE TABLE prt2_n (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE prt2_n_p1 PARTITION OF prt2_n FOR VALUES IN ('0000', '0003', '0004', '0010', '0006', '0007'); +CREATE TABLE prt2_n_p2 PARTITION OF prt2_n FOR VALUES IN ('0001', '0005', '0002', '0009', '0008', '0011'); +INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt2_n; +CREATE TABLE prt3_n (a int, b int, c text) PARTITION BY LIST(c); +CREATE TABLE prt3_n_p1 PARTITION OF prt3_n FOR VALUES IN ('0000', '0004', '0006', '0007'); +CREATE TABLE prt3_n_p2 PARTITION OF prt3_n FOR VALUES IN ('0001', '0002', '0008', '0010'); +CREATE TABLE prt3_n_p3 PARTITION OF prt3_n FOR VALUES IN ('0003', '0005', '0009', '0011'); +INSERT INTO prt2_n SELECT i, i, to_char(i/50, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt3_n; +CREATE TABLE prt4_n (a int, b int, c text) PARTITION BY RANGE(a); +CREATE TABLE prt4_n_p1 PARTITION OF prt4_n FOR VALUES FROM (0) TO (300); +CREATE TABLE prt4_n_p2 PARTITION OF prt4_n FOR VALUES FROM (300) TO (500); +CREATE TABLE prt4_n_p3 PARTITION OF prt4_n FOR VALUES FROM (500) TO (600); +INSERT INTO prt4_n SELECT i, i, to_char(i, 'FM0000') FROM generate_series(0, 599, 2) i; +ANALYZE prt4_n; +-- partitionwise join can not be applied if the partition ranges differ +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2 WHERE t1.a = t2.a; + QUERY PLAN +---------------------------------------------- + Hash Join + Hash Cond: (t1.a = t2.a) + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Hash + -> Append + -> Seq Scan on prt4_n_p1 t2_1 + -> Seq Scan on prt4_n_p2 t2_2 + -> Seq Scan on prt4_n_p3 t2_3 +(11 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt4_n t2, prt2 t3 WHERE t1.a = t2.a and t1.a = t3.b; + QUERY PLAN +-------------------------------------------------------- + Hash Join + Hash Cond: (t2.a = t1.a) + -> Append + -> Seq Scan on prt4_n_p1 t2_1 + -> Seq Scan on prt4_n_p2 t2_2 + -> Seq Scan on prt4_n_p3 t2_3 + -> Hash + -> Append + -> Hash Join + Hash Cond: (t1_1.a = t3_1.b) + -> Seq Scan on prt1_p1 t1_1 + -> Hash + -> Seq Scan on prt2_p1 t3_1 + -> Hash Join + Hash Cond: (t1_2.a = t3_2.b) + -> Seq Scan on prt1_p2 t1_2 + -> Hash + -> Seq Scan on prt2_p2 t3_2 + -> Hash Join + Hash Cond: (t1_3.a = t3_3.b) + -> Seq Scan on prt1_p3 t1_3 + -> Hash + -> Seq Scan on prt2_p3 t3_3 +(23 rows) + +-- 
partitionwise join can not be applied if there are no equi-join conditions +-- between partition keys +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1 LEFT JOIN prt2 t2 ON (t1.a < t2.b); + QUERY PLAN +--------------------------------------------------------- + Nested Loop Left Join + -> Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 + -> Append + -> Index Scan using iprt2_p1_b on prt2_p1 t2_1 + Index Cond: (b > t1.a) + -> Index Scan using iprt2_p2_b on prt2_p2 t2_2 + Index Cond: (b > t1.a) + -> Index Scan using iprt2_p3_b on prt2_p3 t2_3 + Index Cond: (b > t1.a) +(12 rows) + +-- equi-join with join condition on partial keys does not qualify for +-- partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1, prt2_m t2 WHERE t1.a = (t2.b + t2.a)/2; + QUERY PLAN +---------------------------------------------- + Hash Join + Hash Cond: (((t2.b + t2.a) / 2) = t1.a) + -> Append + -> Seq Scan on prt2_m_p1 t2_1 + -> Seq Scan on prt2_m_p2 t2_2 + -> Seq Scan on prt2_m_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on prt1_m_p1 t1_1 + -> Seq Scan on prt1_m_p2 t1_2 + -> Seq Scan on prt1_m_p3 t1_3 +(11 rows) + +-- equi-join between out-of-order partition key columns does not qualify for +-- partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.a = t2.b; + QUERY PLAN +---------------------------------------------- + Hash Left Join + Hash Cond: (t1.a = t2.b) + -> Append + -> Seq Scan on prt1_m_p1 t1_1 + -> Seq Scan on prt1_m_p2 t1_2 + -> Seq Scan on prt1_m_p3 t1_3 + -> Hash + -> Append + -> Seq Scan on prt2_m_p1 t2_1 + -> Seq Scan on prt2_m_p2 t2_2 + -> Seq Scan on prt2_m_p3 t2_3 +(11 rows) + +-- equi-join between non-key columns does not qualify for partitionwise join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_m t1 LEFT JOIN prt2_m t2 ON t1.c = t2.c; + QUERY PLAN +---------------------------------------------- + Hash Left Join + Hash Cond: (t1.c = t2.c) + -> Append + -> Seq Scan on prt1_m_p1 t1_1 + -> Seq Scan on prt1_m_p2 t1_2 + -> Seq Scan on prt1_m_p3 t1_3 + -> Hash + -> Append + -> Seq Scan on prt2_m_p1 t2_1 + -> Seq Scan on prt2_m_p2 t2_2 + -> Seq Scan on prt2_m_p3 t2_3 +(11 rows) + +-- partitionwise join can not be applied for a join between list and range +-- partitioned tables +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 LEFT JOIN prt2_n t2 ON (t1.c = t2.c); + QUERY PLAN +---------------------------------------------- + Hash Right Join + Hash Cond: (t2.c = (t1.c)::text) + -> Append + -> Seq Scan on prt2_n_p1 t2_1 + -> Seq Scan on prt2_n_p2 t2_2 + -> Hash + -> Append + -> Seq Scan on prt1_n_p1 t1_1 + -> Seq Scan on prt1_n_p2 t1_2 +(9 rows) + +-- partitionwise join can not be applied between tables with different +-- partition lists +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 JOIN prt2_n t2 ON (t1.c = t2.c) JOIN plt1 t3 ON (t1.c = t3.c); + QUERY PLAN +---------------------------------------------------------- + Hash Join + Hash Cond: (t2.c = (t1.c)::text) + -> Append + -> Seq Scan on prt2_n_p1 t2_1 + -> Seq Scan on prt2_n_p2 t2_2 + -> Hash + -> Hash Join + Hash Cond: (t3.c = (t1.c)::text) + -> Append + -> Seq Scan on plt1_p1 t3_1 + -> Seq Scan on plt1_p2 t3_2 + -> Seq Scan on plt1_p3 t3_3 + -> Hash + -> Append + -> Seq Scan on prt1_n_p1 t1_1 + -> Seq Scan on prt1_n_p2 t1_2 +(16 rows) + +-- partitionwise join can not be applied for a join between key column and +-- non-key column +EXPLAIN 
(COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_n t1 FULL JOIN prt1 t2 ON (t1.c = t2.c); + QUERY PLAN +---------------------------------------------- + Hash Full Join + Hash Cond: ((t2.c)::text = (t1.c)::text) + -> Append + -> Seq Scan on prt1_p1 t2_1 + -> Seq Scan on prt1_p2 t2_2 + -> Seq Scan on prt1_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on prt1_n_p1 t1_1 + -> Seq Scan on prt1_n_p2 t1_2 +(10 rows) + +-- +-- Test some other plan types in a partitionwise join (unfortunately, +-- we need larger tables to get the planner to choose these plan types) +-- +create temp table prtx1 (a integer, b integer, c integer) + partition by range (a); +create temp table prtx1_1 partition of prtx1 for values from (1) to (11); +create temp table prtx1_2 partition of prtx1 for values from (11) to (21); +create temp table prtx1_3 partition of prtx1 for values from (21) to (31); +create temp table prtx2 (a integer, b integer, c integer) + partition by range (a); +create temp table prtx2_1 partition of prtx2 for values from (1) to (11); +create temp table prtx2_2 partition of prtx2 for values from (11) to (21); +create temp table prtx2_3 partition of prtx2 for values from (21) to (31); +insert into prtx1 select 1 + i%30, i, i + from generate_series(1,1000) i; +insert into prtx2 select 1 + i%30, i, i + from generate_series(1,500) i, generate_series(1,10) j; +create index on prtx2 (b); +create index on prtx2 (c); +analyze prtx1; +analyze prtx2; +explain (costs off) +select * from prtx1 +where not exists (select 1 from prtx2 + where prtx2.a=prtx1.a and prtx2.b=prtx1.b and prtx2.c=123) + and a<20 and c=120; + QUERY PLAN +------------------------------------------------------------- + Append + -> Nested Loop Anti Join + -> Seq Scan on prtx1_1 + Filter: ((a < 20) AND (c = 120)) + -> Bitmap Heap Scan on prtx2_1 + Recheck Cond: ((b = prtx1_1.b) AND (c = 123)) + Filter: (a = prtx1_1.a) + -> BitmapAnd + -> Bitmap Index Scan on prtx2_1_b_idx + Index Cond: (b = prtx1_1.b) + -> Bitmap Index Scan on prtx2_1_c_idx + Index Cond: (c = 123) + -> Nested Loop Anti Join + -> Seq Scan on prtx1_2 + Filter: ((a < 20) AND (c = 120)) + -> Bitmap Heap Scan on prtx2_2 + Recheck Cond: ((b = prtx1_2.b) AND (c = 123)) + Filter: (a = prtx1_2.a) + -> BitmapAnd + -> Bitmap Index Scan on prtx2_2_b_idx + Index Cond: (b = prtx1_2.b) + -> Bitmap Index Scan on prtx2_2_c_idx + Index Cond: (c = 123) +(23 rows) + +select * from prtx1 +where not exists (select 1 from prtx2 + where prtx2.a=prtx1.a and prtx2.b=prtx1.b and prtx2.c=123) + and a<20 and c=120; + a | b | c +---+-----+----- + 1 | 120 | 120 +(1 row) + +explain (costs off) +select * from prtx1 +where not exists (select 1 from prtx2 + where prtx2.a=prtx1.a and (prtx2.b=prtx1.b+1 or prtx2.c=99)) + and a<20 and c=91; + QUERY PLAN +----------------------------------------------------------------- + Append + -> Nested Loop Anti Join + -> Seq Scan on prtx1_1 + Filter: ((a < 20) AND (c = 91)) + -> Bitmap Heap Scan on prtx2_1 + Recheck Cond: ((b = (prtx1_1.b + 1)) OR (c = 99)) + Filter: (a = prtx1_1.a) + -> BitmapOr + -> Bitmap Index Scan on prtx2_1_b_idx + Index Cond: (b = (prtx1_1.b + 1)) + -> Bitmap Index Scan on prtx2_1_c_idx + Index Cond: (c = 99) + -> Nested Loop Anti Join + -> Seq Scan on prtx1_2 + Filter: ((a < 20) AND (c = 91)) + -> Bitmap Heap Scan on prtx2_2 + Recheck Cond: ((b = (prtx1_2.b + 1)) OR (c = 99)) + Filter: (a = prtx1_2.a) + -> BitmapOr + -> Bitmap Index Scan on prtx2_2_b_idx + Index Cond: (b = (prtx1_2.b + 1)) + -> Bitmap Index Scan on prtx2_2_c_idx + Index Cond: (c = 99) 
+(23 rows) + +select * from prtx1 +where not exists (select 1 from prtx2 + where prtx2.a=prtx1.a and (prtx2.b=prtx1.b+1 or prtx2.c=99)) + and a<20 and c=91; + a | b | c +---+----+---- + 2 | 91 | 91 +(1 row) + +-- +-- Test advanced partition-matching algorithm for partitioned join +-- +-- Tests for range-partitioned tables +CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); +CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO (200); +CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); +CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); +CREATE INDEX prt1_adv_a_idx ON prt1_adv (a); +INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; +ANALYZE prt1_adv; +CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); +CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (150); +CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (300); +CREATE TABLE prt2_adv_p3 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (500); +CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); +INSERT INTO prt2_adv_p1 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 149) i; +INSERT INTO prt2_adv_p2 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(200, 299) i; +INSERT INTO prt2_adv_p3 SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; +ANALYZE prt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 + 350 | 0350 | 350 | 0350 + 375 | 0375 | 375 | 0375 +(8 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Semi Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash Semi Join + Hash Cond: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash Semi Join + Hash Cond: (t1_3.a = t2_3.b) + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p3 t2_3 +(21 rows) + +SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 100 | 0 | 0100 + 125 | 0 | 0125 + 200 | 0 | 0200 + 225 | 0 | 0225 + 250 | 0 | 0250 + 275 | 0 | 0275 + 350 | 0 | 0350 + 375 | 0 | 0375 
+(8 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 + 150 | 0150 | | + 175 | 0175 | | + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 + 300 | 0300 | | + 325 | 0325 | | + 350 | 0350 | 350 | 0350 + 375 | 0375 | 375 | 0375 +(12 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Right Anti Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash Right Anti Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash Right Anti Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 150 | 0 | 0150 + 175 | 0 | 0175 + 300 | 0 | 0300 + 325 | 0 | 0325 +(4 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------------------------------- + Sort + Sort Key: prt1_adv.a, prt2_adv.b + -> Append + -> Hash Full Join + Hash Cond: (prt1_adv_1.a = prt2_adv_1.b) + Filter: (((175) = prt1_adv_1.a) OR ((425) = prt2_adv_1.b)) + -> Seq Scan on prt1_adv_p1 prt1_adv_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p1 prt2_adv_1 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: (prt1_adv_2.a = prt2_adv_2.b) + Filter: (((175) = prt1_adv_2.a) OR ((425) = prt2_adv_2.b)) + -> Seq Scan on prt1_adv_p2 prt1_adv_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p2 prt2_adv_2 + Filter: (a = 0) + -> Hash Full Join + Hash Cond: (prt2_adv_3.b = prt1_adv_3.a) + Filter: (((175) = prt1_adv_3.a) OR ((425) = prt2_adv_3.b)) + -> Seq Scan on prt2_adv_p3 prt2_adv_3 + Filter: (a = 0) + -> Hash + -> Seq Scan on prt1_adv_p3 prt1_adv_3 + Filter: (b = 0) +(27 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b 
ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 175 | 0175 | | + | | 425 | 0425 +(2 rows) + +-- Test cases where one side has an extra partition +CREATE TABLE prt2_adv_extra PARTITION OF prt2_adv FOR VALUES FROM (500) TO (MAXVALUE); +INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(500, 599) i; +ANALYZE prt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 + 350 | 0350 | 350 | 0350 + 375 | 0375 | 375 | 0375 +(8 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Semi Join + Hash Cond: (t1_1.a = t2_1.b) + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash Semi Join + Hash Cond: (t1_2.a = t2_2.b) + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash Semi Join + Hash Cond: (t1_3.a = t2_3.b) + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) + -> Hash + -> Seq Scan on prt2_adv_p3 t2_3 +(21 rows) + +SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 100 | 0 | 0100 + 125 | 0 | 0125 + 200 | 0 | 0200 + 225 | 0 | 0225 + 250 | 0 | 0250 + 275 | 0 | 0275 + 350 | 0 | 0350 + 375 | 0 | 0375 +(8 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Append + -> Hash Right Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 + 150 | 0150 | | + 175 | 0175 | | + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 + 300 | 
0300 | | + 325 | 0325 | | + 350 | 0350 | 350 | 0350 + 375 | 0375 | 375 | 0375 +(12 rows) + +-- left join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.b, t1.c, t2.a, t2.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: t1.b, t2.a + -> Hash Right Join + Hash Cond: (t2.a = t1.b) + -> Append + -> Seq Scan on prt1_adv_p1 t2_1 + -> Seq Scan on prt1_adv_p2 t2_2 + -> Seq Scan on prt1_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on prt2_adv_p1 t1_1 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p2 t1_2 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p3 t1_3 + Filter: (a = 0) + -> Seq Scan on prt2_adv_extra t1_4 + Filter: (a = 0) +(18 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Right Anti Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Hash Right Anti Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Hash Right Anti Join + Hash Cond: (t2_3.b = t1_3.a) + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(21 rows) + +SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + a | b | c +-----+---+------ + 150 | 0 | 0150 + 175 | 0 | 0175 + 300 | 0 | 0300 + 325 | 0 | 0325 +(4 rows) + +-- anti join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt1_adv t2 WHERE t1.b = t2.a) AND t1.a = 0 ORDER BY t1.b; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: t1.b + -> Hash Right Anti Join + Hash Cond: (t2.a = t1.b) + -> Append + -> Seq Scan on prt1_adv_p1 t2_1 + -> Seq Scan on prt1_adv_p2 t2_2 + -> Seq Scan on prt1_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on prt2_adv_p1 t1_1 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p2 t1_2 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p3 t1_3 + Filter: (a = 0) + -> Seq Scan on prt2_adv_extra t1_4 + Filter: (a = 0) +(18 rows) + +-- full join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: prt1_adv.a, prt2_adv.b + -> Hash Full Join + Hash Cond: (prt2_adv.b = prt1_adv.a) + Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) + -> Append + -> Seq Scan on prt2_adv_p1 prt2_adv_1 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p2 prt2_adv_2 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p3 prt2_adv_3 + Filter: (a = 0) + -> Seq Scan on prt2_adv_extra prt2_adv_4 + Filter: (a = 0) + -> Hash + -> Append + -> Seq Scan on prt1_adv_p1 prt1_adv_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p2 
prt1_adv_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 prt1_adv_3 + Filter: (b = 0) +(22 rows) + +-- 3-way join where not every pair of relations can do partitioned join +EXPLAIN (COSTS OFF) +SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Sort Key: t1.b, t2.a + -> Append + -> Nested Loop Left Join + -> Nested Loop + -> Seq Scan on prt2_adv_p1 t1_1 + Filter: (a = 0) + -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t3_1 + Index Cond: (a = t1_1.b) + -> Index Scan using prt1_adv_p1_a_idx on prt1_adv_p1 t2_1 + Index Cond: (a = t1_1.b) + -> Hash Right Join + Hash Cond: (t2_2.a = t1_2.b) + -> Seq Scan on prt1_adv_p2 t2_2 + -> Hash + -> Hash Join + Hash Cond: (t3_2.a = t1_2.b) + -> Seq Scan on prt1_adv_p2 t3_2 + -> Hash + -> Seq Scan on prt2_adv_p2 t1_2 + Filter: (a = 0) + -> Hash Right Join + Hash Cond: (t2_3.a = t1_3.b) + -> Seq Scan on prt1_adv_p3 t2_3 + -> Hash + -> Hash Join + Hash Cond: (t3_3.a = t1_3.b) + -> Seq Scan on prt1_adv_p3 t3_3 + -> Hash + -> Seq Scan on prt2_adv_p3 t1_3 + Filter: (a = 0) +(31 rows) + +SELECT t1.b, t1.c, t2.a, t2.c, t3.a, t3.c FROM prt2_adv t1 LEFT JOIN prt1_adv t2 ON (t1.b = t2.a) INNER JOIN prt1_adv t3 ON (t1.b = t3.a) WHERE t1.a = 0 ORDER BY t1.b, t2.a, t3.a; + b | c | a | c | a | c +-----+------+-----+------+-----+------ + 100 | 0100 | 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 | 125 | 0125 + 200 | 0200 | 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 | 275 | 0275 + 350 | 0350 | 350 | 0350 | 350 | 0350 + 375 | 0375 | 375 | 0375 | 375 | 0375 +(8 rows) + +DROP TABLE prt2_adv_extra; +-- Test cases where a partition on one side matches multiple partitions on +-- the other side; we currently can't do partitioned join in such cases +ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; +-- Split prt2_adv_p3 into two partitions so that prt1_adv_p3 matches both +CREATE TABLE prt2_adv_p3_1 PARTITION OF prt2_adv FOR VALUES FROM (350) TO (375); +CREATE TABLE prt2_adv_p3_2 PARTITION OF prt2_adv FOR VALUES FROM (375) TO (500); +INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(350, 499) i; +ANALYZE prt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_adv_p1 t2_1 + -> Seq Scan on prt2_adv_p2 t2_2 + -> Seq Scan on prt2_adv_p3_1 t2_3 + -> Seq Scan on prt2_adv_p3_2 t2_4 + -> Hash + -> Append + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(17 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1_adv t1 WHERE EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Semi Join + Hash Cond: (t1.a = t2.b) + -> Append + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) + -> Hash + -> Append + -> Seq 
Scan on prt2_adv_p1 t2_1 + -> Seq Scan on prt2_adv_p2 t2_2 + -> Seq Scan on prt2_adv_p3_1 t2_3 + -> Seq Scan on prt2_adv_p3_2 t2_4 +(17 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b + -> Hash Right Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_adv_p1 t2_1 + -> Seq Scan on prt2_adv_p2 t2_2 + -> Seq Scan on prt2_adv_p3_1 t2_3 + -> Seq Scan on prt2_adv_p3_2 t2_4 + -> Hash + -> Append + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(17 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM prt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM prt2_adv t2 WHERE t1.a = t2.b) AND t1.b = 0 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Right Anti Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_adv_p1 t2_1 + -> Seq Scan on prt2_adv_p2 t2_2 + -> Seq Scan on prt2_adv_p3_1 t2_3 + -> Seq Scan on prt2_adv_p3_2 t2_4 + -> Hash + -> Append + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 t1_3 + Filter: (b = 0) +(17 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT 175 phv, * FROM prt1_adv WHERE prt1_adv.b = 0) t1 FULL JOIN (SELECT 425 phv, * FROM prt2_adv WHERE prt2_adv.a = 0) t2 ON (t1.a = t2.b) WHERE t1.phv = t1.a OR t2.phv = t2.b ORDER BY t1.a, t2.b; + QUERY PLAN +---------------------------------------------------------------- + Sort + Sort Key: prt1_adv.a, prt2_adv.b + -> Hash Full Join + Hash Cond: (prt2_adv.b = prt1_adv.a) + Filter: (((175) = prt1_adv.a) OR ((425) = prt2_adv.b)) + -> Append + -> Seq Scan on prt2_adv_p1 prt2_adv_1 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p2 prt2_adv_2 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p3_1 prt2_adv_3 + Filter: (a = 0) + -> Seq Scan on prt2_adv_p3_2 prt2_adv_4 + Filter: (a = 0) + -> Hash + -> Append + -> Seq Scan on prt1_adv_p1 prt1_adv_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p2 prt1_adv_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 prt1_adv_3 + Filter: (b = 0) +(22 rows) + +DROP TABLE prt2_adv_p3_1; +DROP TABLE prt2_adv_p3_2; +ANALYZE prt2_adv; +-- Test default partitions +ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p1; +-- Change prt1_adv_p1 to the default partition +ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p1 DEFAULT; +ALTER TABLE prt1_adv DETACH PARTITION prt1_adv_p3; +ANALYZE prt1_adv; +-- We can do partitioned join even if only one of relations has the default +-- partition +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_2.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_2 + Filter: (b = 0) + -> Hash Join + Hash Cond: (t2_2.b = t1_1.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_1 + Filter: (b = 0) +(15 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 
100 | 0100 + 125 | 0125 | 125 | 0125 + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 +(6 rows) + +-- Restore prt1_adv_p3 +ALTER TABLE prt1_adv ATTACH PARTITION prt1_adv_p3 FOR VALUES FROM (300) TO (400); +ANALYZE prt1_adv; +-- Restore prt2_adv_p3 +ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 FOR VALUES FROM (350) TO (500); +ANALYZE prt2_adv; +-- Partitioned join can't be applied because the default partition of prt1_adv +-- matches prt2_adv_p1 and prt2_adv_p3 +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_adv_p1 t2_1 + -> Seq Scan on prt2_adv_p2 t2_2 + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on prt1_adv_p2 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p1 t1_3 + Filter: (b = 0) +(16 rows) + +ALTER TABLE prt2_adv DETACH PARTITION prt2_adv_p3; +-- Change prt2_adv_p3 to the default partition +ALTER TABLE prt2_adv ATTACH PARTITION prt2_adv_p3 DEFAULT; +ANALYZE prt2_adv; +-- Partitioned join can't be applied because the default partition of prt1_adv +-- matches prt2_adv_p1 and prt2_adv_p3 +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: (t2.b = t1.a) + -> Append + -> Seq Scan on prt2_adv_p1 t2_1 + -> Seq Scan on prt2_adv_p2 t2_2 + -> Seq Scan on prt2_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on prt1_adv_p2 t1_1 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p3 t1_2 + Filter: (b = 0) + -> Seq Scan on prt1_adv_p1 t1_3 + Filter: (b = 0) +(16 rows) + +DROP TABLE prt1_adv_p3; +ANALYZE prt1_adv; +DROP TABLE prt2_adv_p3; +ANALYZE prt2_adv; +CREATE TABLE prt3_adv (a int, b int, c varchar) PARTITION BY RANGE (a); +CREATE TABLE prt3_adv_p1 PARTITION OF prt3_adv FOR VALUES FROM (200) TO (300); +CREATE TABLE prt3_adv_p2 PARTITION OF prt3_adv FOR VALUES FROM (300) TO (400); +CREATE INDEX prt3_adv_a_idx ON prt3_adv (a); +INSERT INTO prt3_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(200, 399) i; +ANALYZE prt3_adv; +-- 3-way join to test the default partition of a join relation +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; + QUERY PLAN +------------------------------------------------------------------ + Sort + Sort Key: t1.a, t2.b, t3.a + -> Append + -> Hash Right Join + Hash Cond: (t3_1.a = t1_1.a) + -> Seq Scan on prt3_adv_p1 t3_1 + -> Hash + -> Hash Right Join + Hash Cond: (t2_2.b = t1_1.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_1 + Filter: (b = 0) + -> Hash Right Join + Hash Cond: (t3_2.a = t1_2.a) + -> Seq Scan on prt3_adv_p2 t3_2 + -> Hash + -> Hash Right Join + Hash Cond: (t2_1.b = t1_2.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_2 + Filter: (b = 0) +(23 rows) + +SELECT t1.a, t1.c, t2.b, t2.c, t3.a, t3.c FROM prt1_adv t1 LEFT JOIN prt2_adv t2 ON (t1.a = t2.b) LEFT JOIN prt3_adv t3 ON (t1.a = t3.a) WHERE t1.b = 0 ORDER BY t1.a, t2.b, t3.a; + a 
| c | b | c | a | c +-----+------+-----+------+-----+------ + 100 | 0100 | 100 | 0100 | | + 125 | 0125 | 125 | 0125 | | + 150 | 0150 | | | | + 175 | 0175 | | | | + 200 | 0200 | 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 | 275 | 0275 +(8 rows) + +DROP TABLE prt1_adv; +DROP TABLE prt2_adv; +DROP TABLE prt3_adv; +-- Test interaction of partitioned join with partition pruning +CREATE TABLE prt1_adv (a int, b int, c varchar) PARTITION BY RANGE (a); +CREATE TABLE prt1_adv_p1 PARTITION OF prt1_adv FOR VALUES FROM (100) TO (200); +CREATE TABLE prt1_adv_p2 PARTITION OF prt1_adv FOR VALUES FROM (200) TO (300); +CREATE TABLE prt1_adv_p3 PARTITION OF prt1_adv FOR VALUES FROM (300) TO (400); +CREATE INDEX prt1_adv_a_idx ON prt1_adv (a); +INSERT INTO prt1_adv SELECT i, i % 25, to_char(i, 'FM0000') FROM generate_series(100, 399) i; +ANALYZE prt1_adv; +CREATE TABLE prt2_adv (a int, b int, c varchar) PARTITION BY RANGE (b); +CREATE TABLE prt2_adv_p1 PARTITION OF prt2_adv FOR VALUES FROM (100) TO (200); +CREATE TABLE prt2_adv_p2 PARTITION OF prt2_adv FOR VALUES FROM (200) TO (400); +CREATE INDEX prt2_adv_b_idx ON prt2_adv (b); +INSERT INTO prt2_adv SELECT i % 25, i, to_char(i, 'FM0000') FROM generate_series(100, 399) i; +ANALYZE prt2_adv; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: ((a < 300) AND (b = 0)) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: ((a < 300) AND (b = 0)) +(15 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 + 150 | 0150 | 150 | 0150 + 175 | 0175 | 175 | 0175 + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 + 275 | 0275 | 275 | 0275 +(8 rows) + +DROP TABLE prt1_adv_p3; +CREATE TABLE prt1_adv_default PARTITION OF prt1_adv DEFAULT; +ANALYZE prt1_adv; +CREATE TABLE prt2_adv_default PARTITION OF prt2_adv DEFAULT; +ANALYZE prt2_adv; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; + QUERY PLAN +-------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: (t2_1.b = t1_1.a) + -> Seq Scan on prt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on prt1_adv_p1 t1_1 + Filter: ((a >= 100) AND (a < 300) AND (b = 0)) + -> Hash Join + Hash Cond: (t2_2.b = t1_2.a) + -> Seq Scan on prt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on prt1_adv_p2 t1_2 + Filter: ((a >= 100) AND (a < 300) AND (b = 0)) +(15 rows) + +SELECT t1.a, t1.c, t2.b, t2.c FROM prt1_adv t1 INNER JOIN prt2_adv t2 ON (t1.a = t2.b) WHERE t1.a >= 100 AND t1.a < 300 AND t1.b = 0 ORDER BY t1.a, t2.b; + a | c | b | c +-----+------+-----+------ + 100 | 0100 | 100 | 0100 + 125 | 0125 | 125 | 0125 + 150 | 0150 | 150 | 0150 + 175 | 0175 | 175 | 0175 + 200 | 0200 | 200 | 0200 + 225 | 0225 | 225 | 0225 + 250 | 0250 | 250 | 0250 
+ 275 | 0275 | 275 | 0275 +(8 rows) + +DROP TABLE prt1_adv; +DROP TABLE prt2_adv; +-- Tests for list-partitioned tables +CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001', '0003'); +CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0004', '0006'); +CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0008', '0009'); +INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); +ANALYZE plt1_adv; +CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002', '0003'); +CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0006'); +CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0007', '0009'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +ANALYZE plt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 9 | 0009 | 9 | 0009 +(4 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop Semi Join + Join Filter: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Nested Loop Semi Join + Join Filter: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Nested Loop Semi Join + Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p3 t2_3 +(18 rows) + +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + a | b | c +---+---+------ + 3 | 3 | 0003 + 4 | 4 | 0004 + 6 | 6 | 0006 + 9 | 9 | 0009 +(4 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 
t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 0009 +(6 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Anti Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Right Anti Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Anti Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + a | b | c +---+---+------ + 1 | 1 | 0001 + 8 | 8 | 0008 +(2 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.a + -> Append + -> Hash Full Join + Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) + Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) + -> Seq Scan on plt1_adv_p1 t1_1 + -> Hash + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash Full Join + Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) + -> Seq Scan on plt1_adv_p2 t1_2 + -> Hash + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash Full Join + Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) + Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) + -> Seq Scan on plt1_adv_p3 t1_3 + -> Hash + -> Seq Scan on plt2_adv_p3 t2_3 +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + a | c | a | c +---+------+---+------ + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 0009 + | | 2 | 0002 + | | 7 | 0007 +(8 rows) + +-- Test cases where one side has an extra partition +CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN ('0000'); +INSERT INTO plt2_adv_extra VALUES (0, 0, '0000'); +ANALYZE plt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER 
BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 9 | 0009 | 9 | 0009 +(4 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Nested Loop Semi Join + Join Filter: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Nested Loop Semi Join + Join Filter: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Nested Loop Semi Join + Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p3 t2_3 +(18 rows) + +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + a | b | c +---+---+------ + 3 | 3 | 0003 + 4 | 4 | 0004 + 6 | 6 | 0006 + 9 | 9 | 0009 +(4 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 0009 +(6 rows) + +-- left join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt2_adv t1 LEFT JOIN plt1_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Right Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on 
plt1_adv_p1 t2_1 + -> Seq Scan on plt1_adv_p2 t2_2 + -> Seq Scan on plt1_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on plt2_adv_extra t1_1 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p1 t1_2 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p2 t1_3 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p3 t1_4 + Filter: (b < 10) +(18 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Anti Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Right Anti Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Anti Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + a | b | c +---+---+------ + 1 | 1 | 0001 + 8 | 8 | 0008 +(2 rows) + +-- anti join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt2_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt1_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Right Anti Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt1_adv_p1 t2_1 + -> Seq Scan on plt1_adv_p2 t2_2 + -> Seq Scan on plt1_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on plt2_adv_extra t1_1 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p1 t1_2 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p2 t1_3 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p3 t1_4 + Filter: (b < 10) +(18 rows) + +-- full join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + QUERY PLAN +------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.a + -> Hash Full Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) + -> Append + -> Seq Scan on plt2_adv_extra t2_1 + -> Seq Scan on plt2_adv_p1 t2_2 + -> Seq Scan on plt2_adv_p2 t2_3 + -> Seq Scan on plt2_adv_p3 t2_4 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + -> Seq Scan on plt1_adv_p2 t1_2 + -> Seq Scan on plt1_adv_p3 t1_3 +(15 rows) + +DROP TABLE plt2_adv_extra; +-- Test cases where a partition on one side matches multiple partitions on +-- the other side; we currently can't do partitioned join in such cases +ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; +-- Split plt2_adv_p2 into two partitions so that plt1_adv_p2 matches both +CREATE TABLE plt2_adv_p2_1 PARTITION OF plt2_adv FOR VALUES IN ('0004'); +CREATE TABLE plt2_adv_p2_2 PARTITION OF plt2_adv FOR VALUES IN ('0006'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') 
FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6); +ANALYZE plt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_1 t2_2 + -> Seq Scan on plt2_adv_p2_2 t2_3 + -> Seq Scan on plt2_adv_p3 t2_4 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(17 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Semi Join + Hash Cond: ((t1.a = t2.a) AND (t1.c = t2.c)) + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Hash + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_1 t2_2 + -> Seq Scan on plt2_adv_p2_2 t2_3 + -> Seq Scan on plt2_adv_p3 t2_4 +(17 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Right Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_1 t2_2 + -> Seq Scan on plt2_adv_p2_2 t2_3 + -> Seq Scan on plt2_adv_p3 t2_4 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(17 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Right Anti Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_1 t2_2 + -> Seq Scan on plt2_adv_p2_2 t2_3 + -> Seq Scan on plt2_adv_p3 t2_4 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(17 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + QUERY PLAN +------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.a + -> Hash Full Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_1 t2_2 + -> Seq Scan on plt2_adv_p2_2 t2_3 + -> Seq Scan on plt2_adv_p3 t2_4 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + -> Seq Scan on plt1_adv_p2 t1_2 + -> Seq Scan on plt1_adv_p3 t1_3 +(15 rows) + +DROP TABLE 
plt2_adv_p2_1; +DROP TABLE plt2_adv_p2_2; +-- Restore plt2_adv_p2 +ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); +-- Test NULL partitions +ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; +-- Change plt1_adv_p1 to the NULL partition +CREATE TABLE plt1_adv_p1_null PARTITION OF plt1_adv FOR VALUES IN (NULL, '0001', '0003'); +INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); +INSERT INTO plt1_adv VALUES (-1, -1, NULL); +ANALYZE plt1_adv; +ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p3; +-- Change plt2_adv_p3 to the NULL partition +CREATE TABLE plt2_adv_p3_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0007', '0009'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (7, 9); +INSERT INTO plt2_adv VALUES (-1, -1, NULL); +ANALYZE plt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1_null t1_1 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3_null t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 9 | 0009 | 9 | 0009 +(4 rows) + +-- semi join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Semi Join + Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) + -> Seq Scan on plt1_adv_p1_null t1_1 + Filter: (b < 10) + -> Hash + -> Seq Scan on plt2_adv_p1 t2_1 + -> Nested Loop Semi Join + Join Filter: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Nested Loop Semi Join + Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Seq Scan on plt2_adv_p3_null t2_3 +(19 rows) + +SELECT t1.* FROM plt1_adv t1 WHERE EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + a | b | c +---+---+------ + 3 | 3 | 0003 + 4 | 4 | 0004 + 6 | 6 | 0006 + 9 | 9 | 0009 +(4 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1_null t1_1 + Filter: (b < 10) + -> Hash Right Join + Hash 
Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3_null t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +----+------+---+------ + -1 | | | + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 0009 +(7 rows) + +-- anti join +EXPLAIN (COSTS OFF) +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Anti Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1_null t1_1 + Filter: (b < 10) + -> Hash Right Anti Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Anti Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3_null t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.* FROM plt1_adv t1 WHERE NOT EXISTS (SELECT 1 FROM plt2_adv t2 WHERE t1.a = t2.a AND t1.c = t2.c) AND t1.b < 10 ORDER BY t1.a; + a | b | c +----+----+------ + -1 | -1 | + 1 | 1 | 0001 + 8 | 8 | 0008 +(3 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.a + -> Append + -> Hash Full Join + Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) + Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) + -> Seq Scan on plt1_adv_p1_null t1_1 + -> Hash + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash Full Join + Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) + -> Seq Scan on plt1_adv_p2 t1_2 + -> Hash + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash Full Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) + -> Seq Scan on plt2_adv_p3_null t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + a | c | a | c +----+------+----+------ + -1 | | | + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 0009 + | | -1 | + | | 2 | 0002 + | | 7 | 0007 +(10 rows) + +DROP TABLE plt1_adv_p1_null; +-- Restore plt1_adv_p1 +ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 FOR VALUES IN ('0001', '0003'); +-- Add to plt1_adv the extra NULL partition containing only NULL values as the +-- key values +CREATE TABLE plt1_adv_extra PARTITION OF plt1_adv FOR VALUES IN (NULL); +INSERT INTO plt1_adv VALUES (-1, -1, NULL); +ANALYZE plt1_adv; +DROP TABLE 
plt2_adv_p3_null; +-- Restore plt2_adv_p3 +ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p3 FOR VALUES IN ('0007', '0009'); +ANALYZE plt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 9 | 0009 | 9 | 0009 +(4 rows) + +-- left join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Right Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2 t2_2 + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Seq Scan on plt1_adv_extra t1_4 + Filter: (b < 10) +(18 rows) + +-- full join; currently we can't do partitioned join if there are no matched +-- partitions on the nullable side +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + QUERY PLAN +------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.a + -> Hash Full Join + Hash Cond: ((t1.a = t2.a) AND (t1.c = t2.c)) + Filter: ((COALESCE(t1.b, 0) < 10) AND (COALESCE(t2.b, 0) < 10)) + -> Append + -> Seq Scan on plt1_adv_p1 t1_1 + -> Seq Scan on plt1_adv_p2 t1_2 + -> Seq Scan on plt1_adv_p3 t1_3 + -> Seq Scan on plt1_adv_extra t1_4 + -> Hash + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2 t2_2 + -> Seq Scan on plt2_adv_p3 t2_3 +(15 rows) + +-- Add to plt2_adv the extra NULL partition containing only NULL values as the +-- key values +CREATE TABLE plt2_adv_extra PARTITION OF plt2_adv FOR VALUES IN (NULL); +INSERT INTO plt2_adv VALUES (-1, -1, NULL); +ANALYZE plt2_adv; +-- inner join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: 
(b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) +(21 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 9 | 0009 | 9 | 0009 +(4 rows) + +-- left join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +---------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Nested Loop Left Join + Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) + -> Seq Scan on plt1_adv_extra t1_4 + Filter: (b < 10) + -> Seq Scan on plt2_adv_extra t2_4 +(26 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +----+------+---+------ + -1 | | | + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 0009 +(7 rows) + +-- full join +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + QUERY PLAN +----------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t2.a + -> Append + -> Hash Full Join + Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.c = t2_1.c)) + Filter: ((COALESCE(t1_1.b, 0) < 10) AND (COALESCE(t2_1.b, 0) < 10)) + -> Seq Scan on plt1_adv_p1 t1_1 + -> Hash + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash Full Join + Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + Filter: ((COALESCE(t1_2.b, 0) < 10) AND (COALESCE(t2_2.b, 0) < 10)) + -> Seq Scan on plt1_adv_p2 t1_2 + -> Hash + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash Full Join + Hash Cond: ((t1_3.a = t2_3.a) AND (t1_3.c = t2_3.c)) + Filter: ((COALESCE(t1_3.b, 0) < 10) AND (COALESCE(t2_3.b, 0) < 10)) + -> Seq Scan on plt1_adv_p3 t1_3 + -> Hash + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash Full Join + Hash Cond: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) + Filter: ((COALESCE(t1_4.b, 0) < 10) AND (COALESCE(t2_4.b, 0) < 10)) + -> Seq Scan on plt1_adv_extra t1_4 + -> Hash + -> Seq Scan on plt2_adv_extra t2_4 +(27 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 FULL JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE coalesce(t1.b, 0) < 10 AND coalesce(t2.b, 0) < 10 ORDER BY t1.a, t2.a; + a | c | a | c +----+------+----+------ + -1 | | | + 1 | 0001 | | + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 + 8 | 0008 | | + 9 | 0009 | 9 | 
0009 + | | -1 | + | | 2 | 0002 + | | 7 | 0007 +(10 rows) + +-- 3-way join to test the NULL partition of a join relation +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Join + Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) + -> Seq Scan on plt1_adv_p1 t3_1 + -> Hash + -> Hash Right Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_1 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) + -> Seq Scan on plt1_adv_p2 t3_2 + -> Hash + -> Hash Right Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_2 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t3_3.a = t1_3.a) AND (t3_3.c = t1_3.c)) + -> Seq Scan on plt1_adv_p3 t3_3 + -> Hash + -> Hash Right Join + Hash Cond: ((t2_3.a = t1_3.a) AND (t2_3.c = t1_3.c)) + -> Seq Scan on plt2_adv_p3 t2_3 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_3 + Filter: (b < 10) + -> Nested Loop Left Join + Join Filter: ((t1_4.a = t3_4.a) AND (t1_4.c = t3_4.c)) + -> Nested Loop Left Join + Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) + -> Seq Scan on plt1_adv_extra t1_4 + Filter: (b < 10) + -> Seq Scan on plt2_adv_extra t2_4 + -> Seq Scan on plt1_adv_extra t3_4 +(41 rows) + +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt1_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c | a | c +----+------+---+------+---+------ + -1 | | | | | + 1 | 0001 | | | 1 | 0001 + 3 | 0003 | 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 | 6 | 0006 + 8 | 0008 | | | 8 | 0008 + 9 | 0009 | 9 | 0009 | 9 | 0009 +(7 rows) + +DROP TABLE plt1_adv_extra; +DROP TABLE plt2_adv_extra; +-- Test default partitions +ALTER TABLE plt1_adv DETACH PARTITION plt1_adv_p1; +-- Change plt1_adv_p1 to the default partition +ALTER TABLE plt1_adv ATTACH PARTITION plt1_adv_p1 DEFAULT; +DROP TABLE plt1_adv_p3; +ANALYZE plt1_adv; +DROP TABLE plt2_adv_p3; +ANALYZE plt2_adv; +-- We can do partitioned join even if only one of relations has the default +-- partition +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_1 + Filter: (b < 10) +(15 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 +(3 rows) + +ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2; +-- Change plt2_adv_p2 to contain '0005' in addition to '0004' and 
'0006' as +-- the key values +CREATE TABLE plt2_adv_p2_ext PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005', '0006'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 5, 6); +ANALYZE plt2_adv; +-- Partitioned join can't be applied because the default partition of plt1_adv +-- matches plt2_adv_p1 and plt2_adv_p2_ext +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_ext t2_2 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p2 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p1 t1_2 + Filter: (b < 10) +(13 rows) + +ALTER TABLE plt2_adv DETACH PARTITION plt2_adv_p2_ext; +-- Change plt2_adv_p2_ext to the default partition +ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2_ext DEFAULT; +ANALYZE plt2_adv; +-- Partitioned join can't be applied because the default partition of plt1_adv +-- matches plt2_adv_p1 and plt2_adv_p2_ext +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Append + -> Seq Scan on plt2_adv_p1 t2_1 + -> Seq Scan on plt2_adv_p2_ext t2_2 + -> Hash + -> Append + -> Seq Scan on plt1_adv_p2 t1_1 + Filter: (b < 10) + -> Seq Scan on plt1_adv_p1 t1_2 + Filter: (b < 10) +(13 rows) + +DROP TABLE plt2_adv_p2_ext; +-- Restore plt2_adv_p2 +ALTER TABLE plt2_adv ATTACH PARTITION plt2_adv_p2 FOR VALUES IN ('0004', '0006'); +ANALYZE plt2_adv; +CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0004', '0006'); +CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0007', '0009'); +INSERT INTO plt3_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (4, 6, 7, 9); +ANALYZE plt3_adv; +-- 3-way join to test the default partition of a join relation +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Right Join + Hash Cond: ((t3_1.a = t1_1.a) AND (t3_1.c = t1_1.c)) + -> Seq Scan on plt3_adv_p1 t3_1 + -> Hash + -> Hash Right Join + Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_1 + Filter: (b < 10) + -> Hash Right Join + Hash Cond: ((t3_2.a = t1_2.a) AND (t3_2.c = t1_2.c)) + -> Seq Scan on plt3_adv_p2 t3_2 + -> Hash + -> Hash Right Join + Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_2 + Filter: (b < 10) +(23 rows) + +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) LEFT JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c | a | c 
+---+------+---+------+---+------ + 1 | 0001 | | | | + 3 | 0003 | 3 | 0003 | | + 4 | 0004 | 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 | 6 | 0006 +(4 rows) + +-- Test cases where one side has the default partition while the other side +-- has the NULL partition +DROP TABLE plt2_adv_p1; +-- Add the NULL partition to plt2_adv +CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL, '0001', '0003'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 3); +INSERT INTO plt2_adv VALUES (-1, -1, NULL); +ANALYZE plt2_adv; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_2.a) AND (t2_1.c = t1_2.c)) + -> Seq Scan on plt2_adv_p1_null t2_1 + -> Hash + -> Seq Scan on plt1_adv_p1 t1_2 + Filter: (b < 10) + -> Hash Join + Hash Cond: ((t2_2.a = t1_1.a) AND (t2_2.c = t1_1.c)) + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1_1 + Filter: (b < 10) +(15 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 1 | 0001 | 1 | 0001 + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 +(4 rows) + +DROP TABLE plt2_adv_p1_null; +-- Add the NULL partition that contains only NULL values as the key values +CREATE TABLE plt2_adv_p1_null PARTITION OF plt2_adv FOR VALUES IN (NULL); +INSERT INTO plt2_adv VALUES (-1, -1, NULL); +ANALYZE plt2_adv; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + QUERY PLAN +------------------------------------------------------ + Sort + Sort Key: t1.a + -> Hash Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Seq Scan on plt2_adv_p2 t2 + -> Hash + -> Seq Scan on plt1_adv_p2 t1 + Filter: (b < 10) +(8 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 4 | 0004 | 4 | 0004 + 6 | 0006 | 6 | 0006 +(2 rows) + +DROP TABLE plt1_adv; +DROP TABLE plt2_adv; +DROP TABLE plt3_adv; +-- Test interaction of partitioned join with partition pruning +CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0001'); +CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0002'); +CREATE TABLE plt1_adv_p3 PARTITION OF plt1_adv FOR VALUES IN ('0003'); +CREATE TABLE plt1_adv_p4 PARTITION OF plt1_adv FOR VALUES IN (NULL, '0004', '0005'); +INSERT INTO plt1_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 IN (1, 2, 3, 4, 5); +INSERT INTO plt1_adv VALUES (-1, -1, NULL); +ANALYZE plt1_adv; +CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0001', '0002'); +CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN (NULL); +CREATE TABLE plt2_adv_p3 PARTITION OF plt2_adv FOR VALUES IN ('0003'); +CREATE TABLE plt2_adv_p4 PARTITION OF plt2_adv FOR VALUES IN ('0004', '0005'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 10, 'FM0000') FROM generate_series(1, 299) i WHERE i % 10 
IN (1, 2, 3, 4, 5); +INSERT INTO plt2_adv VALUES (-1, -1, NULL); +ANALYZE plt2_adv; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +----------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p3 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_1 + Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p4 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p4 t1_2 + Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) +(15 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 5 | 0005 | 5 | 0005 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Right Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Seq Scan on plt2_adv_p4 t2 + -> Hash + -> Seq Scan on plt1_adv_p4 t1 + Filter: ((c IS NULL) AND (b < 10)) +(8 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; + a | c | a | c +----+---+---+--- + -1 | | | +(1 row) + +CREATE TABLE plt1_adv_default PARTITION OF plt1_adv DEFAULT; +ANALYZE plt1_adv; +CREATE TABLE plt2_adv_default PARTITION OF plt2_adv DEFAULT; +ANALYZE plt2_adv; +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +----------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a + -> Append + -> Hash Join + Hash Cond: ((t2_1.a = t1_1.a) AND (t2_1.c = t1_1.c)) + -> Seq Scan on plt2_adv_p3 t2_1 + -> Hash + -> Seq Scan on plt1_adv_p3 t1_1 + Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.c = t1_2.c)) + -> Seq Scan on plt2_adv_p4 t2_2 + -> Hash + -> Seq Scan on plt1_adv_p4 t1_2 + Filter: ((b < 10) AND (c = ANY ('{0003,0004,0005}'::text[]))) +(15 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 INNER JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IN ('0003', '0004', '0005') AND t1.b < 10 ORDER BY t1.a; + a | c | a | c +---+------+---+------ + 3 | 0003 | 3 | 0003 + 4 | 0004 | 4 | 0004 + 5 | 0005 | 5 | 0005 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: t1.a + -> Hash Right Join + Hash Cond: ((t2.a = t1.a) AND (t2.c = t1.c)) + -> Seq Scan on plt2_adv_p4 t2 + -> Hash + -> Seq Scan on plt1_adv_p4 t1 + Filter: ((c IS NULL) AND (b < 10)) +(8 rows) + +SELECT t1.a, t1.c, t2.a, t2.c FROM plt1_adv t1 LEFT JOIN plt2_adv 
t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE t1.c IS NULL AND t1.b < 10 ORDER BY t1.a; + a | c | a | c +----+---+---+--- + -1 | | | +(1 row) + +DROP TABLE plt1_adv; +DROP TABLE plt2_adv; +-- Test the process_outer_partition() code path +CREATE TABLE plt1_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt1_adv_p1 PARTITION OF plt1_adv FOR VALUES IN ('0000', '0001', '0002'); +CREATE TABLE plt1_adv_p2 PARTITION OF plt1_adv FOR VALUES IN ('0003', '0004'); +INSERT INTO plt1_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i; +ANALYZE plt1_adv; +CREATE TABLE plt2_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt2_adv_p1 PARTITION OF plt2_adv FOR VALUES IN ('0002'); +CREATE TABLE plt2_adv_p2 PARTITION OF plt2_adv FOR VALUES IN ('0003', '0004'); +INSERT INTO plt2_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (2, 3, 4); +ANALYZE plt2_adv; +CREATE TABLE plt3_adv (a int, b int, c text) PARTITION BY LIST (c); +CREATE TABLE plt3_adv_p1 PARTITION OF plt3_adv FOR VALUES IN ('0001'); +CREATE TABLE plt3_adv_p2 PARTITION OF plt3_adv FOR VALUES IN ('0003', '0004'); +INSERT INTO plt3_adv SELECT i, i, to_char(i % 5, 'FM0000') FROM generate_series(0, 24) i WHERE i % 5 IN (1, 3, 4); +ANALYZE plt3_adv; +-- This tests that when merging partitions from plt1_adv and plt2_adv in +-- merge_list_bounds(), process_outer_partition() returns an already-assigned +-- merged partition when re-called with plt1_adv_p1 for the second list value +-- '0001' of that partition +EXPLAIN (COSTS OFF) +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Sort + Sort Key: t1.c, t1.a, t2.a, t3.a + -> Append + -> Hash Full Join + Hash Cond: (t1_1.c = t3_1.c) + Filter: (((COALESCE(t1_1.a, 0) % 5) <> 3) AND ((COALESCE(t1_1.a, 0) % 5) <> 4)) + -> Hash Left Join + Hash Cond: (t1_1.c = t2_1.c) + -> Seq Scan on plt1_adv_p1 t1_1 + -> Hash + -> Seq Scan on plt2_adv_p1 t2_1 + -> Hash + -> Seq Scan on plt3_adv_p1 t3_1 + -> Hash Full Join + Hash Cond: (t1_2.c = t3_2.c) + Filter: (((COALESCE(t1_2.a, 0) % 5) <> 3) AND ((COALESCE(t1_2.a, 0) % 5) <> 4)) + -> Hash Left Join + Hash Cond: (t1_2.c = t2_2.c) + -> Seq Scan on plt1_adv_p2 t1_2 + -> Hash + -> Seq Scan on plt2_adv_p2 t2_2 + -> Hash + -> Seq Scan on plt3_adv_p2 t3_2 +(23 rows) + +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; + a | c | a | c | a | c +----+------+----+------+----+------ + 0 | 0000 | | | | + 5 | 0000 | | | | + 10 | 0000 | | | | + 15 | 0000 | | | | + 20 | 0000 | | | | + 1 | 0001 | | | 1 | 0001 + 1 | 0001 | | | 6 | 0001 + 1 | 0001 | | | 11 | 0001 + 1 | 0001 | | | 16 | 0001 + 1 | 0001 | | | 21 | 0001 + 6 | 0001 | | | 1 | 0001 + 6 | 0001 | | | 6 | 0001 + 6 | 0001 | | | 11 | 0001 + 6 | 0001 | | | 16 | 0001 + 6 | 0001 | | | 21 | 0001 + 11 | 0001 | | | 1 | 0001 + 11 | 0001 | | | 6 | 0001 + 11 | 0001 | | | 11 | 0001 + 11 | 0001 | | | 16 | 0001 + 11 | 0001 | | | 21 | 0001 + 16 | 0001 | | | 1 | 0001 + 16 | 0001 | | | 6 | 0001 + 16 | 0001 | | | 11 | 0001 + 16 | 0001 | | | 16 | 0001 + 16 | 0001 | | | 21 | 0001 + 21 | 0001 | | 
| 1 | 0001 + 21 | 0001 | | | 6 | 0001 + 21 | 0001 | | | 11 | 0001 + 21 | 0001 | | | 16 | 0001 + 21 | 0001 | | | 21 | 0001 + 2 | 0002 | 2 | 0002 | | + 2 | 0002 | 7 | 0002 | | + 2 | 0002 | 12 | 0002 | | + 2 | 0002 | 17 | 0002 | | + 2 | 0002 | 22 | 0002 | | + 7 | 0002 | 2 | 0002 | | + 7 | 0002 | 7 | 0002 | | + 7 | 0002 | 12 | 0002 | | + 7 | 0002 | 17 | 0002 | | + 7 | 0002 | 22 | 0002 | | + 12 | 0002 | 2 | 0002 | | + 12 | 0002 | 7 | 0002 | | + 12 | 0002 | 12 | 0002 | | + 12 | 0002 | 17 | 0002 | | + 12 | 0002 | 22 | 0002 | | + 17 | 0002 | 2 | 0002 | | + 17 | 0002 | 7 | 0002 | | + 17 | 0002 | 12 | 0002 | | + 17 | 0002 | 17 | 0002 | | + 17 | 0002 | 22 | 0002 | | + 22 | 0002 | 2 | 0002 | | + 22 | 0002 | 7 | 0002 | | + 22 | 0002 | 12 | 0002 | | + 22 | 0002 | 17 | 0002 | | + 22 | 0002 | 22 | 0002 | | +(55 rows) + +DROP TABLE plt1_adv; +DROP TABLE plt2_adv; +DROP TABLE plt3_adv; +-- Tests for multi-level partitioned tables +CREATE TABLE alpha (a double precision, b int, c text) PARTITION BY RANGE (a); +CREATE TABLE alpha_neg PARTITION OF alpha FOR VALUES FROM ('-Infinity') TO (0) PARTITION BY RANGE (b); +CREATE TABLE alpha_pos PARTITION OF alpha FOR VALUES FROM (0) TO (10.0) PARTITION BY LIST (c); +CREATE TABLE alpha_neg_p1 PARTITION OF alpha_neg FOR VALUES FROM (100) TO (200); +CREATE TABLE alpha_neg_p2 PARTITION OF alpha_neg FOR VALUES FROM (200) TO (300); +CREATE TABLE alpha_neg_p3 PARTITION OF alpha_neg FOR VALUES FROM (300) TO (400); +CREATE TABLE alpha_pos_p1 PARTITION OF alpha_pos FOR VALUES IN ('0001', '0003'); +CREATE TABLE alpha_pos_p2 PARTITION OF alpha_pos FOR VALUES IN ('0004', '0006'); +CREATE TABLE alpha_pos_p3 PARTITION OF alpha_pos FOR VALUES IN ('0008', '0009'); +INSERT INTO alpha_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); +INSERT INTO alpha_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 399) i WHERE i % 10 IN (1, 3, 4, 6, 8, 9); +ANALYZE alpha; +CREATE TABLE beta (a double precision, b int, c text) PARTITION BY RANGE (a); +CREATE TABLE beta_neg PARTITION OF beta FOR VALUES FROM (-10.0) TO (0) PARTITION BY RANGE (b); +CREATE TABLE beta_pos PARTITION OF beta FOR VALUES FROM (0) TO ('Infinity') PARTITION BY LIST (c); +CREATE TABLE beta_neg_p1 PARTITION OF beta_neg FOR VALUES FROM (100) TO (150); +CREATE TABLE beta_neg_p2 PARTITION OF beta_neg FOR VALUES FROM (200) TO (300); +CREATE TABLE beta_neg_p3 PARTITION OF beta_neg FOR VALUES FROM (350) TO (500); +CREATE TABLE beta_pos_p1 PARTITION OF beta_pos FOR VALUES IN ('0002', '0003'); +CREATE TABLE beta_pos_p2 PARTITION OF beta_pos FOR VALUES IN ('0004', '0006'); +CREATE TABLE beta_pos_p3 PARTITION OF beta_pos FOR VALUES IN ('0007', '0009'); +INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +INSERT INTO beta_neg SELECT -1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(100, 149) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(200, 299) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +INSERT INTO beta_pos SELECT 1.0, i, to_char(i % 10, 'FM0000') FROM generate_series(350, 499) i WHERE i % 10 IN (2, 3, 4, 6, 7, 9); +ANALYZE beta; +EXPLAIN 
(COSTS OFF) +SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: t1.a, t1.b + -> Append + -> Hash Join + Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b)) + -> Seq Scan on alpha_neg_p1 t1_1 + Filter: ((b >= 125) AND (b < 225)) + -> Hash + -> Seq Scan on beta_neg_p1 t2_1 + -> Hash Join + Hash Cond: ((t2_2.a = t1_2.a) AND (t2_2.b = t1_2.b)) + -> Seq Scan on beta_neg_p2 t2_2 + -> Hash + -> Seq Scan on alpha_neg_p2 t1_2 + Filter: ((b >= 125) AND (b < 225)) + -> Hash Join + Hash Cond: ((t2_4.a = t1_4.a) AND (t2_4.b = t1_4.b)) + -> Append + -> Seq Scan on beta_pos_p1 t2_4 + -> Seq Scan on beta_pos_p2 t2_5 + -> Seq Scan on beta_pos_p3 t2_6 + -> Hash + -> Append + -> Seq Scan on alpha_pos_p1 t1_4 + Filter: ((b >= 125) AND (b < 225)) + -> Seq Scan on alpha_pos_p2 t1_5 + Filter: ((b >= 125) AND (b < 225)) + -> Seq Scan on alpha_pos_p3 t1_6 + Filter: ((b >= 125) AND (b < 225)) +(29 rows) + +SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b) WHERE t1.b >= 125 AND t1.b < 225 ORDER BY t1.a, t1.b; + a | b | c | a | b | c +----+-----+------+----+-----+------ + -1 | 126 | 0006 | -1 | 126 | 0006 + -1 | 129 | 0009 | -1 | 129 | 0009 + -1 | 133 | 0003 | -1 | 133 | 0003 + -1 | 134 | 0004 | -1 | 134 | 0004 + -1 | 136 | 0006 | -1 | 136 | 0006 + -1 | 139 | 0009 | -1 | 139 | 0009 + -1 | 143 | 0003 | -1 | 143 | 0003 + -1 | 144 | 0004 | -1 | 144 | 0004 + -1 | 146 | 0006 | -1 | 146 | 0006 + -1 | 149 | 0009 | -1 | 149 | 0009 + -1 | 203 | 0003 | -1 | 203 | 0003 + -1 | 204 | 0004 | -1 | 204 | 0004 + -1 | 206 | 0006 | -1 | 206 | 0006 + -1 | 209 | 0009 | -1 | 209 | 0009 + -1 | 213 | 0003 | -1 | 213 | 0003 + -1 | 214 | 0004 | -1 | 214 | 0004 + -1 | 216 | 0006 | -1 | 216 | 0006 + -1 | 219 | 0009 | -1 | 219 | 0009 + -1 | 223 | 0003 | -1 | 223 | 0003 + -1 | 224 | 0004 | -1 | 224 | 0004 + 1 | 126 | 0006 | 1 | 126 | 0006 + 1 | 129 | 0009 | 1 | 129 | 0009 + 1 | 133 | 0003 | 1 | 133 | 0003 + 1 | 134 | 0004 | 1 | 134 | 0004 + 1 | 136 | 0006 | 1 | 136 | 0006 + 1 | 139 | 0009 | 1 | 139 | 0009 + 1 | 143 | 0003 | 1 | 143 | 0003 + 1 | 144 | 0004 | 1 | 144 | 0004 + 1 | 146 | 0006 | 1 | 146 | 0006 + 1 | 149 | 0009 | 1 | 149 | 0009 + 1 | 203 | 0003 | 1 | 203 | 0003 + 1 | 204 | 0004 | 1 | 204 | 0004 + 1 | 206 | 0006 | 1 | 206 | 0006 + 1 | 209 | 0009 | 1 | 209 | 0009 + 1 | 213 | 0003 | 1 | 213 | 0003 + 1 | 214 | 0004 | 1 | 214 | 0004 + 1 | 216 | 0006 | 1 | 216 | 0006 + 1 | 219 | 0009 | 1 | 219 | 0009 + 1 | 223 | 0003 | 1 | 223 | 0003 + 1 | 224 | 0004 | 1 | 224 | 0004 +(40 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t1.b, t2.b + -> Append + -> Hash Join + Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.c = t2_2.c)) + -> Append + -> Seq Scan on alpha_neg_p1 t1_2 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) + -> Seq Scan on alpha_neg_p2 t1_3 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 
210)))) + -> Hash + -> Append + -> Seq Scan on beta_neg_p1 t2_2 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) + -> Seq Scan on beta_neg_p2 t2_3 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) + -> Nested Loop + Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.c = t2_4.c)) + -> Seq Scan on alpha_pos_p2 t1_4 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) + -> Seq Scan on beta_pos_p2 t2_4 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) + -> Nested Loop + Join Filter: ((t1_5.a = t2_5.a) AND (t1_5.c = t2_5.c)) + -> Seq Scan on alpha_pos_p3 t1_5 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) + -> Seq Scan on beta_pos_p3 t2_5 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) +(28 rows) + +SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b, t2.b; + a | b | c | a | b | c +----+-----+------+----+-----+------ + -1 | 104 | 0004 | -1 | 104 | 0004 + -1 | 104 | 0004 | -1 | 204 | 0004 + -1 | 109 | 0009 | -1 | 109 | 0009 + -1 | 109 | 0009 | -1 | 209 | 0009 + -1 | 204 | 0004 | -1 | 104 | 0004 + -1 | 204 | 0004 | -1 | 204 | 0004 + -1 | 209 | 0009 | -1 | 109 | 0009 + -1 | 209 | 0009 | -1 | 209 | 0009 + 1 | 104 | 0004 | 1 | 104 | 0004 + 1 | 104 | 0004 | 1 | 204 | 0004 + 1 | 109 | 0009 | 1 | 109 | 0009 + 1 | 109 | 0009 | 1 | 209 | 0009 + 1 | 204 | 0004 | 1 | 104 | 0004 + 1 | 204 | 0004 | 1 | 204 | 0004 + 1 | 209 | 0009 | 1 | 109 | 0009 + 1 | 209 | 0009 | 1 | 209 | 0009 +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: t1.a, t1.b + -> Append + -> Hash Join + Hash Cond: ((t1_1.a = t2_1.a) AND (t1_1.b = t2_1.b) AND (t1_1.c = t2_1.c)) + -> Seq Scan on alpha_neg_p1 t1_1 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) + -> Hash + -> Seq Scan on beta_neg_p1 t2_1 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) + -> Hash Join + Hash Cond: ((t1_2.a = t2_2.a) AND (t1_2.b = t2_2.b) AND (t1_2.c = t2_2.c)) + -> Seq Scan on alpha_neg_p2 t1_2 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) + -> Hash + -> Seq Scan on beta_neg_p2 t2_2 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) + -> Nested Loop + Join Filter: ((t1_3.a = t2_3.a) AND (t1_3.b = t2_3.b) AND (t1_3.c = t2_3.c)) + -> Seq Scan on alpha_pos_p2 t1_3 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210)))) + -> Seq Scan on beta_pos_p2 t2_3 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) + -> Nested Loop + Join Filter: ((t1_4.a = t2_4.a) AND (t1_4.b = t2_4.b) AND (t1_4.c = t2_4.c)) + -> Seq Scan on alpha_pos_p3 t1_4 + Filter: ((c = ANY ('{0004,0009}'::text[])) AND (((b >= 100) AND (b < 110)) OR ((b >= 
200) AND (b < 210)))) + -> Seq Scan on beta_pos_p3 t2_4 + Filter: (((b >= 100) AND (b < 110)) OR ((b >= 200) AND (b < 210))) +(29 rows) + +SELECT t1.*, t2.* FROM alpha t1 INNER JOIN beta t2 ON (t1.a = t2.a AND t1.b = t2.b AND t1.c = t2.c) WHERE ((t1.b >= 100 AND t1.b < 110) OR (t1.b >= 200 AND t1.b < 210)) AND ((t2.b >= 100 AND t2.b < 110) OR (t2.b >= 200 AND t2.b < 210)) AND t1.c IN ('0004', '0009') ORDER BY t1.a, t1.b; + a | b | c | a | b | c +----+-----+------+----+-----+------ + -1 | 104 | 0004 | -1 | 104 | 0004 + -1 | 109 | 0009 | -1 | 109 | 0009 + -1 | 204 | 0004 | -1 | 204 | 0004 + -1 | 209 | 0009 | -1 | 209 | 0009 + 1 | 104 | 0004 | 1 | 104 | 0004 + 1 | 109 | 0009 | 1 | 109 | 0009 + 1 | 204 | 0004 | 1 | 204 | 0004 + 1 | 209 | 0009 | 1 | 209 | 0009 +(8 rows) + +-- partitionwise join with fractional paths +CREATE TABLE fract_t (id BIGINT, PRIMARY KEY (id)) PARTITION BY RANGE (id); +CREATE TABLE fract_t0 PARTITION OF fract_t FOR VALUES FROM ('0') TO ('1000'); +CREATE TABLE fract_t1 PARTITION OF fract_t FOR VALUES FROM ('1000') TO ('2000'); +-- insert data +INSERT INTO fract_t (id) (SELECT generate_series(0, 1999)); +ANALYZE fract_t; +-- verify plan; nested index only scans +SET max_parallel_workers_per_gather = 0; +SET enable_partitionwise_join = on; +EXPLAIN (COSTS OFF) +SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id ASC LIMIT 10; + QUERY PLAN +----------------------------------------------------------------------- + Limit + -> Merge Append + Sort Key: x.id + -> Merge Left Join + Merge Cond: (x_1.id = y_1.id) + -> Index Only Scan using fract_t0_pkey on fract_t0 x_1 + -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 + -> Merge Left Join + Merge Cond: (x_2.id = y_2.id) + -> Index Only Scan using fract_t1_pkey on fract_t1 x_2 + -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 +(11 rows) + +EXPLAIN (COSTS OFF) +SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DESC LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Merge Append + Sort Key: x.id DESC + -> Nested Loop Left Join + -> Index Only Scan Backward using fract_t0_pkey on fract_t0 x_1 + -> Index Only Scan using fract_t0_pkey on fract_t0 y_1 + Index Cond: (id = x_1.id) + -> Nested Loop Left Join + -> Index Only Scan Backward using fract_t1_pkey on fract_t1 x_2 + -> Index Only Scan using fract_t1_pkey on fract_t1 y_2 + Index Cond: (id = x_2.id) +(11 rows) + +-- cleanup +DROP TABLE fract_t; +RESET max_parallel_workers_per_gather; +RESET enable_partitionwise_join; diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out new file mode 100644 index 0000000..a820385 --- /dev/null +++ b/src/test/regress/expected/partition_prune.out @@ -0,0 +1,4287 @@ +-- +-- Test partitioning planner code +-- +-- Force generic plans to be used for all prepared statements in this file. 
+set plan_cache_mode = force_generic_plan; +create table lp (a char) partition by list (a); +create table lp_default partition of lp default; +create table lp_ef partition of lp for values in ('e', 'f'); +create table lp_ad partition of lp for values in ('a', 'd'); +create table lp_bc partition of lp for values in ('b', 'c'); +create table lp_g partition of lp for values in ('g'); +create table lp_null partition of lp for values in (null); +explain (costs off) select * from lp; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on lp_ad lp_1 + -> Seq Scan on lp_bc lp_2 + -> Seq Scan on lp_ef lp_3 + -> Seq Scan on lp_g lp_4 + -> Seq Scan on lp_null lp_5 + -> Seq Scan on lp_default lp_6 +(7 rows) + +explain (costs off) select * from lp where a > 'a' and a < 'd'; + QUERY PLAN +----------------------------------------------------------- + Append + -> Seq Scan on lp_bc lp_1 + Filter: ((a > 'a'::bpchar) AND (a < 'd'::bpchar)) + -> Seq Scan on lp_default lp_2 + Filter: ((a > 'a'::bpchar) AND (a < 'd'::bpchar)) +(5 rows) + +explain (costs off) select * from lp where a > 'a' and a <= 'd'; + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on lp_ad lp_1 + Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) + -> Seq Scan on lp_bc lp_2 + Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) + -> Seq Scan on lp_default lp_3 + Filter: ((a > 'a'::bpchar) AND (a <= 'd'::bpchar)) +(7 rows) + +explain (costs off) select * from lp where a = 'a'; + QUERY PLAN +----------------------------- + Seq Scan on lp_ad lp + Filter: (a = 'a'::bpchar) +(2 rows) + +explain (costs off) select * from lp where 'a' = a; /* commuted */ + QUERY PLAN +----------------------------- + Seq Scan on lp_ad lp + Filter: ('a'::bpchar = a) +(2 rows) + +explain (costs off) select * from lp where a is not null; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on lp_ad lp_1 + Filter: (a IS NOT NULL) + -> Seq Scan on lp_bc lp_2 + Filter: (a IS NOT NULL) + -> Seq Scan on lp_ef lp_3 + Filter: (a IS NOT NULL) + -> Seq Scan on lp_g lp_4 + Filter: (a IS NOT NULL) + -> Seq Scan on lp_default lp_5 + Filter: (a IS NOT NULL) +(11 rows) + +explain (costs off) select * from lp where a is null; + QUERY PLAN +------------------------ + Seq Scan on lp_null lp + Filter: (a IS NULL) +(2 rows) + +explain (costs off) select * from lp where a = 'a' or a = 'c'; + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on lp_ad lp_1 + Filter: ((a = 'a'::bpchar) OR (a = 'c'::bpchar)) + -> Seq Scan on lp_bc lp_2 + Filter: ((a = 'a'::bpchar) OR (a = 'c'::bpchar)) +(5 rows) + +explain (costs off) select * from lp where a is not null and (a = 'a' or a = 'c'); + QUERY PLAN +-------------------------------------------------------------------------------- + Append + -> Seq Scan on lp_ad lp_1 + Filter: ((a IS NOT NULL) AND ((a = 'a'::bpchar) OR (a = 'c'::bpchar))) + -> Seq Scan on lp_bc lp_2 + Filter: ((a IS NOT NULL) AND ((a = 'a'::bpchar) OR (a = 'c'::bpchar))) +(5 rows) + +explain (costs off) select * from lp where a <> 'g'; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on lp_ad lp_1 + Filter: (a <> 'g'::bpchar) + -> Seq Scan on lp_bc lp_2 + Filter: (a <> 'g'::bpchar) + -> Seq Scan on lp_ef lp_3 + Filter: (a <> 'g'::bpchar) + -> Seq Scan on lp_default lp_4 + Filter: (a <> 'g'::bpchar) +(9 rows) + +explain (costs off) select * from lp where a <> 'a' and a <> 'd'; + QUERY PLAN 
+------------------------------------------------------------- + Append + -> Seq Scan on lp_bc lp_1 + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) + -> Seq Scan on lp_ef lp_2 + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) + -> Seq Scan on lp_g lp_3 + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) + -> Seq Scan on lp_default lp_4 + Filter: ((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) +(9 rows) + +explain (costs off) select * from lp where a not in ('a', 'd'); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on lp_bc lp_1 + Filter: (a <> ALL ('{a,d}'::bpchar[])) + -> Seq Scan on lp_ef lp_2 + Filter: (a <> ALL ('{a,d}'::bpchar[])) + -> Seq Scan on lp_g lp_3 + Filter: (a <> ALL ('{a,d}'::bpchar[])) + -> Seq Scan on lp_default lp_4 + Filter: (a <> ALL ('{a,d}'::bpchar[])) +(9 rows) + +-- collation matches the partitioning collation, pruning works +create table coll_pruning (a text collate "C") partition by list (a); +create table coll_pruning_a partition of coll_pruning for values in ('a'); +create table coll_pruning_b partition of coll_pruning for values in ('b'); +create table coll_pruning_def partition of coll_pruning default; +explain (costs off) select * from coll_pruning where a collate "C" = 'a' collate "C"; + QUERY PLAN +----------------------------------------- + Seq Scan on coll_pruning_a coll_pruning + Filter: (a = 'a'::text COLLATE "C") +(2 rows) + +-- collation doesn't match the partitioning collation, no pruning occurs +explain (costs off) select * from coll_pruning where a collate "POSIX" = 'a' collate "POSIX"; + QUERY PLAN +--------------------------------------------------------- + Append + -> Seq Scan on coll_pruning_a coll_pruning_1 + Filter: ((a)::text = 'a'::text COLLATE "POSIX") + -> Seq Scan on coll_pruning_b coll_pruning_2 + Filter: ((a)::text = 'a'::text COLLATE "POSIX") + -> Seq Scan on coll_pruning_def coll_pruning_3 + Filter: ((a)::text = 'a'::text COLLATE "POSIX") +(7 rows) + +create table rlp (a int, b varchar) partition by range (a); +create table rlp_default partition of rlp default partition by list (a); +create table rlp_default_default partition of rlp_default default; +create table rlp_default_10 partition of rlp_default for values in (10); +create table rlp_default_30 partition of rlp_default for values in (30); +create table rlp_default_null partition of rlp_default for values in (null); +create table rlp1 partition of rlp for values from (minvalue) to (1); +create table rlp2 partition of rlp for values from (1) to (10); +create table rlp3 (b varchar, a int) partition by list (b varchar_ops); +create table rlp3_default partition of rlp3 default; +create table rlp3abcd partition of rlp3 for values in ('ab', 'cd'); +create table rlp3efgh partition of rlp3 for values in ('ef', 'gh'); +create table rlp3nullxy partition of rlp3 for values in (null, 'xy'); +alter table rlp attach partition rlp3 for values from (15) to (20); +create table rlp4 partition of rlp for values from (20) to (30) partition by range (a); +create table rlp4_default partition of rlp4 default; +create table rlp4_1 partition of rlp4 for values from (20) to (25); +create table rlp4_2 partition of rlp4 for values from (25) to (29); +create table rlp5 partition of rlp for values from (31) to (maxvalue) partition by range (a); +create table rlp5_default partition of rlp5 default; +create table rlp5_1 partition of rlp5 for values from (31) to (40); +explain (costs off) select * from rlp where a < 1; + QUERY PLAN +---------------------- + Seq 
Scan on rlp1 rlp + Filter: (a < 1) +(2 rows) + +explain (costs off) select * from rlp where 1 > a; /* commuted */ + QUERY PLAN +---------------------- + Seq Scan on rlp1 rlp + Filter: (1 > a) +(2 rows) + +explain (costs off) select * from rlp where a <= 1; + QUERY PLAN +------------------------------ + Append + -> Seq Scan on rlp1 rlp_1 + Filter: (a <= 1) + -> Seq Scan on rlp2 rlp_2 + Filter: (a <= 1) +(5 rows) + +explain (costs off) select * from rlp where a = 1; + QUERY PLAN +---------------------- + Seq Scan on rlp2 rlp + Filter: (a = 1) +(2 rows) + +explain (costs off) select * from rlp where a = 1::bigint; /* same as above */ + QUERY PLAN +----------------------------- + Seq Scan on rlp2 rlp + Filter: (a = '1'::bigint) +(2 rows) + +explain (costs off) select * from rlp where a = 1::numeric; /* no pruning */ + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp2 rlp_2 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3abcd rlp_3 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3efgh rlp_4 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3nullxy rlp_5 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp3_default rlp_6 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp4_1 rlp_7 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp4_2 rlp_8 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp4_default rlp_9 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp5_1 rlp_10 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp5_default rlp_11 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_10 rlp_12 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_30 rlp_13 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_null rlp_14 + Filter: ((a)::numeric = '1'::numeric) + -> Seq Scan on rlp_default_default rlp_15 + Filter: ((a)::numeric = '1'::numeric) +(31 rows) + +explain (costs off) select * from rlp where a <= 10; + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: (a <= 10) + -> Seq Scan on rlp2 rlp_2 + Filter: (a <= 10) + -> Seq Scan on rlp_default_10 rlp_3 + Filter: (a <= 10) + -> Seq Scan on rlp_default_default rlp_4 + Filter: (a <= 10) +(9 rows) + +explain (costs off) select * from rlp where a > 10; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on rlp3abcd rlp_1 + Filter: (a > 10) + -> Seq Scan on rlp3efgh rlp_2 + Filter: (a > 10) + -> Seq Scan on rlp3nullxy rlp_3 + Filter: (a > 10) + -> Seq Scan on rlp3_default rlp_4 + Filter: (a > 10) + -> Seq Scan on rlp4_1 rlp_5 + Filter: (a > 10) + -> Seq Scan on rlp4_2 rlp_6 + Filter: (a > 10) + -> Seq Scan on rlp4_default rlp_7 + Filter: (a > 10) + -> Seq Scan on rlp5_1 rlp_8 + Filter: (a > 10) + -> Seq Scan on rlp5_default rlp_9 + Filter: (a > 10) + -> Seq Scan on rlp_default_30 rlp_10 + Filter: (a > 10) + -> Seq Scan on rlp_default_default rlp_11 + Filter: (a > 10) +(23 rows) + +explain (costs off) select * from rlp where a < 15; + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: (a < 15) + -> Seq Scan on rlp2 rlp_2 + Filter: (a < 15) + -> Seq Scan on rlp_default_10 rlp_3 + Filter: (a < 15) + -> Seq Scan on rlp_default_default rlp_4 + Filter: (a < 15) +(9 rows) + +explain (costs off) select * from rlp where a <= 15; + QUERY PLAN 
+--------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: (a <= 15) + -> Seq Scan on rlp2 rlp_2 + Filter: (a <= 15) + -> Seq Scan on rlp3abcd rlp_3 + Filter: (a <= 15) + -> Seq Scan on rlp3efgh rlp_4 + Filter: (a <= 15) + -> Seq Scan on rlp3nullxy rlp_5 + Filter: (a <= 15) + -> Seq Scan on rlp3_default rlp_6 + Filter: (a <= 15) + -> Seq Scan on rlp_default_10 rlp_7 + Filter: (a <= 15) + -> Seq Scan on rlp_default_default rlp_8 + Filter: (a <= 15) +(17 rows) + +explain (costs off) select * from rlp where a > 15 and b = 'ab'; + QUERY PLAN +--------------------------------------------------------- + Append + -> Seq Scan on rlp3abcd rlp_1 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_1 rlp_2 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_2 rlp_3 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_default rlp_4 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_1 rlp_5 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_default rlp_6 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_30 rlp_7 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_default rlp_8 + Filter: ((a > 15) AND ((b)::text = 'ab'::text)) +(17 rows) + +explain (costs off) select * from rlp where a = 16; + QUERY PLAN +-------------------------------------- + Append + -> Seq Scan on rlp3abcd rlp_1 + Filter: (a = 16) + -> Seq Scan on rlp3efgh rlp_2 + Filter: (a = 16) + -> Seq Scan on rlp3nullxy rlp_3 + Filter: (a = 16) + -> Seq Scan on rlp3_default rlp_4 + Filter: (a = 16) +(9 rows) + +explain (costs off) select * from rlp where a = 16 and b in ('not', 'in', 'here'); + QUERY PLAN +---------------------------------------------------------------------- + Seq Scan on rlp3_default rlp + Filter: ((a = 16) AND ((b)::text = ANY ('{not,in,here}'::text[]))) +(2 rows) + +explain (costs off) select * from rlp where a = 16 and b < 'ab'; + QUERY PLAN +--------------------------------------------------- + Seq Scan on rlp3_default rlp + Filter: (((b)::text < 'ab'::text) AND (a = 16)) +(2 rows) + +explain (costs off) select * from rlp where a = 16 and b <= 'ab'; + QUERY PLAN +---------------------------------------------------------- + Append + -> Seq Scan on rlp3abcd rlp_1 + Filter: (((b)::text <= 'ab'::text) AND (a = 16)) + -> Seq Scan on rlp3_default rlp_2 + Filter: (((b)::text <= 'ab'::text) AND (a = 16)) +(5 rows) + +explain (costs off) select * from rlp where a = 16 and b is null; + QUERY PLAN +-------------------------------------- + Seq Scan on rlp3nullxy rlp + Filter: ((b IS NULL) AND (a = 16)) +(2 rows) + +explain (costs off) select * from rlp where a = 16 and b is not null; + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on rlp3abcd rlp_1 + Filter: ((b IS NOT NULL) AND (a = 16)) + -> Seq Scan on rlp3efgh rlp_2 + Filter: ((b IS NOT NULL) AND (a = 16)) + -> Seq Scan on rlp3nullxy rlp_3 + Filter: ((b IS NOT NULL) AND (a = 16)) + -> Seq Scan on rlp3_default rlp_4 + Filter: ((b IS NOT NULL) AND (a = 16)) +(9 rows) + +explain (costs off) select * from rlp where a is null; + QUERY PLAN +---------------------------------- + Seq Scan on rlp_default_null rlp + Filter: (a IS NULL) +(2 rows) + +explain (costs off) select * from rlp where a is not null; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp2 rlp_2 + 
Filter: (a IS NOT NULL) + -> Seq Scan on rlp3abcd rlp_3 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3efgh rlp_4 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3nullxy rlp_5 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp3_default rlp_6 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp4_1 rlp_7 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp4_2 rlp_8 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp4_default rlp_9 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp5_1 rlp_10 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp5_default rlp_11 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp_default_10 rlp_12 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp_default_30 rlp_13 + Filter: (a IS NOT NULL) + -> Seq Scan on rlp_default_default rlp_14 + Filter: (a IS NOT NULL) +(29 rows) + +explain (costs off) select * from rlp where a > 30; + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on rlp5_1 rlp_1 + Filter: (a > 30) + -> Seq Scan on rlp5_default rlp_2 + Filter: (a > 30) + -> Seq Scan on rlp_default_default rlp_3 + Filter: (a > 30) +(7 rows) + +explain (costs off) select * from rlp where a = 30; /* only default is scanned */ + QUERY PLAN +-------------------------------- + Seq Scan on rlp_default_30 rlp + Filter: (a = 30) +(2 rows) + +explain (costs off) select * from rlp where a <= 31; + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: (a <= 31) + -> Seq Scan on rlp2 rlp_2 + Filter: (a <= 31) + -> Seq Scan on rlp3abcd rlp_3 + Filter: (a <= 31) + -> Seq Scan on rlp3efgh rlp_4 + Filter: (a <= 31) + -> Seq Scan on rlp3nullxy rlp_5 + Filter: (a <= 31) + -> Seq Scan on rlp3_default rlp_6 + Filter: (a <= 31) + -> Seq Scan on rlp4_1 rlp_7 + Filter: (a <= 31) + -> Seq Scan on rlp4_2 rlp_8 + Filter: (a <= 31) + -> Seq Scan on rlp4_default rlp_9 + Filter: (a <= 31) + -> Seq Scan on rlp5_1 rlp_10 + Filter: (a <= 31) + -> Seq Scan on rlp_default_10 rlp_11 + Filter: (a <= 31) + -> Seq Scan on rlp_default_30 rlp_12 + Filter: (a <= 31) + -> Seq Scan on rlp_default_default rlp_13 + Filter: (a <= 31) +(27 rows) + +explain (costs off) select * from rlp where a = 1 or a = 7; + QUERY PLAN +-------------------------------- + Seq Scan on rlp2 rlp + Filter: ((a = 1) OR (a = 7)) +(2 rows) + +explain (costs off) select * from rlp where a = 1 or b = 'ab'; + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on rlp1 rlp_1 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp2 rlp_2 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp3abcd rlp_3 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_1 rlp_4 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_2 rlp_5 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp4_default rlp_6 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_1 rlp_7 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp5_default rlp_8 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_10 rlp_9 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_30 rlp_10 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_null rlp_11 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) + -> Seq Scan on rlp_default_default rlp_12 + Filter: ((a = 1) OR ((b)::text = 'ab'::text)) +(25 rows) + +explain (costs off) select * from rlp where a > 20 and a < 27; + QUERY PLAN +----------------------------------------- + Append + -> Seq 
Scan on rlp4_1 rlp_1 + Filter: ((a > 20) AND (a < 27)) + -> Seq Scan on rlp4_2 rlp_2 + Filter: ((a > 20) AND (a < 27)) +(5 rows) + +explain (costs off) select * from rlp where a = 29; + QUERY PLAN +------------------------------ + Seq Scan on rlp4_default rlp + Filter: (a = 29) +(2 rows) + +explain (costs off) select * from rlp where a >= 29; + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on rlp4_default rlp_1 + Filter: (a >= 29) + -> Seq Scan on rlp5_1 rlp_2 + Filter: (a >= 29) + -> Seq Scan on rlp5_default rlp_3 + Filter: (a >= 29) + -> Seq Scan on rlp_default_30 rlp_4 + Filter: (a >= 29) + -> Seq Scan on rlp_default_default rlp_5 + Filter: (a >= 29) +(11 rows) + +explain (costs off) select * from rlp where a < 1 or (a > 20 and a < 25); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on rlp1 rlp_1 + Filter: ((a < 1) OR ((a > 20) AND (a < 25))) + -> Seq Scan on rlp4_1 rlp_2 + Filter: ((a < 1) OR ((a > 20) AND (a < 25))) +(5 rows) + +-- where clause contradicts sub-partition's constraint +explain (costs off) select * from rlp where a = 20 or a = 40; + QUERY PLAN +---------------------------------------- + Append + -> Seq Scan on rlp4_1 rlp_1 + Filter: ((a = 20) OR (a = 40)) + -> Seq Scan on rlp5_default rlp_2 + Filter: ((a = 20) OR (a = 40)) +(5 rows) + +explain (costs off) select * from rlp3 where a = 20; /* empty */ + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- redundant clauses are eliminated +explain (costs off) select * from rlp where a > 1 and a = 10; /* only default */ + QUERY PLAN +---------------------------------- + Seq Scan on rlp_default_10 rlp + Filter: ((a > 1) AND (a = 10)) +(2 rows) + +explain (costs off) select * from rlp where a > 1 and a >=15; /* rlp3 onwards, including default */ + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on rlp3abcd rlp_1 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp3efgh rlp_2 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp3nullxy rlp_3 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp3_default rlp_4 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp4_1 rlp_5 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp4_2 rlp_6 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp4_default rlp_7 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp5_1 rlp_8 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp5_default rlp_9 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp_default_30 rlp_10 + Filter: ((a > 1) AND (a >= 15)) + -> Seq Scan on rlp_default_default rlp_11 + Filter: ((a > 1) AND (a >= 15)) +(23 rows) + +explain (costs off) select * from rlp where a = 1 and a = 3; /* empty */ + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from rlp where (a = 1 and a = 3) or (a > 1 and a = 15); + QUERY PLAN +------------------------------------------------------------------- + Append + -> Seq Scan on rlp2 rlp_1 + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3abcd rlp_2 + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3efgh rlp_3 + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3nullxy rlp_4 + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) + -> Seq Scan on rlp3_default rlp_5 + Filter: (((a = 1) AND (a = 3)) OR ((a > 1) AND (a = 15))) +(11 rows) + +-- multi-column keys +create table mc3p (a int, b 
int, c int) partition by range (a, abs(b), c); +create table mc3p_default partition of mc3p default; +create table mc3p0 partition of mc3p for values from (minvalue, minvalue, minvalue) to (1, 1, 1); +create table mc3p1 partition of mc3p for values from (1, 1, 1) to (10, 5, 10); +create table mc3p2 partition of mc3p for values from (10, 5, 10) to (10, 10, 10); +create table mc3p3 partition of mc3p for values from (10, 10, 10) to (10, 10, 20); +create table mc3p4 partition of mc3p for values from (10, 10, 20) to (10, maxvalue, maxvalue); +create table mc3p5 partition of mc3p for values from (11, 1, 1) to (20, 10, 10); +create table mc3p6 partition of mc3p for values from (20, 10, 10) to (20, 20, 20); +create table mc3p7 partition of mc3p for values from (20, 20, 20) to (maxvalue, maxvalue, maxvalue); +explain (costs off) select * from mc3p where a = 1; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: (a = 1) + -> Seq Scan on mc3p1 mc3p_2 + Filter: (a = 1) + -> Seq Scan on mc3p_default mc3p_3 + Filter: (a = 1) +(7 rows) + +explain (costs off) select * from mc3p where a = 1 and abs(b) < 1; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: ((a = 1) AND (abs(b) < 1)) + -> Seq Scan on mc3p_default mc3p_2 + Filter: ((a = 1) AND (abs(b) < 1)) +(5 rows) + +explain (costs off) select * from mc3p where a = 1 and abs(b) = 1; + QUERY PLAN +-------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: ((a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 mc3p_2 + Filter: ((a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p_default mc3p_3 + Filter: ((a = 1) AND (abs(b) = 1)) +(7 rows) + +explain (costs off) select * from mc3p where a = 1 and abs(b) = 1 and c < 8; + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: ((c < 8) AND (a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 mc3p_2 + Filter: ((c < 8) AND (a = 1) AND (abs(b) = 1)) +(5 rows) + +explain (costs off) select * from mc3p where a = 10 and abs(b) between 5 and 35; + QUERY PLAN +----------------------------------------------------------------- + Append + -> Seq Scan on mc3p1 mc3p_1 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p2 mc3p_2 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p3 mc3p_3 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p4 mc3p_4 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) + -> Seq Scan on mc3p_default mc3p_5 + Filter: ((a = 10) AND (abs(b) >= 5) AND (abs(b) <= 35)) +(11 rows) + +explain (costs off) select * from mc3p where a > 10; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc3p5 mc3p_1 + Filter: (a > 10) + -> Seq Scan on mc3p6 mc3p_2 + Filter: (a > 10) + -> Seq Scan on mc3p7 mc3p_3 + Filter: (a > 10) + -> Seq Scan on mc3p_default mc3p_4 + Filter: (a > 10) +(9 rows) + +explain (costs off) select * from mc3p where a >= 10; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc3p1 mc3p_1 + Filter: (a >= 10) + -> Seq Scan on mc3p2 mc3p_2 + Filter: (a >= 10) + -> Seq Scan on mc3p3 mc3p_3 + Filter: (a >= 10) + -> Seq Scan on mc3p4 mc3p_4 + Filter: (a >= 10) + -> Seq Scan on mc3p5 mc3p_5 + Filter: (a >= 10) + -> Seq Scan on mc3p6 mc3p_6 + Filter: (a >= 10) + -> Seq Scan on mc3p7 mc3p_7 + Filter: (a >= 10) + -> Seq Scan on mc3p_default mc3p_8 + Filter: (a >= 10) 
+(17 rows) + +explain (costs off) select * from mc3p where a < 10; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: (a < 10) + -> Seq Scan on mc3p1 mc3p_2 + Filter: (a < 10) + -> Seq Scan on mc3p_default mc3p_3 + Filter: (a < 10) +(7 rows) + +explain (costs off) select * from mc3p where a <= 10 and abs(b) < 10; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: ((a <= 10) AND (abs(b) < 10)) + -> Seq Scan on mc3p1 mc3p_2 + Filter: ((a <= 10) AND (abs(b) < 10)) + -> Seq Scan on mc3p2 mc3p_3 + Filter: ((a <= 10) AND (abs(b) < 10)) + -> Seq Scan on mc3p_default mc3p_4 + Filter: ((a <= 10) AND (abs(b) < 10)) +(9 rows) + +explain (costs off) select * from mc3p where a = 11 and abs(b) = 0; + QUERY PLAN +--------------------------------------- + Seq Scan on mc3p_default mc3p + Filter: ((a = 11) AND (abs(b) = 0)) +(2 rows) + +explain (costs off) select * from mc3p where a = 20 and abs(b) = 10 and c = 100; + QUERY PLAN +------------------------------------------------------ + Seq Scan on mc3p6 mc3p + Filter: ((a = 20) AND (c = 100) AND (abs(b) = 10)) +(2 rows) + +explain (costs off) select * from mc3p where a > 20; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc3p7 mc3p_1 + Filter: (a > 20) + -> Seq Scan on mc3p_default mc3p_2 + Filter: (a > 20) +(5 rows) + +explain (costs off) select * from mc3p where a >= 20; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc3p5 mc3p_1 + Filter: (a >= 20) + -> Seq Scan on mc3p6 mc3p_2 + Filter: (a >= 20) + -> Seq Scan on mc3p7 mc3p_3 + Filter: (a >= 20) + -> Seq Scan on mc3p_default mc3p_4 + Filter: (a >= 20) +(9 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p1 mc3p_1 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) + -> Seq Scan on mc3p2 mc3p_2 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) + -> Seq Scan on mc3p5 mc3p_3 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) + -> Seq Scan on mc3p_default mc3p_4 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20))) +(9 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p1 mc3p_2 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p2 mc3p_3 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p5 mc3p_4 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 
1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) + -> Seq Scan on mc3p_default mc3p_5 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1)) +(11 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1 and c = 1) or (a = 10 and abs(b) = 5 and c = 10) or (a > 11 and a < 20) or a < 1 or a = 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p1 mc3p_2 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p2 mc3p_3 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p5 mc3p_4 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) + -> Seq Scan on mc3p_default mc3p_5 + Filter: (((a = 1) AND (abs(b) = 1) AND (c = 1)) OR ((a = 10) AND (abs(b) = 5) AND (c = 10)) OR ((a > 11) AND (a < 20)) OR (a < 1) OR (a = 1)) +(11 rows) + +explain (costs off) select * from mc3p where a = 1 or abs(b) = 1 or c = 1; + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p1 mc3p_2 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p2 mc3p_3 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p3 mc3p_4 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p4 mc3p_5 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p5 mc3p_6 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p6 mc3p_7 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p7 mc3p_8 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) + -> Seq Scan on mc3p_default mc3p_9 + Filter: ((a = 1) OR (abs(b) = 1) OR (c = 1)) +(19 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 10); + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p1 mc3p_2 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p2 mc3p_3 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p3 mc3p_4 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p4 mc3p_5 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) + -> Seq Scan on mc3p_default mc3p_6 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 10))) +(13 rows) + +explain (costs off) select * from mc3p where (a = 1 and abs(b) = 1) or (a = 10 and abs(b) = 9); + QUERY PLAN +----------------------------------------------------------------------------- + Append + -> Seq Scan on mc3p0 mc3p_1 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) + -> Seq Scan on mc3p1 mc3p_2 + Filter: (((a = 1) AND (abs(b) = 1)) 
OR ((a = 10) AND (abs(b) = 9))) + -> Seq Scan on mc3p2 mc3p_3 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) + -> Seq Scan on mc3p_default mc3p_4 + Filter: (((a = 1) AND (abs(b) = 1)) OR ((a = 10) AND (abs(b) = 9))) +(9 rows) + +-- a simpler multi-column keys case +create table mc2p (a int, b int) partition by range (a, b); +create table mc2p_default partition of mc2p default; +create table mc2p0 partition of mc2p for values from (minvalue, minvalue) to (1, minvalue); +create table mc2p1 partition of mc2p for values from (1, minvalue) to (1, 1); +create table mc2p2 partition of mc2p for values from (1, 1) to (2, minvalue); +create table mc2p3 partition of mc2p for values from (2, minvalue) to (2, 1); +create table mc2p4 partition of mc2p for values from (2, 1) to (2, maxvalue); +create table mc2p5 partition of mc2p for values from (2, maxvalue) to (maxvalue, maxvalue); +explain (costs off) select * from mc2p where a < 2; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc2p0 mc2p_1 + Filter: (a < 2) + -> Seq Scan on mc2p1 mc2p_2 + Filter: (a < 2) + -> Seq Scan on mc2p2 mc2p_3 + Filter: (a < 2) + -> Seq Scan on mc2p_default mc2p_4 + Filter: (a < 2) +(9 rows) + +explain (costs off) select * from mc2p where a = 2 and b < 1; + QUERY PLAN +--------------------------------- + Seq Scan on mc2p3 mc2p + Filter: ((b < 1) AND (a = 2)) +(2 rows) + +explain (costs off) select * from mc2p where a > 1; + QUERY PLAN +--------------------------------------- + Append + -> Seq Scan on mc2p2 mc2p_1 + Filter: (a > 1) + -> Seq Scan on mc2p3 mc2p_2 + Filter: (a > 1) + -> Seq Scan on mc2p4 mc2p_3 + Filter: (a > 1) + -> Seq Scan on mc2p5 mc2p_4 + Filter: (a > 1) + -> Seq Scan on mc2p_default mc2p_5 + Filter: (a > 1) +(11 rows) + +explain (costs off) select * from mc2p where a = 1 and b > 1; + QUERY PLAN +--------------------------------- + Seq Scan on mc2p2 mc2p + Filter: ((b > 1) AND (a = 1)) +(2 rows) + +-- all partitions but the default one should be pruned +explain (costs off) select * from mc2p where a = 1 and b is null; + QUERY PLAN +------------------------------------- + Seq Scan on mc2p_default mc2p + Filter: ((b IS NULL) AND (a = 1)) +(2 rows) + +explain (costs off) select * from mc2p where a is null and b is null; + QUERY PLAN +----------------------------------------- + Seq Scan on mc2p_default mc2p + Filter: ((a IS NULL) AND (b IS NULL)) +(2 rows) + +explain (costs off) select * from mc2p where a is null and b = 1; + QUERY PLAN +------------------------------------- + Seq Scan on mc2p_default mc2p + Filter: ((a IS NULL) AND (b = 1)) +(2 rows) + +explain (costs off) select * from mc2p where a is null; + QUERY PLAN +------------------------------- + Seq Scan on mc2p_default mc2p + Filter: (a IS NULL) +(2 rows) + +explain (costs off) select * from mc2p where b is null; + QUERY PLAN +------------------------------- + Seq Scan on mc2p_default mc2p + Filter: (b IS NULL) +(2 rows) + +-- boolean partitioning +create table boolpart (a bool) partition by list (a); +create table boolpart_default partition of boolpart default; +create table boolpart_t partition of boolpart for values in ('true'); +create table boolpart_f partition of boolpart for values in ('false'); +insert into boolpart values (true), (false), (null); +explain (costs off) select * from boolpart where a in (true, false); + QUERY PLAN +------------------------------------------------ + Append + -> Seq Scan on boolpart_f boolpart_1 + Filter: (a = ANY ('{t,f}'::boolean[])) + -> Seq Scan on 
boolpart_t boolpart_2 + Filter: (a = ANY ('{t,f}'::boolean[])) +(5 rows) + +explain (costs off) select * from boolpart where a = false; + QUERY PLAN +--------------------------------- + Seq Scan on boolpart_f boolpart + Filter: (NOT a) +(2 rows) + +explain (costs off) select * from boolpart where not a = false; + QUERY PLAN +--------------------------------- + Seq Scan on boolpart_t boolpart + Filter: a +(2 rows) + +explain (costs off) select * from boolpart where a is true or a is not true; + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on boolpart_f boolpart_1 + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) + -> Seq Scan on boolpart_t boolpart_2 + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) + -> Seq Scan on boolpart_default boolpart_3 + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) +(7 rows) + +explain (costs off) select * from boolpart where a is not true; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on boolpart_f boolpart_1 + Filter: (a IS NOT TRUE) + -> Seq Scan on boolpart_default boolpart_2 + Filter: (a IS NOT TRUE) +(5 rows) + +explain (costs off) select * from boolpart where a is not true and a is not false; + QUERY PLAN +-------------------------------------------------- + Seq Scan on boolpart_default boolpart + Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) +(2 rows) + +explain (costs off) select * from boolpart where a is unknown; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on boolpart_f boolpart_1 + Filter: (a IS UNKNOWN) + -> Seq Scan on boolpart_t boolpart_2 + Filter: (a IS UNKNOWN) + -> Seq Scan on boolpart_default boolpart_3 + Filter: (a IS UNKNOWN) +(7 rows) + +explain (costs off) select * from boolpart where a is not unknown; + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on boolpart_f boolpart_1 + Filter: (a IS NOT UNKNOWN) + -> Seq Scan on boolpart_t boolpart_2 + Filter: (a IS NOT UNKNOWN) + -> Seq Scan on boolpart_default boolpart_3 + Filter: (a IS NOT UNKNOWN) +(7 rows) + +select * from boolpart where a in (true, false); + a +--- + f + t +(2 rows) + +select * from boolpart where a = false; + a +--- + f +(1 row) + +select * from boolpart where not a = false; + a +--- + t +(1 row) + +select * from boolpart where a is true or a is not true; + a +--- + f + t + +(3 rows) + +select * from boolpart where a is not true; + a +--- + f + +(2 rows) + +select * from boolpart where a is not true and a is not false; + a +--- + +(1 row) + +select * from boolpart where a is unknown; + a +--- + +(1 row) + +select * from boolpart where a is not unknown; + a +--- + f + t +(2 rows) + +-- inverse boolean partitioning - a seemingly unlikely design, but we've got +-- code for it, so we'd better test it. 
+create table iboolpart (a bool) partition by list ((not a)); +create table iboolpart_default partition of iboolpart default; +create table iboolpart_f partition of iboolpart for values in ('true'); +create table iboolpart_t partition of iboolpart for values in ('false'); +insert into iboolpart values (true), (false), (null); +explain (costs off) select * from iboolpart where a in (true, false); + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on iboolpart_t iboolpart_1 + Filter: (a = ANY ('{t,f}'::boolean[])) + -> Seq Scan on iboolpart_f iboolpart_2 + Filter: (a = ANY ('{t,f}'::boolean[])) + -> Seq Scan on iboolpart_default iboolpart_3 + Filter: (a = ANY ('{t,f}'::boolean[])) +(7 rows) + +explain (costs off) select * from iboolpart where a = false; + QUERY PLAN +----------------------------------- + Seq Scan on iboolpart_f iboolpart + Filter: (NOT a) +(2 rows) + +explain (costs off) select * from iboolpart where not a = false; + QUERY PLAN +----------------------------------- + Seq Scan on iboolpart_t iboolpart + Filter: a +(2 rows) + +explain (costs off) select * from iboolpart where a is true or a is not true; + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on iboolpart_t iboolpart_1 + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) + -> Seq Scan on iboolpart_f iboolpart_2 + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) + -> Seq Scan on iboolpart_default iboolpart_3 + Filter: ((a IS TRUE) OR (a IS NOT TRUE)) +(7 rows) + +explain (costs off) select * from iboolpart where a is not true; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on iboolpart_t iboolpart_1 + Filter: (a IS NOT TRUE) + -> Seq Scan on iboolpart_f iboolpart_2 + Filter: (a IS NOT TRUE) + -> Seq Scan on iboolpart_default iboolpart_3 + Filter: (a IS NOT TRUE) +(7 rows) + +explain (costs off) select * from iboolpart where a is not true and a is not false; + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on iboolpart_t iboolpart_1 + Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) + -> Seq Scan on iboolpart_f iboolpart_2 + Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) + -> Seq Scan on iboolpart_default iboolpart_3 + Filter: ((a IS NOT TRUE) AND (a IS NOT FALSE)) +(7 rows) + +explain (costs off) select * from iboolpart where a is unknown; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on iboolpart_t iboolpart_1 + Filter: (a IS UNKNOWN) + -> Seq Scan on iboolpart_f iboolpart_2 + Filter: (a IS UNKNOWN) + -> Seq Scan on iboolpart_default iboolpart_3 + Filter: (a IS UNKNOWN) +(7 rows) + +explain (costs off) select * from iboolpart where a is not unknown; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on iboolpart_t iboolpart_1 + Filter: (a IS NOT UNKNOWN) + -> Seq Scan on iboolpart_f iboolpart_2 + Filter: (a IS NOT UNKNOWN) + -> Seq Scan on iboolpart_default iboolpart_3 + Filter: (a IS NOT UNKNOWN) +(7 rows) + +select * from iboolpart where a in (true, false); + a +--- + t + f +(2 rows) + +select * from iboolpart where a = false; + a +--- + f +(1 row) + +select * from iboolpart where not a = false; + a +--- + t +(1 row) + +select * from iboolpart where a is true or a is not true; + a +--- + t + f + +(3 rows) + +select * from iboolpart where a is not true; + a +--- + f + +(2 rows) + +select * from iboolpart where a is not true and a is not false; + a +--- + +(1 row) + +select * from iboolpart where a is 
unknown; + a +--- + +(1 row) + +select * from iboolpart where a is not unknown; + a +--- + t + f +(2 rows) + +create table boolrangep (a bool, b bool, c int) partition by range (a,b,c); +create table boolrangep_tf partition of boolrangep for values from ('true', 'false', 0) to ('true', 'false', 100); +create table boolrangep_ft partition of boolrangep for values from ('false', 'true', 0) to ('false', 'true', 100); +create table boolrangep_ff1 partition of boolrangep for values from ('false', 'false', 0) to ('false', 'false', 50); +create table boolrangep_ff2 partition of boolrangep for values from ('false', 'false', 50) to ('false', 'false', 100); +-- try a more complex case that's been known to trip up pruning in the past +explain (costs off) select * from boolrangep where not a and not b and c = 25; + QUERY PLAN +---------------------------------------------- + Seq Scan on boolrangep_ff1 boolrangep + Filter: ((NOT a) AND (NOT b) AND (c = 25)) +(2 rows) + +-- test scalar-to-array operators +create table coercepart (a varchar) partition by list (a); +create table coercepart_ab partition of coercepart for values in ('ab'); +create table coercepart_bc partition of coercepart for values in ('bc'); +create table coercepart_cd partition of coercepart for values in ('cd'); +explain (costs off) select * from coercepart where a in ('ab', to_char(125, '999')); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ + Append + -> Seq Scan on coercepart_ab coercepart_1 + Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) + -> Seq Scan on coercepart_bc coercepart_2 + Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) + -> Seq Scan on coercepart_cd coercepart_3 + Filter: ((a)::text = ANY ((ARRAY['ab'::character varying, (to_char(125, '999'::text))::character varying])::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a ~ any ('{ab}'); + QUERY PLAN +---------------------------------------------------- + Append + -> Seq Scan on coercepart_ab coercepart_1 + Filter: ((a)::text ~ ANY ('{ab}'::text[])) + -> Seq Scan on coercepart_bc coercepart_2 + Filter: ((a)::text ~ ANY ('{ab}'::text[])) + -> Seq Scan on coercepart_cd coercepart_3 + Filter: ((a)::text ~ ANY ('{ab}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a !~ all ('{ab}'); + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on coercepart_ab coercepart_1 + Filter: ((a)::text !~ ALL ('{ab}'::text[])) + -> Seq Scan on coercepart_bc coercepart_2 + Filter: ((a)::text !~ ALL ('{ab}'::text[])) + -> Seq Scan on coercepart_cd coercepart_3 + Filter: ((a)::text !~ ALL ('{ab}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a ~ any ('{ab,bc}'); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on coercepart_ab coercepart_1 + Filter: ((a)::text ~ ANY ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_bc coercepart_2 + Filter: ((a)::text ~ ANY ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_cd coercepart_3 + Filter: ((a)::text ~ ANY ('{ab,bc}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a !~ all ('{ab,bc}'); + QUERY PLAN +-------------------------------------------------------- + Append + -> Seq Scan on coercepart_ab coercepart_1 + Filter: ((a)::text !~ ALL 
('{ab,bc}'::text[])) + -> Seq Scan on coercepart_bc coercepart_2 + Filter: ((a)::text !~ ALL ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_cd coercepart_3 + Filter: ((a)::text !~ ALL ('{ab,bc}'::text[])) +(7 rows) + +explain (costs off) select * from coercepart where a = any ('{ab,bc}'); + QUERY PLAN +------------------------------------------------------- + Append + -> Seq Scan on coercepart_ab coercepart_1 + Filter: ((a)::text = ANY ('{ab,bc}'::text[])) + -> Seq Scan on coercepart_bc coercepart_2 + Filter: ((a)::text = ANY ('{ab,bc}'::text[])) +(5 rows) + +explain (costs off) select * from coercepart where a = any ('{ab,null}'); + QUERY PLAN +--------------------------------------------------- + Seq Scan on coercepart_ab coercepart + Filter: ((a)::text = ANY ('{ab,NULL}'::text[])) +(2 rows) + +explain (costs off) select * from coercepart where a = any (null::text[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from coercepart where a = all ('{ab}'); + QUERY PLAN +---------------------------------------------- + Seq Scan on coercepart_ab coercepart + Filter: ((a)::text = ALL ('{ab}'::text[])) +(2 rows) + +explain (costs off) select * from coercepart where a = all ('{ab,bc}'); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from coercepart where a = all ('{ab,null}'); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from coercepart where a = all (null::text[]); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table coercepart; +CREATE TABLE part (a INT, b INT) PARTITION BY LIST (a); +CREATE TABLE part_p1 PARTITION OF part FOR VALUES IN (-2,-1,0,1,2); +CREATE TABLE part_p2 PARTITION OF part DEFAULT PARTITION BY RANGE(a); +CREATE TABLE part_p2_p1 PARTITION OF part_p2 DEFAULT; +CREATE TABLE part_rev (b INT, c INT, a INT); +ALTER TABLE part ATTACH PARTITION part_rev FOR VALUES IN (3); -- fail +ERROR: table "part_rev" contains column "c" not found in parent "part" +DETAIL: The new partition may contain only the columns present in parent. 
+ALTER TABLE part_rev DROP COLUMN c; +ALTER TABLE part ATTACH PARTITION part_rev FOR VALUES IN (3); -- now it's ok +INSERT INTO part VALUES (-1,-1), (1,1), (2,NULL), (NULL,-2),(NULL,NULL); +EXPLAIN (COSTS OFF) SELECT tableoid::regclass as part, a, b FROM part WHERE a IS NULL ORDER BY 1, 2, 3; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: ((part.tableoid)::regclass), part.a, part.b + -> Seq Scan on part_p2_p1 part + Filter: (a IS NULL) +(4 rows) + +EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM part p(x) ORDER BY x; + QUERY PLAN +----------------------------------------------- + Sort + Output: p.x, p.b + Sort Key: p.x + -> Append + -> Seq Scan on public.part_p1 p_1 + Output: p_1.x, p_1.b + -> Seq Scan on public.part_rev p_2 + Output: p_2.x, p_2.b + -> Seq Scan on public.part_p2_p1 p_3 + Output: p_3.x, p_3.b +(10 rows) + +-- +-- some more cases +-- +-- +-- pruning for partitioned table appearing inside a sub-query +-- +-- pruning won't work for mc3p, because some keys are Params +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = t1.b and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on mc2p1 t1_1 + Filter: (a = 1) + -> Seq Scan on mc2p2 t1_2 + Filter: (a = 1) + -> Seq Scan on mc2p_default t1_3 + Filter: (a = 1) + -> Aggregate + -> Append + -> Seq Scan on mc3p0 t2_1 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 t2_2 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p2 t2_3 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p3 t2_4 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p4 t2_5 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p5 t2_6 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p6 t2_7 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p7 t2_8 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p_default t2_9 + Filter: ((a = t1.b) AND (c = 1) AND (abs(b) = 1)) +(28 rows) + +-- pruning should work fine, because values for a prefix of keys (a, b) are +-- available +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.c = t1.b and abs(t2.b) = 1 and t2.a = 1) s where t1.a = 1; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop + -> Append + -> Seq Scan on mc2p1 t1_1 + Filter: (a = 1) + -> Seq Scan on mc2p2 t1_2 + Filter: (a = 1) + -> Seq Scan on mc2p_default t1_3 + Filter: (a = 1) + -> Aggregate + -> Append + -> Seq Scan on mc3p0 t2_1 + Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 t2_2 + Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) + -> Seq Scan on mc3p_default t2_3 + Filter: ((c = t1.b) AND (a = 1) AND (abs(b) = 1)) +(16 rows) + +-- also here, because values for all keys are provided +explain (costs off) select * from mc2p t1, lateral (select count(*) from mc3p t2 where t2.a = 1 and abs(t2.b) = 1 and t2.c = 1) s where t1.a = 1; + QUERY PLAN +-------------------------------------------------------------- + Nested Loop + -> Aggregate + -> Seq Scan on mc3p1 t2 + Filter: ((a = 1) AND (c = 1) AND (abs(b) = 1)) + -> Append + -> Seq Scan on mc2p1 t1_1 + Filter: (a = 1) + -> Seq Scan on mc2p2 t1_2 + Filter: (a = 1) + -> Seq Scan on mc2p_default t1_3 + Filter: (a = 1) +(11 rows) + +-- +-- 
pruning with clauses containing <> operator +-- +-- doesn't prune range partitions +create table rp (a int) partition by range (a); +create table rp0 partition of rp for values from (minvalue) to (1); +create table rp1 partition of rp for values from (1) to (2); +create table rp2 partition of rp for values from (2) to (maxvalue); +explain (costs off) select * from rp where a <> 1; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on rp0 rp_1 + Filter: (a <> 1) + -> Seq Scan on rp1 rp_2 + Filter: (a <> 1) + -> Seq Scan on rp2 rp_3 + Filter: (a <> 1) +(7 rows) + +explain (costs off) select * from rp where a <> 1 and a <> 2; + QUERY PLAN +----------------------------------------- + Append + -> Seq Scan on rp0 rp_1 + Filter: ((a <> 1) AND (a <> 2)) + -> Seq Scan on rp1 rp_2 + Filter: ((a <> 1) AND (a <> 2)) + -> Seq Scan on rp2 rp_3 + Filter: ((a <> 1) AND (a <> 2)) +(7 rows) + +-- null partition should be eliminated due to strict <> clause. +explain (costs off) select * from lp where a <> 'a'; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on lp_ad lp_1 + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_bc lp_2 + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_ef lp_3 + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_g lp_4 + Filter: (a <> 'a'::bpchar) + -> Seq Scan on lp_default lp_5 + Filter: (a <> 'a'::bpchar) +(11 rows) + +-- ensure we detect contradictions in clauses; a can't be NULL and NOT NULL. +explain (costs off) select * from lp where a <> 'a' and a is null; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from lp where (a <> 'a' and a <> 'd') or a is null; + QUERY PLAN +------------------------------------------------------------------------------ + Append + -> Seq Scan on lp_bc lp_1 + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_ef lp_2 + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_g lp_3 + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_null lp_4 + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) + -> Seq Scan on lp_default lp_5 + Filter: (((a <> 'a'::bpchar) AND (a <> 'd'::bpchar)) OR (a IS NULL)) +(11 rows) + +-- check that it also works for a partitioned table that's not root, +-- which in this case are partitions of rlp that are themselves +-- list-partitioned on b +explain (costs off) select * from rlp where a = 15 and b <> 'ab' and b <> 'cd' and b <> 'xy' and b is not null; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------ + Append + -> Seq Scan on rlp3efgh rlp_1 + Filter: ((b IS NOT NULL) AND ((b)::text <> 'ab'::text) AND ((b)::text <> 'cd'::text) AND ((b)::text <> 'xy'::text) AND (a = 15)) + -> Seq Scan on rlp3_default rlp_2 + Filter: ((b IS NOT NULL) AND ((b)::text <> 'ab'::text) AND ((b)::text <> 'cd'::text) AND ((b)::text <> 'xy'::text) AND (a = 15)) +(5 rows) + +-- +-- different collations for different keys with same expression +-- +create table coll_pruning_multi (a text) partition by range (substr(a, 1) collate "POSIX", substr(a, 1) collate "C"); +create table coll_pruning_multi1 partition of coll_pruning_multi for values from ('a', 'a') to ('a', 'e'); +create table coll_pruning_multi2 partition of coll_pruning_multi for values from ('a', 'e') to ('a', 'z'); +create table coll_pruning_multi3 partition of 
coll_pruning_multi for values from ('b', 'a') to ('b', 'e'); +-- no pruning, because no value for the leading key +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C"; + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on coll_pruning_multi1 coll_pruning_multi_1 + Filter: (substr(a, 1) = 'e'::text COLLATE "C") + -> Seq Scan on coll_pruning_multi2 coll_pruning_multi_2 + Filter: (substr(a, 1) = 'e'::text COLLATE "C") + -> Seq Scan on coll_pruning_multi3 coll_pruning_multi_3 + Filter: (substr(a, 1) = 'e'::text COLLATE "C") +(7 rows) + +-- pruning, with a value provided for the leading key +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'a' collate "POSIX"; + QUERY PLAN +------------------------------------------------------------ + Append + -> Seq Scan on coll_pruning_multi1 coll_pruning_multi_1 + Filter: (substr(a, 1) = 'a'::text COLLATE "POSIX") + -> Seq Scan on coll_pruning_multi2 coll_pruning_multi_2 + Filter: (substr(a, 1) = 'a'::text COLLATE "POSIX") +(5 rows) + +-- pruning, with values provided for both keys +explain (costs off) select * from coll_pruning_multi where substr(a, 1) = 'e' collate "C" and substr(a, 1) = 'a' collate "POSIX"; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Seq Scan on coll_pruning_multi2 coll_pruning_multi + Filter: ((substr(a, 1) = 'e'::text COLLATE "C") AND (substr(a, 1) = 'a'::text COLLATE "POSIX")) +(2 rows) + +-- +-- LIKE operators don't prune +-- +create table like_op_noprune (a text) partition by list (a); +create table like_op_noprune1 partition of like_op_noprune for values in ('ABC'); +create table like_op_noprune2 partition of like_op_noprune for values in ('BCD'); +explain (costs off) select * from like_op_noprune where a like '%BC'; + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on like_op_noprune1 like_op_noprune_1 + Filter: (a ~~ '%BC'::text) + -> Seq Scan on like_op_noprune2 like_op_noprune_2 + Filter: (a ~~ '%BC'::text) +(5 rows) + +-- +-- tests wherein clause value requires a cross-type comparison function +-- +create table lparted_by_int2 (a smallint) partition by list (a); +create table lparted_by_int2_1 partition of lparted_by_int2 for values in (1); +create table lparted_by_int2_16384 partition of lparted_by_int2 for values in (16384); +explain (costs off) select * from lparted_by_int2 where a = 100_000_000_000_000; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +create table rparted_by_int2 (a smallint) partition by range (a); +create table rparted_by_int2_1 partition of rparted_by_int2 for values from (1) to (10); +create table rparted_by_int2_16384 partition of rparted_by_int2 for values from (10) to (16384); +-- all partitions pruned +explain (costs off) select * from rparted_by_int2 where a > 100_000_000_000_000; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +create table rparted_by_int2_maxvalue partition of rparted_by_int2 for values from (16384) to (maxvalue); +-- all partitions but rparted_by_int2_maxvalue pruned +explain (costs off) select * from rparted_by_int2 where a > 100_000_000_000_000; + QUERY PLAN +------------------------------------------------------ + Seq Scan on rparted_by_int2_maxvalue rparted_by_int2 + Filter: (a > '100000000000000'::bigint) +(2 rows) + +drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, 
iboolpart, boolrangep, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2; +-- +-- Test Partition pruning for HASH partitioning +-- +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different machines. See the definitions of +-- part_part_test_int4_ops and part_test_text_ops in insert.sql. +-- +create table hp (a int, b text, c int) + partition by hash (a part_test_int4_ops, b part_test_text_ops); +create table hp0 partition of hp for values with (modulus 4, remainder 0); +create table hp3 partition of hp for values with (modulus 4, remainder 3); +create table hp1 partition of hp for values with (modulus 4, remainder 1); +create table hp2 partition of hp for values with (modulus 4, remainder 2); +insert into hp values (null, null, 0); +insert into hp values (1, null, 1); +insert into hp values (1, 'xxx', 2); +insert into hp values (null, 'xxx', 3); +insert into hp values (2, 'xxx', 4); +insert into hp values (1, 'abcde', 5); +select tableoid::regclass, * from hp order by c; + tableoid | a | b | c +----------+---+-------+--- + hp0 | | | 0 + hp1 | 1 | | 1 + hp0 | 1 | xxx | 2 + hp2 | | xxx | 3 + hp3 | 2 | xxx | 4 + hp2 | 1 | abcde | 5 +(6 rows) + +-- partial keys won't prune, nor would non-equality conditions +explain (costs off) select * from hp where a = 1; + QUERY PLAN +---------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: (a = 1) + -> Seq Scan on hp1 hp_2 + Filter: (a = 1) + -> Seq Scan on hp2 hp_3 + Filter: (a = 1) + -> Seq Scan on hp3 hp_4 + Filter: (a = 1) +(9 rows) + +explain (costs off) select * from hp where b = 'xxx'; + QUERY PLAN +----------------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: (b = 'xxx'::text) + -> Seq Scan on hp1 hp_2 + Filter: (b = 'xxx'::text) + -> Seq Scan on hp2 hp_3 + Filter: (b = 'xxx'::text) + -> Seq Scan on hp3 hp_4 + Filter: (b = 'xxx'::text) +(9 rows) + +explain (costs off) select * from hp where a is null; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: (a IS NULL) + -> Seq Scan on hp1 hp_2 + Filter: (a IS NULL) + -> Seq Scan on hp2 hp_3 + Filter: (a IS NULL) + -> Seq Scan on hp3 hp_4 + Filter: (a IS NULL) +(9 rows) + +explain (costs off) select * from hp where b is null; + QUERY PLAN +----------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: (b IS NULL) + -> Seq Scan on hp1 hp_2 + Filter: (b IS NULL) + -> Seq Scan on hp2 hp_3 + Filter: (b IS NULL) + -> Seq Scan on hp3 hp_4 + Filter: (b IS NULL) +(9 rows) + +explain (costs off) select * from hp where a < 1 and b = 'xxx'; + QUERY PLAN +------------------------------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: ((a < 1) AND (b = 'xxx'::text)) + -> Seq Scan on hp1 hp_2 + Filter: ((a < 1) AND (b = 'xxx'::text)) + -> Seq Scan on hp2 hp_3 + Filter: ((a < 1) AND (b = 'xxx'::text)) + -> Seq Scan on hp3 hp_4 + Filter: ((a < 1) AND (b = 'xxx'::text)) +(9 rows) + +explain (costs off) select * from hp where a <> 1 and b = 'yyy'; + QUERY PLAN +-------------------------------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: ((a <> 1) AND (b = 'yyy'::text)) + -> Seq Scan on hp1 hp_2 + Filter: ((a <> 1) AND (b = 'yyy'::text)) + -> Seq Scan on hp2 hp_3 + Filter: ((a <> 1) AND (b = 'yyy'::text)) + -> Seq Scan on hp3 hp_4 + Filter: ((a <> 1) AND (b = 'yyy'::text)) +(9 rows) + +explain (costs off) select * from hp where a <> 1 and b <> 'xxx'; + QUERY PLAN +--------------------------------------------------- + Append + -> Seq Scan on 
hp0 hp_1 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) + -> Seq Scan on hp1 hp_2 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) + -> Seq Scan on hp2 hp_3 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) + -> Seq Scan on hp3 hp_4 + Filter: ((a <> 1) AND (b <> 'xxx'::text)) +(9 rows) + +-- pruning should work if either a value or a IS NULL clause is provided for +-- each of the keys +explain (costs off) select * from hp where a is null and b is null; + QUERY PLAN +----------------------------------------- + Seq Scan on hp0 hp + Filter: ((a IS NULL) AND (b IS NULL)) +(2 rows) + +explain (costs off) select * from hp where a = 1 and b is null; + QUERY PLAN +------------------------------------- + Seq Scan on hp1 hp + Filter: ((b IS NULL) AND (a = 1)) +(2 rows) + +explain (costs off) select * from hp where a = 1 and b = 'xxx'; + QUERY PLAN +------------------------------------------- + Seq Scan on hp0 hp + Filter: ((a = 1) AND (b = 'xxx'::text)) +(2 rows) + +explain (costs off) select * from hp where a is null and b = 'xxx'; + QUERY PLAN +----------------------------------------------- + Seq Scan on hp2 hp + Filter: ((a IS NULL) AND (b = 'xxx'::text)) +(2 rows) + +explain (costs off) select * from hp where a = 2 and b = 'xxx'; + QUERY PLAN +------------------------------------------- + Seq Scan on hp3 hp + Filter: ((a = 2) AND (b = 'xxx'::text)) +(2 rows) + +explain (costs off) select * from hp where a = 1 and b = 'abcde'; + QUERY PLAN +--------------------------------------------- + Seq Scan on hp2 hp + Filter: ((a = 1) AND (b = 'abcde'::text)) +(2 rows) + +explain (costs off) select * from hp where (a = 1 and b = 'abcde') or (a = 2 and b = 'xxx') or (a is null and b is null); + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Append + -> Seq Scan on hp0 hp_1 + Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) + -> Seq Scan on hp2 hp_2 + Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) + -> Seq Scan on hp3 hp_3 + Filter: (((a = 1) AND (b = 'abcde'::text)) OR ((a = 2) AND (b = 'xxx'::text)) OR ((a IS NULL) AND (b IS NULL))) +(7 rows) + +-- test pruning when not all the partitions exist +drop table hp1; +drop table hp3; +explain (costs off) select * from hp where a = 1 and b = 'abcde'; + QUERY PLAN +--------------------------------------------- + Seq Scan on hp2 hp + Filter: ((a = 1) AND (b = 'abcde'::text)) +(2 rows) + +explain (costs off) select * from hp where a = 1 and b = 'abcde' and + (c = 2 or c = 3); + QUERY PLAN +---------------------------------------------------------------------- + Seq Scan on hp2 hp + Filter: ((a = 1) AND (b = 'abcde'::text) AND ((c = 2) OR (c = 3))) +(2 rows) + +drop table hp2; +explain (costs off) select * from hp where a = 1 and b = 'abcde' and + (c = 2 or c = 3); + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- +-- Test runtime partition pruning +-- +create table ab (a int not null, b int not null) partition by list (a); +create table ab_a2 partition of ab for values in(2) partition by list (b); +create table ab_a2_b1 partition of ab_a2 for values in (1); +create table ab_a2_b2 partition of ab_a2 for values in (2); +create table ab_a2_b3 partition of ab_a2 for values in (3); +create table ab_a1 partition of ab for values in(1) partition by list (b); +create table ab_a1_b1 partition of ab_a1 for values in (1); +create table 
ab_a1_b2 partition of ab_a1 for values in (2); +create table ab_a1_b3 partition of ab_a1 for values in (3); +create table ab_a3 partition of ab for values in(3) partition by list (b); +create table ab_a3_b1 partition of ab_a3 for values in (1); +create table ab_a3_b2 partition of ab_a3 for values in (2); +create table ab_a3_b3 partition of ab_a3 for values in (3); +-- Disallow index only scans as concurrent transactions may stop visibility +-- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN ANALYZE +-- output. +set enable_indexonlyscan = off; +prepare ab_q1 (int, int, int) as +select * from ab where a between $1 and $2 and b <= $3; +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2, 3); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 6 + -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b3 ab_3 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) +(8 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (1, 2, 3); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 3 + -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a1_b2 ab_2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a1_b3 ab_3 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b2 ab_5 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) + -> Seq Scan on ab_a2_b3 ab_6 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b <= $3)) +(14 rows) + +deallocate ab_q1; +-- Runtime pruning after optimizer pruning +prepare ab_q1 (int, int) as +select a from ab where a between $1 and $2 and b < 3; +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 4 + -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) +(6 rows) + +explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 4); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 2 + -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a3_b1 ab_3 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) + -> Seq Scan on ab_a3_b2 ab_4 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < 3)) +(10 rows) + +-- Ensure a mix of PARAM_EXTERN and PARAM_EXEC Params work together at +-- different levels of partitioning. 
+prepare ab_q2 (int, int) as +select a from ab where a between $1 and $2 and b < (select 3); +explain (analyze, costs off, summary off, timing off) execute ab_q2 (2, 2); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 6 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on ab_a2_b1 ab_1 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < $0)) + -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) + Filter: ((a >= $1) AND (a <= $2) AND (b < $0)) + -> Seq Scan on ab_a2_b3 ab_3 (never executed) + Filter: ((a >= $1) AND (a <= $2) AND (b < $0)) +(10 rows) + +-- As above, but swap the PARAM_EXEC Param to the first partition level +prepare ab_q3 (int, int) as +select a from ab where b between $1 and $2 and a < (select 3); +explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2); + QUERY PLAN +--------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 6 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on ab_a1_b2 ab_1 (actual rows=0 loops=1) + Filter: ((b >= $1) AND (b <= $2) AND (a < $0)) + -> Seq Scan on ab_a2_b2 ab_2 (actual rows=0 loops=1) + Filter: ((b >= $1) AND (b <= $2) AND (a < $0)) + -> Seq Scan on ab_a3_b2 ab_3 (never executed) + Filter: ((b >= $1) AND (b <= $2) AND (a < $0)) +(10 rows) + +-- +-- Test runtime pruning with hash partitioned tables +-- +-- recreate partitions dropped above +create table hp1 partition of hp for values with (modulus 4, remainder 1); +create table hp2 partition of hp for values with (modulus 4, remainder 2); +create table hp3 partition of hp for values with (modulus 4, remainder 3); +-- Ensure we correctly prune unneeded partitions when there is an IS NULL qual +prepare hp_q1 (text) as +select * from hp where a is null and b = $1; +explain (costs off) execute hp_q1('xxx'); + QUERY PLAN +-------------------------------------------- + Append + Subplans Removed: 3 + -> Seq Scan on hp2 hp_1 + Filter: ((a IS NULL) AND (b = $1)) +(4 rows) + +deallocate hp_q1; +drop table hp; +-- Test a backwards Append scan +create table list_part (a int) partition by list (a); +create table list_part1 partition of list_part for values in (1); +create table list_part2 partition of list_part for values in (2); +create table list_part3 partition of list_part for values in (3); +create table list_part4 partition of list_part for values in (4); +insert into list_part select generate_series(1,4); +begin; +-- Don't select an actual value out of the table as the order of the Append's +-- subnodes may not be stable. +declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4); +-- move beyond the final row +move 3 from cur; +-- Ensure we get two rows. +fetch backward all from cur; + ?column? 
+---------- + 1 + 1 +(2 rows) + +commit; +begin; +-- Test run-time pruning using stable functions +create function list_part_fn(int) returns int as $$ begin return $1; end;$$ language plpgsql stable; +-- Ensure pruning works using a stable function containing no Vars +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1); + QUERY PLAN +------------------------------------------------------------------ + Append (actual rows=1 loops=1) + Subplans Removed: 3 + -> Seq Scan on list_part1 list_part_1 (actual rows=1 loops=1) + Filter: (a = list_part_fn(1)) +(4 rows) + +-- Ensure pruning does not take place when the function has a Var parameter +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a); + QUERY PLAN +------------------------------------------------------------------ + Append (actual rows=4 loops=1) + -> Seq Scan on list_part1 list_part_1 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) + -> Seq Scan on list_part2 list_part_2 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) + -> Seq Scan on list_part3 list_part_3 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) + -> Seq Scan on list_part4 list_part_4 (actual rows=1 loops=1) + Filter: (a = list_part_fn(a)) +(9 rows) + +-- Ensure pruning does not take place when the expression contains a Var. +explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a; + QUERY PLAN +------------------------------------------------------------------ + Append (actual rows=0 loops=1) + -> Seq Scan on list_part1 list_part_1 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 + -> Seq Scan on list_part2 list_part_2 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 + -> Seq Scan on list_part3 list_part_3 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 + -> Seq Scan on list_part4 list_part_4 (actual rows=0 loops=1) + Filter: (a = (list_part_fn(1) + a)) + Rows Removed by Filter: 1 +(13 rows) + +rollback; +drop table list_part; +-- Parallel append +-- Parallel queries won't necessarily get as many workers as the planner +-- asked for. This affects not only the "Workers Launched:" field of EXPLAIN +-- results, but also row counts and loop counts for parallel scans, Gathers, +-- and everything in between. This function filters out the values we can't +-- rely on to be stable. +-- This removes enough info that you might wonder why bother with EXPLAIN +-- ANALYZE at all. The answer is that we need to see '(never executed)' +-- notations because that's the only way to verify runtime pruning. 
+create function explain_parallel_append(text) returns setof text +language plpgsql as +$$ +declare + ln text; +begin + for ln in + execute format('explain (analyze, costs off, summary off, timing off) %s', + $1) + loop + ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N'); + ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N'); + ln := regexp_replace(ln, 'Rows Removed by Filter: \d+', 'Rows Removed by Filter: N'); + return next ln; + end loop; +end; +$$; +prepare ab_q4 (int, int) as +select avg(a) from ab where a between $1 and $2 and b < 4; +-- Encourage use of parallel plans +set parallel_setup_cost = 0; +set parallel_tuple_cost = 0; +set min_parallel_table_scan_size = 0; +set max_parallel_workers_per_gather = 2; +select explain_parallel_append('execute ab_q4 (2, 2)'); + explain_parallel_append +------------------------------------------------------------------------------------ + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + Subplans Removed: 6 + -> Parallel Seq Scan on ab_a2_b1 ab_1 (actual rows=N loops=N) + Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) + -> Parallel Seq Scan on ab_a2_b2 ab_2 (actual rows=N loops=N) + Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) + -> Parallel Seq Scan on ab_a2_b3 ab_3 (actual rows=N loops=N) + Filter: ((a >= $1) AND (a <= $2) AND (b < 4)) +(13 rows) + +-- Test run-time pruning with IN lists. +prepare ab_q5 (int, int, int) as +select avg(a) from ab where a in($1,$2,$3) and b < 4; +select explain_parallel_append('execute ab_q5 (1, 1, 1)'); + explain_parallel_append +------------------------------------------------------------------------------------ + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + Subplans Removed: 6 + -> Parallel Seq Scan on ab_a1_b1 ab_1 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a1_b2 ab_2 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a1_b3 ab_3 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) +(13 rows) + +select explain_parallel_append('execute ab_q5 (2, 3, 3)'); + explain_parallel_append +------------------------------------------------------------------------------------ + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + Subplans Removed: 3 + -> Parallel Seq Scan on ab_a2_b1 ab_1 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a2_b2 ab_2 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a2_b3 ab_3 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a3_b1 ab_4 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a3_b2 ab_5 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) + -> Parallel Seq Scan on ab_a3_b3 ab_6 (actual rows=N loops=N) + Filter: ((b < 4) AND (a = ANY (ARRAY[$1, $2, $3]))) 
+(19 rows) + +-- Try some params whose values do not belong to any partition. +select explain_parallel_append('execute ab_q5 (33, 44, 55)'); + explain_parallel_append +----------------------------------------------------------- + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + Subplans Removed: 9 +(7 rows) + +-- Test Parallel Append with PARAM_EXEC Params +select explain_parallel_append('select count(*) from ab where (a = (select 1) or a = (select 3)) and b = 2'); + explain_parallel_append +------------------------------------------------------------------------------ + Aggregate (actual rows=N loops=N) + InitPlan 1 (returns $0) + -> Result (actual rows=N loops=N) + InitPlan 2 (returns $1) + -> Result (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Params Evaluated: $0, $1 + Workers Launched: N + -> Parallel Append (actual rows=N loops=N) + -> Parallel Seq Scan on ab_a1_b2 ab_1 (actual rows=N loops=N) + Filter: ((b = 2) AND ((a = $0) OR (a = $1))) + -> Parallel Seq Scan on ab_a2_b2 ab_2 (never executed) + Filter: ((b = 2) AND ((a = $0) OR (a = $1))) + -> Parallel Seq Scan on ab_a3_b2 ab_3 (actual rows=N loops=N) + Filter: ((b = 2) AND ((a = $0) OR (a = $1))) +(16 rows) + +-- Test pruning during parallel nested loop query +create table lprt_a (a int not null); +-- Insert some values we won't find in ab +insert into lprt_a select 0 from generate_series(1,100); +-- and insert some values that we should find. +insert into lprt_a values(1),(1); +analyze lprt_a; +create index ab_a2_b1_a_idx on ab_a2_b1 (a); +create index ab_a2_b2_a_idx on ab_a2_b2 (a); +create index ab_a2_b3_a_idx on ab_a2_b3 (a); +create index ab_a1_b1_a_idx on ab_a1_b1 (a); +create index ab_a1_b2_a_idx on ab_a1_b2 (a); +create index ab_a1_b3_a_idx on ab_a1_b3 (a); +create index ab_a3_b1_a_idx on ab_a3_b1 (a); +create index ab_a3_b2_a_idx on ab_a3_b2 (a); +create index ab_a3_b3_a_idx on ab_a3_b3 (a); +set enable_hashjoin = 0; +set enable_mergejoin = 0; +set enable_memoize = 0; +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(0, 0, 1)'); + explain_parallel_append +-------------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 1 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Nested Loop (actual rows=N loops=N) + -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) + Filter: (a = ANY ('{0,0,1}'::integer[])) + -> Append (actual rows=N loops=N) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) + Index Cond: (a = 
a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) + Index Cond: (a = a.a) +(27 rows) + +-- Ensure the same partitions are pruned when we make the nested loop +-- parameter an Expr rather than a plain Param. +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a + 0 where a.a in(0, 0, 1)'); + explain_parallel_append +-------------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 1 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Nested Loop (actual rows=N loops=N) + -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) + Filter: (a = ANY ('{0,0,1}'::integer[])) + -> Append (actual rows=N loops=N) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) + Index Cond: (a = (a.a + 0)) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) + Index Cond: (a = (a.a + 0)) +(27 rows) + +insert into lprt_a values(3),(3); +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 3)'); + explain_parallel_append +-------------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 1 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Nested Loop (actual rows=N loops=N) + -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) + Filter: (a = ANY ('{1,0,3}'::integer[])) + -> Append (actual rows=N loops=N) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (actual rows=N loops=N) + Index Cond: (a = a.a) +(27 rows) + +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); + explain_parallel_append 
+-------------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 1 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Nested Loop (actual rows=N loops=N) + -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) + Filter: (a = ANY ('{1,0,0}'::integer[])) + Rows Removed by Filter: N + -> Append (actual rows=N loops=N) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (actual rows=N loops=N) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) + Index Cond: (a = a.a) +(28 rows) + +delete from lprt_a where a = 1; +select explain_parallel_append('select avg(ab.a) from ab inner join lprt_a a on ab.a = a.a where a.a in(1, 0, 0)'); + explain_parallel_append +------------------------------------------------------------------------------------------------- + Finalize Aggregate (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 1 + Workers Launched: N + -> Partial Aggregate (actual rows=N loops=N) + -> Nested Loop (actual rows=N loops=N) + -> Parallel Seq Scan on lprt_a a (actual rows=N loops=N) + Filter: (a = ANY ('{1,0,0}'::integer[])) + Rows Removed by Filter: N + -> Append (actual rows=N loops=N) + -> Index Scan using ab_a1_b1_a_idx on ab_a1_b1 ab_1 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b2_a_idx on ab_a1_b2 ab_2 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a1_b3_a_idx on ab_a1_b3 ab_3 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b1_a_idx on ab_a2_b1 ab_4 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b2_a_idx on ab_a2_b2 ab_5 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a2_b3_a_idx on ab_a2_b3 ab_6 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b1_a_idx on ab_a3_b1 ab_7 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b2_a_idx on ab_a3_b2 ab_8 (never executed) + Index Cond: (a = a.a) + -> Index Scan using ab_a3_b3_a_idx on ab_a3_b3 ab_9 (never executed) + Index Cond: (a = a.a) +(28 rows) + +reset enable_hashjoin; +reset enable_mergejoin; +reset enable_memoize; +reset parallel_setup_cost; +reset parallel_tuple_cost; +reset min_parallel_table_scan_size; +reset max_parallel_workers_per_gather; +-- Test run-time partition pruning with an initplan +explain (analyze, costs off, summary off, timing off) +select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a); + QUERY PLAN +------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on lprt_a (actual rows=102 loops=1) + InitPlan 2 
(returns $1) + -> Aggregate (actual rows=1 loops=1) + -> Seq Scan on lprt_a lprt_a_1 (actual rows=102 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_1 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a1_b2 ab_2 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a1_b3 ab_3 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a2_b1 ab_4 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a2_b1_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a2_b2 ab_5 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a2_b2_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a2_b3 ab_6 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a2_b3_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a3_b1 ab_7 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a3_b1_a_idx (never executed) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a3_b2 ab_8 (actual rows=0 loops=1) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a3_b2_a_idx (actual rows=0 loops=1) + Index Cond: (a = $0) + -> Bitmap Heap Scan on ab_a3_b3 ab_9 (never executed) + Recheck Cond: (a = $0) + Filter: (b = $1) + -> Bitmap Index Scan on ab_a3_b3_a_idx (never executed) + Index Cond: (a = $0) +(52 rows) + +-- Test run-time partition pruning with UNION ALL parents +explain (analyze, costs off, summary off, timing off) +select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1); + QUERY PLAN +------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=0 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) + Index Cond: (a = 1) + -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a1_b2 ab_2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a1_b3 ab_3 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a2_b2 ab_5 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b3 ab_6 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a3_b2 ab_8 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b3 ab_9 (never executed) + Filter: (b = $0) +(37 rows) + +-- A case containing a UNION ALL with a non-partitioned child. 
+explain (analyze, costs off, summary off, timing off) +select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1); + QUERY PLAN +------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=0 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_11 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_12 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b2_a_idx (never executed) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_13 (never executed) + Recheck Cond: (a = 1) + Filter: (b = $0) + -> Bitmap Index Scan on ab_a1_b3_a_idx (never executed) + Index Cond: (a = 1) + -> Result (actual rows=0 loops=1) + One-Time Filter: (5 = $0) + -> Seq Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a1_b2 ab_2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a1_b3 ab_3 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b1 ab_4 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a2_b2 ab_5 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b3 ab_6 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b1 ab_7 (actual rows=0 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a3_b2 ab_8 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a3_b3 ab_9 (never executed) + Filter: (b = $0) +(39 rows) + +-- Another UNION ALL test, but containing a mix of exec init and exec run-time pruning. +create table xy_1 (x int, y int); +insert into xy_1 values(100,-10); +set enable_bitmapscan = 0; +set enable_indexscan = 0; +prepare ab_q6 as +select * from ( + select tableoid::regclass,a,b from ab +union all + select tableoid::regclass,x,y from xy_1 +union all + select tableoid::regclass,a,b from ab +) ab where a = $1 and b = (select -10); +-- Ensure the xy_1 subplan is not pruned. +explain (analyze, costs off, summary off, timing off) execute ab_q6(1); + QUERY PLAN +-------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 12 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on ab_a1_b1 ab_1 (never executed) + Filter: ((a = $1) AND (b = $0)) + -> Seq Scan on ab_a1_b2 ab_2 (never executed) + Filter: ((a = $1) AND (b = $0)) + -> Seq Scan on ab_a1_b3 ab_3 (never executed) + Filter: ((a = $1) AND (b = $0)) + -> Seq Scan on xy_1 (actual rows=0 loops=1) + Filter: ((x = $1) AND (y = $0)) + Rows Removed by Filter: 1 + -> Seq Scan on ab_a1_b1 ab_4 (never executed) + Filter: ((a = $1) AND (b = $0)) + -> Seq Scan on ab_a1_b2 ab_5 (never executed) + Filter: ((a = $1) AND (b = $0)) + -> Seq Scan on ab_a1_b3 ab_6 (never executed) + Filter: ((a = $1) AND (b = $0)) +(19 rows) + +-- Ensure we see just the xy_1 row. +execute ab_q6(100); + tableoid | a | b +----------+-----+----- + xy_1 | 100 | -10 +(1 row) + +reset enable_bitmapscan; +reset enable_indexscan; +deallocate ab_q1; +deallocate ab_q2; +deallocate ab_q3; +deallocate ab_q4; +deallocate ab_q5; +deallocate ab_q6; +-- UPDATE on a partition subtree has been seen to have problems. 
+insert into ab values (1,2); +explain (analyze, costs off, summary off, timing off) +update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a; + QUERY PLAN +------------------------------------------------------------------------------------------- + Update on ab_a1 (actual rows=0 loops=1) + Update on ab_a1_b1 ab_a1_1 + Update on ab_a1_b2 ab_a1_2 + Update on ab_a1_b3 ab_a1_3 + -> Nested Loop (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_a1_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) + Recheck Cond: (a = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_a1_3 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Materialize (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Bitmap Heap Scan on ab_a1_b1 ab_1 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b1_a_idx (actual rows=0 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b2 ab_2 (actual rows=1 loops=1) + Recheck Cond: (a = 1) + Heap Blocks: exact=1 + -> Bitmap Index Scan on ab_a1_b2_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) + -> Bitmap Heap Scan on ab_a1_b3 ab_3 (actual rows=0 loops=1) + Recheck Cond: (a = 1) + -> Bitmap Index Scan on ab_a1_b3_a_idx (actual rows=1 loops=1) + Index Cond: (a = 1) +(34 rows) + +table ab; + a | b +---+--- + 1 | 3 +(1 row) + +-- Test UPDATE where source relation has run-time pruning enabled +truncate ab; +insert into ab values (1, 1), (1, 2), (1, 3), (2, 1); +explain (analyze, costs off, summary off, timing off) +update ab_a1 set b = 3 from ab_a2 where ab_a2.b = (select 1); + QUERY PLAN +------------------------------------------------------------------------------ + Update on ab_a1 (actual rows=0 loops=1) + Update on ab_a1_b1 ab_a1_1 + Update on ab_a1_b2 ab_a1_2 + Update on ab_a1_b3 ab_a1_3 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Nested Loop (actual rows=3 loops=1) + -> Append (actual rows=3 loops=1) + -> Seq Scan on ab_a1_b1 ab_a1_1 (actual rows=1 loops=1) + -> Seq Scan on ab_a1_b2 ab_a1_2 (actual rows=1 loops=1) + -> Seq Scan on ab_a1_b3 ab_a1_3 (actual rows=1 loops=1) + -> Materialize (actual rows=1 loops=3) + -> Append (actual rows=1 loops=1) + -> Seq Scan on ab_a2_b1 ab_a2_1 (actual rows=1 loops=1) + Filter: (b = $0) + -> Seq Scan on ab_a2_b2 ab_a2_2 (never executed) + Filter: (b = $0) + -> Seq Scan on ab_a2_b3 ab_a2_3 (never executed) + Filter: (b = $0) +(19 rows) + +select tableoid::regclass, * from ab; + tableoid | a | b +----------+---+--- + ab_a1_b3 | 1 | 3 + ab_a1_b3 | 1 | 3 + ab_a1_b3 | 1 | 3 + ab_a2_b1 | 2 | 1 +(4 rows) + +drop table ab, lprt_a; +-- Join +create table tbl1(col1 int); +insert into tbl1 values (501), (505); +-- Basic table +create table tprt (col1 int) partition by range (col1); +create table tprt_1 partition of tprt for values from (1) to (501); +create table tprt_2 partition of tprt for values from (501) to (1001); +create table tprt_3 partition of tprt for values from (1001) to (2001); +create table tprt_4 partition of tprt for values from (2001) to (3001); +create table tprt_5 partition of tprt for values from (3001) to (4001); +create table tprt_6 partition of tprt for values from 
(4001) to (5001); +create index tprt1_idx on tprt_1 (col1); +create index tprt2_idx on tprt_2 (col1); +create index tprt3_idx on tprt_3 (col1); +create index tprt4_idx on tprt_4 (col1); +create index tprt5_idx on tprt_5 (col1); +create index tprt6_idx on tprt_6 (col1); +insert into tprt values (10), (20), (501), (502), (505), (1001), (4500); +set enable_hashjoin = off; +set enable_mergejoin = off; +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 > tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=6 loops=1) + -> Seq Scan on tbl1 (actual rows=2 loops=1) + -> Append (actual rows=3 loops=2) + -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=2) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=2 loops=1) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 < tbl1.col1) +(15 rows) + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 = tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=2 loops=1) + -> Seq Scan on tbl1 (actual rows=2 loops=1) + -> Append (actual rows=1 loops=2) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 = tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 > tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 10 + 501 | 20 + 505 | 10 + 505 | 20 + 505 | 501 + 505 | 502 +(6 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 501 + 505 | 505 +(2 rows) + +-- Multiple partitions +insert into tbl1 values (1001), (1010), (1011); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=23 loops=1) + -> Seq Scan on tbl1 (actual rows=5 loops=1) + -> Append (actual rows=5 loops=5) + -> Index Scan using tprt1_idx on tprt_1 (actual rows=2 loops=5) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=3 loops=4) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (actual rows=1 loops=2) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 < tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 < 
tbl1.col1) +(15 rows) + +explain (analyze, costs off, summary off, timing off) +select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=3 loops=1) + -> Seq Scan on tbl1 (actual rows=5 loops=1) + -> Append (actual rows=1 loops=5) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (actual rows=0 loops=3) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 = tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 > tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 10 + 501 | 20 + 505 | 10 + 505 | 20 + 505 | 501 + 505 | 502 + 1001 | 10 + 1001 | 20 + 1001 | 501 + 1001 | 502 + 1001 | 505 + 1010 | 10 + 1010 | 20 + 1010 | 501 + 1010 | 502 + 1010 | 505 + 1010 | 1001 + 1011 | 10 + 1011 | 20 + 1011 | 501 + 1011 | 502 + 1011 | 505 + 1011 | 1001 +(23 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 501 | 501 + 505 | 505 + 1001 | 1001 +(3 rows) + +-- Last partition +delete from tbl1; +insert into tbl1 values (4400); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 < tprt.col1; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop (actual rows=1 loops=1) + -> Seq Scan on tbl1 (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 > tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (never executed) + Index Cond: (col1 > tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (col1 > tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 > tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 > tbl1.col1) + -> Index Scan using tprt6_idx on tprt_6 (actual rows=1 loops=1) + Index Cond: (col1 > tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 < tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ + 4400 | 4500 +(1 row) + +-- No matching partition +delete from tbl1; +insert into tbl1 values (10000); +explain (analyze, costs off, summary off, timing off) +select * from tbl1 join tprt on tbl1.col1 = tprt.col1; + QUERY PLAN +------------------------------------------------------------------- + Nested Loop (actual rows=0 loops=1) + -> Seq Scan on tbl1 (actual rows=1 loops=1) + -> Append (actual rows=0 loops=1) + -> Index Scan using tprt1_idx on tprt_1 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt2_idx on tprt_2 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt3_idx on tprt_3 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt4_idx on tprt_4 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index Scan using tprt5_idx on tprt_5 (never executed) + Index Cond: (col1 = tbl1.col1) + -> Index 
Scan using tprt6_idx on tprt_6 (never executed) + Index Cond: (col1 = tbl1.col1) +(15 rows) + +select tbl1.col1, tprt.col1 from tbl1 +inner join tprt on tbl1.col1 = tprt.col1 +order by tbl1.col1, tprt.col1; + col1 | col1 +------+------ +(0 rows) + +drop table tbl1, tprt; +-- Test with columns defined in varying orders between each level +create table part_abc (a int not null, b int not null, c int not null) partition by list (a); +create table part_bac (b int not null, a int not null, c int not null) partition by list (b); +create table part_cab (c int not null, a int not null, b int not null) partition by list (c); +create table part_abc_p1 (a int not null, b int not null, c int not null); +alter table part_abc attach partition part_bac for values in(1); +alter table part_bac attach partition part_cab for values in(2); +alter table part_cab attach partition part_abc_p1 for values in(3); +prepare part_abc_q1 (int, int, int) as +select * from part_abc where a = $1 and b = $2 and c = $3; +-- Single partition should be scanned. +explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3); + QUERY PLAN +---------------------------------------------------------- + Seq Scan on part_abc_p1 part_abc (actual rows=0 loops=1) + Filter: ((a = $1) AND (b = $2) AND (c = $3)) +(2 rows) + +deallocate part_abc_q1; +drop table part_abc; +-- Ensure that an Append node properly handles a sub-partitioned table +-- matching without any of its leaf partitions matching the clause. +create table listp (a int, b int) partition by list (a); +create table listp_1 partition of listp for values in(1) partition by list (b); +create table listp_1_1 partition of listp_1 for values in(1); +create table listp_2 partition of listp for values in(2) partition by list (b); +create table listp_2_1 partition of listp_2 for values in(2); +select * from listp where b = 1; + a | b +---+--- +(0 rows) + +-- Ensure that an Append node properly can handle selection of all first level +-- partitions before finally detecting the correct set of 2nd level partitions +-- which match the given parameter. +prepare q1 (int,int) as select * from listp where b in ($1,$2); +explain (analyze, costs off, summary off, timing off) execute q1 (1,1); + QUERY PLAN +------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$1, $2])) +(4 rows) + +explain (analyze, costs off, summary off, timing off) execute q1 (2,2); + QUERY PLAN +------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_2_1 listp_1 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$1, $2])) +(4 rows) + +-- Try with no matching partitions. +explain (analyze, costs off, summary off, timing off) execute q1 (0,0); + QUERY PLAN +-------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 2 +(2 rows) + +deallocate q1; +-- Test more complex cases where a not-equal condition further eliminates partitions. 
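(Editor's aside, not part of the regression output above or below: the not-equal pruning exercised by the next prepared statement can be reproduced in isolation. Every name in this sketch is invented for illustration; it assumes plan_cache_mode = force_generic_plan so the $n parameters stay unresolved until executor startup, where pruning is expected to surface as "Subplans Removed" in EXPLAIN ANALYZE.)

set plan_cache_mode = force_generic_plan;
create table listp_demo (b int) partition by list (b);
create table listp_demo_1 partition of listp_demo for values in (1);
create table listp_demo_2 partition of listp_demo for values in (2);
prepare listp_demo_q (int, int, int) as
  select * from listp_demo where b in ($1, $2) and b <> $3;
-- Both partitions satisfy the IN list, but $3 = 2 rules out listp_demo_2, so
-- one subplan is expected to be removed at executor startup.
explain (analyze, costs off, summary off, timing off) execute listp_demo_q (1, 2, 2);
deallocate listp_demo_q;
drop table listp_demo;
reset plan_cache_mode;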
+prepare q1 (int,int,int,int) as select * from listp where b in($1,$2) and $3 <> b and $4 <> b; +-- Both partitions allowed by IN clause, but one disallowed by <> clause +explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,0); + QUERY PLAN +------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on listp_1_1 listp_1 (actual rows=0 loops=1) + Filter: ((b = ANY (ARRAY[$1, $2])) AND ($3 <> b) AND ($4 <> b)) +(4 rows) + +-- Both partitions allowed by IN clause, then both excluded again by <> clauses. +explain (analyze, costs off, summary off, timing off) execute q1 (1,2,2,1); + QUERY PLAN +-------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 2 +(2 rows) + +-- Ensure Params that evaluate to NULL properly prune away all partitions +explain (analyze, costs off, summary off, timing off) +select * from listp where a = (select null::int); + QUERY PLAN +------------------------------------------------------ + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on listp_1_1 listp_1 (never executed) + Filter: (a = $0) + -> Seq Scan on listp_2_1 listp_2 (never executed) + Filter: (a = $0) +(7 rows) + +drop table listp; +-- +-- check that stable query clauses are only used in run-time pruning +-- +create table stable_qual_pruning (a timestamp) partition by range (a); +create table stable_qual_pruning1 partition of stable_qual_pruning + for values from ('2000-01-01') to ('2000-02-01'); +create table stable_qual_pruning2 partition of stable_qual_pruning + for values from ('2000-02-01') to ('2000-03-01'); +create table stable_qual_pruning3 partition of stable_qual_pruning + for values from ('3000-02-01') to ('3000-03-01'); +-- comparison against a stable value requires run-time pruning +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning where a < localtimestamp; + QUERY PLAN +-------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 1 + -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) + Filter: (a < LOCALTIMESTAMP) + -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) + Filter: (a < LOCALTIMESTAMP) +(6 rows) + +-- timestamp < timestamptz comparison is only stable, not immutable +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning where a < '2000-02-01'::timestamptz; + QUERY PLAN +-------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 2 + -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) + Filter: (a < 'Tue Feb 01 00:00:00 2000 PST'::timestamp with time zone) +(4 rows) + +-- check ScalarArrayOp cases +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning + where a = any(array['2010-02-01', '2020-01-01']::timestamp[]); + QUERY PLAN +-------------------------------- + Result (actual rows=0 loops=1) + One-Time Filter: false +(2 rows) + +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning + where a = any(array['2000-02-01', '2010-01-01']::timestamp[]); + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + Seq Scan on stable_qual_pruning2 
stable_qual_pruning (actual rows=0 loops=1) + Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000","Fri Jan 01 00:00:00 2010"}'::timestamp without time zone[])) +(2 rows) + +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning + where a = any(array['2000-02-01', localtimestamp]::timestamp[]); + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Append (actual rows=0 loops=1) + Subplans Removed: 2 + -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) + Filter: (a = ANY (ARRAY['Tue Feb 01 00:00:00 2000'::timestamp without time zone, LOCALTIMESTAMP])) +(4 rows) + +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning + where a = any(array['2010-02-01', '2020-01-01']::timestamptz[]); + QUERY PLAN +-------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 3 +(2 rows) + +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning + where a = any(array['2000-02-01', '2010-01-01']::timestamptz[]); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + Subplans Removed: 2 + -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_1 (actual rows=0 loops=1) + Filter: (a = ANY ('{"Tue Feb 01 00:00:00 2000 PST","Fri Jan 01 00:00:00 2010 PST"}'::timestamp with time zone[])) +(4 rows) + +explain (analyze, costs off, summary off, timing off) +select * from stable_qual_pruning + where a = any(null::timestamptz[]); + QUERY PLAN +-------------------------------------------------------------------------------------- + Append (actual rows=0 loops=1) + -> Seq Scan on stable_qual_pruning1 stable_qual_pruning_1 (actual rows=0 loops=1) + Filter: (a = ANY (NULL::timestamp with time zone[])) + -> Seq Scan on stable_qual_pruning2 stable_qual_pruning_2 (actual rows=0 loops=1) + Filter: (a = ANY (NULL::timestamp with time zone[])) + -> Seq Scan on stable_qual_pruning3 stable_qual_pruning_3 (actual rows=0 loops=1) + Filter: (a = ANY (NULL::timestamp with time zone[])) +(7 rows) + +drop table stable_qual_pruning; +-- +-- Check that pruning with composite range partitioning works correctly when +-- it must ignore clauses for trailing keys once it has seen a clause with +-- non-inclusive operator for an earlier key +-- +create table mc3p (a int, b int, c int) partition by range (a, abs(b), c); +create table mc3p0 partition of mc3p + for values from (0, 0, 0) to (0, maxvalue, maxvalue); +create table mc3p1 partition of mc3p + for values from (1, 1, 1) to (2, minvalue, minvalue); +create table mc3p2 partition of mc3p + for values from (2, minvalue, minvalue) to (3, maxvalue, maxvalue); +insert into mc3p values (0, 1, 1), (1, 1, 1), (2, 1, 1); +explain (analyze, costs off, summary off, timing off) +select * from mc3p where a < 3 and abs(b) = 1; + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=3 loops=1) + -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) + Filter: ((a < 3) AND (abs(b) = 1)) + -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) + Filter: ((a < 3) AND (abs(b) = 1)) + -> Seq Scan on mc3p2 mc3p_3 (actual rows=1 loops=1) + Filter: ((a < 3) AND (abs(b) = 1)) +(7 rows) + +-- +-- Check that pruning with composite range partitioning works correctly when +-- a combination of runtime parameters is specified, not all of whose values +-- are 
available at the same time +-- +prepare ps1 as + select * from mc3p where a = $1 and abs(b) < (select 3); +explain (analyze, costs off, summary off, timing off) +execute ps1(1); + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=1 loops=1) + Subplans Removed: 2 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on mc3p1 mc3p_1 (actual rows=1 loops=1) + Filter: ((a = $1) AND (abs(b) < $0)) +(6 rows) + +deallocate ps1; +prepare ps2 as + select * from mc3p where a <= $1 and abs(b) < (select 3); +explain (analyze, costs off, summary off, timing off) +execute ps2(1); + QUERY PLAN +-------------------------------------------------------- + Append (actual rows=2 loops=1) + Subplans Removed: 1 + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + -> Seq Scan on mc3p0 mc3p_1 (actual rows=1 loops=1) + Filter: ((a <= $1) AND (abs(b) < $0)) + -> Seq Scan on mc3p1 mc3p_2 (actual rows=1 loops=1) + Filter: ((a <= $1) AND (abs(b) < $0)) +(8 rows) + +deallocate ps2; +drop table mc3p; +-- Ensure runtime pruning works with initplans params with boolean types +create table boolvalues (value bool not null); +insert into boolvalues values('t'),('f'); +create table boolp (a bool) partition by list (a); +create table boolp_t partition of boolp for values in('t'); +create table boolp_f partition of boolp for values in('f'); +explain (analyze, costs off, summary off, timing off) +select * from boolp where a = (select value from boolvalues where value); + QUERY PLAN +----------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Seq Scan on boolvalues (actual rows=1 loops=1) + Filter: value + Rows Removed by Filter: 1 + -> Seq Scan on boolp_f boolp_1 (never executed) + Filter: (a = $0) + -> Seq Scan on boolp_t boolp_2 (actual rows=0 loops=1) + Filter: (a = $0) +(9 rows) + +explain (analyze, costs off, summary off, timing off) +select * from boolp where a = (select value from boolvalues where not value); + QUERY PLAN +----------------------------------------------------------- + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Seq Scan on boolvalues (actual rows=1 loops=1) + Filter: (NOT value) + Rows Removed by Filter: 1 + -> Seq Scan on boolp_f boolp_1 (actual rows=0 loops=1) + Filter: (a = $0) + -> Seq Scan on boolp_t boolp_2 (never executed) + Filter: (a = $0) +(9 rows) + +drop table boolp; +-- +-- Test run-time pruning of MergeAppend subnodes +-- +set enable_seqscan = off; +set enable_sort = off; +create table ma_test (a int, b int) partition by range (a); +create table ma_test_p1 partition of ma_test for values from (0) to (10); +create table ma_test_p2 partition of ma_test for values from (10) to (20); +create table ma_test_p3 partition of ma_test for values from (20) to (30); +insert into ma_test select x,x from generate_series(0,29) t(x); +create index on ma_test (b); +analyze ma_test; +prepare mt_q1 (int) as select a from ma_test where a >= $1 and a % 10 = 5 order by b; +explain (analyze, costs off, summary off, timing off) execute mt_q1(15); + QUERY PLAN +----------------------------------------------------------------------------------------- + Merge Append (actual rows=2 loops=1) + Sort Key: ma_test.b + Subplans Removed: 1 + -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_1 (actual rows=1 loops=1) + Filter: ((a >= $1) AND ((a % 10) = 5)) + Rows Removed by Filter: 9 + -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_2 (actual rows=1 
loops=1) + Filter: ((a >= $1) AND ((a % 10) = 5)) + Rows Removed by Filter: 9 +(9 rows) + +execute mt_q1(15); + a +---- + 15 + 25 +(2 rows) + +explain (analyze, costs off, summary off, timing off) execute mt_q1(25); + QUERY PLAN +----------------------------------------------------------------------------------------- + Merge Append (actual rows=1 loops=1) + Sort Key: ma_test.b + Subplans Removed: 2 + -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_1 (actual rows=1 loops=1) + Filter: ((a >= $1) AND ((a % 10) = 5)) + Rows Removed by Filter: 9 +(6 rows) + +execute mt_q1(25); + a +---- + 25 +(1 row) + +-- Ensure MergeAppend behaves correctly when no subplans match +explain (analyze, costs off, summary off, timing off) execute mt_q1(35); + QUERY PLAN +-------------------------------------- + Merge Append (actual rows=0 loops=1) + Sort Key: ma_test.b + Subplans Removed: 3 +(3 rows) + +execute mt_q1(35); + a +--- +(0 rows) + +deallocate mt_q1; +prepare mt_q2 (int) as select * from ma_test where a >= $1 order by b limit 1; +-- Ensure output list looks sane when the MergeAppend has no subplans. +explain (analyze, verbose, costs off, summary off, timing off) execute mt_q2 (35); + QUERY PLAN +-------------------------------------------- + Limit (actual rows=0 loops=1) + Output: ma_test.a, ma_test.b + -> Merge Append (actual rows=0 loops=1) + Sort Key: ma_test.b + Subplans Removed: 3 +(5 rows) + +deallocate mt_q2; +-- ensure initplan params properly prune partitions +explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Merge Append (actual rows=20 loops=1) + Sort Key: ma_test.b + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using ma_test_p2_b_idx on ma_test_p2 (actual rows=1 loops=1) + Index Cond: (b IS NOT NULL) + -> Index Scan using ma_test_p1_b_idx on ma_test_p1 ma_test_1 (never executed) + Filter: (a >= $1) + -> Index Scan using ma_test_p2_b_idx on ma_test_p2 ma_test_2 (actual rows=10 loops=1) + Filter: (a >= $1) + -> Index Scan using ma_test_p3_b_idx on ma_test_p3 ma_test_3 (actual rows=10 loops=1) + Filter: (a >= $1) +(14 rows) + +reset enable_seqscan; +reset enable_sort; +drop table ma_test; +reset enable_indexonlyscan; +-- +-- check that pruning works properly when the partition key is of a +-- pseudotype +-- +-- array type list partition key +create table pp_arrpart (a int[]) partition by list (a); +create table pp_arrpart1 partition of pp_arrpart for values in ('{1}'); +create table pp_arrpart2 partition of pp_arrpart for values in ('{2, 3}', '{4, 5}'); +explain (costs off) select * from pp_arrpart where a = '{1}'; + QUERY PLAN +------------------------------------ + Seq Scan on pp_arrpart1 pp_arrpart + Filter: (a = '{1}'::integer[]) +(2 rows) + +explain (costs off) select * from pp_arrpart where a = '{1, 2}'; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from pp_arrpart where a in ('{4, 5}', '{1}'); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on pp_arrpart1 pp_arrpart_1 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) + -> Seq Scan on pp_arrpart2 pp_arrpart_2 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) +(5 rows) + +explain (costs off) 
update pp_arrpart set a = a where a = '{1}'; + QUERY PLAN +-------------------------------------------- + Update on pp_arrpart + Update on pp_arrpart1 pp_arrpart_1 + -> Seq Scan on pp_arrpart1 pp_arrpart_1 + Filter: (a = '{1}'::integer[]) +(4 rows) + +explain (costs off) delete from pp_arrpart where a = '{1}'; + QUERY PLAN +-------------------------------------------- + Delete on pp_arrpart + Delete on pp_arrpart1 pp_arrpart_1 + -> Seq Scan on pp_arrpart1 pp_arrpart_1 + Filter: (a = '{1}'::integer[]) +(4 rows) + +drop table pp_arrpart; +-- array type hash partition key +create table pph_arrpart (a int[]) partition by hash (a); +create table pph_arrpart1 partition of pph_arrpart for values with (modulus 2, remainder 0); +create table pph_arrpart2 partition of pph_arrpart for values with (modulus 2, remainder 1); +insert into pph_arrpart values ('{1}'), ('{1, 2}'), ('{4, 5}'); +select tableoid::regclass, * from pph_arrpart order by 1; + tableoid | a +--------------+------- + pph_arrpart1 | {1,2} + pph_arrpart1 | {4,5} + pph_arrpart2 | {1} +(3 rows) + +explain (costs off) select * from pph_arrpart where a = '{1}'; + QUERY PLAN +-------------------------------------- + Seq Scan on pph_arrpart2 pph_arrpart + Filter: (a = '{1}'::integer[]) +(2 rows) + +explain (costs off) select * from pph_arrpart where a = '{1, 2}'; + QUERY PLAN +-------------------------------------- + Seq Scan on pph_arrpart1 pph_arrpart + Filter: (a = '{1,2}'::integer[]) +(2 rows) + +explain (costs off) select * from pph_arrpart where a in ('{4, 5}', '{1}'); + QUERY PLAN +---------------------------------------------------------------------- + Append + -> Seq Scan on pph_arrpart1 pph_arrpart_1 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) + -> Seq Scan on pph_arrpart2 pph_arrpart_2 + Filter: ((a = '{4,5}'::integer[]) OR (a = '{1}'::integer[])) +(5 rows) + +drop table pph_arrpart; +-- enum type list partition key +create type pp_colors as enum ('green', 'blue', 'black'); +create table pp_enumpart (a pp_colors) partition by list (a); +create table pp_enumpart_green partition of pp_enumpart for values in ('green'); +create table pp_enumpart_blue partition of pp_enumpart for values in ('blue'); +explain (costs off) select * from pp_enumpart where a = 'blue'; + QUERY PLAN +------------------------------------------ + Seq Scan on pp_enumpart_blue pp_enumpart + Filter: (a = 'blue'::pp_colors) +(2 rows) + +explain (costs off) select * from pp_enumpart where a = 'black'; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table pp_enumpart; +drop type pp_colors; +-- record type as partition key +create type pp_rectype as (a int, b int); +create table pp_recpart (a pp_rectype) partition by list (a); +create table pp_recpart_11 partition of pp_recpart for values in ('(1,1)'); +create table pp_recpart_23 partition of pp_recpart for values in ('(2,3)'); +explain (costs off) select * from pp_recpart where a = '(1,1)'::pp_rectype; + QUERY PLAN +-------------------------------------- + Seq Scan on pp_recpart_11 pp_recpart + Filter: (a = '(1,1)'::pp_rectype) +(2 rows) + +explain (costs off) select * from pp_recpart where a = '(1,2)'::pp_rectype; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table pp_recpart; +drop type pp_rectype; +-- range type partition key +create table pp_intrangepart (a int4range) partition by list (a); +create table pp_intrangepart12 partition of pp_intrangepart for values in ('[1,2]'); +create table 
pp_intrangepart2inf partition of pp_intrangepart for values in ('[2,)'); +explain (costs off) select * from pp_intrangepart where a = '[1,2]'::int4range; + QUERY PLAN +----------------------------------------------- + Seq Scan on pp_intrangepart12 pp_intrangepart + Filter: (a = '[1,3)'::int4range) +(2 rows) + +explain (costs off) select * from pp_intrangepart where a = '(1,2)'::int4range; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table pp_intrangepart; +-- +-- Ensure the enable_partition_prune GUC properly disables partition pruning. +-- +create table pp_lp (a int, value int) partition by list (a); +create table pp_lp1 partition of pp_lp for values in(1); +create table pp_lp2 partition of pp_lp for values in(2); +explain (costs off) select * from pp_lp where a = 1; + QUERY PLAN +-------------------------- + Seq Scan on pp_lp1 pp_lp + Filter: (a = 1) +(2 rows) + +explain (costs off) update pp_lp set value = 10 where a = 1; + QUERY PLAN +---------------------------------- + Update on pp_lp + Update on pp_lp1 pp_lp_1 + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) +(4 rows) + +explain (costs off) delete from pp_lp where a = 1; + QUERY PLAN +---------------------------------- + Delete on pp_lp + Delete on pp_lp1 pp_lp_1 + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) +(4 rows) + +set enable_partition_pruning = off; +set constraint_exclusion = 'partition'; -- this should not affect the result. +explain (costs off) select * from pp_lp where a = 1; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 pp_lp_2 + Filter: (a = 1) +(5 rows) + +explain (costs off) update pp_lp set value = 10 where a = 1; + QUERY PLAN +---------------------------------------- + Update on pp_lp + Update on pp_lp1 pp_lp_1 + Update on pp_lp2 pp_lp_2 + -> Append + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 pp_lp_2 + Filter: (a = 1) +(8 rows) + +explain (costs off) delete from pp_lp where a = 1; + QUERY PLAN +---------------------------------------- + Delete on pp_lp + Delete on pp_lp1 pp_lp_1 + Delete on pp_lp2 pp_lp_2 + -> Append + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 pp_lp_2 + Filter: (a = 1) +(8 rows) + +set constraint_exclusion = 'off'; -- this should not affect the result. +explain (costs off) select * from pp_lp where a = 1; + QUERY PLAN +---------------------------------- + Append + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 pp_lp_2 + Filter: (a = 1) +(5 rows) + +explain (costs off) update pp_lp set value = 10 where a = 1; + QUERY PLAN +---------------------------------------- + Update on pp_lp + Update on pp_lp1 pp_lp_1 + Update on pp_lp2 pp_lp_2 + -> Append + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 pp_lp_2 + Filter: (a = 1) +(8 rows) + +explain (costs off) delete from pp_lp where a = 1; + QUERY PLAN +---------------------------------------- + Delete on pp_lp + Delete on pp_lp1 pp_lp_1 + Delete on pp_lp2 pp_lp_2 + -> Append + -> Seq Scan on pp_lp1 pp_lp_1 + Filter: (a = 1) + -> Seq Scan on pp_lp2 pp_lp_2 + Filter: (a = 1) +(8 rows) + +drop table pp_lp; +-- Ensure enable_partition_prune does not affect non-partitioned tables. 
+create table inh_lp (a int, value int); +create table inh_lp1 (a int, value int, check(a = 1)) inherits (inh_lp); +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "value" with inherited definition +create table inh_lp2 (a int, value int, check(a = 2)) inherits (inh_lp); +NOTICE: merging column "a" with inherited definition +NOTICE: merging column "value" with inherited definition +set constraint_exclusion = 'partition'; +-- inh_lp2 should be removed in the following 3 cases. +explain (costs off) select * from inh_lp where a = 1; + QUERY PLAN +------------------------------------ + Append + -> Seq Scan on inh_lp inh_lp_1 + Filter: (a = 1) + -> Seq Scan on inh_lp1 inh_lp_2 + Filter: (a = 1) +(5 rows) + +explain (costs off) update inh_lp set value = 10 where a = 1; + QUERY PLAN +------------------------------------------------ + Update on inh_lp + Update on inh_lp inh_lp_1 + Update on inh_lp1 inh_lp_2 + -> Result + -> Append + -> Seq Scan on inh_lp inh_lp_1 + Filter: (a = 1) + -> Seq Scan on inh_lp1 inh_lp_2 + Filter: (a = 1) +(9 rows) + +explain (costs off) delete from inh_lp where a = 1; + QUERY PLAN +------------------------------------------ + Delete on inh_lp + Delete on inh_lp inh_lp_1 + Delete on inh_lp1 inh_lp_2 + -> Append + -> Seq Scan on inh_lp inh_lp_1 + Filter: (a = 1) + -> Seq Scan on inh_lp1 inh_lp_2 + Filter: (a = 1) +(8 rows) + +-- Ensure we don't exclude normal relations when we only expect to exclude +-- inheritance children +explain (costs off) update inh_lp1 set value = 10 where a = 2; + QUERY PLAN +--------------------------- + Update on inh_lp1 + -> Seq Scan on inh_lp1 + Filter: (a = 2) +(3 rows) + +drop table inh_lp cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table inh_lp1 +drop cascades to table inh_lp2 +reset enable_partition_pruning; +reset constraint_exclusion; +-- Check pruning for a partition tree containing only temporary relations +create temp table pp_temp_parent (a int) partition by list (a); +create temp table pp_temp_part_1 partition of pp_temp_parent for values in (1); +create temp table pp_temp_part_def partition of pp_temp_parent default; +explain (costs off) select * from pp_temp_parent where true; + QUERY PLAN +----------------------------------------------------- + Append + -> Seq Scan on pp_temp_part_1 pp_temp_parent_1 + -> Seq Scan on pp_temp_part_def pp_temp_parent_2 +(3 rows) + +explain (costs off) select * from pp_temp_parent where a = 2; + QUERY PLAN +--------------------------------------------- + Seq Scan on pp_temp_part_def pp_temp_parent + Filter: (a = 2) +(2 rows) + +drop table pp_temp_parent; +-- Stress run-time partition pruning a bit more, per bug reports +create temp table p (a int, b int, c int) partition by list (a); +create temp table p1 partition of p for values in (1); +create temp table p2 partition of p for values in (2); +create temp table q (a int, b int, c int) partition by list (a); +create temp table q1 partition of q for values in (1) partition by list (b); +create temp table q11 partition of q1 for values in (1) partition by list (c); +create temp table q111 partition of q11 for values in (1); +create temp table q2 partition of q for values in (2) partition by list (b); +create temp table q21 partition of q2 for values in (1); +create temp table q22 partition of q2 for values in (2); +insert into q22 values (2, 2, 3); +explain (costs off) +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = 
1 and s.b = 1 and s.c = (select 1); + QUERY PLAN +---------------------------------------------------- + Append + InitPlan 1 (returns $0) + -> Result + -> Seq Scan on p1 p + Filter: ((a = 1) AND (b = 1) AND (c = $0)) + -> Seq Scan on q111 q1 + Filter: ((a = 1) AND (b = 1) AND (c = $0)) + -> Result + One-Time Filter: (1 = $0) +(9 rows) + +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = 1 and s.b = 1 and s.c = (select 1); + a | b | c +---+---+--- + 1 | 1 | 1 +(1 row) + +prepare q (int, int) as +select * +from ( + select * from p + union all + select * from q1 + union all + select 1, 1, 1 + ) s(a, b, c) +where s.a = $1 and s.b = $2 and s.c = (select 1); +explain (costs off) execute q (1, 1); + QUERY PLAN +--------------------------------------------------------------- + Append + Subplans Removed: 1 + InitPlan 1 (returns $0) + -> Result + -> Seq Scan on p1 p + Filter: ((a = $1) AND (b = $2) AND (c = $0)) + -> Seq Scan on q111 q1 + Filter: ((a = $1) AND (b = $2) AND (c = $0)) + -> Result + One-Time Filter: ((1 = $1) AND (1 = $2) AND (1 = $0)) +(10 rows) + +execute q (1, 1); + a | b | c +---+---+--- + 1 | 1 | 1 +(1 row) + +drop table p, q; +-- Ensure run-time pruning works correctly when we match a partitioned table +-- on the first level but find no matching partitions on the second level. +create table listp (a int, b int) partition by list (a); +create table listp1 partition of listp for values in(1); +create table listp2 partition of listp for values in(2) partition by list(b); +create table listp2_10 partition of listp2 for values in (10); +explain (analyze, costs off, summary off, timing off) +select * from listp where a = (select 2) and b <> 10; + QUERY PLAN +-------------------------------------------------- + Seq Scan on listp1 listp (actual rows=0 loops=1) + Filter: ((b <> 10) AND (a = $0)) + InitPlan 1 (returns $0) + -> Result (never executed) +(4 rows) + +-- +-- check that a partition directly accessed in a query is excluded with +-- constraint_exclusion = on +-- +-- turn off partition pruning, so that it doesn't interfere +set enable_partition_pruning to off; +-- setting constraint_exclusion to 'partition' disables exclusion +set constraint_exclusion to 'partition'; +explain (costs off) select * from listp1 where a = 2; + QUERY PLAN +-------------------- + Seq Scan on listp1 + Filter: (a = 2) +(2 rows) + +explain (costs off) update listp1 set a = 1 where a = 2; + QUERY PLAN +-------------------------- + Update on listp1 + -> Seq Scan on listp1 + Filter: (a = 2) +(3 rows) + +-- constraint exclusion enabled +set constraint_exclusion to 'on'; +explain (costs off) select * from listp1 where a = 2; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) update listp1 set a = 1 where a = 2; + QUERY PLAN +-------------------------------- + Update on listp1 + -> Result + One-Time Filter: false +(3 rows) + +reset constraint_exclusion; +reset enable_partition_pruning; +drop table listp; +-- Ensure run-time pruning works correctly for nested Append nodes +set parallel_setup_cost to 0; +set parallel_tuple_cost to 0; +create table listp (a int) partition by list(a); +create table listp_12 partition of listp for values in(1,2) partition by list(a); +create table listp_12_1 partition of listp_12 for values in(1); +create table listp_12_2 partition of listp_12 for values in(2); +-- Force the 2nd subnode of the Append to be non-parallel. 
This results in +-- a nested Append node because the mixed parallel / non-parallel paths cannot +-- be pulled into the top-level Append. +alter table listp_12_1 set (parallel_workers = 0); +-- Ensure that listp_12_2 is not scanned. (The nested Append is not seen in +-- the plan as it's pulled in setref.c due to having just a single subnode). +select explain_parallel_append('select * from listp where a = (select 1);'); + explain_parallel_append +---------------------------------------------------------------------- + Gather (actual rows=N loops=N) + Workers Planned: 2 + Params Evaluated: $0 + Workers Launched: N + InitPlan 1 (returns $0) + -> Result (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + -> Seq Scan on listp_12_1 listp_1 (actual rows=N loops=N) + Filter: (a = $0) + -> Parallel Seq Scan on listp_12_2 listp_2 (never executed) + Filter: (a = $0) +(11 rows) + +-- Like the above but throw some more complexity at the planner by adding +-- a UNION ALL. We expect both sides of the union not to scan the +-- non-required partitions. +select explain_parallel_append( +'select * from listp where a = (select 1) + union all +select * from listp where a = (select 2);'); + explain_parallel_append +----------------------------------------------------------------------------------- + Append (actual rows=N loops=N) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Params Evaluated: $0 + Workers Launched: N + InitPlan 1 (returns $0) + -> Result (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + -> Seq Scan on listp_12_1 listp_1 (actual rows=N loops=N) + Filter: (a = $0) + -> Parallel Seq Scan on listp_12_2 listp_2 (never executed) + Filter: (a = $0) + -> Gather (actual rows=N loops=N) + Workers Planned: 2 + Params Evaluated: $1 + Workers Launched: N + InitPlan 2 (returns $1) + -> Result (actual rows=N loops=N) + -> Parallel Append (actual rows=N loops=N) + -> Seq Scan on listp_12_1 listp_4 (never executed) + Filter: (a = $1) + -> Parallel Seq Scan on listp_12_2 listp_5 (actual rows=N loops=N) + Filter: (a = $1) +(23 rows) + +drop table listp; +reset parallel_tuple_cost; +reset parallel_setup_cost; +-- Test case for run-time pruning with a nested Merge Append +set enable_sort to 0; +create table rangep (a int, b int) partition by range (a); +create table rangep_0_to_100 partition of rangep for values from (0) to (100) partition by list (b); +-- We need 3 sub-partitions. 1 to validate pruning worked and another two +-- because a single remaining partition would be pulled up to the main Append. 
+create table rangep_0_to_100_1 partition of rangep_0_to_100 for values in(1); +create table rangep_0_to_100_2 partition of rangep_0_to_100 for values in(2); +create table rangep_0_to_100_3 partition of rangep_0_to_100 for values in(3); +create table rangep_100_to_200 partition of rangep for values from (100) to (200); +create index on rangep (a); +-- Ensure run-time pruning works on the nested Merge Append +explain (analyze on, costs off, timing off, summary off) +select * from rangep where b IN((select 1),(select 2)) order by a; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Append (actual rows=0 loops=1) + InitPlan 1 (returns $0) + -> Result (actual rows=1 loops=1) + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + -> Merge Append (actual rows=0 loops=1) + Sort Key: rangep_2.a + -> Index Scan using rangep_0_to_100_1_a_idx on rangep_0_to_100_1 rangep_2 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$0, $1])) + -> Index Scan using rangep_0_to_100_2_a_idx on rangep_0_to_100_2 rangep_3 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$0, $1])) + -> Index Scan using rangep_0_to_100_3_a_idx on rangep_0_to_100_3 rangep_4 (never executed) + Filter: (b = ANY (ARRAY[$0, $1])) + -> Index Scan using rangep_100_to_200_a_idx on rangep_100_to_200 rangep_5 (actual rows=0 loops=1) + Filter: (b = ANY (ARRAY[$0, $1])) +(15 rows) + +reset enable_sort; +drop table rangep; +-- +-- Check that gen_prune_steps_from_opexps() works well for various cases of +-- clauses for different partition keys +-- +create table rp_prefix_test1 (a int, b varchar) partition by range(a, b); +create table rp_prefix_test1_p1 partition of rp_prefix_test1 for values from (1, 'a') to (1, 'b'); +create table rp_prefix_test1_p2 partition of rp_prefix_test1 for values from (2, 'a') to (2, 'b'); +-- Don't call get_steps_using_prefix() with the last partition key b plus +-- an empty prefix +explain (costs off) select * from rp_prefix_test1 where a <= 1 and b = 'a'; + QUERY PLAN +-------------------------------------------------- + Seq Scan on rp_prefix_test1_p1 rp_prefix_test1 + Filter: ((a <= 1) AND ((b)::text = 'a'::text)) +(2 rows) + +create table rp_prefix_test2 (a int, b int, c int) partition by range(a, b, c); +create table rp_prefix_test2_p1 partition of rp_prefix_test2 for values from (1, 1, 0) to (1, 1, 10); +create table rp_prefix_test2_p2 partition of rp_prefix_test2 for values from (2, 2, 0) to (2, 2, 10); +-- Don't call get_steps_using_prefix() with the last partition key c plus +-- an invalid prefix (ie, b = 1) +explain (costs off) select * from rp_prefix_test2 where a <= 1 and b = 1 and c >= 0; + QUERY PLAN +------------------------------------------------ + Seq Scan on rp_prefix_test2_p1 rp_prefix_test2 + Filter: ((a <= 1) AND (c >= 0) AND (b = 1)) +(2 rows) + +create table rp_prefix_test3 (a int, b int, c int, d int) partition by range(a, b, c, d); +create table rp_prefix_test3_p1 partition of rp_prefix_test3 for values from (1, 1, 1, 0) to (1, 1, 1, 10); +create table rp_prefix_test3_p2 partition of rp_prefix_test3 for values from (2, 2, 2, 0) to (2, 2, 2, 10); +-- Test that get_steps_using_prefix() handles a prefix that contains multiple +-- clauses for the partition key b (ie, b >= 1 and b >= 2) +explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b >= 2 and c >= 2 and d >= 0; + QUERY PLAN +-------------------------------------------------------------------------- + Seq Scan on rp_prefix_test3_p2 
rp_prefix_test3 + Filter: ((a >= 1) AND (b >= 1) AND (b >= 2) AND (c >= 2) AND (d >= 0)) +(2 rows) + +-- Test that get_steps_using_prefix() handles a prefix that contains multiple +-- clauses for the partition key b (ie, b >= 1 and b = 2) (This also tests +-- that the caller arranges clauses in that prefix in the required order) +explain (costs off) select * from rp_prefix_test3 where a >= 1 and b >= 1 and b = 2 and c = 2 and d >= 0; + QUERY PLAN +------------------------------------------------------------------------ + Seq Scan on rp_prefix_test3_p2 rp_prefix_test3 + Filter: ((a >= 1) AND (b >= 1) AND (d >= 0) AND (b = 2) AND (c = 2)) +(2 rows) + +drop table rp_prefix_test1; +drop table rp_prefix_test2; +drop table rp_prefix_test3; +-- +-- Test that get_steps_using_prefix() handles IS NULL clauses correctly +-- +create table hp_prefix_test (a int, b int, c int, d int) + partition by hash (a part_test_int4_ops, b part_test_int4_ops, c part_test_int4_ops, d part_test_int4_ops); +-- create 8 partitions +select 'create table hp_prefix_test_p' || x::text || ' partition of hp_prefix_test for values with (modulus 8, remainder ' || x::text || ');' +from generate_Series(0,7) x; + ?column? +------------------------------------------------------------------------------------------------------ + create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); + create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); + create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); + create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); + create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); + create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); + create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); + create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); +(8 rows) + +\gexec +create table hp_prefix_test_p0 partition of hp_prefix_test for values with (modulus 8, remainder 0); +create table hp_prefix_test_p1 partition of hp_prefix_test for values with (modulus 8, remainder 1); +create table hp_prefix_test_p2 partition of hp_prefix_test for values with (modulus 8, remainder 2); +create table hp_prefix_test_p3 partition of hp_prefix_test for values with (modulus 8, remainder 3); +create table hp_prefix_test_p4 partition of hp_prefix_test for values with (modulus 8, remainder 4); +create table hp_prefix_test_p5 partition of hp_prefix_test for values with (modulus 8, remainder 5); +create table hp_prefix_test_p6 partition of hp_prefix_test for values with (modulus 8, remainder 6); +create table hp_prefix_test_p7 partition of hp_prefix_test for values with (modulus 8, remainder 7); +-- insert 16 rows, one row for each test to perform. +insert into hp_prefix_test +select + case a when 0 then null else 1 end, + case b when 0 then null else 2 end, + case c when 0 then null else 3 end, + case d when 0 then null else 4 end +from + generate_series(0,1) a, + generate_series(0,1) b, + generate_Series(0,1) c, + generate_Series(0,1) d; +-- Ensure partition pruning works correctly for each combination of IS NULL +-- and equality quals. This may seem a little excessive, but there have been +-- a number of bugs in this area over the years. 
We make use of row only +-- output to reduce the size of the expected results. +\t on +select + 'explain (costs off) select tableoid::regclass,* from hp_prefix_test where ' || + string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) +from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) +group by g.s +order by g.s; + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 + explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 + +\gexec +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null + Seq Scan on hp_prefix_test_p0 hp_prefix_test + Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d IS NULL)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null + Seq Scan on hp_prefix_test_p1 hp_prefix_test + Filter: ((b IS NULL) AND (c IS NULL) AND (d IS NULL) AND (a = 1)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null + Seq Scan on hp_prefix_test_p2 hp_prefix_test + Filter: ((a IS NULL) AND (c IS NULL) AND (d IS NULL) AND (b = 2)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null + Seq Scan on hp_prefix_test_p4 hp_prefix_test + Filter: ((c IS NULL) AND (d IS NULL) AND (a = 1) AND (b = 2)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null + Seq Scan on hp_prefix_test_p3 hp_prefix_test + Filter: ((a IS NULL) AND (b IS NULL) AND (d IS NULL) 
AND (c = 3)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null + Seq Scan on hp_prefix_test_p7 hp_prefix_test + Filter: ((b IS NULL) AND (d IS NULL) AND (a = 1) AND (c = 3)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null + Seq Scan on hp_prefix_test_p4 hp_prefix_test + Filter: ((a IS NULL) AND (d IS NULL) AND (b = 2) AND (c = 3)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null + Seq Scan on hp_prefix_test_p5 hp_prefix_test + Filter: ((d IS NULL) AND (a = 1) AND (b = 2) AND (c = 3)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 + Seq Scan on hp_prefix_test_p4 hp_prefix_test + Filter: ((a IS NULL) AND (b IS NULL) AND (c IS NULL) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 + Seq Scan on hp_prefix_test_p6 hp_prefix_test + Filter: ((b IS NULL) AND (c IS NULL) AND (a = 1) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 + Seq Scan on hp_prefix_test_p5 hp_prefix_test + Filter: ((a IS NULL) AND (c IS NULL) AND (b = 2) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 + Seq Scan on hp_prefix_test_p6 hp_prefix_test + Filter: ((c IS NULL) AND (a = 1) AND (b = 2) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 + Seq Scan on hp_prefix_test_p4 hp_prefix_test + Filter: ((a IS NULL) AND (b IS NULL) AND (c = 3) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 + Seq Scan on hp_prefix_test_p5 hp_prefix_test + Filter: ((b IS NULL) AND (a = 1) AND (c = 3) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 + Seq Scan on hp_prefix_test_p6 hp_prefix_test + Filter: ((a IS NULL) AND (b = 2) AND (c = 3) AND (d = 4)) + +explain (costs off) select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 + Seq Scan on hp_prefix_test_p4 hp_prefix_test + Filter: ((a = 1) AND (b = 2) AND (c = 3) AND (d = 4)) + +-- And ensure we get exactly 1 row from each. Again, all 16 possible combinations. 
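The sixteen EXPLAIN plans above each prune to a single partition. Which partition a fully specified key lands in can also be probed directly with satisfies_hash_partition(), the function hash partition constraints are built on; a minimal sketch, assuming the eight-way layout implied by hp_prefix_test_p0 .. p7 in the output above:

-- Illustrative probe, not part of the regression output: find the remainder
-- that the key (1, 2, 3, 4) hashes to for hp_prefix_test's partition key.
-- The plans above show that same row being routed to hp_prefix_test_p4.
select r as remainder
  from generate_series(0, 7) r
 where satisfies_hash_partition('hp_prefix_test'::regclass, 8, r, 1, 2, 3, 4);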
+select + 'select tableoid::regclass,* from hp_prefix_test where ' || + string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) +from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) +group by g.s +order by g.s; + select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null + select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null + select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null + select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null + select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null + select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null + select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null + select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null + select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 + select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 + select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 + select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 + select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 + select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 + select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 + select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 + +\gexec +select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d is null + hp_prefix_test_p0 | | | | + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d is null + hp_prefix_test_p1 | 1 | | | + +select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d is null + hp_prefix_test_p2 | | 2 | | + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d is null + hp_prefix_test_p4 | 1 | 2 | | + +select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d is null + hp_prefix_test_p3 | | | 3 | + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d is null + hp_prefix_test_p7 | 1 | | 3 | + +select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d is null + hp_prefix_test_p4 | | 2 | 3 | + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d is null + hp_prefix_test_p5 | 1 | 2 | 3 | + +select tableoid::regclass,* from hp_prefix_test where a is null and b is null and c is null and d = 4 + hp_prefix_test_p4 | | | | 4 + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c is null and d = 4 + hp_prefix_test_p6 | 1 | | | 4 + +select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c is null and d = 4 + hp_prefix_test_p5 | | 2 | | 4 + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c is null and d = 4 + hp_prefix_test_p6 | 1 | 2 | | 4 + +select 
tableoid::regclass,* from hp_prefix_test where a is null and b is null and c = 3 and d = 4 + hp_prefix_test_p4 | | | 3 | 4 + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b is null and c = 3 and d = 4 + hp_prefix_test_p5 | 1 | | 3 | 4 + +select tableoid::regclass,* from hp_prefix_test where a is null and b = 2 and c = 3 and d = 4 + hp_prefix_test_p6 | | 2 | 3 | 4 + +select tableoid::regclass,* from hp_prefix_test where a = 1 and b = 2 and c = 3 and d = 4 + hp_prefix_test_p4 | 1 | 2 | 3 | 4 + +\t off +drop table hp_prefix_test; +-- +-- Check that gen_partprune_steps() detects self-contradiction from clauses +-- regardless of the order of the clauses (Here we use a custom operator to +-- prevent the equivclass.c machinery from reordering the clauses) +-- +create operator === ( + leftarg = int4, + rightarg = int4, + procedure = int4eq, + commutator = ===, + hashes +); +create operator class part_test_int4_ops2 +for type int4 +using hash as +operator 1 ===, +function 2 part_hashint4_noop(int4, int8); +create table hp_contradict_test (a int, b int) partition by hash (a part_test_int4_ops2, b part_test_int4_ops2); +create table hp_contradict_test_p1 partition of hp_contradict_test for values with (modulus 2, remainder 0); +create table hp_contradict_test_p2 partition of hp_contradict_test for values with (modulus 2, remainder 1); +explain (costs off) select * from hp_contradict_test where a is null and a === 1 and b === 1; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +explain (costs off) select * from hp_contradict_test where a === 1 and b === 1 and a is null; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table hp_contradict_test; +drop operator class part_test_int4_ops2 using hash; +drop operator ===(int4, int4); diff --git a/src/test/regress/expected/password.out b/src/test/regress/expected/password.out new file mode 100644 index 0000000..8475231 --- /dev/null +++ b/src/test/regress/expected/password.out @@ -0,0 +1,149 @@ +-- +-- Tests for password types +-- +-- Tests for GUC password_encryption +SET password_encryption = 'novalue'; -- error +ERROR: invalid value for parameter "password_encryption": "novalue" +HINT: Available values: md5, scram-sha-256. +SET password_encryption = true; -- error +ERROR: invalid value for parameter "password_encryption": "true" +HINT: Available values: md5, scram-sha-256. +SET password_encryption = 'md5'; -- ok +SET password_encryption = 'scram-sha-256'; -- ok +-- consistency of password entries +SET password_encryption = 'md5'; +CREATE ROLE regress_passwd1 PASSWORD 'role_pwd1'; +CREATE ROLE regress_passwd2 PASSWORD 'role_pwd2'; +SET password_encryption = 'scram-sha-256'; +CREATE ROLE regress_passwd3 PASSWORD 'role_pwd3'; +CREATE ROLE regress_passwd4 PASSWORD NULL; +-- check list of created entries +-- +-- The scram secret will look something like: +-- SCRAM-SHA-256$4096:E4HxLGtnRzsYwg==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo= +-- +-- Since the salt is random, the exact value stored will be different on every test +-- run. Use a regular expression to mask the changing parts. 
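The masking described in the comment above can be tried in isolation on the documented sample secret (a made-up value, not a real credential), using the same regular expression as the query that follows; a minimal sketch:

-- Illustrative only: keep the mechanism name and iteration count, blank out
-- the salt, stored key and server key.
SELECT regexp_replace(
         'SCRAM-SHA-256$4096:E4HxLGtnRzsYwg==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo=',
         '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)',
         '\1$\2:$:') AS masked;
-- yields SCRAM-SHA-256$4096:$: which is exactly the masked form shown below.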
+SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:') as rolpassword_masked + FROM pg_authid + WHERE rolname LIKE 'regress_passwd%' + ORDER BY rolname, rolpassword; + rolname | rolpassword_masked +-----------------+--------------------------------------------------- + regress_passwd1 | md5783277baca28003b33453252be4dbb34 + regress_passwd2 | md54044304ba511dd062133eb5b4b84a2a3 + regress_passwd3 | SCRAM-SHA-256$4096:$: + regress_passwd4 | +(4 rows) + +-- Rename a role +ALTER ROLE regress_passwd2 RENAME TO regress_passwd2_new; +NOTICE: MD5 password cleared because of role rename +-- md5 entry should have been removed +SELECT rolname, rolpassword + FROM pg_authid + WHERE rolname LIKE 'regress_passwd2_new' + ORDER BY rolname, rolpassword; + rolname | rolpassword +---------------------+------------- + regress_passwd2_new | +(1 row) + +ALTER ROLE regress_passwd2_new RENAME TO regress_passwd2; +-- Change passwords with ALTER USER. With plaintext or already-encrypted +-- passwords. +SET password_encryption = 'md5'; +-- encrypt with MD5 +ALTER ROLE regress_passwd2 PASSWORD 'foo'; +-- already encrypted, use as they are +ALTER ROLE regress_passwd1 PASSWORD 'md5cd3578025fe2c3d7ed1b9a9b26238b70'; +ALTER ROLE regress_passwd3 PASSWORD 'SCRAM-SHA-256$4096:VLK4RMaQLCvNtQ==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo='; +SET password_encryption = 'scram-sha-256'; +-- create SCRAM secret +ALTER ROLE regress_passwd4 PASSWORD 'foo'; +-- already encrypted with MD5, use as it is +CREATE ROLE regress_passwd5 PASSWORD 'md5e73a4b11df52a6068f8b39f90be36023'; +-- This looks like a valid SCRAM-SHA-256 secret, but it is not +-- so it should be hashed with SCRAM-SHA-256. +CREATE ROLE regress_passwd6 PASSWORD 'SCRAM-SHA-256$1234'; +-- These may look like valid MD5 secrets, but they are not, so they +-- should be hashed with SCRAM-SHA-256. 
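Whether a string is accepted as an already-hashed MD5 secret is a purely syntactic check: the prefix md5 followed by exactly 32 hexadecimal digits. A small illustrative test of that shape (not part of the test file), covering the valid secret given to regress_passwd5 above and the two near-misses created next:

-- Illustrative shape check; the strings are taken from the CREATE ROLE
-- statements above and below.
SELECT s, s ~ '^md5[0-9a-f]{32}$' AS looks_like_md5_secret
  FROM (VALUES ('md5e73a4b11df52a6068f8b39f90be36023'),      -- valid, kept as-is
               ('md5012345678901234567890123456789zz'),      -- trailing garbage
               ('md501234567890123456789012345678901zz')     -- wrong length
       ) AS t(s);
-- Only the first matches; the other two are hashed with SCRAM-SHA-256 instead.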
+-- trailing garbage at the end +CREATE ROLE regress_passwd7 PASSWORD 'md5012345678901234567890123456789zz'; +-- invalid length +CREATE ROLE regress_passwd8 PASSWORD 'md501234567890123456789012345678901zz'; +-- Changing the SCRAM iteration count +SET scram_iterations = 1024; +CREATE ROLE regress_passwd9 PASSWORD 'alterediterationcount'; +SELECT rolname, regexp_replace(rolpassword, '(SCRAM-SHA-256)\$(\d+):([a-zA-Z0-9+/=]+)\$([a-zA-Z0-9+=/]+):([a-zA-Z0-9+/=]+)', '\1$\2:$:') as rolpassword_masked + FROM pg_authid + WHERE rolname LIKE 'regress_passwd%' + ORDER BY rolname, rolpassword; + rolname | rolpassword_masked +-----------------+--------------------------------------------------- + regress_passwd1 | md5cd3578025fe2c3d7ed1b9a9b26238b70 + regress_passwd2 | md5dfa155cadd5f4ad57860162f3fab9cdb + regress_passwd3 | SCRAM-SHA-256$4096:$: + regress_passwd4 | SCRAM-SHA-256$4096:$: + regress_passwd5 | md5e73a4b11df52a6068f8b39f90be36023 + regress_passwd6 | SCRAM-SHA-256$4096:$: + regress_passwd7 | SCRAM-SHA-256$4096:$: + regress_passwd8 | SCRAM-SHA-256$4096:$: + regress_passwd9 | SCRAM-SHA-256$1024:$: +(9 rows) + +-- An empty password is not allowed, in any form +CREATE ROLE regress_passwd_empty PASSWORD ''; +NOTICE: empty string is not a valid password, clearing password +ALTER ROLE regress_passwd_empty PASSWORD 'md585939a5ce845f1a1b620742e3c659e0a'; +NOTICE: empty string is not a valid password, clearing password +ALTER ROLE regress_passwd_empty PASSWORD 'SCRAM-SHA-256$4096:hpFyHTUsSWcR7O9P$LgZFIt6Oqdo27ZFKbZ2nV+vtnYM995pDh9ca6WSi120=:qVV5NeluNfUPkwm7Vqat25RjSPLkGeoZBQs6wVv+um4='; +NOTICE: empty string is not a valid password, clearing password +SELECT rolpassword FROM pg_authid WHERE rolname='regress_passwd_empty'; + rolpassword +------------- + +(1 row) + +-- Test with invalid stored and server keys. +-- +-- The first is valid, to act as a control. The others have too long +-- stored/server keys. They will be re-hashed. +CREATE ROLE regress_passwd_sha_len0 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI='; +CREATE ROLE regress_passwd_sha_len1 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96RqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZI='; +CREATE ROLE regress_passwd_sha_len2 PASSWORD 'SCRAM-SHA-256$4096:A6xHKoH/494E941doaPOYg==$Ky+A30sewHIH3VHQLRN9vYsuzlgNyGNKCh37dy96Rqw=:COPdlNiIkrsacU5QoxydEuOH6e/KfiipeETb/bPw8ZIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA='; +-- Check that the invalid secrets were re-hashed. A re-hashed secret +-- should not contain the original salt. 
+SELECT rolname, rolpassword not like '%A6xHKoH/494E941doaPOYg==%' as is_rolpassword_rehashed + FROM pg_authid + WHERE rolname LIKE 'regress_passwd_sha_len%' + ORDER BY rolname; + rolname | is_rolpassword_rehashed +-------------------------+------------------------- + regress_passwd_sha_len0 | f + regress_passwd_sha_len1 | t + regress_passwd_sha_len2 | t +(3 rows) + +DROP ROLE regress_passwd1; +DROP ROLE regress_passwd2; +DROP ROLE regress_passwd3; +DROP ROLE regress_passwd4; +DROP ROLE regress_passwd5; +DROP ROLE regress_passwd6; +DROP ROLE regress_passwd7; +DROP ROLE regress_passwd8; +DROP ROLE regress_passwd9; +DROP ROLE regress_passwd_empty; +DROP ROLE regress_passwd_sha_len0; +DROP ROLE regress_passwd_sha_len1; +DROP ROLE regress_passwd_sha_len2; +-- all entries should have been removed +SELECT rolname, rolpassword + FROM pg_authid + WHERE rolname LIKE 'regress_passwd%' + ORDER BY rolname, rolpassword; + rolname | rolpassword +---------+------------- +(0 rows) + diff --git a/src/test/regress/expected/path.out b/src/test/regress/expected/path.out new file mode 100644 index 0000000..4994641 --- /dev/null +++ b/src/test/regress/expected/path.out @@ -0,0 +1,107 @@ +-- +-- PATH +-- +--DROP TABLE PATH_TBL; +CREATE TABLE PATH_TBL (f1 path); +INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)]'); +INSERT INTO PATH_TBL VALUES (' ( ( 1 , 2 ) , ( 3 , 4 ) ) '); +INSERT INTO PATH_TBL VALUES ('[ (0,0),(3,0),(4,5),(1,6) ]'); +INSERT INTO PATH_TBL VALUES ('((1,2) ,(3,4 ))'); +INSERT INTO PATH_TBL VALUES ('1,2 ,3,4 '); +INSERT INTO PATH_TBL VALUES (' [1,2,3, 4] '); +INSERT INTO PATH_TBL VALUES ('((10,20))'); -- Only one point +INSERT INTO PATH_TBL VALUES ('[ 11,12,13,14 ]'); +INSERT INTO PATH_TBL VALUES ('( 11,12,13,14) '); +-- bad values for parser testing +INSERT INTO PATH_TBL VALUES ('[]'); +ERROR: invalid input syntax for type path: "[]" +LINE 1: INSERT INTO PATH_TBL VALUES ('[]'); + ^ +INSERT INTO PATH_TBL VALUES ('[(,2),(3,4)]'); +ERROR: invalid input syntax for type path: "[(,2),(3,4)]" +LINE 1: INSERT INTO PATH_TBL VALUES ('[(,2),(3,4)]'); + ^ +INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)'); +ERROR: invalid input syntax for type path: "[(1,2),(3,4)" +LINE 1: INSERT INTO PATH_TBL VALUES ('[(1,2),(3,4)'); + ^ +INSERT INTO PATH_TBL VALUES ('(1,2,3,4'); +ERROR: invalid input syntax for type path: "(1,2,3,4" +LINE 1: INSERT INTO PATH_TBL VALUES ('(1,2,3,4'); + ^ +INSERT INTO PATH_TBL VALUES ('(1,2),(3,4)]'); +ERROR: invalid input syntax for type path: "(1,2),(3,4)]" +LINE 1: INSERT INTO PATH_TBL VALUES ('(1,2),(3,4)]'); + ^ +SELECT f1 AS open_path FROM PATH_TBL WHERE isopen(f1); + open_path +--------------------------- + [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] + [(11,12),(13,14)] +(4 rows) + +SELECT f1 AS closed_path FROM PATH_TBL WHERE isclosed(f1); + closed_path +------------------- + ((1,2),(3,4)) + ((1,2),(3,4)) + ((1,2),(3,4)) + ((10,20)) + ((11,12),(13,14)) +(5 rows) + +SELECT pclose(f1) AS closed_path FROM PATH_TBL; + closed_path +--------------------------- + ((1,2),(3,4)) + ((1,2),(3,4)) + ((0,0),(3,0),(4,5),(1,6)) + ((1,2),(3,4)) + ((1,2),(3,4)) + ((1,2),(3,4)) + ((10,20)) + ((11,12),(13,14)) + ((11,12),(13,14)) +(9 rows) + +SELECT popen(f1) AS open_path FROM PATH_TBL; + open_path +--------------------------- + [(1,2),(3,4)] + [(1,2),(3,4)] + [(0,0),(3,0),(4,5),(1,6)] + [(1,2),(3,4)] + [(1,2),(3,4)] + [(1,2),(3,4)] + [(10,20)] + [(11,12),(13,14)] + [(11,12),(13,14)] +(9 rows) + +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('[(1,2),(3)]', 'path'); + 
pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('[(1,2),(3)]', 'path'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + invalid input syntax for type path: "[(1,2),(3)]" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('[(1,2,6),(3,4,6)]', 'path'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('[(1,2,6),(3,4,6)]', 'path'); + message | detail | hint | sql_error_code +---------------------------------------------------------+--------+------+---------------- + invalid input syntax for type path: "[(1,2,6),(3,4,6)]" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/pg_lsn.out b/src/test/regress/expected/pg_lsn.out new file mode 100644 index 0000000..b27eec7 --- /dev/null +++ b/src/test/regress/expected/pg_lsn.out @@ -0,0 +1,270 @@ +-- +-- PG_LSN +-- +CREATE TABLE PG_LSN_TBL (f1 pg_lsn); +-- Largest and smallest input +INSERT INTO PG_LSN_TBL VALUES ('0/0'); +INSERT INTO PG_LSN_TBL VALUES ('FFFFFFFF/FFFFFFFF'); +-- Incorrect input +INSERT INTO PG_LSN_TBL VALUES ('G/0'); +ERROR: invalid input syntax for type pg_lsn: "G/0" +LINE 1: INSERT INTO PG_LSN_TBL VALUES ('G/0'); + ^ +INSERT INTO PG_LSN_TBL VALUES ('-1/0'); +ERROR: invalid input syntax for type pg_lsn: "-1/0" +LINE 1: INSERT INTO PG_LSN_TBL VALUES ('-1/0'); + ^ +INSERT INTO PG_LSN_TBL VALUES (' 0/12345678'); +ERROR: invalid input syntax for type pg_lsn: " 0/12345678" +LINE 1: INSERT INTO PG_LSN_TBL VALUES (' 0/12345678'); + ^ +INSERT INTO PG_LSN_TBL VALUES ('ABCD/'); +ERROR: invalid input syntax for type pg_lsn: "ABCD/" +LINE 1: INSERT INTO PG_LSN_TBL VALUES ('ABCD/'); + ^ +INSERT INTO PG_LSN_TBL VALUES ('/ABCD'); +ERROR: invalid input syntax for type pg_lsn: "/ABCD" +LINE 1: INSERT INTO PG_LSN_TBL VALUES ('/ABCD'); + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('16AE7F7', 'pg_lsn'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('16AE7F7', 'pg_lsn'); + message | detail | hint | sql_error_code +-------------------------------------------------+--------+------+---------------- + invalid input syntax for type pg_lsn: "16AE7F7" | | | 22P02 +(1 row) + +-- Min/Max aggregation +SELECT MIN(f1), MAX(f1) FROM PG_LSN_TBL; + min | max +-----+------------------- + 0/0 | FFFFFFFF/FFFFFFFF +(1 row) + +DROP TABLE PG_LSN_TBL; +-- Operators +SELECT '0/16AE7F8' = '0/16AE7F8'::pg_lsn; + ?column? +---------- + t +(1 row) + +SELECT '0/16AE7F8'::pg_lsn != '0/16AE7F7'; + ?column? +---------- + t +(1 row) + +SELECT '0/16AE7F7' < '0/16AE7F8'::pg_lsn; + ?column? +---------- + t +(1 row) + +SELECT '0/16AE7F8' > pg_lsn '0/16AE7F7'; + ?column? +---------- + t +(1 row) + +SELECT '0/16AE7F7'::pg_lsn - '0/16AE7F8'::pg_lsn; + ?column? +---------- + -1 +(1 row) + +SELECT '0/16AE7F8'::pg_lsn - '0/16AE7F7'::pg_lsn; + ?column? +---------- + 1 +(1 row) + +SELECT '0/16AE7F7'::pg_lsn + 16::numeric; + ?column? +----------- + 0/16AE807 +(1 row) + +SELECT 16::numeric + '0/16AE7F7'::pg_lsn; + ?column? +----------- + 0/16AE807 +(1 row) + +SELECT '0/16AE7F7'::pg_lsn - 16::numeric; + ?column? +----------- + 0/16AE7E7 +(1 row) + +SELECT 'FFFFFFFF/FFFFFFFE'::pg_lsn + 1::numeric; + ?column? +------------------- + FFFFFFFF/FFFFFFFF +(1 row) + +SELECT 'FFFFFFFF/FFFFFFFE'::pg_lsn + 2::numeric; -- out of range error +ERROR: pg_lsn out of range +SELECT '0/1'::pg_lsn - 1::numeric; + ?column? 
+---------- + 0/0 +(1 row) + +SELECT '0/1'::pg_lsn - 2::numeric; -- out of range error +ERROR: pg_lsn out of range +SELECT '0/0'::pg_lsn + ('FFFFFFFF/FFFFFFFF'::pg_lsn - '0/0'::pg_lsn); + ?column? +------------------- + FFFFFFFF/FFFFFFFF +(1 row) + +SELECT 'FFFFFFFF/FFFFFFFF'::pg_lsn - ('FFFFFFFF/FFFFFFFF'::pg_lsn - '0/0'::pg_lsn); + ?column? +---------- + 0/0 +(1 row) + +SELECT '0/16AE7F7'::pg_lsn + 'NaN'::numeric; +ERROR: cannot add NaN to pg_lsn +SELECT '0/16AE7F7'::pg_lsn - 'NaN'::numeric; +ERROR: cannot subtract NaN from pg_lsn +-- Check btree and hash opclasses +EXPLAIN (COSTS OFF) +SELECT DISTINCT (i || '/' || j)::pg_lsn f + FROM generate_series(1, 10) i, + generate_series(1, 10) j, + generate_series(1, 5) k + WHERE i <= 10 AND j > 0 AND j <= 10 + ORDER BY f; + QUERY PLAN +-------------------------------------------------------------------------- + Sort + Sort Key: (((((i.i)::text || '/'::text) || (j.j)::text))::pg_lsn) + -> HashAggregate + Group Key: ((((i.i)::text || '/'::text) || (j.j)::text))::pg_lsn + -> Nested Loop + -> Function Scan on generate_series k + -> Materialize + -> Nested Loop + -> Function Scan on generate_series j + Filter: ((j > 0) AND (j <= 10)) + -> Function Scan on generate_series i + Filter: (i <= 10) +(12 rows) + +SELECT DISTINCT (i || '/' || j)::pg_lsn f + FROM generate_series(1, 10) i, + generate_series(1, 10) j, + generate_series(1, 5) k + WHERE i <= 10 AND j > 0 AND j <= 10 + ORDER BY f; + f +------- + 1/1 + 1/2 + 1/3 + 1/4 + 1/5 + 1/6 + 1/7 + 1/8 + 1/9 + 1/10 + 2/1 + 2/2 + 2/3 + 2/4 + 2/5 + 2/6 + 2/7 + 2/8 + 2/9 + 2/10 + 3/1 + 3/2 + 3/3 + 3/4 + 3/5 + 3/6 + 3/7 + 3/8 + 3/9 + 3/10 + 4/1 + 4/2 + 4/3 + 4/4 + 4/5 + 4/6 + 4/7 + 4/8 + 4/9 + 4/10 + 5/1 + 5/2 + 5/3 + 5/4 + 5/5 + 5/6 + 5/7 + 5/8 + 5/9 + 5/10 + 6/1 + 6/2 + 6/3 + 6/4 + 6/5 + 6/6 + 6/7 + 6/8 + 6/9 + 6/10 + 7/1 + 7/2 + 7/3 + 7/4 + 7/5 + 7/6 + 7/7 + 7/8 + 7/9 + 7/10 + 8/1 + 8/2 + 8/3 + 8/4 + 8/5 + 8/6 + 8/7 + 8/8 + 8/9 + 8/10 + 9/1 + 9/2 + 9/3 + 9/4 + 9/5 + 9/6 + 9/7 + 9/8 + 9/9 + 9/10 + 10/1 + 10/2 + 10/3 + 10/4 + 10/5 + 10/6 + 10/7 + 10/8 + 10/9 + 10/10 +(100 rows) + diff --git a/src/test/regress/expected/plancache.out b/src/test/regress/expected/plancache.out new file mode 100644 index 0000000..4e59188 --- /dev/null +++ b/src/test/regress/expected/plancache.out @@ -0,0 +1,400 @@ +-- +-- Tests to exercise the plan caching/invalidation mechanism +-- +CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl; +-- create and use a cached plan +PREPARE prepstmt AS SELECT * FROM pcachetest; +EXECUTE prepstmt; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +-- and one with parameters +PREPARE prepstmt2(bigint) AS SELECT * FROM pcachetest WHERE q1 = $1; +EXECUTE prepstmt2(123); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +-- invalidate the plans and see what happens +DROP TABLE pcachetest; +EXECUTE prepstmt; +ERROR: relation "pcachetest" does not exist +EXECUTE prepstmt2(123); +ERROR: relation "pcachetest" does not exist +-- recreate the temp table (this demonstrates that the raw plan is +-- purely textual and doesn't depend on OIDs, for instance) +CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl ORDER BY 2; +EXECUTE prepstmt; + q1 | q2 +------------------+------------------- + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 
+(5 rows) + +EXECUTE prepstmt2(123); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +-- prepared statements should prevent change in output tupdesc, +-- since clients probably aren't expecting that to change on the fly +ALTER TABLE pcachetest ADD COLUMN q3 bigint; +EXECUTE prepstmt; +ERROR: cached plan must not change result type +EXECUTE prepstmt2(123); +ERROR: cached plan must not change result type +-- but we're nice guys and will let you undo your mistake +ALTER TABLE pcachetest DROP COLUMN q3; +EXECUTE prepstmt; + q1 | q2 +------------------+------------------- + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(5 rows) + +EXECUTE prepstmt2(123); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +-- Try it with a view, which isn't directly used in the resulting plan +-- but should trigger invalidation anyway +CREATE TEMP VIEW pcacheview AS + SELECT * FROM pcachetest; +PREPARE vprep AS SELECT * FROM pcacheview; +EXECUTE vprep; + q1 | q2 +------------------+------------------- + 4567890123456789 | -4567890123456789 + 4567890123456789 | 123 + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 4567890123456789 +(5 rows) + +CREATE OR REPLACE TEMP VIEW pcacheview AS + SELECT q1, q2/2 AS q2 FROM pcachetest; +EXECUTE vprep; + q1 | q2 +------------------+------------------- + 4567890123456789 | -2283945061728394 + 4567890123456789 | 61 + 123 | 228 + 123 | 2283945061728394 + 4567890123456789 | 2283945061728394 +(5 rows) + +-- Check basic SPI plan invalidation +create function cache_test(int) returns int as $$ +declare total int; +begin + create temp table t1(f1 int); + insert into t1 values($1); + insert into t1 values(11); + insert into t1 values(12); + insert into t1 values(13); + select sum(f1) into total from t1; + drop table t1; + return total; +end +$$ language plpgsql; +select cache_test(1); + cache_test +------------ + 37 +(1 row) + +select cache_test(2); + cache_test +------------ + 38 +(1 row) + +select cache_test(3); + cache_test +------------ + 39 +(1 row) + +-- Check invalidation of plpgsql "simple expression" +create temp view v1 as + select 2+2 as f1; +create function cache_test_2() returns int as $$ +begin + return f1 from v1; +end$$ language plpgsql; +select cache_test_2(); + cache_test_2 +-------------- + 4 +(1 row) + +create or replace temp view v1 as + select 2+2+4 as f1; +select cache_test_2(); + cache_test_2 +-------------- + 8 +(1 row) + +create or replace temp view v1 as + select 2+2+4+(select max(unique1) from tenk1) as f1; +select cache_test_2(); + cache_test_2 +-------------- + 10007 +(1 row) + +--- Check that change of search_path is honored when re-using cached plan +create schema s1 + create table abc (f1 int); +create schema s2 + create table abc (f1 int); +insert into s1.abc values(123); +insert into s2.abc values(456); +set search_path = s1; +prepare p1 as select f1 from abc; +execute p1; + f1 +----- + 123 +(1 row) + +set search_path = s2; +select f1 from abc; + f1 +----- + 456 +(1 row) + +execute p1; + f1 +----- + 456 +(1 row) + +alter table s1.abc add column f2 float8; -- force replan +execute p1; + f1 +----- + 456 +(1 row) + +drop schema s1 cascade; +NOTICE: drop cascades to table s1.abc +drop schema s2 cascade; +NOTICE: drop cascades to table abc +reset search_path; +-- Check that invalidation deals with regclass constants +create temp sequence seq; +prepare p2 as select nextval('seq'); 
+execute p2; + nextval +--------- + 1 +(1 row) + +drop sequence seq; +create temp sequence seq; +execute p2; + nextval +--------- + 1 +(1 row) + +-- Check DDL via SPI, immediately followed by SPI plan re-use +-- (bug in original coding) +create function cachebug() returns void as $$ +declare r int; +begin + drop table if exists temptable cascade; + create temp table temptable as select * from generate_series(1,3) as f1; + create temp view vv as select * from temptable; + for r in select * from vv loop + raise notice '%', r; + end loop; +end$$ language plpgsql; +select cachebug(); +NOTICE: table "temptable" does not exist, skipping +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 + cachebug +---------- + +(1 row) + +select cachebug(); +NOTICE: drop cascades to view vv +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 + cachebug +---------- + +(1 row) + +-- Check that addition or removal of any partition is correctly dealt with by +-- default partition table when it is being used in prepared statement. +create table pc_list_parted (a int) partition by list(a); +create table pc_list_part_null partition of pc_list_parted for values in (null); +create table pc_list_part_1 partition of pc_list_parted for values in (1); +create table pc_list_part_def partition of pc_list_parted default; +prepare pstmt_def_insert (int) as insert into pc_list_part_def values($1); +-- should fail +execute pstmt_def_insert(null); +ERROR: new row for relation "pc_list_part_def" violates partition constraint +DETAIL: Failing row contains (null). +execute pstmt_def_insert(1); +ERROR: new row for relation "pc_list_part_def" violates partition constraint +DETAIL: Failing row contains (1). +create table pc_list_part_2 partition of pc_list_parted for values in (2); +execute pstmt_def_insert(2); +ERROR: new row for relation "pc_list_part_def" violates partition constraint +DETAIL: Failing row contains (2). 
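The three failures above are the default partition's implicit constraint at work: it excludes every value handled by a non-default sibling, so it tightens when pc_list_part_2 is created and loosens again when a sibling is detached or dropped, and the cached plan for pstmt_def_insert has to respect the current partition set each time. The constraint in force at any moment can be inspected along these lines (illustrative, not part of the expected output):

-- Show the partition constraint currently enforced on the default partition.
select pg_get_partition_constraintdef('pc_list_part_def'::regclass);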
+alter table pc_list_parted detach partition pc_list_part_null; +-- should be ok +execute pstmt_def_insert(null); +drop table pc_list_part_1; +-- should be ok +execute pstmt_def_insert(1); +drop table pc_list_parted, pc_list_part_null; +deallocate pstmt_def_insert; +-- Test plan_cache_mode +create table test_mode (a int); +insert into test_mode select 1 from generate_series(1,1000) union all select 2; +create index on test_mode (a); +analyze test_mode; +prepare test_mode_pp (int) as select count(*) from test_mode where a = $1; +select name, generic_plans, custom_plans from pg_prepared_statements + where name = 'test_mode_pp'; + name | generic_plans | custom_plans +--------------+---------------+-------------- + test_mode_pp | 0 | 0 +(1 row) + +-- up to 5 executions, custom plan is used +set plan_cache_mode to auto; +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +---------------------------------------------------------- + Aggregate + -> Index Only Scan using test_mode_a_idx on test_mode + Index Cond: (a = 2) +(3 rows) + +select name, generic_plans, custom_plans from pg_prepared_statements + where name = 'test_mode_pp'; + name | generic_plans | custom_plans +--------------+---------------+-------------- + test_mode_pp | 0 | 1 +(1 row) + +-- force generic plan +set plan_cache_mode to force_generic_plan; +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +----------------------------- + Aggregate + -> Seq Scan on test_mode + Filter: (a = $1) +(3 rows) + +select name, generic_plans, custom_plans from pg_prepared_statements + where name = 'test_mode_pp'; + name | generic_plans | custom_plans +--------------+---------------+-------------- + test_mode_pp | 1 | 1 +(1 row) + +-- get to generic plan by 5 executions +set plan_cache_mode to auto; +execute test_mode_pp(1); -- 1x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 2x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 3x + count +------- + 1000 +(1 row) + +execute test_mode_pp(1); -- 4x + count +------- + 1000 +(1 row) + +select name, generic_plans, custom_plans from pg_prepared_statements + where name = 'test_mode_pp'; + name | generic_plans | custom_plans +--------------+---------------+-------------- + test_mode_pp | 1 | 5 +(1 row) + +execute test_mode_pp(1); -- 5x + count +------- + 1000 +(1 row) + +select name, generic_plans, custom_plans from pg_prepared_statements + where name = 'test_mode_pp'; + name | generic_plans | custom_plans +--------------+---------------+-------------- + test_mode_pp | 2 | 5 +(1 row) + +-- we should now get a really bad plan +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +----------------------------- + Aggregate + -> Seq Scan on test_mode + Filter: (a = $1) +(3 rows) + +-- but we can force a custom plan +set plan_cache_mode to force_custom_plan; +explain (costs off) execute test_mode_pp(2); + QUERY PLAN +---------------------------------------------------------- + Aggregate + -> Index Only Scan using test_mode_a_idx on test_mode + Index Cond: (a = 2) +(3 rows) + +select name, generic_plans, custom_plans from pg_prepared_statements + where name = 'test_mode_pp'; + name | generic_plans | custom_plans +--------------+---------------+-------------- + test_mode_pp | 3 | 6 +(1 row) + +drop table test_mode; diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out new file mode 100644 index 0000000..272f5d2 --- /dev/null +++ b/src/test/regress/expected/plpgsql.out @@ -0,0 +1,5827 @@ +-- +-- PLPGSQL +-- +-- Scenario: +-- 
+-- A building with a modern TP cable installation where any +-- of the wall connectors can be used to plug in phones, +-- ethernet interfaces or local office hubs. The backside +-- of the wall connectors is wired to one of several patch- +-- fields in the building. +-- +-- In the patchfields, there are hubs and all the slots +-- representing the wall connectors. In addition there are +-- slots that can represent a phone line from the central +-- phone system. +-- +-- Triggers ensure consistency of the patching information. +-- +-- Functions are used to build up powerful views that let +-- you look behind the wall when looking at a patchfield +-- or into a room. +-- +create table Room ( + roomno char(8), + comment text +); +create unique index Room_rno on Room using btree (roomno bpchar_ops); +create table WSlot ( + slotname char(20), + roomno char(8), + slotlink char(20), + backlink char(20) +); +create unique index WSlot_name on WSlot using btree (slotname bpchar_ops); +create table PField ( + name text, + comment text +); +create unique index PField_name on PField using btree (name text_ops); +create table PSlot ( + slotname char(20), + pfname text, + slotlink char(20), + backlink char(20) +); +create unique index PSlot_name on PSlot using btree (slotname bpchar_ops); +create table PLine ( + slotname char(20), + phonenumber char(20), + comment text, + backlink char(20) +); +create unique index PLine_name on PLine using btree (slotname bpchar_ops); +create table Hub ( + name char(14), + comment text, + nslots integer +); +create unique index Hub_name on Hub using btree (name bpchar_ops); +create table HSlot ( + slotname char(20), + hubname char(14), + slotno integer, + slotlink char(20) +); +create unique index HSlot_name on HSlot using btree (slotname bpchar_ops); +create index HSlot_hubname on HSlot using btree (hubname bpchar_ops); +create table System ( + name text, + comment text +); +create unique index System_name on System using btree (name text_ops); +create table IFace ( + slotname char(20), + sysname text, + ifname text, + slotlink char(20) +); +create unique index IFace_name on IFace using btree (slotname bpchar_ops); +create table PHone ( + slotname char(20), + comment text, + slotlink char(20) +); +create unique index PHone_name on PHone using btree (slotname bpchar_ops); +-- ************************************************************ +-- * +-- * Trigger procedures and functions for the patchfield +-- * test of PL/pgSQL +-- * +-- ************************************************************ +-- ************************************************************ +-- * AFTER UPDATE on Room +-- * - If room no changes let wall slots follow +-- ************************************************************ +create function tg_room_au() returns trigger as ' +begin + if new.roomno != old.roomno then + update WSlot set roomno = new.roomno where roomno = old.roomno; + end if; + return new; +end; +' language plpgsql; +create trigger tg_room_au after update + on Room for each row execute procedure tg_room_au(); +-- ************************************************************ +-- * AFTER DELETE on Room +-- * - delete wall slots in this room +-- ************************************************************ +create function tg_room_ad() returns trigger as ' +begin + delete from WSlot where roomno = old.roomno; + return old; +end; +' language plpgsql; +create trigger tg_room_ad after delete + on Room for each row execute procedure tg_room_ad(); +-- 
************************************************************ +-- * BEFORE INSERT or UPDATE on WSlot +-- * - Check that room exists +-- ************************************************************ +create function tg_wslot_biu() returns trigger as $$ +begin + if count(*) = 0 from Room where roomno = new.roomno then + raise exception 'Room % does not exist', new.roomno; + end if; + return new; +end; +$$ language plpgsql; +create trigger tg_wslot_biu before insert or update + on WSlot for each row execute procedure tg_wslot_biu(); +-- ************************************************************ +-- * AFTER UPDATE on PField +-- * - Let PSlots of this field follow +-- ************************************************************ +create function tg_pfield_au() returns trigger as ' +begin + if new.name != old.name then + update PSlot set pfname = new.name where pfname = old.name; + end if; + return new; +end; +' language plpgsql; +create trigger tg_pfield_au after update + on PField for each row execute procedure tg_pfield_au(); +-- ************************************************************ +-- * AFTER DELETE on PField +-- * - Remove all slots of this patchfield +-- ************************************************************ +create function tg_pfield_ad() returns trigger as ' +begin + delete from PSlot where pfname = old.name; + return old; +end; +' language plpgsql; +create trigger tg_pfield_ad after delete + on PField for each row execute procedure tg_pfield_ad(); +-- ************************************************************ +-- * BEFORE INSERT or UPDATE on PSlot +-- * - Ensure that our patchfield does exist +-- ************************************************************ +create function tg_pslot_biu() returns trigger as $proc$ +declare + pfrec record; + ps alias for new; +begin + select into pfrec * from PField where name = ps.pfname; + if not found then + raise exception $$Patchfield "%" does not exist$$, ps.pfname; + end if; + return ps; +end; +$proc$ language plpgsql; +create trigger tg_pslot_biu before insert or update + on PSlot for each row execute procedure tg_pslot_biu(); +-- ************************************************************ +-- * AFTER UPDATE on System +-- * - If system name changes let interfaces follow +-- ************************************************************ +create function tg_system_au() returns trigger as ' +begin + if new.name != old.name then + update IFace set sysname = new.name where sysname = old.name; + end if; + return new; +end; +' language plpgsql; +create trigger tg_system_au after update + on System for each row execute procedure tg_system_au(); +-- ************************************************************ +-- * BEFORE INSERT or UPDATE on IFace +-- * - set the slotname to IF.sysname.ifname +-- ************************************************************ +create function tg_iface_biu() returns trigger as $$ +declare + sname text; + sysrec record; +begin + select into sysrec * from system where name = new.sysname; + if not found then + raise exception $q$system "%" does not exist$q$, new.sysname; + end if; + sname := 'IF.' 
|| new.sysname; + sname := sname || '.'; + sname := sname || new.ifname; + if length(sname) > 20 then + raise exception 'IFace slotname "%" too long (20 char max)', sname; + end if; + new.slotname := sname; + return new; +end; +$$ language plpgsql; +create trigger tg_iface_biu before insert or update + on IFace for each row execute procedure tg_iface_biu(); +-- ************************************************************ +-- * AFTER INSERT or UPDATE or DELETE on Hub +-- * - insert/delete/rename slots as required +-- ************************************************************ +create function tg_hub_a() returns trigger as ' +declare + hname text; + dummy integer; +begin + if tg_op = ''INSERT'' then + dummy := tg_hub_adjustslots(new.name, 0, new.nslots); + return new; + end if; + if tg_op = ''UPDATE'' then + if new.name != old.name then + update HSlot set hubname = new.name where hubname = old.name; + end if; + dummy := tg_hub_adjustslots(new.name, old.nslots, new.nslots); + return new; + end if; + if tg_op = ''DELETE'' then + dummy := tg_hub_adjustslots(old.name, old.nslots, 0); + return old; + end if; +end; +' language plpgsql; +create trigger tg_hub_a after insert or update or delete + on Hub for each row execute procedure tg_hub_a(); +-- ************************************************************ +-- * Support function to add/remove slots of Hub +-- ************************************************************ +create function tg_hub_adjustslots(hname bpchar, + oldnslots integer, + newnslots integer) +returns integer as ' +begin + if newnslots = oldnslots then + return 0; + end if; + if newnslots < oldnslots then + delete from HSlot where hubname = hname and slotno > newnslots; + return 0; + end if; + for i in oldnslots + 1 .. newnslots loop + insert into HSlot (slotname, hubname, slotno, slotlink) + values (''HS.dummy'', hname, i, ''''); + end loop; + return 0; +end +' language plpgsql; +-- Test comments +COMMENT ON FUNCTION tg_hub_adjustslots_wrong(bpchar, integer, integer) IS 'function with args'; +ERROR: function tg_hub_adjustslots_wrong(character, integer, integer) does not exist +COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS 'function with args'; +COMMENT ON FUNCTION tg_hub_adjustslots(bpchar, integer, integer) IS NULL; +-- ************************************************************ +-- * BEFORE INSERT or UPDATE on HSlot +-- * - prevent from manual manipulation +-- * - set the slotname to HS.hubname.slotno +-- ************************************************************ +create function tg_hslot_biu() returns trigger as ' +declare + sname text; + xname HSlot.slotname%TYPE; + hubrec record; +begin + select into hubrec * from Hub where name = new.hubname; + if not found then + raise exception ''no manual manipulation of HSlot''; + end if; + if new.slotno < 1 or new.slotno > hubrec.nslots then + raise exception ''no manual manipulation of HSlot''; + end if; + if tg_op = ''UPDATE'' and new.hubname != old.hubname then + if count(*) > 0 from Hub where name = old.hubname then + raise exception ''no manual manipulation of HSlot''; + end if; + end if; + sname := ''HS.'' || trim(new.hubname); + sname := sname || ''.''; + sname := sname || new.slotno::text; + if length(sname) > 20 then + raise exception ''HSlot slotname "%" too long (20 char max)'', sname; + end if; + new.slotname := sname; + return new; +end; +' language plpgsql; +create trigger tg_hslot_biu before insert or update + on HSlot for each row execute procedure tg_hslot_biu(); +-- 
************************************************************ +-- * BEFORE DELETE on HSlot +-- * - prevent from manual manipulation +-- ************************************************************ +create function tg_hslot_bd() returns trigger as ' +declare + hubrec record; +begin + select into hubrec * from Hub where name = old.hubname; + if not found then + return old; + end if; + if old.slotno > hubrec.nslots then + return old; + end if; + raise exception ''no manual manipulation of HSlot''; +end; +' language plpgsql; +create trigger tg_hslot_bd before delete + on HSlot for each row execute procedure tg_hslot_bd(); +-- ************************************************************ +-- * BEFORE INSERT on all slots +-- * - Check name prefix +-- ************************************************************ +create function tg_chkslotname() returns trigger as ' +begin + if substr(new.slotname, 1, 2) != tg_argv[0] then + raise exception ''slotname must begin with %'', tg_argv[0]; + end if; + return new; +end; +' language plpgsql; +create trigger tg_chkslotname before insert + on PSlot for each row execute procedure tg_chkslotname('PS'); +create trigger tg_chkslotname before insert + on WSlot for each row execute procedure tg_chkslotname('WS'); +create trigger tg_chkslotname before insert + on PLine for each row execute procedure tg_chkslotname('PL'); +create trigger tg_chkslotname before insert + on IFace for each row execute procedure tg_chkslotname('IF'); +create trigger tg_chkslotname before insert + on PHone for each row execute procedure tg_chkslotname('PH'); +-- ************************************************************ +-- * BEFORE INSERT or UPDATE on all slots with slotlink +-- * - Set slotlink to empty string if NULL value given +-- ************************************************************ +create function tg_chkslotlink() returns trigger as ' +begin + if new.slotlink isnull then + new.slotlink := ''''; + end if; + return new; +end; +' language plpgsql; +create trigger tg_chkslotlink before insert or update + on PSlot for each row execute procedure tg_chkslotlink(); +create trigger tg_chkslotlink before insert or update + on WSlot for each row execute procedure tg_chkslotlink(); +create trigger tg_chkslotlink before insert or update + on IFace for each row execute procedure tg_chkslotlink(); +create trigger tg_chkslotlink before insert or update + on HSlot for each row execute procedure tg_chkslotlink(); +create trigger tg_chkslotlink before insert or update + on PHone for each row execute procedure tg_chkslotlink(); +-- ************************************************************ +-- * BEFORE INSERT or UPDATE on all slots with backlink +-- * - Set backlink to empty string if NULL value given +-- ************************************************************ +create function tg_chkbacklink() returns trigger as ' +begin + if new.backlink isnull then + new.backlink := ''''; + end if; + return new; +end; +' language plpgsql; +create trigger tg_chkbacklink before insert or update + on PSlot for each row execute procedure tg_chkbacklink(); +create trigger tg_chkbacklink before insert or update + on WSlot for each row execute procedure tg_chkbacklink(); +create trigger tg_chkbacklink before insert or update + on PLine for each row execute procedure tg_chkbacklink(); +-- ************************************************************ +-- * BEFORE UPDATE on PSlot +-- * - do delete/insert instead of update if name changes +-- ************************************************************ +create 
function tg_pslot_bu() returns trigger as ' +begin + if new.slotname != old.slotname then + delete from PSlot where slotname = old.slotname; + insert into PSlot ( + slotname, + pfname, + slotlink, + backlink + ) values ( + new.slotname, + new.pfname, + new.slotlink, + new.backlink + ); + return null; + end if; + return new; +end; +' language plpgsql; +create trigger tg_pslot_bu before update + on PSlot for each row execute procedure tg_pslot_bu(); +-- ************************************************************ +-- * BEFORE UPDATE on WSlot +-- * - do delete/insert instead of update if name changes +-- ************************************************************ +create function tg_wslot_bu() returns trigger as ' +begin + if new.slotname != old.slotname then + delete from WSlot where slotname = old.slotname; + insert into WSlot ( + slotname, + roomno, + slotlink, + backlink + ) values ( + new.slotname, + new.roomno, + new.slotlink, + new.backlink + ); + return null; + end if; + return new; +end; +' language plpgsql; +create trigger tg_wslot_bu before update + on WSlot for each row execute procedure tg_Wslot_bu(); +-- ************************************************************ +-- * BEFORE UPDATE on PLine +-- * - do delete/insert instead of update if name changes +-- ************************************************************ +create function tg_pline_bu() returns trigger as ' +begin + if new.slotname != old.slotname then + delete from PLine where slotname = old.slotname; + insert into PLine ( + slotname, + phonenumber, + comment, + backlink + ) values ( + new.slotname, + new.phonenumber, + new.comment, + new.backlink + ); + return null; + end if; + return new; +end; +' language plpgsql; +create trigger tg_pline_bu before update + on PLine for each row execute procedure tg_pline_bu(); +-- ************************************************************ +-- * BEFORE UPDATE on IFace +-- * - do delete/insert instead of update if name changes +-- ************************************************************ +create function tg_iface_bu() returns trigger as ' +begin + if new.slotname != old.slotname then + delete from IFace where slotname = old.slotname; + insert into IFace ( + slotname, + sysname, + ifname, + slotlink + ) values ( + new.slotname, + new.sysname, + new.ifname, + new.slotlink + ); + return null; + end if; + return new; +end; +' language plpgsql; +create trigger tg_iface_bu before update + on IFace for each row execute procedure tg_iface_bu(); +-- ************************************************************ +-- * BEFORE UPDATE on HSlot +-- * - do delete/insert instead of update if name changes +-- ************************************************************ +create function tg_hslot_bu() returns trigger as ' +begin + if new.slotname != old.slotname or new.hubname != old.hubname then + delete from HSlot where slotname = old.slotname; + insert into HSlot ( + slotname, + hubname, + slotno, + slotlink + ) values ( + new.slotname, + new.hubname, + new.slotno, + new.slotlink + ); + return null; + end if; + return new; +end; +' language plpgsql; +create trigger tg_hslot_bu before update + on HSlot for each row execute procedure tg_hslot_bu(); +-- ************************************************************ +-- * BEFORE UPDATE on PHone +-- * - do delete/insert instead of update if name changes +-- ************************************************************ +create function tg_phone_bu() returns trigger as ' +begin + if new.slotname != old.slotname then + delete from PHone where slotname = 
old.slotname; + insert into PHone ( + slotname, + comment, + slotlink + ) values ( + new.slotname, + new.comment, + new.slotlink + ); + return null; + end if; + return new; +end; +' language plpgsql; +create trigger tg_phone_bu before update + on PHone for each row execute procedure tg_phone_bu(); +-- ************************************************************ +-- * AFTER INSERT or UPDATE or DELETE on slot with backlink +-- * - Ensure that the opponent correctly points back to us +-- ************************************************************ +create function tg_backlink_a() returns trigger as ' +declare + dummy integer; +begin + if tg_op = ''INSERT'' then + if new.backlink != '''' then + dummy := tg_backlink_set(new.backlink, new.slotname); + end if; + return new; + end if; + if tg_op = ''UPDATE'' then + if new.backlink != old.backlink then + if old.backlink != '''' then + dummy := tg_backlink_unset(old.backlink, old.slotname); + end if; + if new.backlink != '''' then + dummy := tg_backlink_set(new.backlink, new.slotname); + end if; + else + if new.slotname != old.slotname and new.backlink != '''' then + dummy := tg_slotlink_set(new.backlink, new.slotname); + end if; + end if; + return new; + end if; + if tg_op = ''DELETE'' then + if old.backlink != '''' then + dummy := tg_backlink_unset(old.backlink, old.slotname); + end if; + return old; + end if; +end; +' language plpgsql; +create trigger tg_backlink_a after insert or update or delete + on PSlot for each row execute procedure tg_backlink_a('PS'); +create trigger tg_backlink_a after insert or update or delete + on WSlot for each row execute procedure tg_backlink_a('WS'); +create trigger tg_backlink_a after insert or update or delete + on PLine for each row execute procedure tg_backlink_a('PL'); +-- ************************************************************ +-- * Support function to set the opponents backlink field +-- * if it does not already point to the requested slot +-- ************************************************************ +create function tg_backlink_set(myname bpchar, blname bpchar) +returns integer as ' +declare + mytype char(2); + link char(4); + rec record; +begin + mytype := substr(myname, 1, 2); + link := mytype || substr(blname, 1, 2); + if link = ''PLPL'' then + raise exception + ''backlink between two phone lines does not make sense''; + end if; + if link in (''PLWS'', ''WSPL'') then + raise exception + ''direct link of phone line to wall slot not permitted''; + end if; + if mytype = ''PS'' then + select into rec * from PSlot where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.backlink != blname then + update PSlot set backlink = blname where slotname = myname; + end if; + return 0; + end if; + if mytype = ''WS'' then + select into rec * from WSlot where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.backlink != blname then + update WSlot set backlink = blname where slotname = myname; + end if; + return 0; + end if; + if mytype = ''PL'' then + select into rec * from PLine where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.backlink != blname then + update PLine set backlink = blname where slotname = myname; + end if; + return 0; + end if; + raise exception ''illegal backlink beginning with %'', mytype; +end; +' language plpgsql; +-- ************************************************************ +-- * Support function to clear out the 
backlink field if +-- * it still points to specific slot +-- ************************************************************ +create function tg_backlink_unset(bpchar, bpchar) +returns integer as ' +declare + myname alias for $1; + blname alias for $2; + mytype char(2); + rec record; +begin + mytype := substr(myname, 1, 2); + if mytype = ''PS'' then + select into rec * from PSlot where slotname = myname; + if not found then + return 0; + end if; + if rec.backlink = blname then + update PSlot set backlink = '''' where slotname = myname; + end if; + return 0; + end if; + if mytype = ''WS'' then + select into rec * from WSlot where slotname = myname; + if not found then + return 0; + end if; + if rec.backlink = blname then + update WSlot set backlink = '''' where slotname = myname; + end if; + return 0; + end if; + if mytype = ''PL'' then + select into rec * from PLine where slotname = myname; + if not found then + return 0; + end if; + if rec.backlink = blname then + update PLine set backlink = '''' where slotname = myname; + end if; + return 0; + end if; +end +' language plpgsql; +-- ************************************************************ +-- * AFTER INSERT or UPDATE or DELETE on slot with slotlink +-- * - Ensure that the opponent correctly points back to us +-- ************************************************************ +create function tg_slotlink_a() returns trigger as ' +declare + dummy integer; +begin + if tg_op = ''INSERT'' then + if new.slotlink != '''' then + dummy := tg_slotlink_set(new.slotlink, new.slotname); + end if; + return new; + end if; + if tg_op = ''UPDATE'' then + if new.slotlink != old.slotlink then + if old.slotlink != '''' then + dummy := tg_slotlink_unset(old.slotlink, old.slotname); + end if; + if new.slotlink != '''' then + dummy := tg_slotlink_set(new.slotlink, new.slotname); + end if; + else + if new.slotname != old.slotname and new.slotlink != '''' then + dummy := tg_slotlink_set(new.slotlink, new.slotname); + end if; + end if; + return new; + end if; + if tg_op = ''DELETE'' then + if old.slotlink != '''' then + dummy := tg_slotlink_unset(old.slotlink, old.slotname); + end if; + return old; + end if; +end; +' language plpgsql; +create trigger tg_slotlink_a after insert or update or delete + on PSlot for each row execute procedure tg_slotlink_a('PS'); +create trigger tg_slotlink_a after insert or update or delete + on WSlot for each row execute procedure tg_slotlink_a('WS'); +create trigger tg_slotlink_a after insert or update or delete + on IFace for each row execute procedure tg_slotlink_a('IF'); +create trigger tg_slotlink_a after insert or update or delete + on HSlot for each row execute procedure tg_slotlink_a('HS'); +create trigger tg_slotlink_a after insert or update or delete + on PHone for each row execute procedure tg_slotlink_a('PH'); +-- ************************************************************ +-- * Support function to set the opponents slotlink field +-- * if it does not already point to the requested slot +-- ************************************************************ +create function tg_slotlink_set(bpchar, bpchar) +returns integer as ' +declare + myname alias for $1; + blname alias for $2; + mytype char(2); + link char(4); + rec record; +begin + mytype := substr(myname, 1, 2); + link := mytype || substr(blname, 1, 2); + if link = ''PHPH'' then + raise exception + ''slotlink between two phones does not make sense''; + end if; + if link in (''PHHS'', ''HSPH'') then + raise exception + ''link of phone to hub does not make sense''; + end if; + 
if link in (''PHIF'', ''IFPH'') then + raise exception + ''link of phone to hub does not make sense''; + end if; + if link in (''PSWS'', ''WSPS'') then + raise exception + ''slotlink from patchslot to wallslot not permitted''; + end if; + if mytype = ''PS'' then + select into rec * from PSlot where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.slotlink != blname then + update PSlot set slotlink = blname where slotname = myname; + end if; + return 0; + end if; + if mytype = ''WS'' then + select into rec * from WSlot where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.slotlink != blname then + update WSlot set slotlink = blname where slotname = myname; + end if; + return 0; + end if; + if mytype = ''IF'' then + select into rec * from IFace where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.slotlink != blname then + update IFace set slotlink = blname where slotname = myname; + end if; + return 0; + end if; + if mytype = ''HS'' then + select into rec * from HSlot where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.slotlink != blname then + update HSlot set slotlink = blname where slotname = myname; + end if; + return 0; + end if; + if mytype = ''PH'' then + select into rec * from PHone where slotname = myname; + if not found then + raise exception ''% does not exist'', myname; + end if; + if rec.slotlink != blname then + update PHone set slotlink = blname where slotname = myname; + end if; + return 0; + end if; + raise exception ''illegal slotlink beginning with %'', mytype; +end; +' language plpgsql; +-- ************************************************************ +-- * Support function to clear out the slotlink field if +-- * it still points to specific slot +-- ************************************************************ +create function tg_slotlink_unset(bpchar, bpchar) +returns integer as ' +declare + myname alias for $1; + blname alias for $2; + mytype char(2); + rec record; +begin + mytype := substr(myname, 1, 2); + if mytype = ''PS'' then + select into rec * from PSlot where slotname = myname; + if not found then + return 0; + end if; + if rec.slotlink = blname then + update PSlot set slotlink = '''' where slotname = myname; + end if; + return 0; + end if; + if mytype = ''WS'' then + select into rec * from WSlot where slotname = myname; + if not found then + return 0; + end if; + if rec.slotlink = blname then + update WSlot set slotlink = '''' where slotname = myname; + end if; + return 0; + end if; + if mytype = ''IF'' then + select into rec * from IFace where slotname = myname; + if not found then + return 0; + end if; + if rec.slotlink = blname then + update IFace set slotlink = '''' where slotname = myname; + end if; + return 0; + end if; + if mytype = ''HS'' then + select into rec * from HSlot where slotname = myname; + if not found then + return 0; + end if; + if rec.slotlink = blname then + update HSlot set slotlink = '''' where slotname = myname; + end if; + return 0; + end if; + if mytype = ''PH'' then + select into rec * from PHone where slotname = myname; + if not found then + return 0; + end if; + if rec.slotlink = blname then + update PHone set slotlink = '''' where slotname = myname; + end if; + return 0; + end if; +end; +' language plpgsql; +-- ************************************************************ +-- * Describe the backside of 
a patchfield slot +-- ************************************************************ +create function pslot_backlink_view(bpchar) +returns text as ' +<<outer>> +declare + rec record; + bltype char(2); + retval text; +begin + select into rec * from PSlot where slotname = $1; + if not found then + return ''''; + end if; + if rec.backlink = '''' then + return ''-''; + end if; + bltype := substr(rec.backlink, 1, 2); + if bltype = ''PL'' then + declare + rec record; + begin + select into rec * from PLine where slotname = "outer".rec.backlink; + retval := ''Phone line '' || trim(rec.phonenumber); + if rec.comment != '''' then + retval := retval || '' (''; + retval := retval || rec.comment; + retval := retval || '')''; + end if; + return retval; + end; + end if; + if bltype = ''WS'' then + select into rec * from WSlot where slotname = rec.backlink; + retval := trim(rec.slotname) || '' in room ''; + retval := retval || trim(rec.roomno); + retval := retval || '' -> ''; + return retval || wslot_slotlink_view(rec.slotname); + end if; + return rec.backlink; +end; +' language plpgsql; +-- ************************************************************ +-- * Describe the front of a patchfield slot +-- ************************************************************ +create function pslot_slotlink_view(bpchar) +returns text as ' +declare + psrec record; + sltype char(2); + retval text; +begin + select into psrec * from PSlot where slotname = $1; + if not found then + return ''''; + end if; + if psrec.slotlink = '''' then + return ''-''; + end if; + sltype := substr(psrec.slotlink, 1, 2); + if sltype = ''PS'' then + retval := trim(psrec.slotlink) || '' -> ''; + return retval || pslot_backlink_view(psrec.slotlink); + end if; + if sltype = ''HS'' then + retval := comment from Hub H, HSlot HS + where HS.slotname = psrec.slotlink + and H.name = HS.hubname; + retval := retval || '' slot ''; + retval := retval || slotno::text from HSlot + where slotname = psrec.slotlink; + return retval; + end if; + return psrec.slotlink; +end; +' language plpgsql; +-- ************************************************************ +-- * Describe the front of a wall connector slot +-- ************************************************************ +create function wslot_slotlink_view(bpchar) +returns text as ' +declare + rec record; + sltype char(2); + retval text; +begin + select into rec * from WSlot where slotname = $1; + if not found then + return ''''; + end if; + if rec.slotlink = '''' then + return ''-''; + end if; + sltype := substr(rec.slotlink, 1, 2); + if sltype = ''PH'' then + select into rec * from PHone where slotname = rec.slotlink; + retval := ''Phone '' || trim(rec.slotname); + if rec.comment != '''' then + retval := retval || '' (''; + retval := retval || rec.comment; + retval := retval || '')''; + end if; + return retval; + end if; + if sltype = ''IF'' then + declare + syrow System%RowType; + ifrow IFace%ROWTYPE; + begin + select into ifrow * from IFace where slotname = rec.slotlink; + select into syrow * from System where name = ifrow.sysname; + retval := syrow.name || '' IF ''; + retval := retval || ifrow.ifname; + if syrow.comment != '''' then + retval := retval || '' (''; + retval := retval || syrow.comment; + retval := retval || '')''; + end if; + return retval; + end; + end if; + return rec.slotlink; +end; +' language plpgsql; +-- ************************************************************ +-- * View of a patchfield describing backside and patches +-- ************************************************************ +create view 
Pfield_v1 as select PF.pfname, PF.slotname, + pslot_backlink_view(PF.slotname) as backside, + pslot_slotlink_view(PF.slotname) as patch + from PSlot PF; +-- +-- First we build the house - so we create the rooms +-- +insert into Room values ('001', 'Entrance'); +insert into Room values ('002', 'Office'); +insert into Room values ('003', 'Office'); +insert into Room values ('004', 'Technical'); +insert into Room values ('101', 'Office'); +insert into Room values ('102', 'Conference'); +insert into Room values ('103', 'Restroom'); +insert into Room values ('104', 'Technical'); +insert into Room values ('105', 'Office'); +insert into Room values ('106', 'Office'); +-- +-- Second we install the wall connectors +-- +insert into WSlot values ('WS.001.1a', '001', '', ''); +insert into WSlot values ('WS.001.1b', '001', '', ''); +insert into WSlot values ('WS.001.2a', '001', '', ''); +insert into WSlot values ('WS.001.2b', '001', '', ''); +insert into WSlot values ('WS.001.3a', '001', '', ''); +insert into WSlot values ('WS.001.3b', '001', '', ''); +insert into WSlot values ('WS.002.1a', '002', '', ''); +insert into WSlot values ('WS.002.1b', '002', '', ''); +insert into WSlot values ('WS.002.2a', '002', '', ''); +insert into WSlot values ('WS.002.2b', '002', '', ''); +insert into WSlot values ('WS.002.3a', '002', '', ''); +insert into WSlot values ('WS.002.3b', '002', '', ''); +insert into WSlot values ('WS.003.1a', '003', '', ''); +insert into WSlot values ('WS.003.1b', '003', '', ''); +insert into WSlot values ('WS.003.2a', '003', '', ''); +insert into WSlot values ('WS.003.2b', '003', '', ''); +insert into WSlot values ('WS.003.3a', '003', '', ''); +insert into WSlot values ('WS.003.3b', '003', '', ''); +insert into WSlot values ('WS.101.1a', '101', '', ''); +insert into WSlot values ('WS.101.1b', '101', '', ''); +insert into WSlot values ('WS.101.2a', '101', '', ''); +insert into WSlot values ('WS.101.2b', '101', '', ''); +insert into WSlot values ('WS.101.3a', '101', '', ''); +insert into WSlot values ('WS.101.3b', '101', '', ''); +insert into WSlot values ('WS.102.1a', '102', '', ''); +insert into WSlot values ('WS.102.1b', '102', '', ''); +insert into WSlot values ('WS.102.2a', '102', '', ''); +insert into WSlot values ('WS.102.2b', '102', '', ''); +insert into WSlot values ('WS.102.3a', '102', '', ''); +insert into WSlot values ('WS.102.3b', '102', '', ''); +insert into WSlot values ('WS.105.1a', '105', '', ''); +insert into WSlot values ('WS.105.1b', '105', '', ''); +insert into WSlot values ('WS.105.2a', '105', '', ''); +insert into WSlot values ('WS.105.2b', '105', '', ''); +insert into WSlot values ('WS.105.3a', '105', '', ''); +insert into WSlot values ('WS.105.3b', '105', '', ''); +insert into WSlot values ('WS.106.1a', '106', '', ''); +insert into WSlot values ('WS.106.1b', '106', '', ''); +insert into WSlot values ('WS.106.2a', '106', '', ''); +insert into WSlot values ('WS.106.2b', '106', '', ''); +insert into WSlot values ('WS.106.3a', '106', '', ''); +insert into WSlot values ('WS.106.3b', '106', '', ''); +-- +-- Now create the patch fields and their slots +-- +insert into PField values ('PF0_1', 'Wallslots basement'); +-- +-- The cables for these will be made later, so they are unconnected for now +-- +insert into PSlot values ('PS.base.a1', 'PF0_1', '', ''); +insert into PSlot values ('PS.base.a2', 'PF0_1', '', ''); +insert into PSlot values ('PS.base.a3', 'PF0_1', '', ''); +insert into PSlot values ('PS.base.a4', 'PF0_1', '', ''); +insert into PSlot values ('PS.base.a5', 
'PF0_1', '', ''); +insert into PSlot values ('PS.base.a6', 'PF0_1', '', ''); +-- +-- These are already wired to the wall connectors +-- +insert into PSlot values ('PS.base.b1', 'PF0_1', '', 'WS.002.1a'); +insert into PSlot values ('PS.base.b2', 'PF0_1', '', 'WS.002.1b'); +insert into PSlot values ('PS.base.b3', 'PF0_1', '', 'WS.002.2a'); +insert into PSlot values ('PS.base.b4', 'PF0_1', '', 'WS.002.2b'); +insert into PSlot values ('PS.base.b5', 'PF0_1', '', 'WS.002.3a'); +insert into PSlot values ('PS.base.b6', 'PF0_1', '', 'WS.002.3b'); +insert into PSlot values ('PS.base.c1', 'PF0_1', '', 'WS.003.1a'); +insert into PSlot values ('PS.base.c2', 'PF0_1', '', 'WS.003.1b'); +insert into PSlot values ('PS.base.c3', 'PF0_1', '', 'WS.003.2a'); +insert into PSlot values ('PS.base.c4', 'PF0_1', '', 'WS.003.2b'); +insert into PSlot values ('PS.base.c5', 'PF0_1', '', 'WS.003.3a'); +insert into PSlot values ('PS.base.c6', 'PF0_1', '', 'WS.003.3b'); +-- +-- This patchfield will be renamed later into PF0_2 - so its +-- slots references in pfname should follow +-- +insert into PField values ('PF0_X', 'Phonelines basement'); +insert into PSlot values ('PS.base.ta1', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.ta2', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.ta3', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.ta4', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.ta5', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.ta6', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.tb1', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.tb2', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.tb3', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.tb4', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.tb5', 'PF0_X', '', ''); +insert into PSlot values ('PS.base.tb6', 'PF0_X', '', ''); +insert into PField values ('PF1_1', 'Wallslots first floor'); +insert into PSlot values ('PS.first.a1', 'PF1_1', '', 'WS.101.1a'); +insert into PSlot values ('PS.first.a2', 'PF1_1', '', 'WS.101.1b'); +insert into PSlot values ('PS.first.a3', 'PF1_1', '', 'WS.101.2a'); +insert into PSlot values ('PS.first.a4', 'PF1_1', '', 'WS.101.2b'); +insert into PSlot values ('PS.first.a5', 'PF1_1', '', 'WS.101.3a'); +insert into PSlot values ('PS.first.a6', 'PF1_1', '', 'WS.101.3b'); +insert into PSlot values ('PS.first.b1', 'PF1_1', '', 'WS.102.1a'); +insert into PSlot values ('PS.first.b2', 'PF1_1', '', 'WS.102.1b'); +insert into PSlot values ('PS.first.b3', 'PF1_1', '', 'WS.102.2a'); +insert into PSlot values ('PS.first.b4', 'PF1_1', '', 'WS.102.2b'); +insert into PSlot values ('PS.first.b5', 'PF1_1', '', 'WS.102.3a'); +insert into PSlot values ('PS.first.b6', 'PF1_1', '', 'WS.102.3b'); +insert into PSlot values ('PS.first.c1', 'PF1_1', '', 'WS.105.1a'); +insert into PSlot values ('PS.first.c2', 'PF1_1', '', 'WS.105.1b'); +insert into PSlot values ('PS.first.c3', 'PF1_1', '', 'WS.105.2a'); +insert into PSlot values ('PS.first.c4', 'PF1_1', '', 'WS.105.2b'); +insert into PSlot values ('PS.first.c5', 'PF1_1', '', 'WS.105.3a'); +insert into PSlot values ('PS.first.c6', 'PF1_1', '', 'WS.105.3b'); +insert into PSlot values ('PS.first.d1', 'PF1_1', '', 'WS.106.1a'); +insert into PSlot values ('PS.first.d2', 'PF1_1', '', 'WS.106.1b'); +insert into PSlot values ('PS.first.d3', 'PF1_1', '', 'WS.106.2a'); +insert into PSlot values ('PS.first.d4', 'PF1_1', '', 'WS.106.2b'); +insert into PSlot values ('PS.first.d5', 'PF1_1', '', 'WS.106.3a'); +insert into PSlot values ('PS.first.d6', 'PF1_1', '', 
'WS.106.3b'); +-- +-- Now we wire the wall connectors 1a-2a in room 001 to the +-- patchfield. In the second update we make an error, and +-- correct it after +-- +update PSlot set backlink = 'WS.001.1a' where slotname = 'PS.base.a1'; +update PSlot set backlink = 'WS.001.1b' where slotname = 'PS.base.a3'; +select * from WSlot where roomno = '001' order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | PS.base.a3 + WS.001.2a | 001 | | + WS.001.2b | 001 | | + WS.001.3a | 001 | | + WS.001.3b | 001 | | +(6 rows) + +select * from PSlot where slotname ~ 'PS.base.a' order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | + PS.base.a3 | PF0_1 | | WS.001.1b + PS.base.a4 | PF0_1 | | + PS.base.a5 | PF0_1 | | + PS.base.a6 | PF0_1 | | +(6 rows) + +update PSlot set backlink = 'WS.001.2a' where slotname = 'PS.base.a3'; +select * from WSlot where roomno = '001' order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | + WS.001.2a | 001 | | PS.base.a3 + WS.001.2b | 001 | | + WS.001.3a | 001 | | + WS.001.3b | 001 | | +(6 rows) + +select * from PSlot where slotname ~ 'PS.base.a' order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | + PS.base.a3 | PF0_1 | | WS.001.2a + PS.base.a4 | PF0_1 | | + PS.base.a5 | PF0_1 | | + PS.base.a6 | PF0_1 | | +(6 rows) + +update PSlot set backlink = 'WS.001.1b' where slotname = 'PS.base.a2'; +select * from WSlot where roomno = '001' order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | PS.base.a2 + WS.001.2a | 001 | | PS.base.a3 + WS.001.2b | 001 | | + WS.001.3a | 001 | | + WS.001.3b | 001 | | +(6 rows) + +select * from PSlot where slotname ~ 'PS.base.a' order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | WS.001.1b + PS.base.a3 | PF0_1 | | WS.001.2a + PS.base.a4 | PF0_1 | | + PS.base.a5 | PF0_1 | | + PS.base.a6 | PF0_1 | | +(6 rows) + +-- +-- Same procedure for 2b-3b but this time updating the WSlot instead +-- of the PSlot. Due to the triggers the result is the same: +-- WSlot and corresponding PSlot point to each other. 
+-- +update WSlot set backlink = 'PS.base.a4' where slotname = 'WS.001.2b'; +update WSlot set backlink = 'PS.base.a6' where slotname = 'WS.001.3a'; +select * from WSlot where roomno = '001' order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | PS.base.a2 + WS.001.2a | 001 | | PS.base.a3 + WS.001.2b | 001 | | PS.base.a4 + WS.001.3a | 001 | | PS.base.a6 + WS.001.3b | 001 | | +(6 rows) + +select * from PSlot where slotname ~ 'PS.base.a' order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | WS.001.1b + PS.base.a3 | PF0_1 | | WS.001.2a + PS.base.a4 | PF0_1 | | WS.001.2b + PS.base.a5 | PF0_1 | | + PS.base.a6 | PF0_1 | | WS.001.3a +(6 rows) + +update WSlot set backlink = 'PS.base.a6' where slotname = 'WS.001.3b'; +select * from WSlot where roomno = '001' order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | PS.base.a2 + WS.001.2a | 001 | | PS.base.a3 + WS.001.2b | 001 | | PS.base.a4 + WS.001.3a | 001 | | + WS.001.3b | 001 | | PS.base.a6 +(6 rows) + +select * from PSlot where slotname ~ 'PS.base.a' order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | WS.001.1b + PS.base.a3 | PF0_1 | | WS.001.2a + PS.base.a4 | PF0_1 | | WS.001.2b + PS.base.a5 | PF0_1 | | + PS.base.a6 | PF0_1 | | WS.001.3b +(6 rows) + +update WSlot set backlink = 'PS.base.a5' where slotname = 'WS.001.3a'; +select * from WSlot where roomno = '001' order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | PS.base.a2 + WS.001.2a | 001 | | PS.base.a3 + WS.001.2b | 001 | | PS.base.a4 + WS.001.3a | 001 | | PS.base.a5 + WS.001.3b | 001 | | PS.base.a6 +(6 rows) + +select * from PSlot where slotname ~ 'PS.base.a' order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | WS.001.1b + PS.base.a3 | PF0_1 | | WS.001.2a + PS.base.a4 | PF0_1 | | WS.001.2b + PS.base.a5 | PF0_1 | | WS.001.3a + PS.base.a6 | PF0_1 | | WS.001.3b +(6 rows) + +insert into PField values ('PF1_2', 'Phonelines first floor'); +insert into PSlot values ('PS.first.ta1', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.ta2', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.ta3', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.ta4', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.ta5', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.ta6', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.tb1', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.tb2', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.tb3', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.tb4', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.tb5', 'PF1_2', '', ''); +insert into PSlot values ('PS.first.tb6', 'PF1_2', '', ''); +-- +-- Fix the wrong name for patchfield PF0_2 +-- +update PField set name = 'PF0_2' 
where name = 'PF0_X'; +select * from PSlot order by slotname; + slotname | pfname | slotlink | backlink +----------------------+--------+----------------------+---------------------- + PS.base.a1 | PF0_1 | | WS.001.1a + PS.base.a2 | PF0_1 | | WS.001.1b + PS.base.a3 | PF0_1 | | WS.001.2a + PS.base.a4 | PF0_1 | | WS.001.2b + PS.base.a5 | PF0_1 | | WS.001.3a + PS.base.a6 | PF0_1 | | WS.001.3b + PS.base.b1 | PF0_1 | | WS.002.1a + PS.base.b2 | PF0_1 | | WS.002.1b + PS.base.b3 | PF0_1 | | WS.002.2a + PS.base.b4 | PF0_1 | | WS.002.2b + PS.base.b5 | PF0_1 | | WS.002.3a + PS.base.b6 | PF0_1 | | WS.002.3b + PS.base.c1 | PF0_1 | | WS.003.1a + PS.base.c2 | PF0_1 | | WS.003.1b + PS.base.c3 | PF0_1 | | WS.003.2a + PS.base.c4 | PF0_1 | | WS.003.2b + PS.base.c5 | PF0_1 | | WS.003.3a + PS.base.c6 | PF0_1 | | WS.003.3b + PS.base.ta1 | PF0_2 | | + PS.base.ta2 | PF0_2 | | + PS.base.ta3 | PF0_2 | | + PS.base.ta4 | PF0_2 | | + PS.base.ta5 | PF0_2 | | + PS.base.ta6 | PF0_2 | | + PS.base.tb1 | PF0_2 | | + PS.base.tb2 | PF0_2 | | + PS.base.tb3 | PF0_2 | | + PS.base.tb4 | PF0_2 | | + PS.base.tb5 | PF0_2 | | + PS.base.tb6 | PF0_2 | | + PS.first.a1 | PF1_1 | | WS.101.1a + PS.first.a2 | PF1_1 | | WS.101.1b + PS.first.a3 | PF1_1 | | WS.101.2a + PS.first.a4 | PF1_1 | | WS.101.2b + PS.first.a5 | PF1_1 | | WS.101.3a + PS.first.a6 | PF1_1 | | WS.101.3b + PS.first.b1 | PF1_1 | | WS.102.1a + PS.first.b2 | PF1_1 | | WS.102.1b + PS.first.b3 | PF1_1 | | WS.102.2a + PS.first.b4 | PF1_1 | | WS.102.2b + PS.first.b5 | PF1_1 | | WS.102.3a + PS.first.b6 | PF1_1 | | WS.102.3b + PS.first.c1 | PF1_1 | | WS.105.1a + PS.first.c2 | PF1_1 | | WS.105.1b + PS.first.c3 | PF1_1 | | WS.105.2a + PS.first.c4 | PF1_1 | | WS.105.2b + PS.first.c5 | PF1_1 | | WS.105.3a + PS.first.c6 | PF1_1 | | WS.105.3b + PS.first.d1 | PF1_1 | | WS.106.1a + PS.first.d2 | PF1_1 | | WS.106.1b + PS.first.d3 | PF1_1 | | WS.106.2a + PS.first.d4 | PF1_1 | | WS.106.2b + PS.first.d5 | PF1_1 | | WS.106.3a + PS.first.d6 | PF1_1 | | WS.106.3b + PS.first.ta1 | PF1_2 | | + PS.first.ta2 | PF1_2 | | + PS.first.ta3 | PF1_2 | | + PS.first.ta4 | PF1_2 | | + PS.first.ta5 | PF1_2 | | + PS.first.ta6 | PF1_2 | | + PS.first.tb1 | PF1_2 | | + PS.first.tb2 | PF1_2 | | + PS.first.tb3 | PF1_2 | | + PS.first.tb4 | PF1_2 | | + PS.first.tb5 | PF1_2 | | + PS.first.tb6 | PF1_2 | | +(66 rows) + +select * from WSlot order by slotname; + slotname | roomno | slotlink | backlink +----------------------+----------+----------------------+---------------------- + WS.001.1a | 001 | | PS.base.a1 + WS.001.1b | 001 | | PS.base.a2 + WS.001.2a | 001 | | PS.base.a3 + WS.001.2b | 001 | | PS.base.a4 + WS.001.3a | 001 | | PS.base.a5 + WS.001.3b | 001 | | PS.base.a6 + WS.002.1a | 002 | | PS.base.b1 + WS.002.1b | 002 | | PS.base.b2 + WS.002.2a | 002 | | PS.base.b3 + WS.002.2b | 002 | | PS.base.b4 + WS.002.3a | 002 | | PS.base.b5 + WS.002.3b | 002 | | PS.base.b6 + WS.003.1a | 003 | | PS.base.c1 + WS.003.1b | 003 | | PS.base.c2 + WS.003.2a | 003 | | PS.base.c3 + WS.003.2b | 003 | | PS.base.c4 + WS.003.3a | 003 | | PS.base.c5 + WS.003.3b | 003 | | PS.base.c6 + WS.101.1a | 101 | | PS.first.a1 + WS.101.1b | 101 | | PS.first.a2 + WS.101.2a | 101 | | PS.first.a3 + WS.101.2b | 101 | | PS.first.a4 + WS.101.3a | 101 | | PS.first.a5 + WS.101.3b | 101 | | PS.first.a6 + WS.102.1a | 102 | | PS.first.b1 + WS.102.1b | 102 | | PS.first.b2 + WS.102.2a | 102 | | PS.first.b3 + WS.102.2b | 102 | | PS.first.b4 + WS.102.3a | 102 | | PS.first.b5 + WS.102.3b | 102 | | PS.first.b6 + WS.105.1a | 105 | | PS.first.c1 + WS.105.1b | 105 | | 
PS.first.c2 + WS.105.2a | 105 | | PS.first.c3 + WS.105.2b | 105 | | PS.first.c4 + WS.105.3a | 105 | | PS.first.c5 + WS.105.3b | 105 | | PS.first.c6 + WS.106.1a | 106 | | PS.first.d1 + WS.106.1b | 106 | | PS.first.d2 + WS.106.2a | 106 | | PS.first.d3 + WS.106.2b | 106 | | PS.first.d4 + WS.106.3a | 106 | | PS.first.d5 + WS.106.3b | 106 | | PS.first.d6 +(42 rows) + +-- +-- Install the central phone system and create the phone numbers. +-- They are wired on insert to the patchfields. Again the +-- triggers automatically tell the PSlots to update their +-- backlink field. +-- +insert into PLine values ('PL.001', '-0', 'Central call', 'PS.base.ta1'); +insert into PLine values ('PL.002', '-101', '', 'PS.base.ta2'); +insert into PLine values ('PL.003', '-102', '', 'PS.base.ta3'); +insert into PLine values ('PL.004', '-103', '', 'PS.base.ta5'); +insert into PLine values ('PL.005', '-104', '', 'PS.base.ta6'); +insert into PLine values ('PL.006', '-106', '', 'PS.base.tb2'); +insert into PLine values ('PL.007', '-108', '', 'PS.base.tb3'); +insert into PLine values ('PL.008', '-109', '', 'PS.base.tb4'); +insert into PLine values ('PL.009', '-121', '', 'PS.base.tb5'); +insert into PLine values ('PL.010', '-122', '', 'PS.base.tb6'); +insert into PLine values ('PL.015', '-134', '', 'PS.first.ta1'); +insert into PLine values ('PL.016', '-137', '', 'PS.first.ta3'); +insert into PLine values ('PL.017', '-139', '', 'PS.first.ta4'); +insert into PLine values ('PL.018', '-362', '', 'PS.first.tb1'); +insert into PLine values ('PL.019', '-363', '', 'PS.first.tb2'); +insert into PLine values ('PL.020', '-364', '', 'PS.first.tb3'); +insert into PLine values ('PL.021', '-365', '', 'PS.first.tb5'); +insert into PLine values ('PL.022', '-367', '', 'PS.first.tb6'); +insert into PLine values ('PL.028', '-501', 'Fax entrance', 'PS.base.ta2'); +insert into PLine values ('PL.029', '-502', 'Fax first floor', 'PS.first.ta1'); +-- +-- Buy some phones, plug them into the wall and patch the +-- phone lines to the corresponding patchfield slots. +-- +insert into PHone values ('PH.hc001', 'Hicom standard', 'WS.001.1a'); +update PSlot set slotlink = 'PS.base.ta1' where slotname = 'PS.base.a1'; +insert into PHone values ('PH.hc002', 'Hicom standard', 'WS.002.1a'); +update PSlot set slotlink = 'PS.base.ta5' where slotname = 'PS.base.b1'; +insert into PHone values ('PH.hc003', 'Hicom standard', 'WS.002.2a'); +update PSlot set slotlink = 'PS.base.tb2' where slotname = 'PS.base.b3'; +insert into PHone values ('PH.fax001', 'Canon fax', 'WS.001.2a'); +update PSlot set slotlink = 'PS.base.ta2' where slotname = 'PS.base.a3'; +-- +-- Install a hub at one of the patchfields, plug a computers +-- ethernet interface into the wall and patch it to the hub. 
+-- +insert into Hub values ('base.hub1', 'Patchfield PF0_1 hub', 16); +insert into System values ('orion', 'PC'); +insert into IFace values ('IF', 'orion', 'eth0', 'WS.002.1b'); +update PSlot set slotlink = 'HS.base.hub1.1' where slotname = 'PS.base.b2'; +-- +-- Now we take a look at the patchfield +-- +select * from PField_v1 where pfname = 'PF0_1' order by slotname; + pfname | slotname | backside | patch +--------+----------------------+----------------------------------------------------------+----------------------------------------------- + PF0_1 | PS.base.a1 | WS.001.1a in room 001 -> Phone PH.hc001 (Hicom standard) | PS.base.ta1 -> Phone line -0 (Central call) + PF0_1 | PS.base.a2 | WS.001.1b in room 001 -> - | - + PF0_1 | PS.base.a3 | WS.001.2a in room 001 -> Phone PH.fax001 (Canon fax) | PS.base.ta2 -> Phone line -501 (Fax entrance) + PF0_1 | PS.base.a4 | WS.001.2b in room 001 -> - | - + PF0_1 | PS.base.a5 | WS.001.3a in room 001 -> - | - + PF0_1 | PS.base.a6 | WS.001.3b in room 001 -> - | - + PF0_1 | PS.base.b1 | WS.002.1a in room 002 -> Phone PH.hc002 (Hicom standard) | PS.base.ta5 -> Phone line -103 + PF0_1 | PS.base.b2 | WS.002.1b in room 002 -> orion IF eth0 (PC) | Patchfield PF0_1 hub slot 1 + PF0_1 | PS.base.b3 | WS.002.2a in room 002 -> Phone PH.hc003 (Hicom standard) | PS.base.tb2 -> Phone line -106 + PF0_1 | PS.base.b4 | WS.002.2b in room 002 -> - | - + PF0_1 | PS.base.b5 | WS.002.3a in room 002 -> - | - + PF0_1 | PS.base.b6 | WS.002.3b in room 002 -> - | - + PF0_1 | PS.base.c1 | WS.003.1a in room 003 -> - | - + PF0_1 | PS.base.c2 | WS.003.1b in room 003 -> - | - + PF0_1 | PS.base.c3 | WS.003.2a in room 003 -> - | - + PF0_1 | PS.base.c4 | WS.003.2b in room 003 -> - | - + PF0_1 | PS.base.c5 | WS.003.3a in room 003 -> - | - + PF0_1 | PS.base.c6 | WS.003.3b in room 003 -> - | - +(18 rows) + +select * from PField_v1 where pfname = 'PF0_2' order by slotname; + pfname | slotname | backside | patch +--------+----------------------+--------------------------------+------------------------------------------------------------------------ + PF0_2 | PS.base.ta1 | Phone line -0 (Central call) | PS.base.a1 -> WS.001.1a in room 001 -> Phone PH.hc001 (Hicom standard) + PF0_2 | PS.base.ta2 | Phone line -501 (Fax entrance) | PS.base.a3 -> WS.001.2a in room 001 -> Phone PH.fax001 (Canon fax) + PF0_2 | PS.base.ta3 | Phone line -102 | - + PF0_2 | PS.base.ta4 | - | - + PF0_2 | PS.base.ta5 | Phone line -103 | PS.base.b1 -> WS.002.1a in room 002 -> Phone PH.hc002 (Hicom standard) + PF0_2 | PS.base.ta6 | Phone line -104 | - + PF0_2 | PS.base.tb1 | - | - + PF0_2 | PS.base.tb2 | Phone line -106 | PS.base.b3 -> WS.002.2a in room 002 -> Phone PH.hc003 (Hicom standard) + PF0_2 | PS.base.tb3 | Phone line -108 | - + PF0_2 | PS.base.tb4 | Phone line -109 | - + PF0_2 | PS.base.tb5 | Phone line -121 | - + PF0_2 | PS.base.tb6 | Phone line -122 | - +(12 rows) + +-- +-- Finally we want errors +-- +insert into PField values ('PF1_1', 'should fail due to unique index'); +ERROR: duplicate key value violates unique constraint "pfield_name" +DETAIL: Key (name)=(PF1_1) already exists. 
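+--
+-- A minimal illustrative sketch (the helper name backlink_mismatch_count is
+-- invented for illustration and is not referenced by the tests that follow):
+-- with the tg_backlink_* triggers above in place, every wired PSlot/WSlot
+-- pair should point back at each other, which a single join can verify.
+--
+create function backlink_mismatch_count() returns bigint as $$
+declare
+    n bigint;
+begin
+    -- count wired PSlot rows whose WSlot partner does not point back
+    select count(*) into n
+      from PSlot p
+      join WSlot w on w.slotname = p.backlink
+     where w.backlink is distinct from p.slotname;
+    return n;
+end$$ language plpgsql;
+-- e.g. "select backlink_mismatch_count();" should come out as 0 at this
+-- point if the backlink triggers kept both sides in sync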
+update PSlot set backlink = 'WS.not.there' where slotname = 'PS.base.a1'; +ERROR: WS.not.there does not exist +CONTEXT: PL/pgSQL function tg_backlink_set(character,character) line 30 at RAISE +PL/pgSQL function tg_backlink_a() line 17 at assignment +update PSlot set backlink = 'XX.illegal' where slotname = 'PS.base.a1'; +ERROR: illegal backlink beginning with XX +CONTEXT: PL/pgSQL function tg_backlink_set(character,character) line 47 at RAISE +PL/pgSQL function tg_backlink_a() line 17 at assignment +update PSlot set slotlink = 'PS.not.there' where slotname = 'PS.base.a1'; +ERROR: PS.not.there does not exist +CONTEXT: PL/pgSQL function tg_slotlink_set(character,character) line 30 at RAISE +PL/pgSQL function tg_slotlink_a() line 17 at assignment +update PSlot set slotlink = 'XX.illegal' where slotname = 'PS.base.a1'; +ERROR: illegal slotlink beginning with XX +CONTEXT: PL/pgSQL function tg_slotlink_set(character,character) line 77 at RAISE +PL/pgSQL function tg_slotlink_a() line 17 at assignment +insert into HSlot values ('HS', 'base.hub1', 1, ''); +ERROR: duplicate key value violates unique constraint "hslot_name" +DETAIL: Key (slotname)=(HS.base.hub1.1 ) already exists. +insert into HSlot values ('HS', 'base.hub1', 20, ''); +ERROR: no manual manipulation of HSlot +CONTEXT: PL/pgSQL function tg_hslot_biu() line 12 at RAISE +delete from HSlot; +ERROR: no manual manipulation of HSlot +CONTEXT: PL/pgSQL function tg_hslot_bd() line 12 at RAISE +insert into IFace values ('IF', 'notthere', 'eth0', ''); +ERROR: system "notthere" does not exist +CONTEXT: PL/pgSQL function tg_iface_biu() line 8 at RAISE +insert into IFace values ('IF', 'orion', 'ethernet_interface_name_too_long', ''); +ERROR: IFace slotname "IF.orion.ethernet_interface_name_too_long" too long (20 char max) +CONTEXT: PL/pgSQL function tg_iface_biu() line 14 at RAISE +-- +-- The following tests are unrelated to the scenario outlined above; +-- they merely exercise specific parts of PL/pgSQL +-- +-- +-- Test recursion, per bug report 7-Sep-01 +-- +CREATE FUNCTION recursion_test(int,int) RETURNS text AS ' +DECLARE rslt text; +BEGIN + IF $1 <= 0 THEN + rslt = CAST($2 AS TEXT); + ELSE + rslt = CAST($1 AS TEXT) || '','' || recursion_test($1 - 1, $2); + END IF; + RETURN rslt; +END;' LANGUAGE plpgsql; +SELECT recursion_test(4,3); + recursion_test +---------------- + 4,3,2,1,3 +(1 row) + +-- +-- Test the FOUND magic variable +-- +CREATE TABLE found_test_tbl (a int); +create function test_found() + returns boolean as ' + declare + begin + insert into found_test_tbl values (1); + if FOUND then + insert into found_test_tbl values (2); + end if; + + update found_test_tbl set a = 100 where a = 1; + if FOUND then + insert into found_test_tbl values (3); + end if; + + delete from found_test_tbl where a = 9999; -- matches no rows + if not FOUND then + insert into found_test_tbl values (4); + end if; + + for i in 1 .. 10 loop + -- no need to do anything + end loop; + if FOUND then + insert into found_test_tbl values (5); + end if; + + -- never executes the loop + for i in 2 .. 
1 loop + -- no need to do anything + end loop; + if not FOUND then + insert into found_test_tbl values (6); + end if; + return true; + end;' language plpgsql; +select test_found(); + test_found +------------ + t +(1 row) + +select * from found_test_tbl; + a +----- + 2 + 100 + 3 + 4 + 5 + 6 +(6 rows) + +-- +-- Test set-returning functions for PL/pgSQL +-- +create function test_table_func_rec() returns setof found_test_tbl as ' +DECLARE + rec RECORD; +BEGIN + FOR rec IN select * from found_test_tbl LOOP + RETURN NEXT rec; + END LOOP; + RETURN; +END;' language plpgsql; +select * from test_table_func_rec(); + a +----- + 2 + 100 + 3 + 4 + 5 + 6 +(6 rows) + +create function test_table_func_row() returns setof found_test_tbl as ' +DECLARE + row found_test_tbl%ROWTYPE; +BEGIN + FOR row IN select * from found_test_tbl LOOP + RETURN NEXT row; + END LOOP; + RETURN; +END;' language plpgsql; +select * from test_table_func_row(); + a +----- + 2 + 100 + 3 + 4 + 5 + 6 +(6 rows) + +create function test_ret_set_scalar(int,int) returns setof int as ' +DECLARE + i int; +BEGIN + FOR i IN $1 .. $2 LOOP + RETURN NEXT i + 1; + END LOOP; + RETURN; +END;' language plpgsql; +select * from test_ret_set_scalar(1,10); + test_ret_set_scalar +--------------------- + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 +(10 rows) + +create function test_ret_set_rec_dyn(int) returns setof record as ' +DECLARE + retval RECORD; +BEGIN + IF $1 > 10 THEN + SELECT INTO retval 5, 10, 15; + RETURN NEXT retval; + RETURN NEXT retval; + ELSE + SELECT INTO retval 50, 5::numeric, ''xxx''::text; + RETURN NEXT retval; + RETURN NEXT retval; + END IF; + RETURN; +END;' language plpgsql; +SELECT * FROM test_ret_set_rec_dyn(1500) AS (a int, b int, c int); + a | b | c +---+----+---- + 5 | 10 | 15 + 5 | 10 | 15 +(2 rows) + +SELECT * FROM test_ret_set_rec_dyn(5) AS (a int, b numeric, c text); + a | b | c +----+---+----- + 50 | 5 | xxx + 50 | 5 | xxx +(2 rows) + +create function test_ret_rec_dyn(int) returns record as ' +DECLARE + retval RECORD; +BEGIN + IF $1 > 10 THEN + SELECT INTO retval 5, 10, 15; + RETURN retval; + ELSE + SELECT INTO retval 50, 5::numeric, ''xxx''::text; + RETURN retval; + END IF; +END;' language plpgsql; +SELECT * FROM test_ret_rec_dyn(1500) AS (a int, b int, c int); + a | b | c +---+----+---- + 5 | 10 | 15 +(1 row) + +SELECT * FROM test_ret_rec_dyn(5) AS (a int, b numeric, c text); + a | b | c +----+---+----- + 50 | 5 | xxx +(1 row) + +-- +-- Test some simple polymorphism cases. +-- +create function f1(x anyelement) returns anyelement as $$ +begin + return x + 1; +end$$ language plpgsql; +select f1(42) as int, f1(4.5) as num; + int | num +-----+----- + 43 | 5.5 +(1 row) + +select f1(point(3,4)); -- fail for lack of + operator +ERROR: operator does not exist: point + integer +LINE 1: x + 1 + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
+QUERY: x + 1 +CONTEXT: PL/pgSQL function f1(anyelement) line 3 at RETURN +drop function f1(x anyelement); +create function f1(x anyelement) returns anyarray as $$ +begin + return array[x + 1, x + 2]; +end$$ language plpgsql; +select f1(42) as int, f1(4.5) as num; + int | num +---------+----------- + {43,44} | {5.5,6.5} +(1 row) + +drop function f1(x anyelement); +create function f1(x anyarray) returns anyelement as $$ +begin + return x[1]; +end$$ language plpgsql; +select f1(array[2,4]) as int, f1(array[4.5, 7.7]) as num; + int | num +-----+----- + 2 | 4.5 +(1 row) + +select f1(stavalues1) from pg_statistic; -- fail, can't infer element type +ERROR: cannot determine element type of "anyarray" argument +drop function f1(x anyarray); +create function f1(x anyarray) returns anyarray as $$ +begin + return x; +end$$ language plpgsql; +select f1(array[2,4]) as int, f1(array[4.5, 7.7]) as num; + int | num +-------+----------- + {2,4} | {4.5,7.7} +(1 row) + +select f1(stavalues1) from pg_statistic; -- fail, can't infer element type +ERROR: PL/pgSQL functions cannot accept type anyarray +CONTEXT: compilation of PL/pgSQL function "f1" near line 1 +drop function f1(x anyarray); +-- fail, can't infer type: +create function f1(x anyelement) returns anyrange as $$ +begin + return array[x + 1, x + 2]; +end$$ language plpgsql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. +create function f1(x anyrange) returns anyarray as $$ +begin + return array[lower(x), upper(x)]; +end$$ language plpgsql; +select f1(int4range(42, 49)) as int, f1(float8range(4.5, 7.8)) as num; + int | num +---------+----------- + {42,49} | {4.5,7.8} +(1 row) + +drop function f1(x anyrange); +create function f1(x anycompatible, y anycompatible) returns anycompatiblearray as $$ +begin + return array[x, y]; +end$$ language plpgsql; +select f1(2, 4) as int, f1(2, 4.5) as num; + int | num +-------+--------- + {2,4} | {2,4.5} +(1 row) + +drop function f1(x anycompatible, y anycompatible); +create function f1(x anycompatiblerange, y anycompatible, z anycompatible) returns anycompatiblearray as $$ +begin + return array[lower(x), upper(x), y, z]; +end$$ language plpgsql; +select f1(int4range(42, 49), 11, 2::smallint) as int, f1(float8range(4.5, 7.8), 7.8, 11::real) as num; + int | num +--------------+------------------ + {42,49,11,2} | {4.5,7.8,7.8,11} +(1 row) + +select f1(int4range(42, 49), 11, 4.5) as fail; -- range type doesn't fit +ERROR: function f1(int4range, integer, numeric) does not exist +LINE 1: select f1(int4range(42, 49), 11, 4.5) as fail; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function f1(x anycompatiblerange, y anycompatible, z anycompatible); +-- fail, can't infer type: +create function f1(x anycompatible) returns anycompatiblerange as $$ +begin + return array[x + 1, x + 2]; +end$$ language plpgsql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. 
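+--
+-- An extra polymorphism sketch (the name f_greater2 is illustrative only and
+-- not part of the original test set): both anycompatible arguments are
+-- resolved to a common type, so the comparison below works for any pair of
+-- inputs whose resolved common type has a ">=" operator.
+--
+create function f_greater2(x anycompatible, y anycompatible) returns anycompatible as $$
+begin
+  -- return the larger of the two values in the resolved common type
+  if x >= y then
+    return x;
+  end if;
+  return y;
+end$$ language plpgsql;
+-- e.g. f_greater2(2, 4.5) resolves both arguments to numeric and yields 4.5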
+create function f1(x anycompatiblerange, y anycompatiblearray) returns anycompatiblerange as $$ +begin + return x; +end$$ language plpgsql; +select f1(int4range(42, 49), array[11]) as int, f1(float8range(4.5, 7.8), array[7]) as num; + int | num +---------+----------- + [42,49) | [4.5,7.8) +(1 row) + +drop function f1(x anycompatiblerange, y anycompatiblearray); +create function f1(a anyelement, b anyarray, + c anycompatible, d anycompatible, + OUT x anyarray, OUT y anycompatiblearray) +as $$ +begin + x := a || b; + y := array[c, d]; +end$$ language plpgsql; +select x, pg_typeof(x), y, pg_typeof(y) + from f1(11, array[1, 2], 42, 34.5); + x | pg_typeof | y | pg_typeof +----------+-----------+-----------+----------- + {11,1,2} | integer[] | {42,34.5} | numeric[] +(1 row) + +select x, pg_typeof(x), y, pg_typeof(y) + from f1(11, array[1, 2], point(1,2), point(3,4)); + x | pg_typeof | y | pg_typeof +----------+-----------+-------------------+----------- + {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] +(1 row) + +select x, pg_typeof(x), y, pg_typeof(y) + from f1(11, '{1,2}', point(1,2), '(3,4)'); + x | pg_typeof | y | pg_typeof +----------+-----------+-------------------+----------- + {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] +(1 row) + +select x, pg_typeof(x), y, pg_typeof(y) + from f1(11, array[1, 2.2], 42, 34.5); -- fail +ERROR: function f1(integer, numeric[], integer, numeric) does not exist +LINE 2: from f1(11, array[1, 2.2], 42, 34.5); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function f1(a anyelement, b anyarray, + c anycompatible, d anycompatible); +-- +-- Test handling of OUT parameters, including polymorphic cases. +-- Note that RETURN is optional with OUT params; we try both ways. 
+-- +-- wrong way to do it: +create function f1(in i int, out j int) returns int as $$ +begin + return i+1; +end$$ language plpgsql; +ERROR: RETURN cannot have a parameter in function with OUT parameters +LINE 3: return i+1; + ^ +create function f1(in i int, out j int) as $$ +begin + j := i+1; + return; +end$$ language plpgsql; +select f1(42); + f1 +---- + 43 +(1 row) + +select * from f1(42); + j +---- + 43 +(1 row) + +create or replace function f1(inout i int) as $$ +begin + i := i+1; +end$$ language plpgsql; +select f1(42); + f1 +---- + 43 +(1 row) + +select * from f1(42); + i +---- + 43 +(1 row) + +drop function f1(int); +create function f1(in i int, out j int) returns setof int as $$ +begin + j := i+1; + return next; + j := i+2; + return next; + return; +end$$ language plpgsql; +select * from f1(42); + j +---- + 43 + 44 +(2 rows) + +drop function f1(int); +create function f1(in i int, out j int, out k text) as $$ +begin + j := i; + j := j+1; + k := 'foo'; +end$$ language plpgsql; +select f1(42); + f1 +---------- + (43,foo) +(1 row) + +select * from f1(42); + j | k +----+----- + 43 | foo +(1 row) + +drop function f1(int); +create function f1(in i int, out j int, out k text) returns setof record as $$ +begin + j := i+1; + k := 'foo'; + return next; + j := j+1; + k := 'foot'; + return next; +end$$ language plpgsql; +select * from f1(42); + j | k +----+------ + 43 | foo + 44 | foot +(2 rows) + +drop function f1(int); +create function duplic(in i anyelement, out j anyelement, out k anyarray) as $$ +begin + j := i; + k := array[j,j]; + return; +end$$ language plpgsql; +select * from duplic(42); + j | k +----+--------- + 42 | {42,42} +(1 row) + +select * from duplic('foo'::text); + j | k +-----+----------- + foo | {foo,foo} +(1 row) + +drop function duplic(anyelement); +create function duplic(in i anycompatiblerange, out j anycompatible, out k anycompatiblearray) as $$ +begin + j := lower(i); + k := array[lower(i),upper(i)]; + return; +end$$ language plpgsql; +select * from duplic(int4range(42,49)); + j | k +----+--------- + 42 | {42,49} +(1 row) + +select * from duplic(textrange('aaa', 'bbb')); + j | k +-----+----------- + aaa | {aaa,bbb} +(1 row) + +drop function duplic(anycompatiblerange); +-- +-- test PERFORM +-- +create table perform_test ( + a INT, + b INT +); +create function perform_simple_func(int) returns boolean as ' +BEGIN + IF $1 < 20 THEN + INSERT INTO perform_test VALUES ($1, $1 + 10); + RETURN TRUE; + ELSE + RETURN FALSE; + END IF; +END;' language plpgsql; +create function perform_test_func() returns void as ' +BEGIN + IF FOUND then + INSERT INTO perform_test VALUES (100, 100); + END IF; + + PERFORM perform_simple_func(5); + + IF FOUND then + INSERT INTO perform_test VALUES (100, 100); + END IF; + + PERFORM perform_simple_func(50); + + IF FOUND then + INSERT INTO perform_test VALUES (100, 100); + END IF; + + RETURN; +END;' language plpgsql; +SELECT perform_test_func(); + perform_test_func +------------------- + +(1 row) + +SELECT * FROM perform_test; + a | b +-----+----- + 5 | 15 + 100 | 100 + 100 | 100 +(3 rows) + +drop table perform_test; +-- +-- Test proper snapshot handling in simple expressions +-- +create temp table users(login text, id serial); +create function sp_id_user(a_login text) returns int as $$ +declare x int; +begin + select into x id from users where login = a_login; + if found then return x; end if; + return 0; +end$$ language plpgsql stable; +insert into users values('user1'); +select sp_id_user('user1'); + sp_id_user +------------ + 1 +(1 row) + +select 
sp_id_user('userx'); + sp_id_user +------------ + 0 +(1 row) + +create function sp_add_user(a_login text) returns int as $$ +declare my_id_user int; +begin + my_id_user = sp_id_user( a_login ); + IF my_id_user > 0 THEN + RETURN -1; -- error code for existing user + END IF; + INSERT INTO users ( login ) VALUES ( a_login ); + my_id_user = sp_id_user( a_login ); + IF my_id_user = 0 THEN + RETURN -2; -- error code for insertion failure + END IF; + RETURN my_id_user; +end$$ language plpgsql; +select sp_add_user('user1'); + sp_add_user +------------- + -1 +(1 row) + +select sp_add_user('user2'); + sp_add_user +------------- + 2 +(1 row) + +select sp_add_user('user2'); + sp_add_user +------------- + -1 +(1 row) + +select sp_add_user('user3'); + sp_add_user +------------- + 3 +(1 row) + +select sp_add_user('user3'); + sp_add_user +------------- + -1 +(1 row) + +drop function sp_add_user(text); +drop function sp_id_user(text); +-- +-- tests for refcursors +-- +create table rc_test (a int, b int); +copy rc_test from stdin; +create function return_unnamed_refcursor() returns refcursor as $$ +declare + rc refcursor; +begin + open rc for select a from rc_test; + return rc; +end +$$ language plpgsql; +create function use_refcursor(rc refcursor) returns int as $$ +declare + rc refcursor; + x record; +begin + rc := return_unnamed_refcursor(); + fetch next from rc into x; + return x.a; +end +$$ language plpgsql; +select use_refcursor(return_unnamed_refcursor()); + use_refcursor +--------------- + 5 +(1 row) + +create function return_refcursor(rc refcursor) returns refcursor as $$ +begin + open rc for select a from rc_test; + return rc; +end +$$ language plpgsql; +create function refcursor_test1(refcursor) returns refcursor as $$ +begin + perform return_refcursor($1); + return $1; +end +$$ language plpgsql; +begin; +select refcursor_test1('test1'); + refcursor_test1 +----------------- + test1 +(1 row) + +fetch next in test1; + a +--- + 5 +(1 row) + +select refcursor_test1('test2'); + refcursor_test1 +----------------- + test2 +(1 row) + +fetch all from test2; + a +----- + 5 + 50 + 500 +(3 rows) + +commit; +-- should fail +fetch next from test1; +ERROR: cursor "test1" does not exist +create function refcursor_test2(int, int) returns boolean as $$ +declare + c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; + nonsense record; +begin + open c1($1, $2); + fetch c1 into nonsense; + close c1; + if found then + return true; + else + return false; + end if; +end +$$ language plpgsql; +select refcursor_test2(20000, 20000) as "Should be false", + refcursor_test2(20, 20) as "Should be true"; + Should be false | Should be true +-----------------+---------------- + f | t +(1 row) + +-- should fail +create function constant_refcursor() returns refcursor as $$ +declare + rc constant refcursor; +begin + open rc for select a from rc_test; + return rc; +end +$$ language plpgsql; +select constant_refcursor(); +ERROR: variable "rc" is declared CONSTANT +CONTEXT: PL/pgSQL function constant_refcursor() line 5 at OPEN +-- but it's okay like this +create or replace function constant_refcursor() returns refcursor as $$ +declare + rc constant refcursor := 'my_cursor_name'; +begin + open rc for select a from rc_test; + return rc; +end +$$ language plpgsql; +select constant_refcursor(); + constant_refcursor +-------------------- + my_cursor_name +(1 row) + +-- +-- tests for cursors with named parameter arguments +-- +create function namedparmcursor_test1(int, int) returns boolean as $$ 
+declare + c1 cursor (param1 int, param12 int) for select * from rc_test where a > param1 and b > param12; + nonsense record; +begin + open c1(param12 := $2, param1 := $1); + fetch c1 into nonsense; + close c1; + if found then + return true; + else + return false; + end if; +end +$$ language plpgsql; +select namedparmcursor_test1(20000, 20000) as "Should be false", + namedparmcursor_test1(20, 20) as "Should be true"; + Should be false | Should be true +-----------------+---------------- + f | t +(1 row) + +-- mixing named and positional argument notations +create function namedparmcursor_test2(int, int) returns boolean as $$ +declare + c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; + nonsense record; +begin + open c1(param1 := $1, $2); + fetch c1 into nonsense; + close c1; + if found then + return true; + else + return false; + end if; +end +$$ language plpgsql; +select namedparmcursor_test2(20, 20); + namedparmcursor_test2 +----------------------- + t +(1 row) + +-- mixing named and positional: param2 is given twice, once in named notation +-- and second time in positional notation. Should throw an error at parse time +create function namedparmcursor_test3() returns void as $$ +declare + c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; +begin + open c1(param2 := 20, 21); +end +$$ language plpgsql; +ERROR: value for parameter "param2" of cursor "c1" specified more than once +LINE 5: open c1(param2 := 20, 21); + ^ +-- mixing named and positional: same as previous test, but param1 is duplicated +create function namedparmcursor_test4() returns void as $$ +declare + c1 cursor (param1 int, param2 int) for select * from rc_test where a > param1 and b > param2; +begin + open c1(20, param1 := 21); +end +$$ language plpgsql; +ERROR: value for parameter "param1" of cursor "c1" specified more than once +LINE 5: open c1(20, param1 := 21); + ^ +-- duplicate named parameter, should throw an error at parse time +create function namedparmcursor_test5() returns void as $$ +declare + c1 cursor (p1 int, p2 int) for + select * from tenk1 where thousand = p1 and tenthous = p2; +begin + open c1 (p2 := 77, p2 := 42); +end +$$ language plpgsql; +ERROR: value for parameter "p2" of cursor "c1" specified more than once +LINE 6: open c1 (p2 := 77, p2 := 42); + ^ +-- not enough parameters, should throw an error at parse time +create function namedparmcursor_test6() returns void as $$ +declare + c1 cursor (p1 int, p2 int) for + select * from tenk1 where thousand = p1 and tenthous = p2; +begin + open c1 (p2 := 77); +end +$$ language plpgsql; +ERROR: not enough arguments for cursor "c1" +LINE 6: open c1 (p2 := 77); + ^ +-- division by zero runtime error, the context given in the error message +-- should be sensible +create function namedparmcursor_test7() returns void as $$ +declare + c1 cursor (p1 int, p2 int) for + select * from tenk1 where thousand = p1 and tenthous = p2; +begin + open c1 (p2 := 77, p1 := 42/0); +end $$ language plpgsql; +select namedparmcursor_test7(); +ERROR: division by zero +CONTEXT: SQL expression "42/0 AS p1, 77 AS p2" +PL/pgSQL function namedparmcursor_test7() line 6 at OPEN +-- check that line comments work correctly within the argument list (there +-- is some special handling of this case in the code: the newline after the +-- comment must be preserved when the argument-evaluating query is +-- constructed, otherwise the comment effectively comments out the next +-- argument, too) +create function 
namedparmcursor_test8() returns int4 as $$ +declare + c1 cursor (p1 int, p2 int) for + select count(*) from tenk1 where thousand = p1 and tenthous = p2; + n int4; +begin + open c1 (77 -- test + , 42); + fetch c1 into n; + return n; +end $$ language plpgsql; +select namedparmcursor_test8(); + namedparmcursor_test8 +----------------------- + 0 +(1 row) + +-- cursor parameter name can match plpgsql variable or unreserved keyword +create function namedparmcursor_test9(p1 int) returns int4 as $$ +declare + c1 cursor (p1 int, p2 int, debug int) for + select count(*) from tenk1 where thousand = p1 and tenthous = p2 + and four = debug; + p2 int4 := 1006; + n int4; +begin + open c1 (p1 := p1, p2 := p2, debug := 2); + fetch c1 into n; + return n; +end $$ language plpgsql; +select namedparmcursor_test9(6); + namedparmcursor_test9 +----------------------- + 1 +(1 row) + +-- +-- tests for "raise" processing +-- +create function raise_test1(int) returns int as $$ +begin + raise notice 'This message has too many parameters!', $1; + return $1; +end; +$$ language plpgsql; +ERROR: too many parameters specified for RAISE +CONTEXT: compilation of PL/pgSQL function "raise_test1" near line 3 +create function raise_test2(int) returns int as $$ +begin + raise notice 'This message has too few parameters: %, %, %', $1, $1; + return $1; +end; +$$ language plpgsql; +ERROR: too few parameters specified for RAISE +CONTEXT: compilation of PL/pgSQL function "raise_test2" near line 3 +create function raise_test3(int) returns int as $$ +begin + raise notice 'This message has no parameters (despite having %% signs in it)!'; + return $1; +end; +$$ language plpgsql; +select raise_test3(1); +NOTICE: This message has no parameters (despite having % signs in it)! + raise_test3 +------------- + 1 +(1 row) + +-- Test re-RAISE inside a nested exception block. This case is allowed +-- by Oracle's PL/SQL but was handled differently by PG before 9.1. 
+CREATE FUNCTION reraise_test() RETURNS void AS $$ +BEGIN + BEGIN + RAISE syntax_error; + EXCEPTION + WHEN syntax_error THEN + BEGIN + raise notice 'exception % thrown in inner block, reraising', sqlerrm; + RAISE; + EXCEPTION + WHEN OTHERS THEN + raise notice 'RIGHT - exception % caught in inner block', sqlerrm; + END; + END; +EXCEPTION + WHEN OTHERS THEN + raise notice 'WRONG - exception % caught in outer block', sqlerrm; +END; +$$ LANGUAGE plpgsql; +SELECT reraise_test(); +NOTICE: exception syntax_error thrown in inner block, reraising +NOTICE: RIGHT - exception syntax_error caught in inner block + reraise_test +-------------- + +(1 row) + +-- +-- reject function definitions that contain malformed SQL queries at +-- compile-time, where possible +-- +create function bad_sql1() returns int as $$ +declare a int; +begin + a := 5; + Johnny Yuma; + a := 10; + return a; +end$$ language plpgsql; +ERROR: syntax error at or near "Johnny" +LINE 5: Johnny Yuma; + ^ +create function bad_sql2() returns int as $$ +declare r record; +begin + for r in select I fought the law, the law won LOOP + raise notice 'in loop'; + end loop; + return 5; +end;$$ language plpgsql; +ERROR: syntax error at or near "the" +LINE 4: for r in select I fought the law, the law won LOOP + ^ +-- a RETURN expression is mandatory, except for void-returning +-- functions, where it is not allowed +create function missing_return_expr() returns int as $$ +begin + return ; +end;$$ language plpgsql; +ERROR: missing expression at or near ";" +LINE 3: return ; + ^ +create function void_return_expr() returns void as $$ +begin + return 5; +end;$$ language plpgsql; +ERROR: RETURN cannot have a parameter in function returning void +LINE 3: return 5; + ^ +-- VOID functions are allowed to omit RETURN +create function void_return_expr() returns void as $$ +begin + perform 2+2; +end;$$ language plpgsql; +select void_return_expr(); + void_return_expr +------------------ + +(1 row) + +-- but ordinary functions are not +create function missing_return_expr() returns int as $$ +begin + perform 2+2; +end;$$ language plpgsql; +select missing_return_expr(); +ERROR: control reached end of function without RETURN +CONTEXT: PL/pgSQL function missing_return_expr() +drop function void_return_expr(); +drop function missing_return_expr(); +-- +-- EXECUTE ... 
INTO test +-- +create table eifoo (i integer, y integer); +create type eitype as (i integer, y integer); +create or replace function execute_into_test(varchar) returns record as $$ +declare + _r record; + _rt eifoo%rowtype; + _v eitype; + i int; + j int; + k int; +begin + execute 'insert into '||$1||' values(10,15)'; + execute 'select (row).* from (select row(10,1)::eifoo) s' into _r; + raise notice '% %', _r.i, _r.y; + execute 'select * from '||$1||' limit 1' into _rt; + raise notice '% %', _rt.i, _rt.y; + execute 'select *, 20 from '||$1||' limit 1' into i, j, k; + raise notice '% % %', i, j, k; + execute 'select 1,2' into _v; + return _v; +end; $$ language plpgsql; +select execute_into_test('eifoo'); +NOTICE: 10 1 +NOTICE: 10 15 +NOTICE: 10 15 20 + execute_into_test +------------------- + (1,2) +(1 row) + +drop table eifoo cascade; +drop type eitype cascade; +-- +-- SQLSTATE and SQLERRM test +-- +create function excpt_test1() returns void as $$ +begin + raise notice '% %', sqlstate, sqlerrm; +end; $$ language plpgsql; +-- should fail: SQLSTATE and SQLERRM are only in defined EXCEPTION +-- blocks +select excpt_test1(); +ERROR: column "sqlstate" does not exist +LINE 1: sqlstate + ^ +QUERY: sqlstate +CONTEXT: PL/pgSQL function excpt_test1() line 3 at RAISE +create function excpt_test2() returns void as $$ +begin + begin + begin + raise notice '% %', sqlstate, sqlerrm; + end; + end; +end; $$ language plpgsql; +-- should fail +select excpt_test2(); +ERROR: column "sqlstate" does not exist +LINE 1: sqlstate + ^ +QUERY: sqlstate +CONTEXT: PL/pgSQL function excpt_test2() line 5 at RAISE +create function excpt_test3() returns void as $$ +begin + begin + raise exception 'user exception'; + exception when others then + raise notice 'caught exception % %', sqlstate, sqlerrm; + begin + raise notice '% %', sqlstate, sqlerrm; + perform 10/0; + exception + when substring_error then + -- this exception handler shouldn't be invoked + raise notice 'unexpected exception: % %', sqlstate, sqlerrm; + when division_by_zero then + raise notice 'caught exception % %', sqlstate, sqlerrm; + end; + raise notice '% %', sqlstate, sqlerrm; + end; +end; $$ language plpgsql; +select excpt_test3(); +NOTICE: caught exception P0001 user exception +NOTICE: P0001 user exception +NOTICE: caught exception 22012 division by zero +NOTICE: P0001 user exception + excpt_test3 +------------- + +(1 row) + +create function excpt_test4() returns text as $$ +begin + begin perform 1/0; + exception when others then return sqlerrm; end; +end; $$ language plpgsql; +select excpt_test4(); + excpt_test4 +------------------ + division by zero +(1 row) + +drop function excpt_test1(); +drop function excpt_test2(); +drop function excpt_test3(); +drop function excpt_test4(); +-- parameters of raise stmt can be expressions +create function raise_exprs() returns void as $$ +declare + a integer[] = '{10,20,30}'; + c varchar = 'xyz'; + i integer; +begin + i := 2; + raise notice '%; %; %; %; %; %', a, a[i], c, (select c || 'abc'), row(10,'aaa',NULL,30), NULL; +end;$$ language plpgsql; +select raise_exprs(); +NOTICE: {10,20,30}; 20; xyz; xyzabc; (10,aaa,,30); + raise_exprs +------------- + +(1 row) + +drop function raise_exprs(); +-- regression test: verify that multiple uses of same plpgsql datum within +-- a SQL command all get mapped to the same $n parameter. The return value +-- of the SELECT is not important, we only care that it doesn't fail with +-- a complaint about an ungrouped column reference. 
+create function multi_datum_use(p1 int) returns bool as $$ +declare + x int; + y int; +begin + select into x,y unique1/p1, unique1/$1 from tenk1 group by unique1/p1; + return x = y; +end$$ language plpgsql; +select multi_datum_use(42); + multi_datum_use +----------------- + t +(1 row) + +-- +-- Test STRICT limiter in both planned and EXECUTE invocations. +-- Note that a data-modifying query is quasi strict (disallow multi rows) +-- by default in the planned case, but not in EXECUTE. +-- +create temp table foo (f1 int, f2 int); +insert into foo values (1,2), (3,4); +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should work + insert into foo values(5,6) returning * into x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +NOTICE: x.f1 = 5, x.f2 = 6 + stricttest +------------ + +(1 row) + +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should fail due to implicit strict + insert into foo values(7,8),(9,10) returning * into x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1. +CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should work + execute 'insert into foo values(5,6) returning *' into x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +NOTICE: x.f1 = 5, x.f2 = 6 + stricttest +------------ + +(1 row) + +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- this should work since EXECUTE isn't as picky + execute 'insert into foo values(7,8),(9,10) returning *' into x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +NOTICE: x.f1 = 7, x.f2 = 8 + stricttest +------------ + +(1 row) + +select * from foo; + f1 | f2 +----+---- + 1 | 2 + 3 | 4 + 5 | 6 + 5 | 6 + 7 | 8 + 9 | 10 +(6 rows) + +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should work + select * from foo where f1 = 3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +NOTICE: x.f1 = 3, x.f2 = 4 + stricttest +------------ + +(1 row) + +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should fail, no rows + select * from foo where f1 = 0 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned no rows +CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should fail, too many rows + select * from foo where f1 > 3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1. 
+CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should work + execute 'select * from foo where f1 = 3' into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +NOTICE: x.f1 = 3, x.f2 = 4 + stricttest +------------ + +(1 row) + +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should fail, no rows + execute 'select * from foo where f1 = 0' into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned no rows +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- should fail, too many rows + execute 'select * from foo where f1 > 3' into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +drop function stricttest(); +-- test printing parameters after failure due to STRICT +set plpgsql.print_strict_params to true; +create or replace function stricttest() returns void as $$ +declare +x record; +p1 int := 2; +p3 text := 'foo'; +begin + -- no rows + select * from foo where f1 = p1 and f1::text = p3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned no rows +DETAIL: parameters: p1 = '2', p3 = 'foo' +CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement +create or replace function stricttest() returns void as $$ +declare +x record; +p1 int := 2; +p3 text := $a$'Valame Dios!' dijo Sancho; 'no le dije yo a vuestra merced que mirase bien lo que hacia?'$a$; +begin + -- no rows + select * from foo where f1 = p1 and f1::text = p3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned no rows +DETAIL: parameters: p1 = '2', p3 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia?''' +CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement +create or replace function stricttest() returns void as $$ +declare +x record; +p1 int := 2; +p3 text := 'foo'; +begin + -- too many rows + select * from foo where f1 > p1 or f1::text = p3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +DETAIL: parameters: p1 = '2', p3 = 'foo' +HINT: Make sure the query returns a single row, or use LIMIT 1. +CONTEXT: PL/pgSQL function stricttest() line 8 at SQL statement +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- too many rows, no params + select * from foo where f1 > 3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1. 
+CONTEXT: PL/pgSQL function stricttest() line 5 at SQL statement +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- no rows + execute 'select * from foo where f1 = $1 or f1::text = $2' using 0, 'foo' into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned no rows +DETAIL: parameters: $1 = '0', $2 = 'foo' +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- too many rows + execute 'select * from foo where f1 > $1' using 1 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +DETAIL: parameters: $1 = '1' +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ +declare x record; +begin + -- too many rows, no parameters + execute 'select * from foo where f1 > 3' into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +CONTEXT: PL/pgSQL function stricttest() line 5 at EXECUTE +create or replace function stricttest() returns void as $$ +-- override the global +#print_strict_params off +declare +x record; +p1 int := 2; +p3 text := 'foo'; +begin + -- too many rows + select * from foo where f1 > p1 or f1::text = p3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1. +CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement +reset plpgsql.print_strict_params; +create or replace function stricttest() returns void as $$ +-- override the global +#print_strict_params on +declare +x record; +p1 int := 2; +p3 text := 'foo'; +begin + -- too many rows + select * from foo where f1 > p1 or f1::text = p3 into strict x; + raise notice 'x.f1 = %, x.f2 = %', x.f1, x.f2; +end$$ language plpgsql; +select stricttest(); +ERROR: query returned more than one row +DETAIL: parameters: p1 = '2', p3 = 'foo' +HINT: Make sure the query returns a single row, or use LIMIT 1. 
+CONTEXT: PL/pgSQL function stricttest() line 10 at SQL statement +-- test warnings and errors +set plpgsql.extra_warnings to 'all'; +set plpgsql.extra_warnings to 'none'; +set plpgsql.extra_errors to 'all'; +set plpgsql.extra_errors to 'none'; +-- test warnings when shadowing a variable +set plpgsql.extra_warnings to 'shadowed_variables'; +-- simple shadowing of input and output parameters +create or replace function shadowtest(in1 int) + returns table (out1 int) as $$ +declare +in1 int; +out1 int; +begin +end +$$ language plpgsql; +WARNING: variable "in1" shadows a previously defined variable +LINE 4: in1 int; + ^ +WARNING: variable "out1" shadows a previously defined variable +LINE 5: out1 int; + ^ +select shadowtest(1); + shadowtest +------------ +(0 rows) + +set plpgsql.extra_warnings to 'shadowed_variables'; +select shadowtest(1); + shadowtest +------------ +(0 rows) + +create or replace function shadowtest(in1 int) + returns table (out1 int) as $$ +declare +in1 int; +out1 int; +begin +end +$$ language plpgsql; +WARNING: variable "in1" shadows a previously defined variable +LINE 4: in1 int; + ^ +WARNING: variable "out1" shadows a previously defined variable +LINE 5: out1 int; + ^ +select shadowtest(1); + shadowtest +------------ +(0 rows) + +drop function shadowtest(int); +-- shadowing in a second DECLARE block +create or replace function shadowtest() + returns void as $$ +declare +f1 int; +begin + declare + f1 int; + begin + end; +end$$ language plpgsql; +WARNING: variable "f1" shadows a previously defined variable +LINE 7: f1 int; + ^ +drop function shadowtest(); +-- several levels of shadowing +create or replace function shadowtest(in1 int) + returns void as $$ +declare +in1 int; +begin + declare + in1 int; + begin + end; +end$$ language plpgsql; +WARNING: variable "in1" shadows a previously defined variable +LINE 4: in1 int; + ^ +WARNING: variable "in1" shadows a previously defined variable +LINE 7: in1 int; + ^ +drop function shadowtest(int); +-- shadowing in cursor definitions +create or replace function shadowtest() + returns void as $$ +declare +f1 int; +c1 cursor (f1 int) for select 1; +begin +end$$ language plpgsql; +WARNING: variable "f1" shadows a previously defined variable +LINE 5: c1 cursor (f1 int) for select 1; + ^ +drop function shadowtest(); +-- test errors when shadowing a variable +set plpgsql.extra_errors to 'shadowed_variables'; +create or replace function shadowtest(f1 int) + returns boolean as $$ +declare f1 int; begin return 1; end $$ language plpgsql; +ERROR: variable "f1" shadows a previously defined variable +LINE 3: declare f1 int; begin return 1; end $$ language plpgsql; + ^ +select shadowtest(1); +ERROR: function shadowtest(integer) does not exist +LINE 1: select shadowtest(1); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; +create or replace function shadowtest(f1 int) + returns boolean as $$ +declare f1 int; begin return 1; end $$ language plpgsql; +select shadowtest(1); + shadowtest +------------ + t +(1 row) + +-- runtime extra checks +set plpgsql.extra_warnings to 'too_many_rows'; +do $$ +declare x int; +begin + select v from generate_series(1,2) g(v) into x; +end; +$$; +WARNING: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1. 
+set plpgsql.extra_errors to 'too_many_rows'; +do $$ +declare x int; +begin + select v from generate_series(1,2) g(v) into x; +end; +$$; +ERROR: query returned more than one row +HINT: Make sure the query returns a single row, or use LIMIT 1. +CONTEXT: PL/pgSQL function inline_code_block line 4 at SQL statement +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; +set plpgsql.extra_warnings to 'strict_multi_assignment'; +do $$ +declare + x int; + y int; +begin + select 1 into x, y; + select 1,2 into x, y; + select 1,2,3 into x, y; +end +$$; +WARNING: number of source and target fields in assignment does not match +DETAIL: strict_multi_assignment check of extra_warnings is active. +HINT: Make sure the query returns the exact list of columns. +WARNING: number of source and target fields in assignment does not match +DETAIL: strict_multi_assignment check of extra_warnings is active. +HINT: Make sure the query returns the exact list of columns. +set plpgsql.extra_errors to 'strict_multi_assignment'; +do $$ +declare + x int; + y int; +begin + select 1 into x, y; + select 1,2 into x, y; + select 1,2,3 into x, y; +end +$$; +ERROR: number of source and target fields in assignment does not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. +CONTEXT: PL/pgSQL function inline_code_block line 6 at SQL statement +create table test_01(a int, b int, c int); +alter table test_01 drop column a; +-- the check is active only when source table is not empty +insert into test_01 values(10,20); +do $$ +declare + x int; + y int; +begin + select * from test_01 into x, y; -- should be ok + raise notice 'ok'; + select * from test_01 into x; -- should to fail +end; +$$; +NOTICE: ok +ERROR: number of source and target fields in assignment does not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. +CONTEXT: PL/pgSQL function inline_code_block line 8 at SQL statement +do $$ +declare + t test_01; +begin + select 1, 2 into t; -- should be ok + raise notice 'ok'; + select 1, 2, 3 into t; -- should fail; +end; +$$; +NOTICE: ok +ERROR: number of source and target fields in assignment does not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. +CONTEXT: PL/pgSQL function inline_code_block line 7 at SQL statement +do $$ +declare + t test_01; +begin + select 1 into t; -- should fail; +end; +$$; +ERROR: number of source and target fields in assignment does not match +DETAIL: strict_multi_assignment check of extra_errors is active. +HINT: Make sure the query returns the exact list of columns. 
+CONTEXT: PL/pgSQL function inline_code_block line 5 at SQL statement +drop table test_01; +reset plpgsql.extra_errors; +reset plpgsql.extra_warnings; +-- test scrollable cursor support +create function sc_test() returns setof integer as $$ +declare + c scroll cursor for select f1 from int4_tbl; + x integer; +begin + open c; + fetch last from c into x; + while found loop + return next x; + fetch prior from c into x; + end loop; + close c; +end; +$$ language plpgsql; +select * from sc_test(); + sc_test +------------- + -2147483647 + 2147483647 + -123456 + 123456 + 0 +(5 rows) + +create or replace function sc_test() returns setof integer as $$ +declare + c no scroll cursor for select f1 from int4_tbl; + x integer; +begin + open c; + fetch last from c into x; + while found loop + return next x; + fetch prior from c into x; + end loop; + close c; +end; +$$ language plpgsql; +select * from sc_test(); -- fails because of NO SCROLL specification +ERROR: cursor can only scan forward +HINT: Declare it with SCROLL option to enable backward scan. +CONTEXT: PL/pgSQL function sc_test() line 7 at FETCH +create or replace function sc_test() returns setof integer as $$ +declare + c refcursor; + x integer; +begin + open c scroll for select f1 from int4_tbl; + fetch last from c into x; + while found loop + return next x; + fetch prior from c into x; + end loop; + close c; +end; +$$ language plpgsql; +select * from sc_test(); + sc_test +------------- + -2147483647 + 2147483647 + -123456 + 123456 + 0 +(5 rows) + +create or replace function sc_test() returns setof integer as $$ +declare + c refcursor; + x integer; +begin + open c scroll for execute 'select f1 from int4_tbl'; + fetch last from c into x; + while found loop + return next x; + fetch relative -2 from c into x; + end loop; + close c; +end; +$$ language plpgsql; +select * from sc_test(); + sc_test +------------- + -2147483647 + -123456 + 0 +(3 rows) + +create or replace function sc_test() returns setof integer as $$ +declare + c refcursor; + x integer; +begin + open c scroll for execute 'select f1 from int4_tbl'; + fetch last from c into x; + while found loop + return next x; + move backward 2 from c; + fetch relative -1 from c into x; + end loop; + close c; +end; +$$ language plpgsql; +select * from sc_test(); + sc_test +------------- + -2147483647 + 123456 +(2 rows) + +create or replace function sc_test() returns setof integer as $$ +declare + c cursor for select * from generate_series(1, 10); + x integer; +begin + open c; + loop + move relative 2 in c; + if not found then + exit; + end if; + fetch next from c into x; + if found then + return next x; + end if; + end loop; + close c; +end; +$$ language plpgsql; +select * from sc_test(); + sc_test +--------- + 3 + 6 + 9 +(3 rows) + +create or replace function sc_test() returns setof integer as $$ +declare + c cursor for select * from generate_series(1, 10); + x integer; +begin + open c; + move forward all in c; + fetch backward from c into x; + if found then + return next x; + end if; + close c; +end; +$$ language plpgsql; +select * from sc_test(); + sc_test +--------- + 10 +(1 row) + +drop function sc_test(); +-- test qualified variable names +create function pl_qual_names (param1 int) returns void as $$ +<> +declare + param1 int := 1; +begin + <> + declare + param1 int := 2; + begin + raise notice 'param1 = %', param1; + raise notice 'pl_qual_names.param1 = %', pl_qual_names.param1; + raise notice 'outerblock.param1 = %', outerblock.param1; + raise notice 'innerblock.param1 = %', innerblock.param1; 
+ end; +end; +$$ language plpgsql; +select pl_qual_names(42); +NOTICE: param1 = 2 +NOTICE: pl_qual_names.param1 = 42 +NOTICE: outerblock.param1 = 1 +NOTICE: innerblock.param1 = 2 + pl_qual_names +--------------- + +(1 row) + +drop function pl_qual_names(int); +-- tests for RETURN QUERY +create function ret_query1(out int, out int) returns setof record as $$ +begin + $1 := -1; + $2 := -2; + return next; + return query select x + 1, x * 10 from generate_series(0, 10) s (x); + return next; +end; +$$ language plpgsql; +select * from ret_query1(); + column1 | column2 +---------+--------- + -1 | -2 + 1 | 0 + 2 | 10 + 3 | 20 + 4 | 30 + 5 | 40 + 6 | 50 + 7 | 60 + 8 | 70 + 9 | 80 + 10 | 90 + 11 | 100 + -1 | -2 +(13 rows) + +create type record_type as (x text, y int, z boolean); +create or replace function ret_query2(lim int) returns setof record_type as $$ +begin + return query select fipshash(s.x::text), s.x, s.x > 0 + from generate_series(-8, lim) s (x) where s.x % 2 = 0; +end; +$$ language plpgsql; +select * from ret_query2(8); + x | y | z +----------------------------------+----+--- + e91592205d3881e3ea35d66973bb4898 | -8 | f + 03b26944890929ff751653acb2f2af79 | -6 | f + e5e0093f285a4fb94c3fcc2ad7fd04ed | -4 | f + cf3bae39dd692048a8bf961182e6a34d | -2 | f + 5feceb66ffc86f38d952786c6d696c79 | 0 | f + d4735e3a265e16eee03f59718b9b5d03 | 2 | t + 4b227777d4dd1fc61c6f884f48641d02 | 4 | t + e7f6c011776e8db7cd330b54174fd76f | 6 | t + 2c624232cdd221771294dfbb310aca00 | 8 | t +(9 rows) + +-- test EXECUTE USING +create function exc_using(int, text) returns int as $$ +declare i int; +begin + for i in execute 'select * from generate_series(1,$1)' using $1+1 loop + raise notice '%', i; + end loop; + execute 'select $2 + $2*3 + length($1)' into i using $2,$1; + return i; +end +$$ language plpgsql; +select exc_using(5, 'foobar'); +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 +NOTICE: 5 +NOTICE: 6 + exc_using +----------- + 26 +(1 row) + +drop function exc_using(int, text); +create or replace function exc_using(int) returns void as $$ +declare + c refcursor; + i int; +begin + open c for execute 'select * from generate_series(1,$1)' using $1+1; + loop + fetch c into i; + exit when not found; + raise notice '%', i; + end loop; + close c; + return; +end; +$$ language plpgsql; +select exc_using(5); +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 +NOTICE: 5 +NOTICE: 6 + exc_using +----------- + +(1 row) + +drop function exc_using(int); +-- test FOR-over-cursor +create or replace function forc01() returns void as $$ +declare + c cursor(r1 integer, r2 integer) + for select * from generate_series(r1,r2) i; + c2 cursor + for select * from generate_series(41,43) i; +begin + -- assign portal names to cursors to get stable output + c := 'c'; + c2 := 'c2'; + for r in c(5,7) loop + raise notice '% from %', r.i, c; + end loop; + -- again, to test if cursor was closed properly + for r in c(9,10) loop + raise notice '% from %', r.i, c; + end loop; + -- and test a parameterless cursor + for r in c2 loop + raise notice '% from %', r.i, c2; + end loop; + -- and try it with a hand-assigned name + raise notice 'after loop, c2 = %', c2; + c2 := 'special_name'; + for r in c2 loop + raise notice '% from %', r.i, c2; + end loop; + raise notice 'after loop, c2 = %', c2; + -- and try it with a generated name + -- (which we can't show in the output because it's variable) + c2 := null; + for r in c2 loop + raise notice '%', r.i; + end loop; + raise notice 'after loop, c2 = %', c2; + return; +end; +$$ language plpgsql; +select forc01(); +NOTICE: 5 
from c +NOTICE: 6 from c +NOTICE: 7 from c +NOTICE: 9 from c +NOTICE: 10 from c +NOTICE: 41 from c2 +NOTICE: 42 from c2 +NOTICE: 43 from c2 +NOTICE: after loop, c2 = c2 +NOTICE: 41 from special_name +NOTICE: 42 from special_name +NOTICE: 43 from special_name +NOTICE: after loop, c2 = special_name +NOTICE: 41 +NOTICE: 42 +NOTICE: 43 +NOTICE: after loop, c2 = + forc01 +-------- + +(1 row) + +-- try updating the cursor's current row +create temp table forc_test as + select n as i, n as j from generate_series(1,10) n; +create or replace function forc01() returns void as $$ +declare + c cursor for select * from forc_test; +begin + for r in c loop + raise notice '%, %', r.i, r.j; + update forc_test set i = i * 100, j = r.j * 2 where current of c; + end loop; +end; +$$ language plpgsql; +select forc01(); +NOTICE: 1, 1 +NOTICE: 2, 2 +NOTICE: 3, 3 +NOTICE: 4, 4 +NOTICE: 5, 5 +NOTICE: 6, 6 +NOTICE: 7, 7 +NOTICE: 8, 8 +NOTICE: 9, 9 +NOTICE: 10, 10 + forc01 +-------- + +(1 row) + +select * from forc_test; + i | j +------+---- + 100 | 2 + 200 | 4 + 300 | 6 + 400 | 8 + 500 | 10 + 600 | 12 + 700 | 14 + 800 | 16 + 900 | 18 + 1000 | 20 +(10 rows) + +-- same, with a cursor whose portal name doesn't match variable name +create or replace function forc01() returns void as $$ +declare + c refcursor := 'fooled_ya'; + r record; +begin + open c for select * from forc_test; + loop + fetch c into r; + exit when not found; + raise notice '%, %', r.i, r.j; + update forc_test set i = i * 100, j = r.j * 2 where current of c; + end loop; +end; +$$ language plpgsql; +select forc01(); +NOTICE: 100, 2 +NOTICE: 200, 4 +NOTICE: 300, 6 +NOTICE: 400, 8 +NOTICE: 500, 10 +NOTICE: 600, 12 +NOTICE: 700, 14 +NOTICE: 800, 16 +NOTICE: 900, 18 +NOTICE: 1000, 20 + forc01 +-------- + +(1 row) + +select * from forc_test; + i | j +--------+---- + 10000 | 4 + 20000 | 8 + 30000 | 12 + 40000 | 16 + 50000 | 20 + 60000 | 24 + 70000 | 28 + 80000 | 32 + 90000 | 36 + 100000 | 40 +(10 rows) + +drop function forc01(); +-- it's okay to re-use a cursor variable name, even when bound +do $$ +declare cnt int := 0; + c1 cursor for select * from forc_test; +begin + for r1 in c1 loop + declare c1 cursor for select * from forc_test; + begin + for r2 in c1 loop + cnt := cnt + 1; + end loop; + end; + end loop; + raise notice 'cnt = %', cnt; +end $$; +NOTICE: cnt = 100 +-- fail because cursor has no query bound to it +create or replace function forc_bad() returns void as $$ +declare + c refcursor; +begin + for r in c loop + raise notice '%', r.i; + end loop; +end; +$$ language plpgsql; +ERROR: cursor FOR loop must use a bound cursor variable +LINE 5: for r in c loop + ^ +-- test RETURN QUERY EXECUTE +create or replace function return_dquery() +returns setof int as $$ +begin + return query execute 'select * from (values(10),(20)) f'; + return query execute 'select * from (values($1),($2)) f' using 40,50; +end; +$$ language plpgsql; +select * from return_dquery(); + return_dquery +--------------- + 10 + 20 + 40 + 50 +(4 rows) + +drop function return_dquery(); +-- test RETURN QUERY with dropped columns +create table tabwithcols(a int, b int, c int, d int); +insert into tabwithcols values(10,20,30,40),(50,60,70,80); +create or replace function returnqueryf() +returns setof tabwithcols as $$ +begin + return query select * from tabwithcols; + return query execute 'select * from tabwithcols'; +end; +$$ language plpgsql; +select * from returnqueryf(); + a | b | c | d +----+----+----+---- + 10 | 20 | 30 | 40 + 50 | 60 | 70 | 80 + 10 | 20 | 30 | 40 + 50 | 60 | 70 | 80 
+(4 rows) + +alter table tabwithcols drop column b; +select * from returnqueryf(); + a | c | d +----+----+---- + 10 | 30 | 40 + 50 | 70 | 80 + 10 | 30 | 40 + 50 | 70 | 80 +(4 rows) + +alter table tabwithcols drop column d; +select * from returnqueryf(); + a | c +----+---- + 10 | 30 + 50 | 70 + 10 | 30 + 50 | 70 +(4 rows) + +alter table tabwithcols add column d int; +select * from returnqueryf(); + a | c | d +----+----+--- + 10 | 30 | + 50 | 70 | + 10 | 30 | + 50 | 70 | +(4 rows) + +drop function returnqueryf(); +drop table tabwithcols; +-- +-- Tests for composite-type results +-- +create type compostype as (x int, y varchar); +-- test: use of variable of composite type in return statement +create or replace function compos() returns compostype as $$ +declare + v compostype; +begin + v := (1, 'hello'); + return v; +end; +$$ language plpgsql; +select compos(); + compos +----------- + (1,hello) +(1 row) + +-- test: use of variable of record type in return statement +create or replace function compos() returns compostype as $$ +declare + v record; +begin + v := (1, 'hello'::varchar); + return v; +end; +$$ language plpgsql; +select compos(); + compos +----------- + (1,hello) +(1 row) + +-- test: use of row expr in return statement +create or replace function compos() returns compostype as $$ +begin + return (1, 'hello'::varchar); +end; +$$ language plpgsql; +select compos(); + compos +----------- + (1,hello) +(1 row) + +-- this does not work currently (no implicit casting) +create or replace function compos() returns compostype as $$ +begin + return (1, 'hello'); +end; +$$ language plpgsql; +select compos(); +ERROR: returned record type does not match expected record type +DETAIL: Returned type unknown does not match expected type character varying in column 2. +CONTEXT: PL/pgSQL function compos() while casting return value to function's return type +-- ... but this does +create or replace function compos() returns compostype as $$ +begin + return (1, 'hello')::compostype; +end; +$$ language plpgsql; +select compos(); + compos +----------- + (1,hello) +(1 row) + +drop function compos(); +-- test: return a row expr as record. +create or replace function composrec() returns record as $$ +declare + v record; +begin + v := (1, 'hello'); + return v; +end; +$$ language plpgsql; +select composrec(); + composrec +----------- + (1,hello) +(1 row) + +-- test: return row expr in return statement. +create or replace function composrec() returns record as $$ +begin + return (1, 'hello'); +end; +$$ language plpgsql; +select composrec(); + composrec +----------- + (1,hello) +(1 row) + +drop function composrec(); +-- test: row expr in RETURN NEXT statement. +create or replace function compos() returns setof compostype as $$ +begin + for i in 1..3 + loop + return next (1, 'hello'::varchar); + end loop; + return next null::compostype; + return next (2, 'goodbye')::compostype; +end; +$$ language plpgsql; +select * from compos(); + x | y +---+--------- + 1 | hello + 1 | hello + 1 | hello + | + 2 | goodbye +(5 rows) + +drop function compos(); +-- test: use invalid expr in return statement. +create or replace function compos() returns compostype as $$ +begin + return 1 + 1; +end; +$$ language plpgsql; +select compos(); +ERROR: cannot return non-composite value from function returning composite type +CONTEXT: PL/pgSQL function compos() line 3 at RETURN +-- RETURN variable is a different code path ... 
+create or replace function compos() returns compostype as $$ +declare x int := 42; +begin + return x; +end; +$$ language plpgsql; +select * from compos(); +ERROR: cannot return non-composite value from function returning composite type +CONTEXT: PL/pgSQL function compos() line 4 at RETURN +drop function compos(); +-- test: invalid use of composite variable in scalar-returning function +create or replace function compos() returns int as $$ +declare + v compostype; +begin + v := (1, 'hello'); + return v; +end; +$$ language plpgsql; +select compos(); +ERROR: invalid input syntax for type integer: "(1,hello)" +CONTEXT: PL/pgSQL function compos() while casting return value to function's return type +-- test: invalid use of composite expression in scalar-returning function +create or replace function compos() returns int as $$ +begin + return (1, 'hello')::compostype; +end; +$$ language plpgsql; +select compos(); +ERROR: invalid input syntax for type integer: "(1,hello)" +CONTEXT: PL/pgSQL function compos() while casting return value to function's return type +drop function compos(); +drop type compostype; +-- +-- Tests for 8.4's new RAISE features +-- +create or replace function raise_test() returns void as $$ +begin + raise notice '% % %', 1, 2, 3 + using errcode = '55001', detail = 'some detail info', hint = 'some hint'; + raise '% % %', 1, 2, 3 + using errcode = 'division_by_zero', detail = 'some detail info'; +end; +$$ language plpgsql; +select raise_test(); +NOTICE: 1 2 3 +DETAIL: some detail info +HINT: some hint +ERROR: 1 2 3 +DETAIL: some detail info +CONTEXT: PL/pgSQL function raise_test() line 5 at RAISE +-- Since we can't actually see the thrown SQLSTATE in default psql output, +-- test it like this; this also tests re-RAISE +create or replace function raise_test() returns void as $$ +begin + raise 'check me' + using errcode = 'division_by_zero', detail = 'some detail info'; + exception + when others then + raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; + raise; +end; +$$ language plpgsql; +select raise_test(); +NOTICE: SQLSTATE: 22012 SQLERRM: check me +ERROR: check me +DETAIL: some detail info +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +create or replace function raise_test() returns void as $$ +begin + raise 'check me' + using errcode = '1234F', detail = 'some detail info'; + exception + when others then + raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; + raise; +end; +$$ language plpgsql; +select raise_test(); +NOTICE: SQLSTATE: 1234F SQLERRM: check me +ERROR: check me +DETAIL: some detail info +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +-- SQLSTATE specification in WHEN +create or replace function raise_test() returns void as $$ +begin + raise 'check me' + using errcode = '1234F', detail = 'some detail info'; + exception + when sqlstate '1234F' then + raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; + raise; +end; +$$ language plpgsql; +select raise_test(); +NOTICE: SQLSTATE: 1234F SQLERRM: check me +ERROR: check me +DETAIL: some detail info +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +create or replace function raise_test() returns void as $$ +begin + raise division_by_zero using detail = 'some detail info'; + exception + when others then + raise notice 'SQLSTATE: % SQLERRM: %', sqlstate, sqlerrm; + raise; +end; +$$ language plpgsql; +select raise_test(); +NOTICE: SQLSTATE: 22012 SQLERRM: division_by_zero +ERROR: division_by_zero +DETAIL: some detail info +CONTEXT: PL/pgSQL function raise_test() line 3 at 
RAISE +create or replace function raise_test() returns void as $$ +begin + raise division_by_zero; +end; +$$ language plpgsql; +select raise_test(); +ERROR: division_by_zero +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +create or replace function raise_test() returns void as $$ +begin + raise sqlstate '1234F'; +end; +$$ language plpgsql; +select raise_test(); +ERROR: 1234F +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +create or replace function raise_test() returns void as $$ +begin + raise division_by_zero using message = 'custom' || ' message'; +end; +$$ language plpgsql; +select raise_test(); +ERROR: custom message +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +create or replace function raise_test() returns void as $$ +begin + raise using message = 'custom' || ' message', errcode = '22012'; +end; +$$ language plpgsql; +select raise_test(); +ERROR: custom message +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +-- conflict on message +create or replace function raise_test() returns void as $$ +begin + raise notice 'some message' using message = 'custom' || ' message', errcode = '22012'; +end; +$$ language plpgsql; +select raise_test(); +ERROR: RAISE option already specified: MESSAGE +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +-- conflict on errcode +create or replace function raise_test() returns void as $$ +begin + raise division_by_zero using message = 'custom' || ' message', errcode = '22012'; +end; +$$ language plpgsql; +select raise_test(); +ERROR: RAISE option already specified: ERRCODE +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +-- nothing to re-RAISE +create or replace function raise_test() returns void as $$ +begin + raise; +end; +$$ language plpgsql; +select raise_test(); +ERROR: RAISE without parameters cannot be used outside an exception handler +CONTEXT: PL/pgSQL function raise_test() line 3 at RAISE +-- test access to exception data +create function zero_divide() returns int as $$ +declare v int := 0; +begin + return 10 / v; +end; +$$ language plpgsql; +create or replace function raise_test() returns void as $$ +begin + raise exception 'custom exception' + using detail = 'some detail of custom exception', + hint = 'some hint related to custom exception'; +end; +$$ language plpgsql; +create function stacked_diagnostics_test() returns void as $$ +declare _sqlstate text; + _message text; + _context text; +begin + perform zero_divide(); +exception when others then + get stacked diagnostics + _sqlstate = returned_sqlstate, + _message = message_text, + _context = pg_exception_context; + raise notice 'sqlstate: %, message: %, context: [%]', + _sqlstate, _message, replace(_context, E'\n', ' <- '); +end; +$$ language plpgsql; +select stacked_diagnostics_test(); +NOTICE: sqlstate: 22012, message: division by zero, context: [PL/pgSQL function zero_divide() line 4 at RETURN <- SQL statement "SELECT zero_divide()" <- PL/pgSQL function stacked_diagnostics_test() line 6 at PERFORM] + stacked_diagnostics_test +-------------------------- + +(1 row) + +create or replace function stacked_diagnostics_test() returns void as $$ +declare _detail text; + _hint text; + _message text; +begin + perform raise_test(); +exception when others then + get stacked diagnostics + _message = message_text, + _detail = pg_exception_detail, + _hint = pg_exception_hint; + raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint; +end; +$$ language plpgsql; +select stacked_diagnostics_test(); +NOTICE: message: custom exception, 
detail: some detail of custom exception, hint: some hint related to custom exception + stacked_diagnostics_test +-------------------------- + +(1 row) + +-- fail, cannot use stacked diagnostics statement outside handler +create or replace function stacked_diagnostics_test() returns void as $$ +declare _detail text; + _hint text; + _message text; +begin + get stacked diagnostics + _message = message_text, + _detail = pg_exception_detail, + _hint = pg_exception_hint; + raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint; +end; +$$ language plpgsql; +select stacked_diagnostics_test(); +ERROR: GET STACKED DIAGNOSTICS cannot be used outside an exception handler +CONTEXT: PL/pgSQL function stacked_diagnostics_test() line 6 at GET STACKED DIAGNOSTICS +drop function zero_divide(); +drop function stacked_diagnostics_test(); +-- check cases where implicit SQLSTATE variable could be confused with +-- SQLSTATE as a keyword, cf bug #5524 +create or replace function raise_test() returns void as $$ +begin + perform 1/0; +exception + when sqlstate '22012' then + raise notice using message = sqlstate; + raise sqlstate '22012' using message = 'substitute message'; +end; +$$ language plpgsql; +select raise_test(); +NOTICE: 22012 +ERROR: substitute message +CONTEXT: PL/pgSQL function raise_test() line 7 at RAISE +drop function raise_test(); +-- test passing column_name, constraint_name, datatype_name, table_name +-- and schema_name error fields +create or replace function stacked_diagnostics_test() returns void as $$ +declare _column_name text; + _constraint_name text; + _datatype_name text; + _table_name text; + _schema_name text; +begin + raise exception using + column = '>>some column name<<', + constraint = '>>some constraint name<<', + datatype = '>>some datatype name<<', + table = '>>some table name<<', + schema = '>>some schema name<<'; +exception when others then + get stacked diagnostics + _column_name = column_name, + _constraint_name = constraint_name, + _datatype_name = pg_datatype_name, + _table_name = table_name, + _schema_name = schema_name; + raise notice 'column %, constraint %, type %, table %, schema %', + _column_name, _constraint_name, _datatype_name, _table_name, _schema_name; +end; +$$ language plpgsql; +select stacked_diagnostics_test(); +NOTICE: column >>some column name<<, constraint >>some constraint name<<, type >>some datatype name<<, table >>some table name<<, schema >>some schema name<< + stacked_diagnostics_test +-------------------------- + +(1 row) + +drop function stacked_diagnostics_test(); +-- test variadic functions +create or replace function vari(variadic int[]) +returns void as $$ +begin + for i in array_lower($1,1)..array_upper($1,1) loop + raise notice '%', $1[i]; + end loop; end; +$$ language plpgsql; +select vari(1,2,3,4,5); +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 +NOTICE: 5 + vari +------ + +(1 row) + +select vari(3,4,5); +NOTICE: 3 +NOTICE: 4 +NOTICE: 5 + vari +------ + +(1 row) + +select vari(variadic array[5,6,7]); +NOTICE: 5 +NOTICE: 6 +NOTICE: 7 + vari +------ + +(1 row) + +drop function vari(int[]); +-- coercion test +create or replace function pleast(variadic numeric[]) +returns numeric as $$ +declare aux numeric = $1[array_lower($1,1)]; +begin + for i in array_lower($1,1)+1..array_upper($1,1) loop + if $1[i] < aux then aux := $1[i]; end if; + end loop; + return aux; +end; +$$ language plpgsql immutable strict; +select pleast(10,1,2,3,-16); + pleast +-------- + -16 +(1 row) + +select pleast(10.2,2.2,-1.1); + pleast +-------- + -1.1 
+(1 row) + +select pleast(10.2,10, -20); + pleast +-------- + -20 +(1 row) + +select pleast(10,20, -1.0); + pleast +-------- + -1.0 +(1 row) + +-- in case of conflict, non-variadic version is preferred +create or replace function pleast(numeric) +returns numeric as $$ +begin + raise notice 'non-variadic function called'; + return $1; +end; +$$ language plpgsql immutable strict; +select pleast(10); +NOTICE: non-variadic function called + pleast +-------- + 10 +(1 row) + +drop function pleast(numeric[]); +drop function pleast(numeric); +-- test table functions +create function tftest(int) returns table(a int, b int) as $$ +begin + return query select $1, $1+i from generate_series(1,5) g(i); +end; +$$ language plpgsql immutable strict; +select * from tftest(10); + a | b +----+---- + 10 | 11 + 10 | 12 + 10 | 13 + 10 | 14 + 10 | 15 +(5 rows) + +create or replace function tftest(a1 int) returns table(a int, b int) as $$ +begin + a := a1; b := a1 + 1; + return next; + a := a1 * 10; b := a1 * 10 + 1; + return next; +end; +$$ language plpgsql immutable strict; +select * from tftest(10); + a | b +-----+----- + 10 | 11 + 100 | 101 +(2 rows) + +drop function tftest(int); +create function rttest() +returns setof int as $$ +declare rc int; +begin + return query values(10),(20); + get diagnostics rc = row_count; + raise notice '% %', found, rc; + return query select * from (values(10),(20)) f(a) where false; + get diagnostics rc = row_count; + raise notice '% %', found, rc; + return query execute 'values(10),(20)'; + get diagnostics rc = row_count; + raise notice '% %', found, rc; + return query execute 'select * from (values(10),(20)) f(a) where false'; + get diagnostics rc = row_count; + raise notice '% %', found, rc; +end; +$$ language plpgsql; +select * from rttest(); +NOTICE: t 2 +NOTICE: f 0 +NOTICE: t 2 +NOTICE: f 0 + rttest +-------- + 10 + 20 + 10 + 20 +(4 rows) + +-- check some error cases, too +create or replace function rttest() +returns setof int as $$ +begin + return query select 10 into no_such_table; +end; +$$ language plpgsql; +select * from rttest(); +ERROR: SELECT INTO query does not return tuples +CONTEXT: SQL statement "select 10 into no_such_table" +PL/pgSQL function rttest() line 3 at RETURN QUERY +create or replace function rttest() +returns setof int as $$ +begin + return query execute 'select 10 into no_such_table'; +end; +$$ language plpgsql; +select * from rttest(); +ERROR: SELECT INTO query does not return tuples +CONTEXT: SQL statement "select 10 into no_such_table" +PL/pgSQL function rttest() line 3 at RETURN QUERY +select * from no_such_table; +ERROR: relation "no_such_table" does not exist +LINE 1: select * from no_such_table; + ^ +drop function rttest(); +-- Test for proper cleanup at subtransaction exit. This example +-- exposed a bug in PG 8.2. 
+CREATE FUNCTION leaker_1(fail BOOL) RETURNS INTEGER AS $$ +DECLARE + v_var INTEGER; +BEGIN + BEGIN + v_var := (leaker_2(fail)).error_code; + EXCEPTION + WHEN others THEN RETURN 0; + END; + RETURN 1; +END; +$$ LANGUAGE plpgsql; +CREATE FUNCTION leaker_2(fail BOOL, OUT error_code INTEGER, OUT new_id INTEGER) + RETURNS RECORD AS $$ +BEGIN + IF fail THEN + RAISE EXCEPTION 'fail ...'; + END IF; + error_code := 1; + new_id := 1; + RETURN; +END; +$$ LANGUAGE plpgsql; +SELECT * FROM leaker_1(false); + leaker_1 +---------- + 1 +(1 row) + +SELECT * FROM leaker_1(true); + leaker_1 +---------- + 0 +(1 row) + +DROP FUNCTION leaker_1(bool); +DROP FUNCTION leaker_2(bool); +-- Test for appropriate cleanup of non-simple expression evaluations +-- (bug in all versions prior to August 2010) +CREATE FUNCTION nonsimple_expr_test() RETURNS text[] AS $$ +DECLARE + arr text[]; + lr text; + i integer; +BEGIN + arr := array[array['foo','bar'], array['baz', 'quux']]; + lr := 'fool'; + i := 1; + -- use sub-SELECTs to make expressions non-simple + arr[(SELECT i)][(SELECT i+1)] := (SELECT lr); + RETURN arr; +END; +$$ LANGUAGE plpgsql; +SELECT nonsimple_expr_test(); + nonsimple_expr_test +------------------------- + {{foo,fool},{baz,quux}} +(1 row) + +DROP FUNCTION nonsimple_expr_test(); +CREATE FUNCTION nonsimple_expr_test() RETURNS integer AS $$ +declare + i integer NOT NULL := 0; +begin + begin + i := (SELECT NULL::integer); -- should throw error + exception + WHEN OTHERS THEN + i := (SELECT 1::integer); + end; + return i; +end; +$$ LANGUAGE plpgsql; +SELECT nonsimple_expr_test(); + nonsimple_expr_test +--------------------- + 1 +(1 row) + +DROP FUNCTION nonsimple_expr_test(); +-- +-- Test cases involving recursion and error recovery in simple expressions +-- (bugs in all versions before October 2010). The problems are most +-- easily exposed by mutual recursion between plpgsql and sql functions. 
+-- +create function recurse(float8) returns float8 as +$$ +begin + if ($1 > 0) then + return sql_recurse($1 - 1); + else + return $1; + end if; +end; +$$ language plpgsql; +-- "limit" is to prevent this from being inlined +create function sql_recurse(float8) returns float8 as +$$ select recurse($1) limit 1; $$ language sql; +select recurse(10); + recurse +--------- + 0 +(1 row) + +create function error1(text) returns text language sql as +$$ SELECT relname::text FROM pg_class c WHERE c.oid = $1::regclass $$; +create function error2(p_name_table text) returns text language plpgsql as $$ +begin + return error1(p_name_table); +end$$; +BEGIN; +create table public.stuffs (stuff text); +SAVEPOINT a; +select error2('nonexistent.stuffs'); +ERROR: schema "nonexistent" does not exist +CONTEXT: SQL function "error1" statement 1 +PL/pgSQL function error2(text) line 3 at RETURN +ROLLBACK TO a; +select error2('public.stuffs'); + error2 +-------- + stuffs +(1 row) + +rollback; +drop function error2(p_name_table text); +drop function error1(text); +-- Test for proper handling of cast-expression caching +create function sql_to_date(integer) returns date as $$ +select $1::text::date +$$ language sql immutable strict; +create cast (integer as date) with function sql_to_date(integer) as assignment; +create function cast_invoker(integer) returns date as $$ +begin + return $1; +end$$ language plpgsql; +select cast_invoker(20150717); + cast_invoker +-------------- + 07-17-2015 +(1 row) + +select cast_invoker(20150718); -- second call crashed in pre-release 9.5 + cast_invoker +-------------- + 07-18-2015 +(1 row) + +begin; +select cast_invoker(20150717); + cast_invoker +-------------- + 07-17-2015 +(1 row) + +select cast_invoker(20150718); + cast_invoker +-------------- + 07-18-2015 +(1 row) + +savepoint s1; +select cast_invoker(20150718); + cast_invoker +-------------- + 07-18-2015 +(1 row) + +select cast_invoker(-1); -- fails +ERROR: invalid input syntax for type date: "-1" +CONTEXT: SQL function "sql_to_date" statement 1 +PL/pgSQL function cast_invoker(integer) while casting return value to function's return type +rollback to savepoint s1; +select cast_invoker(20150719); + cast_invoker +-------------- + 07-19-2015 +(1 row) + +select cast_invoker(20150720); + cast_invoker +-------------- + 07-20-2015 +(1 row) + +commit; +drop function cast_invoker(integer); +drop function sql_to_date(integer) cascade; +NOTICE: drop cascades to cast from integer to date +-- Test handling of cast cache inside DO blocks +-- (to check the original crash case, this must be a cast not previously +-- used in this session) +begin; +do $$ declare x text[]; begin x := '{1.23, 4.56}'::numeric[]; end $$; +do $$ declare x text[]; begin x := '{1.23, 4.56}'::numeric[]; end $$; +end; +-- Test for consistent reporting of error context +create function fail() returns int language plpgsql as $$ +begin + return 1/0; +end +$$; +select fail(); +ERROR: division by zero +CONTEXT: SQL expression "1/0" +PL/pgSQL function fail() line 3 at RETURN +select fail(); +ERROR: division by zero +CONTEXT: SQL expression "1/0" +PL/pgSQL function fail() line 3 at RETURN +drop function fail(); +-- Test handling of string literals. 
+set standard_conforming_strings = off; +create or replace function strtest() returns text as $$ +begin + raise notice 'foo\\bar\041baz'; + return 'foo\\bar\041baz'; +end +$$ language plpgsql; +WARNING: nonstandard use of \\ in a string literal +LINE 3: raise notice 'foo\\bar\041baz'; + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +WARNING: nonstandard use of \\ in a string literal +LINE 4: return 'foo\\bar\041baz'; + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +WARNING: nonstandard use of \\ in a string literal +LINE 4: return 'foo\\bar\041baz'; + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +select strtest(); +NOTICE: foo\bar!baz +WARNING: nonstandard use of \\ in a string literal +LINE 1: 'foo\\bar\041baz' + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +QUERY: 'foo\\bar\041baz' + strtest +------------- + foo\bar!baz +(1 row) + +create or replace function strtest() returns text as $$ +begin + raise notice E'foo\\bar\041baz'; + return E'foo\\bar\041baz'; +end +$$ language plpgsql; +select strtest(); +NOTICE: foo\bar!baz + strtest +------------- + foo\bar!baz +(1 row) + +set standard_conforming_strings = on; +create or replace function strtest() returns text as $$ +begin + raise notice 'foo\\bar\041baz\'; + return 'foo\\bar\041baz\'; +end +$$ language plpgsql; +select strtest(); +NOTICE: foo\\bar\041baz\ + strtest +------------------ + foo\\bar\041baz\ +(1 row) + +create or replace function strtest() returns text as $$ +begin + raise notice E'foo\\bar\041baz'; + return E'foo\\bar\041baz'; +end +$$ language plpgsql; +select strtest(); +NOTICE: foo\bar!baz + strtest +------------- + foo\bar!baz +(1 row) + +drop function strtest(); +-- Test anonymous code blocks. +DO $$ +DECLARE r record; +BEGIN + FOR r IN SELECT rtrim(roomno) AS roomno, comment FROM Room ORDER BY roomno + LOOP + RAISE NOTICE '%, %', r.roomno, r.comment; + END LOOP; +END$$; +NOTICE: 001, Entrance +NOTICE: 002, Office +NOTICE: 003, Office +NOTICE: 004, Technical +NOTICE: 101, Office +NOTICE: 102, Conference +NOTICE: 103, Restroom +NOTICE: 104, Technical +NOTICE: 105, Office +NOTICE: 106, Office +-- these are to check syntax error reporting +DO LANGUAGE plpgsql $$begin return 1; end$$; +ERROR: RETURN cannot have a parameter in function returning void +LINE 1: DO LANGUAGE plpgsql $$begin return 1; end$$; + ^ +DO $$ +DECLARE r record; +BEGIN + FOR r IN SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomno + LOOP + RAISE NOTICE '%, %', r.roomno, r.comment; + END LOOP; +END$$; +ERROR: column "foo" does not exist +LINE 1: SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomn... + ^ +QUERY: SELECT rtrim(roomno) AS roomno, foo FROM Room ORDER BY roomno +CONTEXT: PL/pgSQL function inline_code_block line 4 at FOR over SELECT rows +-- Check handling of errors thrown from/into anonymous code blocks. 
+do $outer$ +begin + for i in 1..10 loop + begin + execute $ex$ + do $$ + declare x int = 0; + begin + x := 1 / x; + end; + $$; + $ex$; + exception when division_by_zero then + raise notice 'caught division by zero'; + end; + end loop; +end; +$outer$; +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +NOTICE: caught division by zero +-- Check variable scoping -- a var is not available in its own or prior +-- default expressions, but it is available in later ones. +do $$ +declare x int := x + 1; -- error +begin + raise notice 'x = %', x; +end; +$$; +ERROR: column "x" does not exist +LINE 1: x + 1 + ^ +QUERY: x + 1 +CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization +do $$ +declare y int := x + 1; -- error + x int := 42; +begin + raise notice 'x = %, y = %', x, y; +end; +$$; +ERROR: column "x" does not exist +LINE 1: x + 1 + ^ +QUERY: x + 1 +CONTEXT: PL/pgSQL function inline_code_block line 2 during statement block local variable initialization +do $$ +declare x int := 42; + y int := x + 1; +begin + raise notice 'x = %, y = %', x, y; +end; +$$; +NOTICE: x = 42, y = 43 +do $$ +declare x int := 42; +begin + declare y int := x + 1; + x int := x + 2; + z int := x * 10; + begin + raise notice 'x = %, y = %, z = %', x, y, z; + end; +end; +$$; +NOTICE: x = 44, y = 43, z = 440 +-- Check handling of conflicts between plpgsql vars and table columns. +set plpgsql.variable_conflict = error; +create function conflict_test() returns setof int8_tbl as $$ +declare r record; + q1 bigint := 42; +begin + for r in select q1,q2 from int8_tbl loop + return next r; + end loop; +end; +$$ language plpgsql; +select * from conflict_test(); +ERROR: column reference "q1" is ambiguous +LINE 1: select q1,q2 from int8_tbl + ^ +DETAIL: It could refer to either a PL/pgSQL variable or a table column. 
+QUERY: select q1,q2 from int8_tbl +CONTEXT: PL/pgSQL function conflict_test() line 5 at FOR over SELECT rows +create or replace function conflict_test() returns setof int8_tbl as $$ +#variable_conflict use_variable +declare r record; + q1 bigint := 42; +begin + for r in select q1,q2 from int8_tbl loop + return next r; + end loop; +end; +$$ language plpgsql; +select * from conflict_test(); + q1 | q2 +----+------------------- + 42 | 456 + 42 | 4567890123456789 + 42 | 123 + 42 | 4567890123456789 + 42 | -4567890123456789 +(5 rows) + +create or replace function conflict_test() returns setof int8_tbl as $$ +#variable_conflict use_column +declare r record; + q1 bigint := 42; +begin + for r in select q1,q2 from int8_tbl loop + return next r; + end loop; +end; +$$ language plpgsql; +select * from conflict_test(); + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +drop function conflict_test(); +-- Check that an unreserved keyword can be used as a variable name +create function unreserved_test() returns int as $$ +declare + forward int := 21; +begin + forward := forward * 2; + return forward; +end +$$ language plpgsql; +select unreserved_test(); + unreserved_test +----------------- + 42 +(1 row) + +create or replace function unreserved_test() returns int as $$ +declare + return int := 42; +begin + return := return + 1; + return return; +end +$$ language plpgsql; +select unreserved_test(); + unreserved_test +----------------- + 43 +(1 row) + +create or replace function unreserved_test() returns int as $$ +declare + comment int := 21; +begin + comment := comment * 2; + comment on function unreserved_test() is 'this is a test'; + return comment; +end +$$ language plpgsql; +select unreserved_test(); + unreserved_test +----------------- + 42 +(1 row) + +select obj_description('unreserved_test()'::regprocedure, 'pg_proc'); + obj_description +----------------- + this is a test +(1 row) + +drop function unreserved_test(); +-- +-- Test FOREACH over arrays +-- +create function foreach_test(anyarray) +returns void as $$ +declare x int; +begin + foreach x in array $1 + loop + raise notice '%', x; + end loop; + end; +$$ language plpgsql; +select foreach_test(ARRAY[1,2,3,4]); +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 + foreach_test +-------------- + +(1 row) + +select foreach_test(ARRAY[[1,2],[3,4]]); +NOTICE: 1 +NOTICE: 2 +NOTICE: 3 +NOTICE: 4 + foreach_test +-------------- + +(1 row) + +create or replace function foreach_test(anyarray) +returns void as $$ +declare x int; +begin + foreach x slice 1 in array $1 + loop + raise notice '%', x; + end loop; + end; +$$ language plpgsql; +-- should fail +select foreach_test(ARRAY[1,2,3,4]); +ERROR: FOREACH ... SLICE loop variable must be of an array type +CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array +select foreach_test(ARRAY[[1,2],[3,4]]); +ERROR: FOREACH ... 
SLICE loop variable must be of an array type +CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array +create or replace function foreach_test(anyarray) +returns void as $$ +declare x int[]; +begin + foreach x slice 1 in array $1 + loop + raise notice '%', x; + end loop; + end; +$$ language plpgsql; +select foreach_test(ARRAY[1,2,3,4]); +NOTICE: {1,2,3,4} + foreach_test +-------------- + +(1 row) + +select foreach_test(ARRAY[[1,2],[3,4]]); +NOTICE: {1,2} +NOTICE: {3,4} + foreach_test +-------------- + +(1 row) + +-- higher level of slicing +create or replace function foreach_test(anyarray) +returns void as $$ +declare x int[]; +begin + foreach x slice 2 in array $1 + loop + raise notice '%', x; + end loop; + end; +$$ language plpgsql; +-- should fail +select foreach_test(ARRAY[1,2,3,4]); +ERROR: slice dimension (2) is out of the valid range 0..1 +CONTEXT: PL/pgSQL function foreach_test(anyarray) line 4 at FOREACH over array +-- ok +select foreach_test(ARRAY[[1,2],[3,4]]); +NOTICE: {{1,2},{3,4}} + foreach_test +-------------- + +(1 row) + +select foreach_test(ARRAY[[[1,2]],[[3,4]]]); +NOTICE: {{1,2}} +NOTICE: {{3,4}} + foreach_test +-------------- + +(1 row) + +create type xy_tuple AS (x int, y int); +-- iteration over array of records +create or replace function foreach_test(anyarray) +returns void as $$ +declare r record; +begin + foreach r in array $1 + loop + raise notice '%', r; + end loop; + end; +$$ language plpgsql; +select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); +NOTICE: (10,20) +NOTICE: (40,69) +NOTICE: (35,78) + foreach_test +-------------- + +(1 row) + +select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); +NOTICE: (10,20) +NOTICE: (40,69) +NOTICE: (35,78) +NOTICE: (88,76) + foreach_test +-------------- + +(1 row) + +create or replace function foreach_test(anyarray) +returns void as $$ +declare x int; y int; +begin + foreach x, y in array $1 + loop + raise notice 'x = %, y = %', x, y; + end loop; + end; +$$ language plpgsql; +select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); +NOTICE: x = 10, y = 20 +NOTICE: x = 40, y = 69 +NOTICE: x = 35, y = 78 + foreach_test +-------------- + +(1 row) + +select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); +NOTICE: x = 10, y = 20 +NOTICE: x = 40, y = 69 +NOTICE: x = 35, y = 78 +NOTICE: x = 88, y = 76 + foreach_test +-------------- + +(1 row) + +-- slicing over array of composite types +create or replace function foreach_test(anyarray) +returns void as $$ +declare x xy_tuple[]; +begin + foreach x slice 1 in array $1 + loop + raise notice '%', x; + end loop; + end; +$$ language plpgsql; +select foreach_test(ARRAY[(10,20),(40,69),(35,78)]::xy_tuple[]); +NOTICE: {"(10,20)","(40,69)","(35,78)"} + foreach_test +-------------- + +(1 row) + +select foreach_test(ARRAY[[(10,20),(40,69)],[(35,78),(88,76)]]::xy_tuple[]); +NOTICE: {"(10,20)","(40,69)"} +NOTICE: {"(35,78)","(88,76)"} + foreach_test +-------------- + +(1 row) + +drop function foreach_test(anyarray); +drop type xy_tuple; +-- +-- Assorted tests for array subscript assignment +-- +create temp table rtype (id int, ar text[]); +create function arrayassign1() returns text[] language plpgsql as $$ +declare + r record; +begin + r := row(12, '{foo,bar,baz}')::rtype; + r.ar[2] := 'replace'; + return r.ar; +end$$; +select arrayassign1(); + arrayassign1 +------------------- + {foo,replace,baz} +(1 row) + +select arrayassign1(); -- try again to exercise internal caching + arrayassign1 
+------------------- + {foo,replace,baz} +(1 row) + +create domain orderedarray as int[2] + constraint sorted check (value[1] < value[2]); +select '{1,2}'::orderedarray; + orderedarray +-------------- + {1,2} +(1 row) + +select '{2,1}'::orderedarray; -- fail +ERROR: value for domain orderedarray violates check constraint "sorted" +create function testoa(x1 int, x2 int, x3 int) returns orderedarray +language plpgsql as $$ +declare res orderedarray; +begin + res := array[x1, x2]; + res[2] := x3; + return res; +end$$; +select testoa(1,2,3); + testoa +-------- + {1,3} +(1 row) + +select testoa(1,2,3); -- try again to exercise internal caching + testoa +-------- + {1,3} +(1 row) + +select testoa(2,1,3); -- fail at initial assign +ERROR: value for domain orderedarray violates check constraint "sorted" +CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 4 at assignment +select testoa(1,2,1); -- fail at update +ERROR: value for domain orderedarray violates check constraint "sorted" +CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 5 at assignment +drop function arrayassign1(); +drop function testoa(x1 int, x2 int, x3 int); +-- +-- Test handling of expanded arrays +-- +create function returns_rw_array(int) returns int[] +language plpgsql as $$ + declare r int[]; + begin r := array[$1, $1]; return r; end; +$$ stable; +create function consumes_rw_array(int[]) returns int +language plpgsql as $$ + begin return $1[1]; end; +$$ stable; +select consumes_rw_array(returns_rw_array(42)); + consumes_rw_array +------------------- + 42 +(1 row) + +-- bug #14174 +explain (verbose, costs off) +select i, a from + (select returns_rw_array(1) as a offset 0) ss, + lateral consumes_rw_array(a) i; + QUERY PLAN +----------------------------------------------------------------- + Nested Loop + Output: i.i, (returns_rw_array(1)) + -> Result + Output: returns_rw_array(1) + -> Function Scan on public.consumes_rw_array i + Output: i.i + Function Call: consumes_rw_array((returns_rw_array(1))) +(7 rows) + +select i, a from + (select returns_rw_array(1) as a offset 0) ss, + lateral consumes_rw_array(a) i; + i | a +---+------- + 1 | {1,1} +(1 row) + +explain (verbose, costs off) +select consumes_rw_array(a), a from returns_rw_array(1) a; + QUERY PLAN +-------------------------------------------- + Function Scan on public.returns_rw_array a + Output: consumes_rw_array(a), a + Function Call: returns_rw_array(1) +(3 rows) + +select consumes_rw_array(a), a from returns_rw_array(1) a; + consumes_rw_array | a +-------------------+------- + 1 | {1,1} +(1 row) + +explain (verbose, costs off) +select consumes_rw_array(a), a from + (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); + QUERY PLAN +--------------------------------------------------------------------- + Values Scan on "*VALUES*" + Output: consumes_rw_array("*VALUES*".column1), "*VALUES*".column1 +(2 rows) + +select consumes_rw_array(a), a from + (values (returns_rw_array(1)), (returns_rw_array(2))) v(a); + consumes_rw_array | a +-------------------+------- + 1 | {1,1} + 2 | {2,2} +(2 rows) + +do $$ +declare a int[] := array[1,2]; +begin + a := a || 3; + raise notice 'a = %', a; +end$$; +NOTICE: a = {1,2,3} +-- +-- Test access to call stack +-- +create function inner_func(int) +returns int as $$ +declare _context text; +begin + get diagnostics _context = pg_context; + raise notice '***%***', _context; + -- lets do it again, just for fun.. 
+ get diagnostics _context = pg_context; + raise notice '***%***', _context; + raise notice 'lets make sure we didnt break anything'; + return 2 * $1; +end; +$$ language plpgsql; +create or replace function outer_func(int) +returns int as $$ +declare + myresult int; +begin + raise notice 'calling down into inner_func()'; + myresult := inner_func($1); + raise notice 'inner_func() done'; + return myresult; +end; +$$ language plpgsql; +create or replace function outer_outer_func(int) +returns int as $$ +declare + myresult int; +begin + raise notice 'calling down into outer_func()'; + myresult := outer_func($1); + raise notice 'outer_func() done'; + return myresult; +end; +$$ language plpgsql; +select outer_outer_func(10); +NOTICE: calling down into outer_func() +NOTICE: calling down into inner_func() +NOTICE: ***PL/pgSQL function inner_func(integer) line 4 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: ***PL/pgSQL function inner_func(integer) line 7 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: lets make sure we didnt break anything +NOTICE: inner_func() done +NOTICE: outer_func() done + outer_outer_func +------------------ + 20 +(1 row) + +-- repeated call should work +select outer_outer_func(20); +NOTICE: calling down into outer_func() +NOTICE: calling down into inner_func() +NOTICE: ***PL/pgSQL function inner_func(integer) line 4 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: ***PL/pgSQL function inner_func(integer) line 7 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: lets make sure we didnt break anything +NOTICE: inner_func() done +NOTICE: outer_func() done + outer_outer_func +------------------ + 40 +(1 row) + +drop function outer_outer_func(int); +drop function outer_func(int); +drop function inner_func(int); +-- access to call stack from exception +create function inner_func(int) +returns int as $$ +declare + _context text; + sx int := 5; +begin + begin + perform sx / 0; + exception + when division_by_zero then + get diagnostics _context = pg_context; + raise notice '***%***', _context; + end; + + -- lets do it again, just for fun.. 
+ get diagnostics _context = pg_context; + raise notice '***%***', _context; + raise notice 'lets make sure we didnt break anything'; + return 2 * $1; +end; +$$ language plpgsql; +create or replace function outer_func(int) +returns int as $$ +declare + myresult int; +begin + raise notice 'calling down into inner_func()'; + myresult := inner_func($1); + raise notice 'inner_func() done'; + return myresult; +end; +$$ language plpgsql; +create or replace function outer_outer_func(int) +returns int as $$ +declare + myresult int; +begin + raise notice 'calling down into outer_func()'; + myresult := outer_func($1); + raise notice 'outer_func() done'; + return myresult; +end; +$$ language plpgsql; +select outer_outer_func(10); +NOTICE: calling down into outer_func() +NOTICE: calling down into inner_func() +NOTICE: ***PL/pgSQL function inner_func(integer) line 10 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: ***PL/pgSQL function inner_func(integer) line 15 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: lets make sure we didnt break anything +NOTICE: inner_func() done +NOTICE: outer_func() done + outer_outer_func +------------------ + 20 +(1 row) + +-- repeated call should work +select outer_outer_func(20); +NOTICE: calling down into outer_func() +NOTICE: calling down into inner_func() +NOTICE: ***PL/pgSQL function inner_func(integer) line 10 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: ***PL/pgSQL function inner_func(integer) line 15 at GET DIAGNOSTICS +PL/pgSQL function outer_func(integer) line 6 at assignment +PL/pgSQL function outer_outer_func(integer) line 6 at assignment*** +NOTICE: lets make sure we didnt break anything +NOTICE: inner_func() done +NOTICE: outer_func() done + outer_outer_func +------------------ + 40 +(1 row) + +drop function outer_outer_func(int); +drop function outer_func(int); +drop function inner_func(int); +-- Test pg_routine_oid +create function current_function(text) +returns regprocedure as $$ +declare + fn_oid regprocedure; +begin + get diagnostics fn_oid = pg_routine_oid; + return fn_oid; +end; +$$ language plpgsql; +select current_function('foo'); + current_function +------------------------ + current_function(text) +(1 row) + +drop function current_function(text); +-- shouldn't fail in DO, even though there's no useful data +do $$ +declare + fn_oid oid; +begin + get diagnostics fn_oid = pg_routine_oid; + raise notice 'pg_routine_oid = %', fn_oid; +end; +$$; +NOTICE: pg_routine_oid = 0 +-- +-- Test ASSERT +-- +do $$ +begin + assert 1=1; -- should succeed +end; +$$; +do $$ +begin + assert 1=0; -- should fail +end; +$$; +ERROR: assertion failed +CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT +do $$ +begin + assert NULL; -- should fail +end; +$$; +ERROR: assertion failed +CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT +-- check controlling GUC +set plpgsql.check_asserts = off; +do $$ +begin + assert 1=0; -- won't be tested +end; +$$; +reset plpgsql.check_asserts; +-- test custom message +do $$ +declare var text := 'some value'; +begin + assert 1=0, format('assertion failed, var = "%s"', var); +end; +$$; +ERROR: assertion failed, var = "some value" +CONTEXT: PL/pgSQL function inline_code_block line 4 at ASSERT +-- 
ensure assertions are not trapped by 'others' +do $$ +begin + assert 1=0, 'unhandled assertion'; +exception when others then + null; -- do nothing +end; +$$; +ERROR: unhandled assertion +CONTEXT: PL/pgSQL function inline_code_block line 3 at ASSERT +-- Test use of plpgsql in a domain check constraint (cf. bug #14414) +create function plpgsql_domain_check(val int) returns boolean as $$ +begin return val > 0; end +$$ language plpgsql immutable; +create domain plpgsql_domain as integer check(plpgsql_domain_check(value)); +do $$ +declare v_test plpgsql_domain; +begin + v_test := 1; +end; +$$; +do $$ +declare v_test plpgsql_domain := 1; +begin + v_test := 0; -- fail +end; +$$; +ERROR: value for domain plpgsql_domain violates check constraint "plpgsql_domain_check" +CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment +-- Test handling of expanded array passed to a domain constraint (bug #14472) +create function plpgsql_arr_domain_check(val int[]) returns boolean as $$ +begin return val[1] > 0; end +$$ language plpgsql immutable; +create domain plpgsql_arr_domain as int[] check(plpgsql_arr_domain_check(value)); +do $$ +declare v_test plpgsql_arr_domain; +begin + v_test := array[1]; + v_test := v_test || 2; +end; +$$; +do $$ +declare v_test plpgsql_arr_domain := array[1]; +begin + v_test := 0 || v_test; -- fail +end; +$$; +ERROR: value for domain plpgsql_arr_domain violates check constraint "plpgsql_arr_domain_check" +CONTEXT: PL/pgSQL function inline_code_block line 4 at assignment +-- +-- test usage of transition tables in AFTER triggers +-- +CREATE TABLE transition_table_base (id int PRIMARY KEY, val text); +CREATE FUNCTION transition_table_base_ins_func() + RETURNS trigger + LANGUAGE plpgsql +AS $$ +DECLARE + t text; + l text; +BEGIN + t = ''; + FOR l IN EXECUTE + $q$ + EXPLAIN (TIMING off, COSTS off, VERBOSE on) + SELECT * FROM newtable + $q$ LOOP + t = t || l || E'\n'; + END LOOP; + + RAISE INFO '%', t; + RETURN new; +END; +$$; +CREATE TRIGGER transition_table_base_ins_trig + AFTER INSERT ON transition_table_base + REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable + FOR EACH STATEMENT + EXECUTE PROCEDURE transition_table_base_ins_func(); +ERROR: OLD TABLE can only be specified for a DELETE or UPDATE trigger +CREATE TRIGGER transition_table_base_ins_trig + AFTER INSERT ON transition_table_base + REFERENCING NEW TABLE AS newtable + FOR EACH STATEMENT + EXECUTE PROCEDURE transition_table_base_ins_func(); +INSERT INTO transition_table_base VALUES (1, 'One'), (2, 'Two'); +INFO: Named Tuplestore Scan + Output: id, val + +INSERT INTO transition_table_base VALUES (3, 'Three'), (4, 'Four'); +INFO: Named Tuplestore Scan + Output: id, val + +CREATE OR REPLACE FUNCTION transition_table_base_upd_func() + RETURNS trigger + LANGUAGE plpgsql +AS $$ +DECLARE + t text; + l text; +BEGIN + t = ''; + FOR l IN EXECUTE + $q$ + EXPLAIN (TIMING off, COSTS off, VERBOSE on) + SELECT * FROM oldtable ot FULL JOIN newtable nt USING (id) + $q$ LOOP + t = t || l || E'\n'; + END LOOP; + + RAISE INFO '%', t; + RETURN new; +END; +$$; +CREATE TRIGGER transition_table_base_upd_trig + AFTER UPDATE ON transition_table_base + REFERENCING OLD TABLE AS oldtable NEW TABLE AS newtable + FOR EACH STATEMENT + EXECUTE PROCEDURE transition_table_base_upd_func(); +UPDATE transition_table_base + SET val = '*' || val || '*' + WHERE id BETWEEN 2 AND 3; +INFO: Hash Full Join + Output: COALESCE(ot.id, nt.id), ot.val, nt.val + Hash Cond: (ot.id = nt.id) + -> Named Tuplestore Scan + Output: ot.id, ot.val + -> Hash + Output: 
nt.id, nt.val + -> Named Tuplestore Scan + Output: nt.id, nt.val + +CREATE TABLE transition_table_level1 +( + level1_no serial NOT NULL , + level1_node_name varchar(255), + PRIMARY KEY (level1_no) +) WITHOUT OIDS; +CREATE TABLE transition_table_level2 +( + level2_no serial NOT NULL , + parent_no int NOT NULL, + level1_node_name varchar(255), + PRIMARY KEY (level2_no) +) WITHOUT OIDS; +CREATE TABLE transition_table_status +( + level int NOT NULL, + node_no int NOT NULL, + status int, + PRIMARY KEY (level, node_no) +) WITHOUT OIDS; +CREATE FUNCTION transition_table_level1_ri_parent_del_func() + RETURNS TRIGGER + LANGUAGE plpgsql +AS $$ + DECLARE n bigint; + BEGIN + PERFORM FROM p JOIN transition_table_level2 c ON c.parent_no = p.level1_no; + IF FOUND THEN + RAISE EXCEPTION 'RI error'; + END IF; + RETURN NULL; + END; +$$; +CREATE TRIGGER transition_table_level1_ri_parent_del_trigger + AFTER DELETE ON transition_table_level1 + REFERENCING OLD TABLE AS p + FOR EACH STATEMENT EXECUTE PROCEDURE + transition_table_level1_ri_parent_del_func(); +CREATE FUNCTION transition_table_level1_ri_parent_upd_func() + RETURNS TRIGGER + LANGUAGE plpgsql +AS $$ + DECLARE + x int; + BEGIN + WITH p AS (SELECT level1_no, sum(delta) cnt + FROM (SELECT level1_no, 1 AS delta FROM i + UNION ALL + SELECT level1_no, -1 AS delta FROM d) w + GROUP BY level1_no + HAVING sum(delta) < 0) + SELECT level1_no + FROM p JOIN transition_table_level2 c ON c.parent_no = p.level1_no + INTO x; + IF FOUND THEN + RAISE EXCEPTION 'RI error'; + END IF; + RETURN NULL; + END; +$$; +CREATE TRIGGER transition_table_level1_ri_parent_upd_trigger + AFTER UPDATE ON transition_table_level1 + REFERENCING OLD TABLE AS d NEW TABLE AS i + FOR EACH STATEMENT EXECUTE PROCEDURE + transition_table_level1_ri_parent_upd_func(); +CREATE FUNCTION transition_table_level2_ri_child_insupd_func() + RETURNS TRIGGER + LANGUAGE plpgsql +AS $$ + BEGIN + PERFORM FROM i + LEFT JOIN transition_table_level1 p + ON p.level1_no IS NOT NULL AND p.level1_no = i.parent_no + WHERE p.level1_no IS NULL; + IF FOUND THEN + RAISE EXCEPTION 'RI error'; + END IF; + RETURN NULL; + END; +$$; +CREATE TRIGGER transition_table_level2_ri_child_ins_trigger + AFTER INSERT ON transition_table_level2 + REFERENCING NEW TABLE AS i + FOR EACH STATEMENT EXECUTE PROCEDURE + transition_table_level2_ri_child_insupd_func(); +CREATE TRIGGER transition_table_level2_ri_child_upd_trigger + AFTER UPDATE ON transition_table_level2 + REFERENCING NEW TABLE AS i + FOR EACH STATEMENT EXECUTE PROCEDURE + transition_table_level2_ri_child_insupd_func(); +-- create initial test data +INSERT INTO transition_table_level1 (level1_no) + SELECT generate_series(1,200); +ANALYZE transition_table_level1; +INSERT INTO transition_table_level2 (level2_no, parent_no) + SELECT level2_no, level2_no / 50 + 1 AS parent_no + FROM generate_series(1,9999) level2_no; +ANALYZE transition_table_level2; +INSERT INTO transition_table_status (level, node_no, status) + SELECT 1, level1_no, 0 FROM transition_table_level1; +INSERT INTO transition_table_status (level, node_no, status) + SELECT 2, level2_no, 0 FROM transition_table_level2; +ANALYZE transition_table_status; +INSERT INTO transition_table_level1(level1_no) + SELECT generate_series(201,1000); +ANALYZE transition_table_level1; +-- behave reasonably if someone tries to modify a transition table +CREATE FUNCTION transition_table_level2_bad_usage_func() + RETURNS TRIGGER + LANGUAGE plpgsql +AS $$ + BEGIN + INSERT INTO dx VALUES (1000000, 1000000, 'x'); + RETURN NULL; + END; +$$; +CREATE 
TRIGGER transition_table_level2_bad_usage_trigger + AFTER DELETE ON transition_table_level2 + REFERENCING OLD TABLE AS dx + FOR EACH STATEMENT EXECUTE PROCEDURE + transition_table_level2_bad_usage_func(); +DELETE FROM transition_table_level2 + WHERE level2_no BETWEEN 301 AND 305; +ERROR: relation "dx" cannot be the target of a modifying statement +CONTEXT: SQL statement "INSERT INTO dx VALUES (1000000, 1000000, 'x')" +PL/pgSQL function transition_table_level2_bad_usage_func() line 3 at SQL statement +DROP TRIGGER transition_table_level2_bad_usage_trigger + ON transition_table_level2; +-- attempt modifications which would break RI (should all fail) +DELETE FROM transition_table_level1 + WHERE level1_no = 25; +ERROR: RI error +CONTEXT: PL/pgSQL function transition_table_level1_ri_parent_del_func() line 6 at RAISE +UPDATE transition_table_level1 SET level1_no = -1 + WHERE level1_no = 30; +ERROR: RI error +CONTEXT: PL/pgSQL function transition_table_level1_ri_parent_upd_func() line 15 at RAISE +INSERT INTO transition_table_level2 (level2_no, parent_no) + VALUES (10000, 10000); +ERROR: RI error +CONTEXT: PL/pgSQL function transition_table_level2_ri_child_insupd_func() line 8 at RAISE +UPDATE transition_table_level2 SET parent_no = 2000 + WHERE level2_no = 40; +ERROR: RI error +CONTEXT: PL/pgSQL function transition_table_level2_ri_child_insupd_func() line 8 at RAISE +-- attempt modifications which would not break RI (should all succeed) +DELETE FROM transition_table_level1 + WHERE level1_no BETWEEN 201 AND 1000; +DELETE FROM transition_table_level1 + WHERE level1_no BETWEEN 100000000 AND 100000010; +SELECT count(*) FROM transition_table_level1; + count +------- + 200 +(1 row) + +DELETE FROM transition_table_level2 + WHERE level2_no BETWEEN 211 AND 220; +SELECT count(*) FROM transition_table_level2; + count +------- + 9989 +(1 row) + +CREATE TABLE alter_table_under_transition_tables +( + id int PRIMARY KEY, + name text +); +CREATE FUNCTION alter_table_under_transition_tables_upd_func() + RETURNS TRIGGER + LANGUAGE plpgsql +AS $$ +BEGIN + RAISE WARNING 'old table = %, new table = %', + (SELECT string_agg(id || '=' || name, ',') FROM d), + (SELECT string_agg(id || '=' || name, ',') FROM i); + RAISE NOTICE 'one = %', (SELECT 1 FROM alter_table_under_transition_tables LIMIT 1); + RETURN NULL; +END; +$$; +-- should fail, TRUNCATE is not compatible with transition tables +CREATE TRIGGER alter_table_under_transition_tables_upd_trigger + AFTER TRUNCATE OR UPDATE ON alter_table_under_transition_tables + REFERENCING OLD TABLE AS d NEW TABLE AS i + FOR EACH STATEMENT EXECUTE PROCEDURE + alter_table_under_transition_tables_upd_func(); +ERROR: TRUNCATE triggers with transition tables are not supported +-- should work +CREATE TRIGGER alter_table_under_transition_tables_upd_trigger + AFTER UPDATE ON alter_table_under_transition_tables + REFERENCING OLD TABLE AS d NEW TABLE AS i + FOR EACH STATEMENT EXECUTE PROCEDURE + alter_table_under_transition_tables_upd_func(); +INSERT INTO alter_table_under_transition_tables + VALUES (1, '1'), (2, '2'), (3, '3'); +UPDATE alter_table_under_transition_tables + SET name = name || name; +WARNING: old table = 1=1,2=2,3=3, new table = 1=11,2=22,3=33 +NOTICE: one = 1 +-- now change 'name' to an integer to see what happens... 
+ALTER TABLE alter_table_under_transition_tables + ALTER COLUMN name TYPE int USING name::integer; +UPDATE alter_table_under_transition_tables + SET name = (name::text || name::text)::integer; +WARNING: old table = 1=11,2=22,3=33, new table = 1=1111,2=2222,3=3333 +NOTICE: one = 1 +-- now drop column 'name' +ALTER TABLE alter_table_under_transition_tables + DROP column name; +UPDATE alter_table_under_transition_tables + SET id = id; +ERROR: column "name" does not exist +LINE 1: (SELECT string_agg(id || '=' || name, ',') FROM d) + ^ +QUERY: (SELECT string_agg(id || '=' || name, ',') FROM d) +CONTEXT: PL/pgSQL function alter_table_under_transition_tables_upd_func() line 3 at RAISE +-- +-- Test multiple reference to a transition table +-- +CREATE TABLE multi_test (i int); +INSERT INTO multi_test VALUES (1); +CREATE OR REPLACE FUNCTION multi_test_trig() RETURNS trigger +LANGUAGE plpgsql AS $$ +BEGIN + RAISE NOTICE 'count = %', (SELECT COUNT(*) FROM new_test); + RAISE NOTICE 'count union = %', + (SELECT COUNT(*) + FROM (SELECT * FROM new_test UNION ALL SELECT * FROM new_test) ss); + RETURN NULL; +END$$; +CREATE TRIGGER my_trigger AFTER UPDATE ON multi_test + REFERENCING NEW TABLE AS new_test OLD TABLE as old_test + FOR EACH STATEMENT EXECUTE PROCEDURE multi_test_trig(); +UPDATE multi_test SET i = i; +NOTICE: count = 1 +NOTICE: count union = 2 +DROP TABLE multi_test; +DROP FUNCTION multi_test_trig(); +-- +-- Check type parsing and record fetching from partitioned tables +-- +CREATE TABLE partitioned_table (a int, b text) PARTITION BY LIST (a); +CREATE TABLE pt_part1 PARTITION OF partitioned_table FOR VALUES IN (1); +CREATE TABLE pt_part2 PARTITION OF partitioned_table FOR VALUES IN (2); +INSERT INTO partitioned_table VALUES (1, 'Row 1'); +INSERT INTO partitioned_table VALUES (2, 'Row 2'); +CREATE OR REPLACE FUNCTION get_from_partitioned_table(partitioned_table.a%type) +RETURNS partitioned_table AS $$ +DECLARE + a_val partitioned_table.a%TYPE; + result partitioned_table%ROWTYPE; +BEGIN + a_val := $1; + SELECT * INTO result FROM partitioned_table WHERE a = a_val; + RETURN result; +END; $$ LANGUAGE plpgsql; +NOTICE: type reference partitioned_table.a%TYPE converted to integer +SELECT * FROM get_from_partitioned_table(1) AS t; + a | b +---+------- + 1 | Row 1 +(1 row) + +CREATE OR REPLACE FUNCTION list_partitioned_table() +RETURNS SETOF partitioned_table.a%TYPE AS $$ +DECLARE + row partitioned_table%ROWTYPE; + a_val partitioned_table.a%TYPE; +BEGIN + FOR row IN SELECT * FROM partitioned_table ORDER BY a LOOP + a_val := row.a; + RETURN NEXT a_val; + END LOOP; + RETURN; +END; $$ LANGUAGE plpgsql; +NOTICE: type reference partitioned_table.a%TYPE converted to integer +SELECT * FROM list_partitioned_table() AS t; + t +--- + 1 + 2 +(2 rows) + +-- +-- Check argument name is used instead of $n in error message +-- +CREATE FUNCTION fx(x WSlot) RETURNS void AS $$ +BEGIN + GET DIAGNOSTICS x = ROW_COUNT; + RETURN; +END; $$ LANGUAGE plpgsql; +ERROR: "x" is not a scalar variable +LINE 3: GET DIAGNOSTICS x = ROW_COUNT; + ^ diff --git a/src/test/regress/expected/point.out b/src/test/regress/expected/point.out new file mode 100644 index 0000000..ba508c3 --- /dev/null +++ b/src/test/regress/expected/point.out @@ -0,0 +1,478 @@ +-- +-- POINT +-- +-- avoid bit-exact output here because operations may not be bit-exact. +SET extra_float_digits = 0; +-- point_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. 
+INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); +ERROR: invalid input syntax for type point: "asdfasdf" +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf'); + ^ +INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)'); +ERROR: invalid input syntax for type point: "(10.0 10.0)" +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)'); + ^ +INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 10.0) x'); +ERROR: invalid input syntax for type point: "(10.0, 10.0) x" +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 10.0) x'); + ^ +INSERT INTO POINT_TBL(f1) VALUES ('(10.0,10.0'); +ERROR: invalid input syntax for type point: "(10.0,10.0" +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0,10.0'); + ^ +INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 1e+500)'); -- Out of range +ERROR: "1e+500" is out of range for type double precision +LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 1e+500)'); + ^ +SELECT * FROM POINT_TBL; + f1 +------------------- + (0,0) + (-10,0) + (-3,4) + (5.1,34.5) + (-5,-12) + (1e-300,-1e-300) + (1e+300,Infinity) + (Infinity,1e+300) + (NaN,NaN) + (10,10) +(10 rows) + +-- left of +SELECT p.* FROM POINT_TBL p WHERE p.f1 << '(0.0, 0.0)'; + f1 +---------- + (-10,0) + (-3,4) + (-5,-12) +(3 rows) + +-- right of +SELECT p.* FROM POINT_TBL p WHERE '(0.0,0.0)' >> p.f1; + f1 +---------- + (-10,0) + (-3,4) + (-5,-12) +(3 rows) + +-- above +SELECT p.* FROM POINT_TBL p WHERE '(0.0,0.0)' |>> p.f1; + f1 +---------- + (-5,-12) +(1 row) + +-- below +SELECT p.* FROM POINT_TBL p WHERE p.f1 <<| '(0.0, 0.0)'; + f1 +---------- + (-5,-12) +(1 row) + +-- equal +SELECT p.* FROM POINT_TBL p WHERE p.f1 ~= '(5.1, 34.5)'; + f1 +------------ + (5.1,34.5) +(1 row) + +-- point in box +SELECT p.* FROM POINT_TBL p + WHERE p.f1 <@ box '(0,0,100,100)'; + f1 +------------ + (0,0) + (5.1,34.5) + (10,10) +(3 rows) + +SELECT p.* FROM POINT_TBL p + WHERE box '(0,0,100,100)' @> p.f1; + f1 +------------ + (0,0) + (5.1,34.5) + (10,10) +(3 rows) + +SELECT p.* FROM POINT_TBL p + WHERE not p.f1 <@ box '(0,0,100,100)'; + f1 +------------------- + (-10,0) + (-3,4) + (-5,-12) + (1e-300,-1e-300) + (1e+300,Infinity) + (Infinity,1e+300) + (NaN,NaN) +(7 rows) + +SELECT p.* FROM POINT_TBL p + WHERE p.f1 <@ path '[(0,0),(-10,0),(-10,10)]'; + f1 +------------------ + (0,0) + (-10,0) + (1e-300,-1e-300) +(3 rows) + +SELECT p.* FROM POINT_TBL p + WHERE not box '(0,0,100,100)' @> p.f1; + f1 +------------------- + (-10,0) + (-3,4) + (-5,-12) + (1e-300,-1e-300) + (1e+300,Infinity) + (Infinity,1e+300) + (NaN,NaN) +(7 rows) + +SELECT p.f1, p.f1 <-> point '(0,0)' AS dist + FROM POINT_TBL p + ORDER BY dist; + f1 | dist +-------------------+---------------------- + (0,0) | 0 + (1e-300,-1e-300) | 1.4142135623731e-300 + (-3,4) | 5 + (-10,0) | 10 + (-5,-12) | 13 + (10,10) | 14.142135623731 + (5.1,34.5) | 34.8749193547455 + (1e+300,Infinity) | Infinity + (Infinity,1e+300) | Infinity + (NaN,NaN) | NaN +(10 rows) + +SELECT p1.f1 AS point1, p2.f1 AS point2, p1.f1 <-> p2.f1 AS dist + FROM POINT_TBL p1, POINT_TBL p2 + ORDER BY dist, p1.f1[0], p2.f1[0]; + point1 | point2 | dist +-------------------+-------------------+---------------------- + (-10,0) | (-10,0) | 0 + (-5,-12) | (-5,-12) | 0 + (-3,4) | (-3,4) | 0 + (0,0) | (0,0) | 0 + (1e-300,-1e-300) | (1e-300,-1e-300) | 0 + (5.1,34.5) | (5.1,34.5) | 0 + (10,10) | (10,10) | 0 + (0,0) | (1e-300,-1e-300) | 1.4142135623731e-300 + (1e-300,-1e-300) | (0,0) | 1.4142135623731e-300 + (-3,4) | (0,0) | 5 + (-3,4) | (1e-300,-1e-300) | 5 + (0,0) | (-3,4) | 5 + (1e-300,-1e-300) | (-3,4) | 5 + (-10,0) | (-3,4) | 8.06225774829855 + 
(-3,4) | (-10,0) | 8.06225774829855 + (-10,0) | (0,0) | 10 + (-10,0) | (1e-300,-1e-300) | 10 + (0,0) | (-10,0) | 10 + (1e-300,-1e-300) | (-10,0) | 10 + (-10,0) | (-5,-12) | 13 + (-5,-12) | (-10,0) | 13 + (-5,-12) | (0,0) | 13 + (-5,-12) | (1e-300,-1e-300) | 13 + (0,0) | (-5,-12) | 13 + (1e-300,-1e-300) | (-5,-12) | 13 + (0,0) | (10,10) | 14.142135623731 + (1e-300,-1e-300) | (10,10) | 14.142135623731 + (10,10) | (0,0) | 14.142135623731 + (10,10) | (1e-300,-1e-300) | 14.142135623731 + (-3,4) | (10,10) | 14.3178210632764 + (10,10) | (-3,4) | 14.3178210632764 + (-5,-12) | (-3,4) | 16.1245154965971 + (-3,4) | (-5,-12) | 16.1245154965971 + (-10,0) | (10,10) | 22.3606797749979 + (10,10) | (-10,0) | 22.3606797749979 + (5.1,34.5) | (10,10) | 24.9851956166046 + (10,10) | (5.1,34.5) | 24.9851956166046 + (-5,-12) | (10,10) | 26.6270539113887 + (10,10) | (-5,-12) | 26.6270539113887 + (-3,4) | (5.1,34.5) | 31.5572495632937 + (5.1,34.5) | (-3,4) | 31.5572495632937 + (0,0) | (5.1,34.5) | 34.8749193547455 + (1e-300,-1e-300) | (5.1,34.5) | 34.8749193547455 + (5.1,34.5) | (0,0) | 34.8749193547455 + (5.1,34.5) | (1e-300,-1e-300) | 34.8749193547455 + (-10,0) | (5.1,34.5) | 37.6597928831267 + (5.1,34.5) | (-10,0) | 37.6597928831267 + (-5,-12) | (5.1,34.5) | 47.5842410888311 + (5.1,34.5) | (-5,-12) | 47.5842410888311 + (-10,0) | (1e+300,Infinity) | Infinity + (-10,0) | (Infinity,1e+300) | Infinity + (-5,-12) | (1e+300,Infinity) | Infinity + (-5,-12) | (Infinity,1e+300) | Infinity + (-3,4) | (1e+300,Infinity) | Infinity + (-3,4) | (Infinity,1e+300) | Infinity + (0,0) | (1e+300,Infinity) | Infinity + (0,0) | (Infinity,1e+300) | Infinity + (1e-300,-1e-300) | (1e+300,Infinity) | Infinity + (1e-300,-1e-300) | (Infinity,1e+300) | Infinity + (5.1,34.5) | (1e+300,Infinity) | Infinity + (5.1,34.5) | (Infinity,1e+300) | Infinity + (10,10) | (1e+300,Infinity) | Infinity + (10,10) | (Infinity,1e+300) | Infinity + (1e+300,Infinity) | (-10,0) | Infinity + (1e+300,Infinity) | (-5,-12) | Infinity + (1e+300,Infinity) | (-3,4) | Infinity + (1e+300,Infinity) | (0,0) | Infinity + (1e+300,Infinity) | (1e-300,-1e-300) | Infinity + (1e+300,Infinity) | (5.1,34.5) | Infinity + (1e+300,Infinity) | (10,10) | Infinity + (1e+300,Infinity) | (Infinity,1e+300) | Infinity + (Infinity,1e+300) | (-10,0) | Infinity + (Infinity,1e+300) | (-5,-12) | Infinity + (Infinity,1e+300) | (-3,4) | Infinity + (Infinity,1e+300) | (0,0) | Infinity + (Infinity,1e+300) | (1e-300,-1e-300) | Infinity + (Infinity,1e+300) | (5.1,34.5) | Infinity + (Infinity,1e+300) | (10,10) | Infinity + (Infinity,1e+300) | (1e+300,Infinity) | Infinity + (-10,0) | (NaN,NaN) | NaN + (-5,-12) | (NaN,NaN) | NaN + (-3,4) | (NaN,NaN) | NaN + (0,0) | (NaN,NaN) | NaN + (1e-300,-1e-300) | (NaN,NaN) | NaN + (5.1,34.5) | (NaN,NaN) | NaN + (10,10) | (NaN,NaN) | NaN + (1e+300,Infinity) | (1e+300,Infinity) | NaN + (1e+300,Infinity) | (NaN,NaN) | NaN + (Infinity,1e+300) | (Infinity,1e+300) | NaN + (Infinity,1e+300) | (NaN,NaN) | NaN + (NaN,NaN) | (-10,0) | NaN + (NaN,NaN) | (-5,-12) | NaN + (NaN,NaN) | (-3,4) | NaN + (NaN,NaN) | (0,0) | NaN + (NaN,NaN) | (1e-300,-1e-300) | NaN + (NaN,NaN) | (5.1,34.5) | NaN + (NaN,NaN) | (10,10) | NaN + (NaN,NaN) | (1e+300,Infinity) | NaN + (NaN,NaN) | (Infinity,1e+300) | NaN + (NaN,NaN) | (NaN,NaN) | NaN +(100 rows) + +SELECT p1.f1 AS point1, p2.f1 AS point2 + FROM POINT_TBL p1, POINT_TBL p2 + WHERE (p1.f1 <-> p2.f1) > 3; + point1 | point2 +-------------------+------------------- + (0,0) | (-10,0) + (0,0) | (-3,4) + (0,0) | (5.1,34.5) + (0,0) | (-5,-12) + (0,0) 
| (1e+300,Infinity) + (0,0) | (Infinity,1e+300) + (0,0) | (NaN,NaN) + (0,0) | (10,10) + (-10,0) | (0,0) + (-10,0) | (-3,4) + (-10,0) | (5.1,34.5) + (-10,0) | (-5,-12) + (-10,0) | (1e-300,-1e-300) + (-10,0) | (1e+300,Infinity) + (-10,0) | (Infinity,1e+300) + (-10,0) | (NaN,NaN) + (-10,0) | (10,10) + (-3,4) | (0,0) + (-3,4) | (-10,0) + (-3,4) | (5.1,34.5) + (-3,4) | (-5,-12) + (-3,4) | (1e-300,-1e-300) + (-3,4) | (1e+300,Infinity) + (-3,4) | (Infinity,1e+300) + (-3,4) | (NaN,NaN) + (-3,4) | (10,10) + (5.1,34.5) | (0,0) + (5.1,34.5) | (-10,0) + (5.1,34.5) | (-3,4) + (5.1,34.5) | (-5,-12) + (5.1,34.5) | (1e-300,-1e-300) + (5.1,34.5) | (1e+300,Infinity) + (5.1,34.5) | (Infinity,1e+300) + (5.1,34.5) | (NaN,NaN) + (5.1,34.5) | (10,10) + (-5,-12) | (0,0) + (-5,-12) | (-10,0) + (-5,-12) | (-3,4) + (-5,-12) | (5.1,34.5) + (-5,-12) | (1e-300,-1e-300) + (-5,-12) | (1e+300,Infinity) + (-5,-12) | (Infinity,1e+300) + (-5,-12) | (NaN,NaN) + (-5,-12) | (10,10) + (1e-300,-1e-300) | (-10,0) + (1e-300,-1e-300) | (-3,4) + (1e-300,-1e-300) | (5.1,34.5) + (1e-300,-1e-300) | (-5,-12) + (1e-300,-1e-300) | (1e+300,Infinity) + (1e-300,-1e-300) | (Infinity,1e+300) + (1e-300,-1e-300) | (NaN,NaN) + (1e-300,-1e-300) | (10,10) + (1e+300,Infinity) | (0,0) + (1e+300,Infinity) | (-10,0) + (1e+300,Infinity) | (-3,4) + (1e+300,Infinity) | (5.1,34.5) + (1e+300,Infinity) | (-5,-12) + (1e+300,Infinity) | (1e-300,-1e-300) + (1e+300,Infinity) | (1e+300,Infinity) + (1e+300,Infinity) | (Infinity,1e+300) + (1e+300,Infinity) | (NaN,NaN) + (1e+300,Infinity) | (10,10) + (Infinity,1e+300) | (0,0) + (Infinity,1e+300) | (-10,0) + (Infinity,1e+300) | (-3,4) + (Infinity,1e+300) | (5.1,34.5) + (Infinity,1e+300) | (-5,-12) + (Infinity,1e+300) | (1e-300,-1e-300) + (Infinity,1e+300) | (1e+300,Infinity) + (Infinity,1e+300) | (Infinity,1e+300) + (Infinity,1e+300) | (NaN,NaN) + (Infinity,1e+300) | (10,10) + (NaN,NaN) | (0,0) + (NaN,NaN) | (-10,0) + (NaN,NaN) | (-3,4) + (NaN,NaN) | (5.1,34.5) + (NaN,NaN) | (-5,-12) + (NaN,NaN) | (1e-300,-1e-300) + (NaN,NaN) | (1e+300,Infinity) + (NaN,NaN) | (Infinity,1e+300) + (NaN,NaN) | (NaN,NaN) + (NaN,NaN) | (10,10) + (10,10) | (0,0) + (10,10) | (-10,0) + (10,10) | (-3,4) + (10,10) | (5.1,34.5) + (10,10) | (-5,-12) + (10,10) | (1e-300,-1e-300) + (10,10) | (1e+300,Infinity) + (10,10) | (Infinity,1e+300) + (10,10) | (NaN,NaN) +(91 rows) + +-- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10 +SELECT p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance + FROM POINT_TBL p1, POINT_TBL p2 + WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 + ORDER BY distance, p1.f1[0], p2.f1[0]; + point1 | point2 | distance +-------------------+-------------------+------------------ + (-3,4) | (0,0) | 5 + (-3,4) | (1e-300,-1e-300) | 5 + (-10,0) | (-3,4) | 8.06225774829855 + (-10,0) | (0,0) | 10 + (-10,0) | (1e-300,-1e-300) | 10 + (-10,0) | (-5,-12) | 13 + (-5,-12) | (0,0) | 13 + (-5,-12) | (1e-300,-1e-300) | 13 + (0,0) | (10,10) | 14.142135623731 + (1e-300,-1e-300) | (10,10) | 14.142135623731 + (-3,4) | (10,10) | 14.3178210632764 + (-5,-12) | (-3,4) | 16.1245154965971 + (-10,0) | (10,10) | 22.3606797749979 + (5.1,34.5) | (10,10) | 24.9851956166046 + (-5,-12) | (10,10) | 26.6270539113887 + (-3,4) | (5.1,34.5) | 31.5572495632937 + (0,0) | (5.1,34.5) | 34.8749193547455 + (1e-300,-1e-300) | (5.1,34.5) | 34.8749193547455 + (-10,0) | (5.1,34.5) | 37.6597928831267 + (-5,-12) | (5.1,34.5) | 47.5842410888311 + (-10,0) | (1e+300,Infinity) | Infinity + (-10,0) | (Infinity,1e+300) | Infinity + (-5,-12) | 
(1e+300,Infinity) | Infinity + (-5,-12) | (Infinity,1e+300) | Infinity + (-3,4) | (1e+300,Infinity) | Infinity + (-3,4) | (Infinity,1e+300) | Infinity + (0,0) | (1e+300,Infinity) | Infinity + (0,0) | (Infinity,1e+300) | Infinity + (1e-300,-1e-300) | (1e+300,Infinity) | Infinity + (1e-300,-1e-300) | (Infinity,1e+300) | Infinity + (5.1,34.5) | (1e+300,Infinity) | Infinity + (5.1,34.5) | (Infinity,1e+300) | Infinity + (10,10) | (1e+300,Infinity) | Infinity + (10,10) | (Infinity,1e+300) | Infinity + (1e+300,Infinity) | (Infinity,1e+300) | Infinity +(35 rows) + +-- put distance result into output to allow sorting with GEQ optimizer - tgl 97/05/10 +SELECT p1.f1 AS point1, p2.f1 AS point2, (p1.f1 <-> p2.f1) AS distance + FROM POINT_TBL p1, POINT_TBL p2 + WHERE (p1.f1 <-> p2.f1) > 3 and p1.f1 << p2.f1 and p1.f1 |>> p2.f1 + ORDER BY distance; + point1 | point2 | distance +-------------------+-------------------+------------------ + (-3,4) | (0,0) | 5 + (-3,4) | (1e-300,-1e-300) | 5 + (-10,0) | (-5,-12) | 13 + (5.1,34.5) | (10,10) | 24.9851956166046 + (1e+300,Infinity) | (Infinity,1e+300) | Infinity +(5 rows) + +-- Test that GiST indexes provide same behavior as sequential scan +CREATE TEMP TABLE point_gist_tbl(f1 point); +INSERT INTO point_gist_tbl SELECT '(0,0)' FROM generate_series(0,1000); +CREATE INDEX point_gist_tbl_index ON point_gist_tbl USING gist (f1); +INSERT INTO point_gist_tbl VALUES ('(0.0000009,0.0000009)'); +SET enable_seqscan TO true; +SET enable_indexscan TO false; +SET enable_bitmapscan TO false; +SELECT COUNT(*) FROM point_gist_tbl WHERE f1 ~= '(0.0000009,0.0000009)'::point; + count +------- + 1002 +(1 row) + +SELECT COUNT(*) FROM point_gist_tbl WHERE f1 <@ '(0.0000009,0.0000009),(0.0000009,0.0000009)'::box; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM point_gist_tbl WHERE f1 ~= '(0.0000018,0.0000018)'::point; + count +------- + 1 +(1 row) + +SET enable_seqscan TO false; +SET enable_indexscan TO true; +SET enable_bitmapscan TO true; +SELECT COUNT(*) FROM point_gist_tbl WHERE f1 ~= '(0.0000009,0.0000009)'::point; + count +------- + 1002 +(1 row) + +SELECT COUNT(*) FROM point_gist_tbl WHERE f1 <@ '(0.0000009,0.0000009),(0.0000009,0.0000009)'::box; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM point_gist_tbl WHERE f1 ~= '(0.0000018,0.0000018)'::point; + count +------- + 1 +(1 row) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- test non-error-throwing API for some core types +SELECT pg_input_is_valid('1,y', 'point'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('1,y', 'point'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + invalid input syntax for type point: "1,y" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/polygon.out b/src/test/regress/expected/polygon.out new file mode 100644 index 0000000..7a9778e --- /dev/null +++ b/src/test/regress/expected/polygon.out @@ -0,0 +1,333 @@ +-- +-- POLYGON +-- +-- polygon logic +-- +CREATE TABLE POLYGON_TBL(f1 polygon); +INSERT INTO POLYGON_TBL(f1) VALUES ('(2.0,0.0),(2.0,4.0),(0.0,0.0)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(3.0,1.0),(3.0,3.0),(1.0,0.0)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(1,2),(3,4),(5,6),(7,8)'); +INSERT INTO POLYGON_TBL(f1) VALUES ('(7,8),(5,6),(3,4),(1,2)'); -- Reverse +INSERT INTO POLYGON_TBL(f1) VALUES ('(1,2),(7,8),(5,6),(3,-4)'); +-- degenerate polygons +INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0,0.0)'); +INSERT INTO 
POLYGON_TBL(f1) VALUES ('(0.0,1.0),(0.0,1.0)'); +-- bad polygon input strings +INSERT INTO POLYGON_TBL(f1) VALUES ('0.0'); +ERROR: invalid input syntax for type polygon: "0.0" +LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('0.0'); + ^ +INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0 0.0'); +ERROR: invalid input syntax for type polygon: "(0.0 0.0" +LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('(0.0 0.0'); + ^ +INSERT INTO POLYGON_TBL(f1) VALUES ('(0,1,2)'); +ERROR: invalid input syntax for type polygon: "(0,1,2)" +LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('(0,1,2)'); + ^ +INSERT INTO POLYGON_TBL(f1) VALUES ('(0,1,2,3'); +ERROR: invalid input syntax for type polygon: "(0,1,2,3" +LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('(0,1,2,3'); + ^ +INSERT INTO POLYGON_TBL(f1) VALUES ('asdf'); +ERROR: invalid input syntax for type polygon: "asdf" +LINE 1: INSERT INTO POLYGON_TBL(f1) VALUES ('asdf'); + ^ +SELECT * FROM POLYGON_TBL; + f1 +---------------------------- + ((2,0),(2,4),(0,0)) + ((3,1),(3,3),(1,0)) + ((1,2),(3,4),(5,6),(7,8)) + ((7,8),(5,6),(3,4),(1,2)) + ((1,2),(7,8),(5,6),(3,-4)) + ((0,0)) + ((0,1),(0,1)) +(7 rows) + +-- +-- Test the SP-GiST index +-- +CREATE TABLE quad_poly_tbl (id int, p polygon); +INSERT INTO quad_poly_tbl + SELECT (x - 1) * 100 + y, polygon(circle(point(x * 10, y * 10), 1 + (x + y) % 10)) + FROM generate_series(1, 100) x, + generate_series(1, 100) y; +INSERT INTO quad_poly_tbl + SELECT i, polygon '((200, 300),(210, 310),(230, 290))' + FROM generate_series(10001, 11000) AS i; +INSERT INTO quad_poly_tbl + VALUES + (11001, NULL), + (11002, NULL), + (11003, NULL); +CREATE INDEX quad_poly_tbl_idx ON quad_poly_tbl USING spgist(p); +-- get reference results for ORDER BY distance from seq scan +SET enable_seqscan = ON; +SET enable_indexscan = OFF; +SET enable_bitmapscan = OFF; +CREATE TEMP TABLE quad_poly_tbl_ord_seq2 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; +-- check results from index scan +SET enable_seqscan = OFF; +SET enable_indexscan = OFF; +SET enable_bitmapscan = ON; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p << polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p << '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p << '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p << polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 3890 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &< polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p &< '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p &< '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p &< polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 7900 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p && polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN 
+--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p && '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p && '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p && polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 977 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p &> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p &> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p &> polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 7000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p >> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p >> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p >> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p >> polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 2990 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p <<| polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p <<| '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p <<| '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p <<| polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 1890 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p &<| polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p &<| '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p &<| '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p &<| polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 6900 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p |&> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p |&> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p |&> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p |&> polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 
9000 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p |>> polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +---------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p |>> '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p |>> '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p |>> polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 3990 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p <@ '((300,300),(400,600),(600,500),(700,200))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p <@ '((300,300),(400,600),(600,500),(700,200))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + count +------- + 831 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p @> polygon '((340,550),(343,552),(341,553))'; + QUERY PLAN +----------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p @> '((340,550),(343,552),(341,553))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p @> '((340,550),(343,552),(341,553))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p @> polygon '((340,550),(343,552),(341,553))'; + count +------- + 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM quad_poly_tbl WHERE p ~= polygon '((200, 300),(210, 310),(230, 290))'; + QUERY PLAN +----------------------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on quad_poly_tbl + Recheck Cond: (p ~= '((200,300),(210,310),(230,290))'::polygon) + -> Bitmap Index Scan on quad_poly_tbl_idx + Index Cond: (p ~= '((200,300),(210,310),(230,290))'::polygon) +(5 rows) + +SELECT count(*) FROM quad_poly_tbl WHERE p ~= polygon '((200, 300),(210, 310),(230, 290))'; + count +------- + 1000 +(1 row) + +-- test ORDER BY distance +SET enable_indexscan = ON; +SET enable_bitmapscan = OFF; +EXPLAIN (COSTS OFF) +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; + QUERY PLAN +--------------------------------------------------------------------------------- + WindowAgg + -> Index Scan using quad_poly_tbl_idx on quad_poly_tbl + Index Cond: (p <@ '((300,300),(400,600),(600,500),(700,200))'::polygon) + Order By: (p <-> '(123,456)'::point) +(4 rows) + +CREATE TEMP TABLE quad_poly_tbl_ord_idx2 AS +SELECT rank() OVER (ORDER BY p <-> point '123,456') n, p <-> point '123,456' dist, id +FROM quad_poly_tbl WHERE p <@ polygon '((300,300),(400,600),(600,500),(700,200))'; +SELECT * +FROM quad_poly_tbl_ord_seq2 seq FULL JOIN quad_poly_tbl_ord_idx2 idx + ON seq.n = idx.n AND seq.id = idx.id AND + (seq.dist = idx.dist OR seq.dist IS NULL AND idx.dist IS NULL) +WHERE seq.id IS NULL OR idx.id IS NULL; + n | dist | id | n | dist | id +---+------+----+---+------+---- +(0 rows) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- test 
non-error-throwing API for some core types +SELECT pg_input_is_valid('(2.0,0.8,0.1)', 'polygon'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(2.0,0.8,0.1)', 'polygon'); + message | detail | hint | sql_error_code +--------------------------------------------------------+--------+------+---------------- + invalid input syntax for type polygon: "(2.0,0.8,0.1)" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('(2.0,xyz)', 'polygon'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(2.0,xyz)', 'polygon'); + message | detail | hint | sql_error_code +----------------------------------------------------+--------+------+---------------- + invalid input syntax for type polygon: "(2.0,xyz)" | | | 22P02 +(1 row) + diff --git a/src/test/regress/expected/polymorphism.out b/src/test/regress/expected/polymorphism.out new file mode 100644 index 0000000..bf08e40 --- /dev/null +++ b/src/test/regress/expected/polymorphism.out @@ -0,0 +1,2098 @@ +-- +-- Tests for polymorphic SQL functions and aggregates based on them. +-- Tests for other features related to function-calling have snuck in, too. +-- +create function polyf(x anyelement) returns anyelement as $$ + select x + 1 +$$ language sql; +select polyf(42) as int, polyf(4.5) as num; + int | num +-----+----- + 43 | 5.5 +(1 row) + +select polyf(point(3,4)); -- fail for lack of + operator +ERROR: operator does not exist: point + integer +LINE 2: select x + 1 + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +QUERY: + select x + 1 + +CONTEXT: SQL function "polyf" during inlining +drop function polyf(x anyelement); +create function polyf(x anyelement) returns anyarray as $$ + select array[x + 1, x + 2] +$$ language sql; +select polyf(42) as int, polyf(4.5) as num; + int | num +---------+----------- + {43,44} | {5.5,6.5} +(1 row) + +drop function polyf(x anyelement); +create function polyf(x anyarray) returns anyelement as $$ + select x[1] +$$ language sql; +select polyf(array[2,4]) as int, polyf(array[4.5, 7.7]) as num; + int | num +-----+----- + 2 | 4.5 +(1 row) + +select polyf(stavalues1) from pg_statistic; -- fail, can't infer element type +ERROR: cannot determine element type of "anyarray" argument +drop function polyf(x anyarray); +create function polyf(x anyarray) returns anyarray as $$ + select x +$$ language sql; +select polyf(array[2,4]) as int, polyf(array[4.5, 7.7]) as num; + int | num +-------+----------- + {2,4} | {4.5,7.7} +(1 row) + +select polyf(stavalues1) from pg_statistic; -- fail, can't infer element type +ERROR: return type anyarray is not supported for SQL functions +CONTEXT: SQL function "polyf" during inlining +drop function polyf(x anyarray); +-- fail, can't infer type: +create function polyf(x anyelement) returns anyrange as $$ + select array[x + 1, x + 2] +$$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. 
+create function polyf(x anyrange) returns anyarray as $$ + select array[lower(x), upper(x)] +$$ language sql; +select polyf(int4range(42, 49)) as int, polyf(float8range(4.5, 7.8)) as num; + int | num +---------+----------- + {42,49} | {4.5,7.8} +(1 row) + +drop function polyf(x anyrange); +create function polyf(x anycompatible, y anycompatible) returns anycompatiblearray as $$ + select array[x, y] +$$ language sql; +select polyf(2, 4) as int, polyf(2, 4.5) as num; + int | num +-------+--------- + {2,4} | {2,4.5} +(1 row) + +drop function polyf(x anycompatible, y anycompatible); +create function polyf(x anycompatiblerange, y anycompatible, z anycompatible) returns anycompatiblearray as $$ + select array[lower(x), upper(x), y, z] +$$ language sql; +select polyf(int4range(42, 49), 11, 2::smallint) as int, polyf(float8range(4.5, 7.8), 7.8, 11::real) as num; + int | num +--------------+------------------ + {42,49,11,2} | {4.5,7.8,7.8,11} +(1 row) + +select polyf(int4range(42, 49), 11, 4.5) as fail; -- range type doesn't fit +ERROR: function polyf(int4range, integer, numeric) does not exist +LINE 1: select polyf(int4range(42, 49), 11, 4.5) as fail; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function polyf(x anycompatiblerange, y anycompatible, z anycompatible); +create function polyf(x anycompatiblemultirange, y anycompatible, z anycompatible) returns anycompatiblearray as $$ + select array[lower(x), upper(x), y, z] +$$ language sql; +select polyf(multirange(int4range(42, 49)), 11, 2::smallint) as int, polyf(multirange(float8range(4.5, 7.8)), 7.8, 11::real) as num; + int | num +--------------+------------------ + {42,49,11,2} | {4.5,7.8,7.8,11} +(1 row) + +select polyf(multirange(int4range(42, 49)), 11, 4.5) as fail; -- range type doesn't fit +ERROR: function polyf(int4multirange, integer, numeric) does not exist +LINE 1: select polyf(multirange(int4range(42, 49)), 11, 4.5) as fail... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function polyf(x anycompatiblemultirange, y anycompatible, z anycompatible); +-- fail, can't infer type: +create function polyf(x anycompatible) returns anycompatiblerange as $$ + select array[x + 1, x + 2] +$$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. +create function polyf(x anycompatiblerange, y anycompatiblearray) returns anycompatiblerange as $$ + select x +$$ language sql; +select polyf(int4range(42, 49), array[11]) as int, polyf(float8range(4.5, 7.8), array[7]) as num; + int | num +---------+----------- + [42,49) | [4.5,7.8) +(1 row) + +drop function polyf(x anycompatiblerange, y anycompatiblearray); +-- fail, can't infer type: +create function polyf(x anycompatible) returns anycompatiblemultirange as $$ + select array[x + 1, x + 2] +$$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblemultirange requires at least one input of type anycompatiblerange or anycompatiblemultirange. 
+create function polyf(x anycompatiblemultirange, y anycompatiblearray) returns anycompatiblemultirange as $$ + select x +$$ language sql; +select polyf(multirange(int4range(42, 49)), array[11]) as int, polyf(multirange(float8range(4.5, 7.8)), array[7]) as num; + int | num +-----------+------------- + {[42,49)} | {[4.5,7.8)} +(1 row) + +drop function polyf(x anycompatiblemultirange, y anycompatiblearray); +create function polyf(a anyelement, b anyarray, + c anycompatible, d anycompatible, + OUT x anyarray, OUT y anycompatiblearray) +as $$ + select a || b, array[c, d] +$$ language sql; +select x, pg_typeof(x), y, pg_typeof(y) + from polyf(11, array[1, 2], 42, 34.5); + x | pg_typeof | y | pg_typeof +----------+-----------+-----------+----------- + {11,1,2} | integer[] | {42,34.5} | numeric[] +(1 row) + +select x, pg_typeof(x), y, pg_typeof(y) + from polyf(11, array[1, 2], point(1,2), point(3,4)); + x | pg_typeof | y | pg_typeof +----------+-----------+-------------------+----------- + {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] +(1 row) + +select x, pg_typeof(x), y, pg_typeof(y) + from polyf(11, '{1,2}', point(1,2), '(3,4)'); + x | pg_typeof | y | pg_typeof +----------+-----------+-------------------+----------- + {11,1,2} | integer[] | {"(1,2)","(3,4)"} | point[] +(1 row) + +select x, pg_typeof(x), y, pg_typeof(y) + from polyf(11, array[1, 2.2], 42, 34.5); -- fail +ERROR: function polyf(integer, numeric[], integer, numeric) does not exist +LINE 2: from polyf(11, array[1, 2.2], 42, 34.5); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function polyf(a anyelement, b anyarray, + c anycompatible, d anycompatible); +create function polyf(anyrange) returns anymultirange +as 'select multirange($1);' language sql; +select polyf(int4range(1,10)); + polyf +---------- + {[1,10)} +(1 row) + +select polyf(null); +ERROR: could not determine polymorphic type because input has type unknown +drop function polyf(anyrange); +create function polyf(anymultirange) returns anyelement +as 'select lower($1);' language sql; +select polyf(int4multirange(int4range(1,10), int4range(20,30))); + polyf +------- + 1 +(1 row) + +select polyf(null); +ERROR: could not determine polymorphic type because input has type unknown +drop function polyf(anymultirange); +create function polyf(anycompatiblerange) returns anycompatiblemultirange +as 'select multirange($1);' language sql; +select polyf(int4range(1,10)); + polyf +---------- + {[1,10)} +(1 row) + +select polyf(null); +ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown +drop function polyf(anycompatiblerange); +create function polyf(anymultirange) returns anyrange +as 'select range_merge($1);' language sql; +select polyf(int4multirange(int4range(1,10), int4range(20,30))); + polyf +-------- + [1,30) +(1 row) + +select polyf(null); +ERROR: could not determine polymorphic type because input has type unknown +drop function polyf(anymultirange); +create function polyf(anycompatiblemultirange) returns anycompatiblerange +as 'select range_merge($1);' language sql; +select polyf(int4multirange(int4range(1,10), int4range(20,30))); + polyf +-------- + [1,30) +(1 row) + +select polyf(null); +ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown +drop function polyf(anycompatiblemultirange); +create function polyf(anycompatiblemultirange) returns anycompatible +as 'select lower($1);' language sql; +select 
polyf(int4multirange(int4range(1,10), int4range(20,30))); + polyf +------- + 1 +(1 row) + +select polyf(null); +ERROR: could not determine polymorphic type anycompatiblemultirange because input has type unknown +drop function polyf(anycompatiblemultirange); +-- +-- Polymorphic aggregate tests +-- +-- Legend: +----------- +-- A = type is ANY +-- P = type is polymorphic +-- N = type is non-polymorphic +-- B = aggregate base type +-- S = aggregate state type +-- R = aggregate return type +-- 1 = arg1 of a function +-- 2 = arg2 of a function +-- ag = aggregate +-- tf = trans (state) function +-- ff = final function +-- rt = return type of a function +-- -> = implies +-- => = allowed +-- !> = not allowed +-- E = exists +-- NE = not-exists +-- +-- Possible states: +-- ---------------- +-- B = (A || P || N) +-- when (B = A) -> (tf2 = NE) +-- S = (P || N) +-- ff = (E || NE) +-- tf1 = (P || N) +-- tf2 = (NE || P || N) +-- R = (P || N) +-- create functions for use as tf and ff with the needed combinations of +-- argument polymorphism, but within the constraints of valid aggregate +-- functions, i.e. tf arg1 and tf return type must match +-- polymorphic single arg transfn +CREATE FUNCTION stfp(anyarray) RETURNS anyarray AS +'select $1' LANGUAGE SQL; +-- non-polymorphic single arg transfn +CREATE FUNCTION stfnp(int[]) RETURNS int[] AS +'select $1' LANGUAGE SQL; +-- dual polymorphic transfn +CREATE FUNCTION tfp(anyarray,anyelement) RETURNS anyarray AS +'select $1 || $2' LANGUAGE SQL; +-- dual non-polymorphic transfn +CREATE FUNCTION tfnp(int[],int) RETURNS int[] AS +'select $1 || $2' LANGUAGE SQL; +-- arg1 only polymorphic transfn +CREATE FUNCTION tf1p(anyarray,int) RETURNS anyarray AS +'select $1' LANGUAGE SQL; +-- arg2 only polymorphic transfn +CREATE FUNCTION tf2p(int[],anyelement) RETURNS int[] AS +'select $1' LANGUAGE SQL; +-- multi-arg polymorphic +CREATE FUNCTION sum3(anyelement,anyelement,anyelement) returns anyelement AS +'select $1+$2+$3' language sql strict; +-- finalfn polymorphic +CREATE FUNCTION ffp(anyarray) RETURNS anyarray AS +'select $1' LANGUAGE SQL; +-- finalfn non-polymorphic +CREATE FUNCTION ffnp(int[]) returns int[] as +'select $1' LANGUAGE SQL; +-- Try to cover all the possible states: +-- +-- Note: in Cases 1 & 2, we are trying to return P. Therefore, if the transfn +-- is stfnp, tfnp, or tf2p, we must use ffp as finalfn, because stfnp, tfnp, +-- and tf2p do not return P. Conversely, in Cases 3 & 4, we are trying to +-- return N. Therefore, if the transfn is stfp, tfp, or tf1p, we must use ffnp +-- as finalfn, because stfp, tfp, and tf1p do not return N. +-- +-- Case1 (R = P) && (B = A) +-- ------------------------ +-- S tf1 +-- ------- +-- N N +-- should CREATE +CREATE AGGREGATE myaggp01a(*) (SFUNC = stfnp, STYPE = int4[], + FINALFUNC = ffp, INITCOND = '{}'); +-- P N +-- should ERROR: stfnp(anyarray) not matched by stfnp(int[]) +CREATE AGGREGATE myaggp02a(*) (SFUNC = stfnp, STYPE = anyarray, + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
+-- N P +-- should CREATE +CREATE AGGREGATE myaggp03a(*) (SFUNC = stfp, STYPE = int4[], + FINALFUNC = ffp, INITCOND = '{}'); +CREATE AGGREGATE myaggp03b(*) (SFUNC = stfp, STYPE = int4[], + INITCOND = '{}'); +-- P P +-- should ERROR: we have no way to resolve S +CREATE AGGREGATE myaggp04a(*) (SFUNC = stfp, STYPE = anyarray, + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +CREATE AGGREGATE myaggp04b(*) (SFUNC = stfp, STYPE = anyarray, + INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- Case2 (R = P) && ((B = P) || (B = N)) +-- ------------------------------------- +-- S tf1 B tf2 +-- ----------------------- +-- N N N N +-- should CREATE +CREATE AGGREGATE myaggp05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +-- N N N P +-- should CREATE +CREATE AGGREGATE myaggp06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +-- N N P N +-- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int) +CREATE AGGREGATE myaggp07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: function tfnp(integer[], anyelement) does not exist +-- N N P P +-- should CREATE +CREATE AGGREGATE myaggp08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +-- N P N N +-- should CREATE +CREATE AGGREGATE myaggp09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +CREATE AGGREGATE myaggp09b(BASETYPE = int, SFUNC = tf1p, STYPE = int[], + INITCOND = '{}'); +-- N P N P +-- should CREATE +CREATE AGGREGATE myaggp10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +CREATE AGGREGATE myaggp10b(BASETYPE = int, SFUNC = tfp, STYPE = int[], + INITCOND = '{}'); +-- N P P N +-- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int) +CREATE AGGREGATE myaggp11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: function tf1p(integer[], anyelement) does not exist +CREATE AGGREGATE myaggp11b(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], + INITCOND = '{}'); +ERROR: function tf1p(integer[], anyelement) does not exist +-- N P P P +-- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement) +CREATE AGGREGATE myaggp12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: function tfp(integer[], anyelement) does not exist +CREATE AGGREGATE myaggp12b(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], + INITCOND = '{}'); +ERROR: function tfp(integer[], anyelement) does not exist +-- P N N N +-- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int) +CREATE AGGREGATE myaggp13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
+-- P N N P +-- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement) +CREATE AGGREGATE myaggp14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- P N P N +-- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int) +CREATE AGGREGATE myaggp15a(BASETYPE = anyelement, SFUNC = tfnp, + STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); +ERROR: function tfnp(anyarray, anyelement) does not exist +-- P N P P +-- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement) +CREATE AGGREGATE myaggp16a(BASETYPE = anyelement, SFUNC = tf2p, + STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); +ERROR: function tf2p(anyarray, anyelement) does not exist +-- P P N N +-- should ERROR: we have no way to resolve S +CREATE AGGREGATE myaggp17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +CREATE AGGREGATE myaggp17b(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, + INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- P P N P +-- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement) +CREATE AGGREGATE myaggp18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, + FINALFUNC = ffp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +CREATE AGGREGATE myaggp18b(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, + INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
+-- P P P N +-- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int) +CREATE AGGREGATE myaggp19a(BASETYPE = anyelement, SFUNC = tf1p, + STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); +ERROR: function tf1p(anyarray, anyelement) does not exist +CREATE AGGREGATE myaggp19b(BASETYPE = anyelement, SFUNC = tf1p, + STYPE = anyarray, INITCOND = '{}'); +ERROR: function tf1p(anyarray, anyelement) does not exist +-- P P P P +-- should CREATE +CREATE AGGREGATE myaggp20a(BASETYPE = anyelement, SFUNC = tfp, + STYPE = anyarray, FINALFUNC = ffp, INITCOND = '{}'); +CREATE AGGREGATE myaggp20b(BASETYPE = anyelement, SFUNC = tfp, + STYPE = anyarray, INITCOND = '{}'); +-- Case3 (R = N) && (B = A) +-- ------------------------ +-- S tf1 +-- ------- +-- N N +-- should CREATE +CREATE AGGREGATE myaggn01a(*) (SFUNC = stfnp, STYPE = int4[], + FINALFUNC = ffnp, INITCOND = '{}'); +CREATE AGGREGATE myaggn01b(*) (SFUNC = stfnp, STYPE = int4[], + INITCOND = '{}'); +-- P N +-- should ERROR: stfnp(anyarray) not matched by stfnp(int[]) +CREATE AGGREGATE myaggn02a(*) (SFUNC = stfnp, STYPE = anyarray, + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +CREATE AGGREGATE myaggn02b(*) (SFUNC = stfnp, STYPE = anyarray, + INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- N P +-- should CREATE +CREATE AGGREGATE myaggn03a(*) (SFUNC = stfp, STYPE = int4[], + FINALFUNC = ffnp, INITCOND = '{}'); +-- P P +-- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) +CREATE AGGREGATE myaggn04a(*) (SFUNC = stfp, STYPE = anyarray, + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
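For contrast with the error cases in this matrix, a polymorphic state type is accepted whenever it can be deduced from a polymorphic aggregate input; the same deduction rule applies to the anycompatible family. A minimal working sketch, not part of the captured regression output (the aggregate name demo_accum is hypothetical; array_append is the built-in function):

CREATE AGGREGATE demo_accum (anycompatible) (
    SFUNC = array_append,          -- built-in array_append(anycompatiblearray, anycompatible)
    STYPE = anycompatiblearray,    -- deducible from the anycompatible input at call time
    INITCOND = '{}'
);
SELECT demo_accum(g) FROM generate_series(1, 3) g;   -- {1,2,3}
DROP AGGREGATE demo_accum (anycompatible);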
+-- Case4 (R = N) && ((B = P) || (B = N)) +-- ------------------------------------- +-- S tf1 B tf2 +-- ----------------------- +-- N N N N +-- should CREATE +CREATE AGGREGATE myaggn05a(BASETYPE = int, SFUNC = tfnp, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +CREATE AGGREGATE myaggn05b(BASETYPE = int, SFUNC = tfnp, STYPE = int[], + INITCOND = '{}'); +-- N N N P +-- should CREATE +CREATE AGGREGATE myaggn06a(BASETYPE = int, SFUNC = tf2p, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +CREATE AGGREGATE myaggn06b(BASETYPE = int, SFUNC = tf2p, STYPE = int[], + INITCOND = '{}'); +-- N N P N +-- should ERROR: tfnp(int[], anyelement) not matched by tfnp(int[], int) +CREATE AGGREGATE myaggn07a(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function tfnp(integer[], anyelement) does not exist +CREATE AGGREGATE myaggn07b(BASETYPE = anyelement, SFUNC = tfnp, STYPE = int[], + INITCOND = '{}'); +ERROR: function tfnp(integer[], anyelement) does not exist +-- N N P P +-- should CREATE +CREATE AGGREGATE myaggn08a(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +CREATE AGGREGATE myaggn08b(BASETYPE = anyelement, SFUNC = tf2p, STYPE = int[], + INITCOND = '{}'); +-- N P N N +-- should CREATE +CREATE AGGREGATE myaggn09a(BASETYPE = int, SFUNC = tf1p, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +-- N P N P +-- should CREATE +CREATE AGGREGATE myaggn10a(BASETYPE = int, SFUNC = tfp, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +-- N P P N +-- should ERROR: tf1p(int[],anyelement) not matched by tf1p(anyarray,int) +CREATE AGGREGATE myaggn11a(BASETYPE = anyelement, SFUNC = tf1p, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function tf1p(integer[], anyelement) does not exist +-- N P P P +-- should ERROR: tfp(int[],anyelement) not matched by tfp(anyarray,anyelement) +CREATE AGGREGATE myaggn12a(BASETYPE = anyelement, SFUNC = tfp, STYPE = int[], + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function tfp(integer[], anyelement) does not exist +-- P N N N +-- should ERROR: tfnp(anyarray, int) not matched by tfnp(int[],int) +CREATE AGGREGATE myaggn13a(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +CREATE AGGREGATE myaggn13b(BASETYPE = int, SFUNC = tfnp, STYPE = anyarray, + INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- P N N P +-- should ERROR: tf2p(anyarray, int) not matched by tf2p(int[],anyelement) +CREATE AGGREGATE myaggn14a(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +CREATE AGGREGATE myaggn14b(BASETYPE = int, SFUNC = tf2p, STYPE = anyarray, + INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
+-- P N P N +-- should ERROR: tfnp(anyarray, anyelement) not matched by tfnp(int[],int) +CREATE AGGREGATE myaggn15a(BASETYPE = anyelement, SFUNC = tfnp, + STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function tfnp(anyarray, anyelement) does not exist +CREATE AGGREGATE myaggn15b(BASETYPE = anyelement, SFUNC = tfnp, + STYPE = anyarray, INITCOND = '{}'); +ERROR: function tfnp(anyarray, anyelement) does not exist +-- P N P P +-- should ERROR: tf2p(anyarray, anyelement) not matched by tf2p(int[],anyelement) +CREATE AGGREGATE myaggn16a(BASETYPE = anyelement, SFUNC = tf2p, + STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function tf2p(anyarray, anyelement) does not exist +CREATE AGGREGATE myaggn16b(BASETYPE = anyelement, SFUNC = tf2p, + STYPE = anyarray, INITCOND = '{}'); +ERROR: function tf2p(anyarray, anyelement) does not exist +-- P P N N +-- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) +CREATE AGGREGATE myaggn17a(BASETYPE = int, SFUNC = tf1p, STYPE = anyarray, + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- P P N P +-- should ERROR: tfp(anyarray, int) not matched by tfp(anyarray, anyelement) +CREATE AGGREGATE myaggn18a(BASETYPE = int, SFUNC = tfp, STYPE = anyarray, + FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: cannot determine transition data type +DETAIL: A result of type anyarray requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. +-- P P P N +-- should ERROR: tf1p(anyarray, anyelement) not matched by tf1p(anyarray, int) +CREATE AGGREGATE myaggn19a(BASETYPE = anyelement, SFUNC = tf1p, + STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function tf1p(anyarray, anyelement) does not exist +-- P P P P +-- should ERROR: ffnp(anyarray) not matched by ffnp(int[]) +CREATE AGGREGATE myaggn20a(BASETYPE = anyelement, SFUNC = tfp, + STYPE = anyarray, FINALFUNC = ffnp, INITCOND = '{}'); +ERROR: function ffnp(anyarray) does not exist +-- multi-arg polymorphic +CREATE AGGREGATE mysum2(anyelement,anyelement) (SFUNC = sum3, + STYPE = anyelement, INITCOND = '0'); +-- create test data for polymorphic aggregates +create temp table t(f1 int, f2 int[], f3 text); +insert into t values(1,array[1],'a'); +insert into t values(1,array[11],'b'); +insert into t values(1,array[111],'c'); +insert into t values(2,array[2],'a'); +insert into t values(2,array[22],'b'); +insert into t values(2,array[222],'c'); +insert into t values(3,array[3],'a'); +insert into t values(3,array[3],'b'); +-- test the successfully created polymorphic aggregates +select f3, myaggp01a(*) from t group by f3 order by f3; + f3 | myaggp01a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggp03a(*) from t group by f3 order by f3; + f3 | myaggp03a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggp03b(*) from t group by f3 order by f3; + f3 | myaggp03b +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggp05a(f1) from t group by f3 order by f3; + f3 | myaggp05a +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggp06a(f1) from t group by f3 order by f3; + f3 | myaggp06a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggp08a(f1) from t group by f3 order by f3; + f3 | myaggp08a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select 
f3, myaggp09a(f1) from t group by f3 order by f3; + f3 | myaggp09a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggp09b(f1) from t group by f3 order by f3; + f3 | myaggp09b +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggp10a(f1) from t group by f3 order by f3; + f3 | myaggp10a +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggp10b(f1) from t group by f3 order by f3; + f3 | myaggp10b +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggp20a(f1) from t group by f3 order by f3; + f3 | myaggp20a +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggp20b(f1) from t group by f3 order by f3; + f3 | myaggp20b +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggn01a(*) from t group by f3 order by f3; + f3 | myaggn01a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn01b(*) from t group by f3 order by f3; + f3 | myaggn01b +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn03a(*) from t group by f3 order by f3; + f3 | myaggn03a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn05a(f1) from t group by f3 order by f3; + f3 | myaggn05a +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggn05b(f1) from t group by f3 order by f3; + f3 | myaggn05b +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select f3, myaggn06a(f1) from t group by f3 order by f3; + f3 | myaggn06a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn06b(f1) from t group by f3 order by f3; + f3 | myaggn06b +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn08a(f1) from t group by f3 order by f3; + f3 | myaggn08a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn08b(f1) from t group by f3 order by f3; + f3 | myaggn08b +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn09a(f1) from t group by f3 order by f3; + f3 | myaggn09a +----+----------- + a | {} + b | {} + c | {} +(3 rows) + +select f3, myaggn10a(f1) from t group by f3 order by f3; + f3 | myaggn10a +----+----------- + a | {1,2,3} + b | {1,2,3} + c | {1,2} +(3 rows) + +select mysum2(f1, f1 + 1) from t; + mysum2 +-------- + 38 +(1 row) + +-- test inlining of polymorphic SQL functions +create function bleat(int) returns int as $$ +begin + raise notice 'bleat %', $1; + return $1; +end$$ language plpgsql; +create function sql_if(bool, anyelement, anyelement) returns anyelement as $$ +select case when $1 then $2 else $3 end $$ language sql; +-- Note this would fail with integer overflow, never mind wrong bleat() output, +-- if the CASE expression were not successfully inlined +select f1, sql_if(f1 > 0, bleat(f1), bleat(f1 + 1)) from int4_tbl; +NOTICE: bleat 1 +NOTICE: bleat 123456 +NOTICE: bleat -123455 +NOTICE: bleat 2147483647 +NOTICE: bleat -2147483646 + f1 | sql_if +-------------+------------- + 0 | 1 + 123456 | 123456 + -123456 | -123455 + 2147483647 | 2147483647 + -2147483647 | -2147483646 +(5 rows) + +select q2, sql_if(q2 > 0, q2, q2 + 1) from int8_tbl; + q2 | sql_if +-------------------+------------------- + 456 | 456 + 4567890123456789 | 4567890123456789 + 123 | 123 + 4567890123456789 | 4567890123456789 + -4567890123456789 | -4567890123456788 +(5 rows) + +-- another sort of polymorphic aggregate +CREATE AGGREGATE array_larger_accum (anyarray) +( + sfunc = array_larger, + 
stype = anyarray, + initcond = '{}' +); +SELECT array_larger_accum(i) +FROM (VALUES (ARRAY[1,2]), (ARRAY[3,4])) as t(i); + array_larger_accum +-------------------- + {3,4} +(1 row) + +SELECT array_larger_accum(i) +FROM (VALUES (ARRAY[row(1,2),row(3,4)]), (ARRAY[row(5,6),row(7,8)])) as t(i); + array_larger_accum +-------------------- + {"(5,6)","(7,8)"} +(1 row) + +-- another kind of polymorphic aggregate +create function add_group(grp anyarray, ad anyelement, size integer) + returns anyarray + as $$ +begin + if grp is null then + return array[ad]; + end if; + if array_upper(grp, 1) < size then + return grp || ad; + end if; + return grp; +end; +$$ + language plpgsql immutable; +create aggregate build_group(anyelement, integer) ( + SFUNC = add_group, + STYPE = anyarray +); +select build_group(q1,3) from int8_tbl; + build_group +---------------------------- + {123,123,4567890123456789} +(1 row) + +-- this should fail because stype isn't compatible with arg +create aggregate build_group(int8, integer) ( + SFUNC = add_group, + STYPE = int2[] +); +ERROR: function add_group(smallint[], bigint, integer) does not exist +-- but we can make a non-poly agg from a poly sfunc if types are OK +create aggregate build_group(int8, integer) ( + SFUNC = add_group, + STYPE = int8[] +); +-- check proper resolution of data types for polymorphic transfn/finalfn +create function first_el_transfn(anyarray, anyelement) returns anyarray as +'select $1 || $2' language sql immutable; +create function first_el(anyarray) returns anyelement as +'select $1[1]' language sql strict immutable; +create aggregate first_el_agg_f8(float8) ( + SFUNC = array_append, + STYPE = float8[], + FINALFUNC = first_el +); +create aggregate first_el_agg_any(anyelement) ( + SFUNC = first_el_transfn, + STYPE = anyarray, + FINALFUNC = first_el +); +select first_el_agg_f8(x::float8) from generate_series(1,10) x; + first_el_agg_f8 +----------------- + 1 +(1 row) + +select first_el_agg_any(x) from generate_series(1,10) x; + first_el_agg_any +------------------ + 1 +(1 row) + +select first_el_agg_f8(x::float8) over(order by x) from generate_series(1,10) x; + first_el_agg_f8 +----------------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(10 rows) + +select first_el_agg_any(x) over(order by x) from generate_series(1,10) x; + first_el_agg_any +------------------ + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(10 rows) + +-- check that we can apply functions taking ANYARRAY to pg_stats +select distinct array_ndims(histogram_bounds) from pg_stats +where histogram_bounds is not null; + array_ndims +------------- + 1 +(1 row) + +-- such functions must protect themselves if varying element type isn't OK +-- (WHERE clause here is to avoid possibly getting a collation error instead) +select max(histogram_bounds) from pg_stats where tablename = 'pg_am'; +ERROR: cannot compare arrays of different element types +-- another corner case is the input functions for polymorphic pseudotypes +select array_in('{1,2,3}','int4'::regtype,-1); -- this has historically worked + array_in +---------- + {1,2,3} +(1 row) + +select * from array_in('{1,2,3}','int4'::regtype,-1); -- this not +ERROR: function "array_in" in FROM has unsupported return type anyarray +LINE 1: select * from array_in('{1,2,3}','int4'::regtype,-1); + ^ +select anyrange_in('[10,20)','int4range'::regtype,-1); +ERROR: cannot accept a value of type anyrange +-- test variadic polymorphic functions +create function myleast(variadic anyarray) returns anyelement as $$ + select min($1[i]) from generate_subscripts($1,1) 
g(i) +$$ language sql immutable strict; +select myleast(10, 1, 20, 33); + myleast +--------- + 1 +(1 row) + +select myleast(1.1, 0.22, 0.55); + myleast +--------- + 0.22 +(1 row) + +select myleast('z'::text); + myleast +--------- + z +(1 row) + +select myleast(); -- fail +ERROR: function myleast() does not exist +LINE 1: select myleast(); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- test with variadic call parameter +select myleast(variadic array[1,2,3,4,-1]); + myleast +--------- + -1 +(1 row) + +select myleast(variadic array[1.1, -5.5]); + myleast +--------- + -5.5 +(1 row) + +--test with empty variadic call parameter +select myleast(variadic array[]::int[]); + myleast +--------- + +(1 row) + +-- an example with some ordinary arguments too +create function concat(text, variadic anyarray) returns text as $$ + select array_to_string($2, $1); +$$ language sql immutable strict; +select concat('%', 1, 2, 3, 4, 5); + concat +----------- + 1%2%3%4%5 +(1 row) + +select concat('|', 'a'::text, 'b', 'c'); + concat +-------- + a|b|c +(1 row) + +select concat('|', variadic array[1,2,33]); + concat +-------- + 1|2|33 +(1 row) + +select concat('|', variadic array[]::int[]); + concat +-------- + +(1 row) + +drop function concat(text, anyarray); +-- mix variadic with anyelement +create function formarray(anyelement, variadic anyarray) returns anyarray as $$ + select array_prepend($1, $2); +$$ language sql immutable strict; +select formarray(1,2,3,4,5); + formarray +------------- + {1,2,3,4,5} +(1 row) + +select formarray(1.1, variadic array[1.2,55.5]); + formarray +---------------- + {1.1,1.2,55.5} +(1 row) + +select formarray(1.1, array[1.2,55.5]); -- fail without variadic +ERROR: function formarray(numeric, numeric[]) does not exist +LINE 1: select formarray(1.1, array[1.2,55.5]); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select formarray(1, 'x'::text); -- fail, type mismatch +ERROR: function formarray(integer, text) does not exist +LINE 1: select formarray(1, 'x'::text); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select formarray(1, variadic array['x'::text]); -- fail, type mismatch +ERROR: function formarray(integer, text[]) does not exist +LINE 1: select formarray(1, variadic array['x'::text]); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
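The two type-mismatch failures just above arise because anyelement and the element type of the variadic anyarray must resolve to the same type. A sketch of the corrected calls once the arguments share one type (read-only, not part of the captured regression output):

select formarray('1'::text, 'x'::text);              -- {1,x}
select pg_typeof(formarray('1'::text, 'x'::text));   -- text[]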
+drop function formarray(anyelement, variadic anyarray); +-- test pg_typeof() function +select pg_typeof(null); -- unknown + pg_typeof +----------- + unknown +(1 row) + +select pg_typeof(0); -- integer + pg_typeof +----------- + integer +(1 row) + +select pg_typeof(0.0); -- numeric + pg_typeof +----------- + numeric +(1 row) + +select pg_typeof(1+1 = 2); -- boolean + pg_typeof +----------- + boolean +(1 row) + +select pg_typeof('x'); -- unknown + pg_typeof +----------- + unknown +(1 row) + +select pg_typeof('' || ''); -- text + pg_typeof +----------- + text +(1 row) + +select pg_typeof(pg_typeof(0)); -- regtype + pg_typeof +----------- + regtype +(1 row) + +select pg_typeof(array[1.2,55.5]); -- numeric[] + pg_typeof +----------- + numeric[] +(1 row) + +select pg_typeof(myleast(10, 1, 20, 33)); -- polymorphic input + pg_typeof +----------- + integer +(1 row) + +-- test functions with default parameters +-- test basic functionality +create function dfunc(a int = 1, int = 2) returns int as $$ + select $1 + $2; +$$ language sql; +select dfunc(); + dfunc +------- + 3 +(1 row) + +select dfunc(10); + dfunc +------- + 12 +(1 row) + +select dfunc(10, 20); + dfunc +------- + 30 +(1 row) + +select dfunc(10, 20, 30); -- fail +ERROR: function dfunc(integer, integer, integer) does not exist +LINE 1: select dfunc(10, 20, 30); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function dfunc(); -- fail +ERROR: function dfunc() does not exist +drop function dfunc(int); -- fail +ERROR: function dfunc(integer) does not exist +drop function dfunc(int, int); -- ok +-- fail: defaults must be at end of argument list +create function dfunc(a int = 1, b int) returns int as $$ + select $1 + $2; +$$ language sql; +ERROR: input parameters after one with a default value must also have defaults +-- however, this should work: +create function dfunc(a int = 1, out sum int, b int = 2) as $$ + select $1 + $2; +$$ language sql; +select dfunc(); + dfunc +------- + 3 +(1 row) + +-- verify it lists properly +\df dfunc + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+-------+------------------+-----------------------------------------------------------+------ + public | dfunc | integer | a integer DEFAULT 1, OUT sum integer, b integer DEFAULT 2 | func +(1 row) + +drop function dfunc(int, int); +-- check implicit coercion +create function dfunc(a int DEFAULT 1.0, int DEFAULT '-1') returns int as $$ + select $1 + $2; +$$ language sql; +select dfunc(); + dfunc +------- + 0 +(1 row) + +create function dfunc(a text DEFAULT 'Hello', b text DEFAULT 'World') returns text as $$ + select $1 || ', ' || $2; +$$ language sql; +select dfunc(); -- fail: which dfunc should be called? int or text +ERROR: function dfunc() is not unique +LINE 1: select dfunc(); + ^ +HINT: Could not choose a best candidate function. You might need to add explicit type casts. 
+select dfunc('Hi'); -- ok + dfunc +----------- + Hi, World +(1 row) + +select dfunc('Hi', 'City'); -- ok + dfunc +---------- + Hi, City +(1 row) + +select dfunc(0); -- ok + dfunc +------- + -1 +(1 row) + +select dfunc(10, 20); -- ok + dfunc +------- + 30 +(1 row) + +drop function dfunc(int, int); +drop function dfunc(text, text); +create function dfunc(int = 1, int = 2) returns int as $$ + select 2; +$$ language sql; +create function dfunc(int = 1, int = 2, int = 3, int = 4) returns int as $$ + select 4; +$$ language sql; +-- Now, dfunc(nargs = 2) and dfunc(nargs = 4) are ambiguous when called +-- with 0 to 2 arguments. +select dfunc(); -- fail +ERROR: function dfunc() is not unique +LINE 1: select dfunc(); + ^ +HINT: Could not choose a best candidate function. You might need to add explicit type casts. +select dfunc(1); -- fail +ERROR: function dfunc(integer) is not unique +LINE 1: select dfunc(1); + ^ +HINT: Could not choose a best candidate function. You might need to add explicit type casts. +select dfunc(1, 2); -- fail +ERROR: function dfunc(integer, integer) is not unique +LINE 1: select dfunc(1, 2); + ^ +HINT: Could not choose a best candidate function. You might need to add explicit type casts. +select dfunc(1, 2, 3); -- ok + dfunc +------- + 4 +(1 row) + +select dfunc(1, 2, 3, 4); -- ok + dfunc +------- + 4 +(1 row) + +drop function dfunc(int, int); +drop function dfunc(int, int, int, int); +-- default values are not allowed for output parameters +create function dfunc(out int = 20) returns int as $$ + select 1; +$$ language sql; +ERROR: only input parameters can have default values +-- polymorphic parameter test +create function dfunc(anyelement = 'World'::text) returns text as $$ + select 'Hello, ' || $1::text; +$$ language sql; +select dfunc(); + dfunc +-------------- + Hello, World +(1 row) + +select dfunc(0); + dfunc +---------- + Hello, 0 +(1 row) + +select dfunc(to_date('20081215','YYYYMMDD')); + dfunc +------------------- + Hello, 12-15-2008 +(1 row) + +select dfunc('City'::text); + dfunc +------------- + Hello, City +(1 row) + +drop function dfunc(anyelement); +-- check defaults for variadics +create function dfunc(a variadic int[]) returns int as +$$ select array_upper($1, 1) $$ language sql; +select dfunc(); -- fail +ERROR: function dfunc() does not exist +LINE 1: select dfunc(); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select dfunc(10); + dfunc +------- + 1 +(1 row) + +select dfunc(10,20); + dfunc +------- + 2 +(1 row) + +create or replace function dfunc(a variadic int[] default array[]::int[]) returns int as +$$ select array_upper($1, 1) $$ language sql; +select dfunc(); -- now ok + dfunc +------- + +(1 row) + +select dfunc(10); + dfunc +------- + 1 +(1 row) + +select dfunc(10,20); + dfunc +------- + 2 +(1 row) + +-- can't remove the default once it exists +create or replace function dfunc(a variadic int[]) returns int as +$$ select array_upper($1, 1) $$ language sql; +ERROR: cannot remove parameter defaults from existing function +HINT: Use DROP FUNCTION dfunc(integer[]) first. 
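As the error and HINT above show, CREATE OR REPLACE may add a parameter default but can never take one away again; removing it requires dropping the function first. A throwaway sketch, not part of the captured regression output (the name demo_def is hypothetical):

create function demo_def(a int) returns int as $$ select a $$ language sql;
create or replace function demo_def(a int default 41) returns int as
$$ select a + 1 $$ language sql;   -- adding a default via CREATE OR REPLACE is allowed
select demo_def();                 -- 42
drop function demo_def(int);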
+\df dfunc + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+-------+------------------+-------------------------------------------------+------ + public | dfunc | integer | VARIADIC a integer[] DEFAULT ARRAY[]::integer[] | func +(1 row) + +drop function dfunc(a variadic int[]); +-- Ambiguity should be reported only if there's not a better match available +create function dfunc(int = 1, int = 2, int = 3) returns int as $$ + select 3; +$$ language sql; +create function dfunc(int = 1, int = 2) returns int as $$ + select 2; +$$ language sql; +create function dfunc(text) returns text as $$ + select $1; +$$ language sql; +-- dfunc(narg=2) and dfunc(narg=3) are ambiguous +select dfunc(1); -- fail +ERROR: function dfunc(integer) is not unique +LINE 1: select dfunc(1); + ^ +HINT: Could not choose a best candidate function. You might need to add explicit type casts. +-- but this works since the ambiguous functions aren't preferred anyway +select dfunc('Hi'); + dfunc +------- + Hi +(1 row) + +drop function dfunc(int, int, int); +drop function dfunc(int, int); +drop function dfunc(text); +-- +-- Tests for named- and mixed-notation function calling +-- +create function dfunc(a int, b int, c int = 0, d int = 0) + returns table (a int, b int, c int, d int) as $$ + select $1, $2, $3, $4; +$$ language sql; +select (dfunc(10,20,30)).*; + a | b | c | d +----+----+----+--- + 10 | 20 | 30 | 0 +(1 row) + +select (dfunc(a := 10, b := 20, c := 30)).*; + a | b | c | d +----+----+----+--- + 10 | 20 | 30 | 0 +(1 row) + +select * from dfunc(a := 10, b := 20); + a | b | c | d +----+----+---+--- + 10 | 20 | 0 | 0 +(1 row) + +select * from dfunc(b := 10, a := 20); + a | b | c | d +----+----+---+--- + 20 | 10 | 0 | 0 +(1 row) + +select * from dfunc(0); -- fail +ERROR: function dfunc(integer) does not exist +LINE 1: select * from dfunc(0); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from dfunc(1,2); + a | b | c | d +---+---+---+--- + 1 | 2 | 0 | 0 +(1 row) + +select * from dfunc(1,2,c := 3); + a | b | c | d +---+---+---+--- + 1 | 2 | 3 | 0 +(1 row) + +select * from dfunc(1,2,d := 3); + a | b | c | d +---+---+---+--- + 1 | 2 | 0 | 3 +(1 row) + +select * from dfunc(x := 20, b := 10, x := 30); -- fail, duplicate name +ERROR: argument name "x" used more than once +LINE 1: select * from dfunc(x := 20, b := 10, x := 30); + ^ +select * from dfunc(10, b := 20, 30); -- fail, named args must be last +ERROR: positional argument cannot follow named argument +LINE 1: select * from dfunc(10, b := 20, 30); + ^ +select * from dfunc(x := 10, b := 20, c := 30); -- fail, unknown param +ERROR: function dfunc(x => integer, b => integer, c => integer) does not exist +LINE 1: select * from dfunc(x := 10, b := 20, c := 30); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from dfunc(10, 10, a := 20); -- fail, a overlaps positional parameter +ERROR: function dfunc(integer, integer, a => integer) does not exist +LINE 1: select * from dfunc(10, 10, a := 20); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select * from dfunc(1,c := 2,d := 3); -- fail, no value for b +ERROR: function dfunc(integer, c => integer, d => integer) does not exist +LINE 1: select * from dfunc(1,c := 2,d := 3); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
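The last failure above is only missing a value for b: with mixed notation the positional argument fills a, the named arguments fill b and d, and c falls back to its default. A sketch of the corrected call (read-only, not part of the captured regression output):

select * from dfunc(1, b := 2, d := 3);   -- returns (1, 2, 0, 3)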
+drop function dfunc(int, int, int, int); +-- test with different parameter types +create function dfunc(a varchar, b numeric, c date = current_date) + returns table (a varchar, b numeric, c date) as $$ + select $1, $2, $3; +$$ language sql; +select (dfunc('Hello World', 20, '2009-07-25'::date)).*; + a | b | c +-------------+----+------------ + Hello World | 20 | 07-25-2009 +(1 row) + +select * from dfunc('Hello World', 20, '2009-07-25'::date); + a | b | c +-------------+----+------------ + Hello World | 20 | 07-25-2009 +(1 row) + +select * from dfunc(c := '2009-07-25'::date, a := 'Hello World', b := 20); + a | b | c +-------------+----+------------ + Hello World | 20 | 07-25-2009 +(1 row) + +select * from dfunc('Hello World', b := 20, c := '2009-07-25'::date); + a | b | c +-------------+----+------------ + Hello World | 20 | 07-25-2009 +(1 row) + +select * from dfunc('Hello World', c := '2009-07-25'::date, b := 20); + a | b | c +-------------+----+------------ + Hello World | 20 | 07-25-2009 +(1 row) + +select * from dfunc('Hello World', c := 20, b := '2009-07-25'::date); -- fail +ERROR: function dfunc(unknown, c => integer, b => date) does not exist +LINE 1: select * from dfunc('Hello World', c := 20, b := '2009-07-25... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function dfunc(varchar, numeric, date); +-- test out parameters with named params +create function dfunc(a varchar = 'def a', out _a varchar, c numeric = NULL, out _c numeric) +returns record as $$ + select $1, $2; +$$ language sql; +select (dfunc()).*; + _a | _c +-------+---- + def a | +(1 row) + +select * from dfunc(); + _a | _c +-------+---- + def a | +(1 row) + +select * from dfunc('Hello', 100); + _a | _c +-------+----- + Hello | 100 +(1 row) + +select * from dfunc(a := 'Hello', c := 100); + _a | _c +-------+----- + Hello | 100 +(1 row) + +select * from dfunc(c := 100, a := 'Hello'); + _a | _c +-------+----- + Hello | 100 +(1 row) + +select * from dfunc('Hello'); + _a | _c +-------+---- + Hello | +(1 row) + +select * from dfunc('Hello', c := 100); + _a | _c +-------+----- + Hello | 100 +(1 row) + +select * from dfunc(c := 100); + _a | _c +-------+----- + def a | 100 +(1 row) + +-- fail, can no longer change an input parameter's name +create or replace function dfunc(a varchar = 'def a', out _a varchar, x numeric = NULL, out _c numeric) +returns record as $$ + select $1, $2; +$$ language sql; +ERROR: cannot change name of input parameter "c" +HINT: Use DROP FUNCTION dfunc(character varying,numeric) first. +create or replace function dfunc(a varchar = 'def a', out _a varchar, numeric = NULL, out _c numeric) +returns record as $$ + select $1, $2; +$$ language sql; +ERROR: cannot change name of input parameter "c" +HINT: Use DROP FUNCTION dfunc(character varying,numeric) first. 
+drop function dfunc(varchar, numeric); +--fail, named parameters are not unique +create function testpolym(a int, a int) returns int as $$ select 1;$$ language sql; +ERROR: parameter name "a" used more than once +create function testpolym(int, out a int, out a int) returns int as $$ select 1;$$ language sql; +ERROR: parameter name "a" used more than once +create function testpolym(out a int, inout a int) returns int as $$ select 1;$$ language sql; +ERROR: parameter name "a" used more than once +create function testpolym(a int, inout a int) returns int as $$ select 1;$$ language sql; +ERROR: parameter name "a" used more than once +-- valid +create function testpolym(a int, out a int) returns int as $$ select $1;$$ language sql; +select testpolym(37); + testpolym +----------- + 37 +(1 row) + +drop function testpolym(int); +create function testpolym(a int) returns table(a int) as $$ select $1;$$ language sql; +select * from testpolym(37); + a +---- + 37 +(1 row) + +drop function testpolym(int); +-- test polymorphic params and defaults +create function dfunc(a anyelement, b anyelement = null, flag bool = true) +returns anyelement as $$ + select case when $3 then $1 else $2 end; +$$ language sql; +select dfunc(1,2); + dfunc +------- + 1 +(1 row) + +select dfunc('a'::text, 'b'); -- positional notation with default + dfunc +------- + a +(1 row) + +select dfunc(a := 1, b := 2); + dfunc +------- + 1 +(1 row) + +select dfunc(a := 'a'::text, b := 'b'); + dfunc +------- + a +(1 row) + +select dfunc(a := 'a'::text, b := 'b', flag := false); -- named notation + dfunc +------- + b +(1 row) + +select dfunc(b := 'b'::text, a := 'a'); -- named notation with default + dfunc +------- + a +(1 row) + +select dfunc(a := 'a'::text, flag := true); -- named notation with default + dfunc +------- + a +(1 row) + +select dfunc(a := 'a'::text, flag := false); -- named notation with default + dfunc +------- + +(1 row) + +select dfunc(b := 'b'::text, a := 'a', flag := true); -- named notation + dfunc +------- + a +(1 row) + +select dfunc('a'::text, 'b', false); -- full positional notation + dfunc +------- + b +(1 row) + +select dfunc('a'::text, 'b', flag := false); -- mixed notation + dfunc +------- + b +(1 row) + +select dfunc('a'::text, 'b', true); -- full positional notation + dfunc +------- + a +(1 row) + +select dfunc('a'::text, 'b', flag := true); -- mixed notation + dfunc +------- + a +(1 row) + +-- ansi/sql syntax +select dfunc(a => 1, b => 2); + dfunc +------- + 1 +(1 row) + +select dfunc(a => 'a'::text, b => 'b'); + dfunc +------- + a +(1 row) + +select dfunc(a => 'a'::text, b => 'b', flag => false); -- named notation + dfunc +------- + b +(1 row) + +select dfunc(b => 'b'::text, a => 'a'); -- named notation with default + dfunc +------- + a +(1 row) + +select dfunc(a => 'a'::text, flag => true); -- named notation with default + dfunc +------- + a +(1 row) + +select dfunc(a => 'a'::text, flag => false); -- named notation with default + dfunc +------- + +(1 row) + +select dfunc(b => 'b'::text, a => 'a', flag => true); -- named notation + dfunc +------- + a +(1 row) + +select dfunc('a'::text, 'b', false); -- full positional notation + dfunc +------- + b +(1 row) + +select dfunc('a'::text, 'b', flag => false); -- mixed notation + dfunc +------- + b +(1 row) + +select dfunc('a'::text, 'b', true); -- full positional notation + dfunc +------- + a +(1 row) + +select dfunc('a'::text, 'b', flag => true); -- mixed notation + dfunc +------- + a +(1 row) + +-- this tests lexer edge cases around => +select dfunc(a =>-1); + 
dfunc +------- + -1 +(1 row) + +select dfunc(a =>+1); + dfunc +------- + 1 +(1 row) + +select dfunc(a =>/**/1); + dfunc +------- + 1 +(1 row) + +select dfunc(a =>--comment to be removed by psql + 1); + dfunc +------- + 1 +(1 row) + +-- need DO to protect the -- from psql +do $$ + declare r integer; + begin + select dfunc(a=>-- comment + 1) into r; + raise info 'r = %', r; + end; +$$; +INFO: r = 1 +-- check reverse-listing of named-arg calls +CREATE VIEW dfview AS + SELECT q1, q2, + dfunc(q1,q2, flag := q1>q2) as c3, + dfunc(q1, flag := q1 q1 > q2) AS c3, + dfunc(q1, flag => q1 < q2, b => q2) AS c4 + FROM int8_tbl; + +drop view dfview; +drop function dfunc(anyelement, anyelement, bool); +-- +-- Tests for ANYCOMPATIBLE polymorphism family +-- +create function anyctest(anycompatible, anycompatible) +returns anycompatible as $$ + select greatest($1, $2) +$$ language sql; +select x, pg_typeof(x) from anyctest(11, 12) x; + x | pg_typeof +----+----------- + 12 | integer +(1 row) + +select x, pg_typeof(x) from anyctest(11, 12.3) x; + x | pg_typeof +------+----------- + 12.3 | numeric +(1 row) + +select x, pg_typeof(x) from anyctest(11, point(1,2)) x; -- fail +ERROR: function anyctest(integer, point) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, point(1,2)) x; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select x, pg_typeof(x) from anyctest('11', '12.3') x; -- defaults to text + x | pg_typeof +------+----------- + 12.3 | text +(1 row) + +drop function anyctest(anycompatible, anycompatible); +create function anyctest(anycompatible, anycompatible) +returns anycompatiblearray as $$ + select array[$1, $2] +$$ language sql; +select x, pg_typeof(x) from anyctest(11, 12) x; + x | pg_typeof +---------+----------- + {11,12} | integer[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, 12.3) x; + x | pg_typeof +-----------+----------- + {11,12.3} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, array[1,2]) x; -- fail +ERROR: function anyctest(integer, integer[]) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, array[1,2]) x; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyctest(anycompatible, anycompatible); +create function anyctest(anycompatible, anycompatiblearray) +returns anycompatiblearray as $$ + select array[$1] || $2 +$$ language sql; +select x, pg_typeof(x) from anyctest(11, array[12]) x; + x | pg_typeof +---------+----------- + {11,12} | integer[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, array[12.3]) x; + x | pg_typeof +-----------+----------- + {11,12.3} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(12.3, array[13]) x; + x | pg_typeof +-----------+----------- + {12.3,13} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(12.3, '{13,14.4}') x; + x | pg_typeof +----------------+----------- + {12.3,13,14.4} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, array[point(1,2)]) x; -- fail +ERROR: function anyctest(integer, point[]) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, array[point(1,2)]) ... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+select x, pg_typeof(x) from anyctest(11, 12) x; -- fail +ERROR: function anyctest(integer, integer) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyctest(anycompatible, anycompatiblearray); +create function anyctest(anycompatible, anycompatiblerange) +returns anycompatiblerange as $$ + select $2 +$$ language sql; +select x, pg_typeof(x) from anyctest(11, int4range(4,7)) x; + x | pg_typeof +-------+----------- + [4,7) | int4range +(1 row) + +select x, pg_typeof(x) from anyctest(11, numrange(4,7)) x; + x | pg_typeof +-------+----------- + [4,7) | numrange +(1 row) + +select x, pg_typeof(x) from anyctest(11, 12) x; -- fail +ERROR: function anyctest(integer, integer) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select x, pg_typeof(x) from anyctest(11.2, int4range(4,7)) x; -- fail +ERROR: function anyctest(numeric, int4range) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11.2, int4range(4,7)) x... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select x, pg_typeof(x) from anyctest(11.2, '[4,7)') x; -- fail +ERROR: could not determine polymorphic type anycompatiblerange because input has type unknown +drop function anyctest(anycompatible, anycompatiblerange); +create function anyctest(anycompatiblerange, anycompatiblerange) +returns anycompatible as $$ + select lower($1) + upper($2) +$$ language sql; +select x, pg_typeof(x) from anyctest(int4range(11,12), int4range(4,7)) x; + x | pg_typeof +----+----------- + 18 | integer +(1 row) + +select x, pg_typeof(x) from anyctest(int4range(11,12), numrange(4,7)) x; -- fail +ERROR: function anyctest(int4range, numrange) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(int4range(11,12), numra... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyctest(anycompatiblerange, anycompatiblerange); +-- fail, can't infer result type: +create function anyctest(anycompatible) +returns anycompatiblerange as $$ + select $1 +$$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. +create function anyctest(anycompatible, anycompatiblemultirange) +returns anycompatiblemultirange as $$ + select $2 +$$ language sql; +select x, pg_typeof(x) from anyctest(11, multirange(int4range(4,7))) x; + x | pg_typeof +---------+---------------- + {[4,7)} | int4multirange +(1 row) + +select x, pg_typeof(x) from anyctest(11, multirange(numrange(4,7))) x; + x | pg_typeof +---------+--------------- + {[4,7)} | nummultirange +(1 row) + +select x, pg_typeof(x) from anyctest(11, 12) x; -- fail +ERROR: function anyctest(integer, integer) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, 12) x; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select x, pg_typeof(x) from anyctest(11.2, multirange(int4range(4,7))) x; -- fail +ERROR: function anyctest(numeric, int4multirange) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11.2, multirange(int4ra... + ^ +HINT: No function matches the given name and argument types. 
You might need to add explicit type casts. +select x, pg_typeof(x) from anyctest(11.2, '{[4,7)}') x; -- fail +ERROR: could not determine polymorphic type anycompatiblemultirange because input has type unknown +drop function anyctest(anycompatible, anycompatiblemultirange); +create function anyctest(anycompatiblemultirange, anycompatiblemultirange) +returns anycompatible as $$ + select lower($1) + upper($2) +$$ language sql; +select x, pg_typeof(x) from anyctest(multirange(int4range(11,12)), multirange(int4range(4,7))) x; + x | pg_typeof +----+----------- + 18 | integer +(1 row) + +select x, pg_typeof(x) from anyctest(multirange(int4range(11,12)), multirange(numrange(4,7))) x; -- fail +ERROR: function anyctest(int4multirange, nummultirange) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(multirange(int4range(11... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyctest(anycompatiblemultirange, anycompatiblemultirange); +-- fail, can't infer result type: +create function anyctest(anycompatible) +returns anycompatiblemultirange as $$ + select $1 +$$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblemultirange requires at least one input of type anycompatiblerange or anycompatiblemultirange. +create function anyctest(anycompatiblenonarray, anycompatiblenonarray) +returns anycompatiblearray as $$ + select array[$1, $2] +$$ language sql; +select x, pg_typeof(x) from anyctest(11, 12) x; + x | pg_typeof +---------+----------- + {11,12} | integer[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, 12.3) x; + x | pg_typeof +-----------+----------- + {11,12.3} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(array[11], array[1,2]) x; -- fail +ERROR: function anyctest(integer[], integer[]) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(array[11], array[1,2]) ... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyctest(anycompatiblenonarray, anycompatiblenonarray); +create function anyctest(a anyelement, b anyarray, + c anycompatible, d anycompatible) +returns anycompatiblearray as $$ + select array[c, d] +$$ language sql; +select x, pg_typeof(x) from anyctest(11, array[1, 2], 42, 34.5) x; + x | pg_typeof +-----------+----------- + {42,34.5} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, array[1, 2], point(1,2), point(3,4)) x; + x | pg_typeof +-------------------+----------- + {"(1,2)","(3,4)"} | point[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, '{1,2}', point(1,2), '(3,4)') x; + x | pg_typeof +-------------------+----------- + {"(1,2)","(3,4)"} | point[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, array[1, 2.2], 42, 34.5) x; -- fail +ERROR: function anyctest(integer, numeric[], integer, numeric) does not exist +LINE 1: select x, pg_typeof(x) from anyctest(11, array[1, 2.2], 42, ... + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
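The failure above illustrates that the two polymorphic families are resolved independently: the anyelement/anyarray positions must agree on one exact type, while the anycompatible positions are merged to a common type on their own. A sketch making both resolutions visible, not part of the captured regression output (the name fam_demo is hypothetical):

create function fam_demo(a anyelement, b anyelement,
                         c anycompatible, d anycompatible) returns text as $$
  -- report the type each family resolved to
  select pg_typeof(a)::text || ' / ' || pg_typeof(c)::text
$$ language sql;
select fam_demo(1, 2, 3, 4.5);     -- integer / numeric
select fam_demo(1, 2.5, 3, 4.5);   -- fails: the anyelement arguments differ in type
drop function fam_demo(anyelement, anyelement, anycompatible, anycompatible);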
+drop function anyctest(a anyelement, b anyarray, + c anycompatible, d anycompatible); +create function anyctest(variadic anycompatiblearray) +returns anycompatiblearray as $$ + select $1 +$$ language sql; +select x, pg_typeof(x) from anyctest(11, 12) x; + x | pg_typeof +---------+----------- + {11,12} | integer[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, 12.2) x; + x | pg_typeof +-----------+----------- + {11,12.2} | numeric[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, '12') x; + x | pg_typeof +---------+----------- + {11,12} | integer[] +(1 row) + +select x, pg_typeof(x) from anyctest(11, '12.2') x; -- fail +ERROR: invalid input syntax for type integer: "12.2" +LINE 1: select x, pg_typeof(x) from anyctest(11, '12.2') x; + ^ +select x, pg_typeof(x) from anyctest(variadic array[11, 12]) x; + x | pg_typeof +---------+----------- + {11,12} | integer[] +(1 row) + +select x, pg_typeof(x) from anyctest(variadic array[11, 12.2]) x; + x | pg_typeof +-----------+----------- + {11,12.2} | numeric[] +(1 row) + +drop function anyctest(variadic anycompatiblearray); diff --git a/src/test/regress/expected/portals.out b/src/test/regress/expected/portals.out new file mode 100644 index 0000000..f71e0b3 --- /dev/null +++ b/src/test/regress/expected/portals.out @@ -0,0 +1,1563 @@ +-- +-- Cursor regression tests +-- +BEGIN; +DECLARE foo1 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo2 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo3 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo4 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo5 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo6 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo7 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo8 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo9 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo10 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo11 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo12 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo13 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo14 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo15 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo16 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo17 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo18 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo19 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo20 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo21 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +DECLARE foo22 SCROLL CURSOR FOR SELECT * FROM tenk2; +DECLARE foo23 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +FETCH 1 in foo1; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(1 row) + +FETCH 2 in foo2; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 
0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx +(2 rows) + +FETCH 3 in foo3; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx +(3 rows) + +FETCH 4 in foo4; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx +(4 rows) + +FETCH 5 in foo5; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx +(5 rows) + +FETCH 6 in foo6; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx +(6 rows) + +FETCH 7 in foo7; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx +(7 rows) + +FETCH 8 in foo8; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx +(8 rows) + +FETCH 9 in foo9; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx +(9 rows) + +FETCH 10 in foo10; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 
1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx +(10 rows) + +FETCH 11 in foo11; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx +(11 rows) + +FETCH 12 in foo12; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 
1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx +(12 rows) + +FETCH 13 in foo13; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx +(13 rows) + +FETCH 14 in foo14; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx +(14 rows) + +FETCH 15 in foo15; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | 
stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx +(15 rows) + +FETCH 16 in foo16; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx +(16 rows) + +FETCH 17 in foo17; + unique1 | unique2 | two | four | ten | 
twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx +(17 rows) + +FETCH 18 in foo18; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | 
HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx +(18 rows) + +FETCH 19 in foo19; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx +(19 rows) + +FETCH 20 in foo20; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 
6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx + 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx +(20 rows) + +FETCH 21 in foo21; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx + 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx + 9460 | 20 | 0 | 0 | 0 | 0 | 60 | 460 | 1460 | 4460 | 
9460 | 120 | 121 | WZAAAA | UAAAAA | AAAAxx +(21 rows) + +FETCH 22 in foo22; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx + 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx + 9460 | 20 | 0 | 0 | 0 | 0 | 60 | 460 | 1460 | 4460 | 9460 | 120 | 121 | WZAAAA | UAAAAA | AAAAxx + 59 | 21 | 1 | 3 | 9 | 19 | 59 | 59 | 59 | 59 | 59 | 118 | 119 | HCAAAA | VAAAAA | HHHHxx +(22 rows) + +FETCH 23 in foo23; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 
3 | TXAAAA | HAAAAA | VVVVxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx + 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx + 9460 | 20 | 0 | 0 | 0 | 0 | 60 | 460 | 1460 | 4460 | 9460 | 120 | 121 | WZAAAA | UAAAAA | AAAAxx + 59 | 21 | 1 | 3 | 9 | 19 | 59 | 59 | 59 | 59 | 59 | 118 | 119 | HCAAAA | VAAAAA | HHHHxx + 8020 | 22 | 0 | 0 | 0 | 0 | 20 | 20 | 20 | 3020 | 8020 | 40 | 41 | MWAAAA | WAAAAA | OOOOxx +(23 rows) + +FETCH backward 1 in foo23; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 59 | 21 | 1 | 3 | 9 | 19 | 59 | 59 | 59 | 59 | 59 | 118 | 119 | HCAAAA | VAAAAA | HHHHxx +(1 row) + +FETCH backward 2 in foo22; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 9460 | 20 | 0 | 0 | 0 | 0 | 60 | 460 | 1460 | 4460 | 9460 | 120 | 121 | WZAAAA | UAAAAA | AAAAxx + 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx +(2 rows) + +FETCH backward 3 in foo21; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 6969 | 19 | 1 | 1 | 9 | 9 | 69 | 969 | 969 | 1969 | 6969 | 138 | 139 | BIAAAA | TAAAAA | VVVVxx + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx +(3 rows) + +FETCH backward 4 in foo20; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 6621 | 18 | 1 | 1 | 1 | 1 | 21 | 621 | 621 | 1621 | 6621 | 42 | 43 | RUAAAA | SAAAAA | OOOOxx + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 
| 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx +(4 rows) + +FETCH backward 5 in foo19; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 5785 | 17 | 1 | 1 | 5 | 5 | 85 | 785 | 1785 | 785 | 5785 | 170 | 171 | NOAAAA | RAAAAA | HHHHxx + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx +(5 rows) + +FETCH backward 6 in foo18; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 5387 | 16 | 1 | 3 | 7 | 7 | 87 | 387 | 1387 | 387 | 5387 | 174 | 175 | FZAAAA | QAAAAA | AAAAxx + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx +(6 rows) + +FETCH backward 7 in foo17; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 5006 | 15 | 0 | 2 | 6 | 6 | 6 | 6 | 1006 | 6 | 5006 | 12 | 13 | OKAAAA | PAAAAA | VVVVxx + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx +(7 rows) + +FETCH backward 8 in foo16; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 5471 | 14 | 1 | 3 | 1 | 11 | 71 | 471 | 1471 | 471 | 5471 | 142 | 143 | LCAAAA | OAAAAA | OOOOxx + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5222 | 12 | 
0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx +(8 rows) + +FETCH backward 9 in foo15; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 6243 | 13 | 1 | 3 | 3 | 3 | 43 | 243 | 243 | 1243 | 6243 | 86 | 87 | DGAAAA | NAAAAA | HHHHxx + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx +(9 rows) + +FETCH backward 10 in foo14; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 5222 | 12 | 0 | 2 | 2 | 2 | 22 | 222 | 1222 | 222 | 5222 | 44 | 45 | WSAAAA | MAAAAA | AAAAxx + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx +(10 rows) + +FETCH backward 11 in foo13; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1504 | 11 | 0 | 0 | 4 | 4 | 4 | 504 | 1504 | 1504 | 1504 | 8 | 9 | WFAAAA | LAAAAA | VVVVxx + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 
29 | OYAAAA | KAAAAA | OOOOxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx +(11 rows) + +FETCH backward 12 in foo12; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1314 | 10 | 0 | 2 | 4 | 14 | 14 | 314 | 1314 | 1314 | 1314 | 28 | 29 | OYAAAA | KAAAAA | OOOOxx + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(11 rows) + +FETCH backward 13 in foo11; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 3043 | 9 | 1 | 3 | 3 | 3 | 43 | 43 | 1043 | 3043 | 3043 | 86 | 87 | BNAAAA | JAAAAA | HHHHxx + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 
| MAAAAA | AAAAAA | AAAAxx +(10 rows) + +FETCH backward 14 in foo10; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 4321 | 8 | 1 | 1 | 1 | 1 | 21 | 321 | 321 | 4321 | 4321 | 42 | 43 | FKAAAA | IAAAAA | AAAAxx + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(9 rows) + +FETCH backward 15 in foo9; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 6701 | 7 | 1 | 1 | 1 | 1 | 1 | 701 | 701 | 1701 | 6701 | 2 | 3 | TXAAAA | HAAAAA | VVVVxx + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(8 rows) + +FETCH backward 16 in foo8; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 5057 | 6 | 1 | 1 | 7 | 17 | 57 | 57 | 1057 | 57 | 5057 | 114 | 115 | NMAAAA | GAAAAA | OOOOxx + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(7 rows) + +FETCH backward 17 in foo7; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8009 | 5 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 3009 | 8009 | 18 | 19 | BWAAAA | FAAAAA | HHHHxx + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(6 rows) + +FETCH backward 18 in foo6; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 7164 | 4 | 0 | 0 | 4 | 4 | 64 | 164 | 1164 | 2164 | 7164 | 128 | 129 | OPAAAA | EAAAAA | AAAAxx + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(5 rows) + +FETCH backward 19 in foo5; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(4 rows) + +FETCH backward 20 in foo4; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(3 rows) + +FETCH backward 21 in foo3; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(2 rows) + +FETCH backward 22 in foo2; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand 
| twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(1 row) + +FETCH backward 23 in foo1; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- +(0 rows) + +CLOSE foo1; +CLOSE foo2; +CLOSE foo3; +CLOSE foo4; +CLOSE foo5; +CLOSE foo6; +CLOSE foo7; +CLOSE foo8; +CLOSE foo9; +CLOSE foo10; +CLOSE foo11; +CLOSE foo12; +-- leave some cursors open, to test that auto-close works. +-- record this in the system view as well (don't query the time field there +-- however) +SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors ORDER BY 1; + name | statement | is_holdable | is_binary | is_scrollable +-------+-----------------------------------------------------------------------+-------------+-----------+--------------- + foo13 | DECLARE foo13 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; | f | f | t + foo14 | DECLARE foo14 SCROLL CURSOR FOR SELECT * FROM tenk2; | f | f | t + foo15 | DECLARE foo15 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; | f | f | t + foo16 | DECLARE foo16 SCROLL CURSOR FOR SELECT * FROM tenk2; | f | f | t + foo17 | DECLARE foo17 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; | f | f | t + foo18 | DECLARE foo18 SCROLL CURSOR FOR SELECT * FROM tenk2; | f | f | t + foo19 | DECLARE foo19 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; | f | f | t + foo20 | DECLARE foo20 SCROLL CURSOR FOR SELECT * FROM tenk2; | f | f | t + foo21 | DECLARE foo21 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; | f | f | t + foo22 | DECLARE foo22 SCROLL CURSOR FOR SELECT * FROM tenk2; | f | f | t + foo23 | DECLARE foo23 SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; | f | f | t +(11 rows) + +END; +SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable +------+-----------+-------------+-----------+--------------- +(0 rows) + +-- +-- NO SCROLL disallows backward fetching +-- +BEGIN; +DECLARE foo24 NO SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +FETCH 1 FROM foo24; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(1 row) + +FETCH BACKWARD 1 FROM foo24; -- should fail +ERROR: cursor can only scan forward +HINT: Declare it with SCROLL option to enable backward scan. 
+END; +BEGIN; +DECLARE foo24 NO SCROLL CURSOR FOR SELECT * FROM tenk1 ORDER BY unique2; +FETCH 1 FROM foo24; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(1 row) + +FETCH ABSOLUTE 2 FROM foo24; -- allowed + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx +(1 row) + +FETCH ABSOLUTE 1 FROM foo24; -- should fail +ERROR: cursor can only scan forward +HINT: Declare it with SCROLL option to enable backward scan. +END; +-- +-- Cursors outside transaction blocks +-- +SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable +------+-----------+-------------+-----------+--------------- +(0 rows) + +BEGIN; +DECLARE foo25 SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2; +FETCH FROM foo25; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(1 row) + +FETCH FROM foo25; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx +(1 row) + +COMMIT; +FETCH FROM foo25; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx +(1 row) + +FETCH BACKWARD FROM foo25; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx +(1 row) + +FETCH ABSOLUTE -1 FROM foo25; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 2968 | 9999 | 0 | 0 | 8 | 8 | 
68 | 968 | 968 | 2968 | 2968 | 136 | 137 | EKAAAA | PUOAAA | VVVVxx +(1 row) + +SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable +-------+----------------------------------------------------------------+-------------+-----------+--------------- + foo25 | DECLARE foo25 SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2; | t | f | t +(1 row) + +CLOSE foo25; +BEGIN; +DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2; +FETCH FROM foo25ns; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 8800 | 0 | 0 | 0 | 0 | 0 | 0 | 800 | 800 | 3800 | 8800 | 0 | 1 | MAAAAA | AAAAAA | AAAAxx +(1 row) + +FETCH FROM foo25ns; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1891 | 1 | 1 | 3 | 1 | 11 | 91 | 891 | 1891 | 1891 | 1891 | 182 | 183 | TUAAAA | BAAAAA | HHHHxx +(1 row) + +COMMIT; +FETCH FROM foo25ns; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 3420 | 2 | 0 | 0 | 0 | 0 | 20 | 420 | 1420 | 3420 | 3420 | 40 | 41 | OBAAAA | CAAAAA | OOOOxx +(1 row) + +FETCH ABSOLUTE 4 FROM foo25ns; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 9850 | 3 | 0 | 2 | 0 | 10 | 50 | 850 | 1850 | 4850 | 9850 | 100 | 101 | WOAAAA | DAAAAA | VVVVxx +(1 row) + +FETCH ABSOLUTE 4 FROM foo25ns; -- fail +ERROR: cursor can only scan forward +HINT: Declare it with SCROLL option to enable backward scan. 
+SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable +---------+---------------------------------------------------------------------+-------------+-----------+--------------- + foo25ns | DECLARE foo25ns NO SCROLL CURSOR WITH HOLD FOR SELECT * FROM tenk2; | t | f | f +(1 row) + +CLOSE foo25ns; +-- +-- ROLLBACK should close holdable cursors +-- +BEGIN; +DECLARE foo26 CURSOR WITH HOLD FOR SELECT * FROM tenk1 ORDER BY unique2; +ROLLBACK; +-- should fail +FETCH FROM foo26; +ERROR: cursor "foo26" does not exist +-- +-- Parameterized DECLARE needs to insert param values into the cursor portal +-- +BEGIN; +CREATE FUNCTION declares_cursor(text) + RETURNS void + AS 'DECLARE c CURSOR FOR SELECT stringu1 FROM tenk1 WHERE stringu1 LIKE $1;' + LANGUAGE SQL; +SELECT declares_cursor('AB%'); + declares_cursor +----------------- + +(1 row) + +FETCH ALL FROM c; + stringu1 +---------- + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA + ABAAAA +(15 rows) + +ROLLBACK; +-- +-- Test behavior of both volatile and stable functions inside a cursor; +-- in particular we want to see what happens during commit of a holdable +-- cursor +-- +create temp table tt1(f1 int); +create function count_tt1_v() returns int8 as +'select count(*) from tt1' language sql volatile; +create function count_tt1_s() returns int8 as +'select count(*) from tt1' language sql stable; +begin; +insert into tt1 values(1); +declare c1 cursor for select count_tt1_v(), count_tt1_s(); +insert into tt1 values(2); +fetch all from c1; + count_tt1_v | count_tt1_s +-------------+------------- + 2 | 1 +(1 row) + +rollback; +begin; +insert into tt1 values(1); +declare c2 cursor with hold for select count_tt1_v(), count_tt1_s(); +insert into tt1 values(2); +commit; +delete from tt1; +fetch all from c2; + count_tt1_v | count_tt1_s +-------------+------------- + 2 | 1 +(1 row) + +drop function count_tt1_v(); +drop function count_tt1_s(); +-- Create a cursor with the BINARY option and check the pg_cursors view +BEGIN; +SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors; + name | statement | is_holdable | is_binary | is_scrollable +------+----------------------------------------------------------------------+-------------+-----------+--------------- + c2 | declare c2 cursor with hold for select count_tt1_v(), count_tt1_s(); | t | f | f +(1 row) + +DECLARE bc BINARY CURSOR FOR SELECT * FROM tenk1; +SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors ORDER BY 1; + name | statement | is_holdable | is_binary | is_scrollable +------+----------------------------------------------------------------------+-------------+-----------+--------------- + bc | DECLARE bc BINARY CURSOR FOR SELECT * FROM tenk1; | f | t | t + c2 | declare c2 cursor with hold for select count_tt1_v(), count_tt1_s(); | t | f | f +(2 rows) + +ROLLBACK; +-- We should not see the portal that is created internally to +-- implement EXECUTE in pg_cursors +PREPARE cprep AS + SELECT name, statement, is_holdable, is_binary, is_scrollable FROM pg_cursors; +EXECUTE cprep; + name | statement | is_holdable | is_binary | is_scrollable +------+----------------------------------------------------------------------+-------------+-----------+--------------- + c2 | declare c2 cursor with hold for select count_tt1_v(), count_tt1_s(); | t | f | f +(1 row) + +-- test CLOSE ALL; +SELECT name FROM 
pg_cursors ORDER BY 1; + name +------ + c2 +(1 row) + +CLOSE ALL; +SELECT name FROM pg_cursors ORDER BY 1; + name +------ +(0 rows) + +BEGIN; +DECLARE foo1 CURSOR WITH HOLD FOR SELECT 1; +DECLARE foo2 CURSOR WITHOUT HOLD FOR SELECT 1; +SELECT name FROM pg_cursors ORDER BY 1; + name +------ + foo1 + foo2 +(2 rows) + +CLOSE ALL; +SELECT name FROM pg_cursors ORDER BY 1; + name +------ +(0 rows) + +COMMIT; +-- +-- Tests for updatable cursors +-- +CREATE TEMP TABLE uctest(f1 int, f2 text); +INSERT INTO uctest VALUES (1, 'one'), (2, 'two'), (3, 'three'); +SELECT * FROM uctest; + f1 | f2 +----+------- + 1 | one + 2 | two + 3 | three +(3 rows) + +-- Check DELETE WHERE CURRENT +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest; +FETCH 2 FROM c1; + f1 | f2 +----+----- + 1 | one + 2 | two +(2 rows) + +DELETE FROM uctest WHERE CURRENT OF c1; +-- should show deletion +SELECT * FROM uctest; + f1 | f2 +----+------- + 1 | one + 3 | three +(2 rows) + +-- cursor did not move +FETCH ALL FROM c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +-- cursor is insensitive +MOVE BACKWARD ALL IN c1; +FETCH ALL FROM c1; + f1 | f2 +----+------- + 1 | one + 2 | two + 3 | three +(3 rows) + +COMMIT; +-- should still see deletion +SELECT * FROM uctest; + f1 | f2 +----+------- + 1 | one + 3 | three +(2 rows) + +-- Check UPDATE WHERE CURRENT; this time use FOR UPDATE +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest FOR UPDATE; +FETCH c1; + f1 | f2 +----+----- + 1 | one +(1 row) + +UPDATE uctest SET f1 = 8 WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+------- + 3 | three + 8 | one +(2 rows) + +COMMIT; +SELECT * FROM uctest; + f1 | f2 +----+------- + 3 | three + 8 | one +(2 rows) + +-- Check repeated-update and update-then-delete cases +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest; +FETCH c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+------- + 8 | one + 13 | three +(2 rows) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+------- + 8 | one + 23 | three +(2 rows) + +-- insensitive cursor should not show effects of updates or deletes +FETCH RELATIVE 0 FROM c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +DELETE FROM uctest WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+----- + 8 | one +(1 row) + +DELETE FROM uctest WHERE CURRENT OF c1; -- no-op +SELECT * FROM uctest; + f1 | f2 +----+----- + 8 | one +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; -- no-op +SELECT * FROM uctest; + f1 | f2 +----+----- + 8 | one +(1 row) + +FETCH RELATIVE 0 FROM c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +ROLLBACK; +SELECT * FROM uctest; + f1 | f2 +----+------- + 3 | three + 8 | one +(2 rows) + +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest FOR UPDATE; +FETCH c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+------- + 8 | one + 13 | three +(2 rows) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+------- + 8 | one + 23 | three +(2 rows) + +DELETE FROM uctest WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +----+----- + 8 | one +(1 row) + +DELETE FROM uctest WHERE CURRENT OF c1; -- no-op +SELECT * FROM uctest; + f1 | f2 +----+----- + 8 | one +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; -- no-op +SELECT * FROM uctest; + f1 | f2 +----+----- + 8 | one +(1 row) + +--- FOR UPDATE 
cursors can't currently scroll back, so this is an error: +FETCH RELATIVE 0 FROM c1; +ERROR: cursor can only scan forward +HINT: Declare it with SCROLL option to enable backward scan. +ROLLBACK; +SELECT * FROM uctest; + f1 | f2 +----+------- + 3 | three + 8 | one +(2 rows) + +-- Check insensitive cursor with INSERT +-- (The above tests don't test the SQL notion of an insensitive cursor +-- correctly, because per SQL standard, changes from WHERE CURRENT OF +-- commands should be visible in the cursor. So here we make the +-- changes with a command that is independent of the cursor.) +BEGIN; +DECLARE c1 INSENSITIVE CURSOR FOR SELECT * FROM uctest; +INSERT INTO uctest VALUES (10, 'ten'); +FETCH NEXT FROM c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +FETCH NEXT FROM c1; + f1 | f2 +----+----- + 8 | one +(1 row) + +FETCH NEXT FROM c1; -- insert not visible + f1 | f2 +----+---- +(0 rows) + +COMMIT; +SELECT * FROM uctest; + f1 | f2 +----+------- + 3 | three + 8 | one + 10 | ten +(3 rows) + +DELETE FROM uctest WHERE f1 = 10; -- restore test table state +-- Check inheritance cases +CREATE TEMP TABLE ucchild () inherits (uctest); +INSERT INTO ucchild values(100, 'hundred'); +SELECT * FROM uctest; + f1 | f2 +-----+--------- + 3 | three + 8 | one + 100 | hundred +(3 rows) + +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest FOR UPDATE; +FETCH 1 FROM c1; + f1 | f2 +----+------- + 3 | three +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +FETCH 1 FROM c1; + f1 | f2 +----+----- + 8 | one +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +FETCH 1 FROM c1; + f1 | f2 +-----+--------- + 100 | hundred +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +FETCH 1 FROM c1; + f1 | f2 +----+---- +(0 rows) + +COMMIT; +SELECT * FROM uctest; + f1 | f2 +-----+--------- + 13 | three + 18 | one + 110 | hundred +(3 rows) + +-- Can update from a self-join, but only if FOR UPDATE says which to use +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest a, uctest b WHERE a.f1 = b.f1 + 5; +FETCH 1 FROM c1; + f1 | f2 | f1 | f2 +----+-----+----+------- + 18 | one | 13 | three +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; -- fail +ERROR: cursor "c1" is not a simply updatable scan of table "uctest" +ROLLBACK; +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest a, uctest b WHERE a.f1 = b.f1 + 5 FOR UPDATE; +FETCH 1 FROM c1; + f1 | f2 | f1 | f2 +----+-----+----+------- + 18 | one | 13 | three +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; -- fail +ERROR: cursor "c1" has multiple FOR UPDATE/SHARE references to table "uctest" +ROLLBACK; +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest a, uctest b WHERE a.f1 = b.f1 + 5 FOR SHARE OF a; +FETCH 1 FROM c1; + f1 | f2 | f1 | f2 +----+-----+----+------- + 18 | one | 13 | three +(1 row) + +UPDATE uctest SET f1 = f1 + 10 WHERE CURRENT OF c1; +SELECT * FROM uctest; + f1 | f2 +-----+--------- + 13 | three + 28 | one + 110 | hundred +(3 rows) + +ROLLBACK; +-- Check various error cases +DELETE FROM uctest WHERE CURRENT OF c1; -- fail, no such cursor +ERROR: cursor "c1" does not exist +DECLARE cx CURSOR WITH HOLD FOR SELECT * FROM uctest; +DELETE FROM uctest WHERE CURRENT OF cx; -- fail, can't use held cursor +ERROR: cursor "cx" is held from a previous transaction +BEGIN; +DECLARE c CURSOR FOR SELECT * FROM tenk2; +DELETE FROM uctest WHERE CURRENT OF c; -- fail, cursor on wrong table +ERROR: cursor "c" is not a simply updatable scan of table "uctest" +ROLLBACK; +BEGIN; +DECLARE c CURSOR FOR SELECT * FROM tenk2 FOR 
SHARE; +DELETE FROM uctest WHERE CURRENT OF c; -- fail, cursor on wrong table +ERROR: cursor "c" does not have a FOR UPDATE/SHARE reference to table "uctest" +ROLLBACK; +BEGIN; +DECLARE c CURSOR FOR SELECT * FROM tenk1 JOIN tenk2 USING (unique1); +DELETE FROM tenk1 WHERE CURRENT OF c; -- fail, cursor is on a join +ERROR: cursor "c" is not a simply updatable scan of table "tenk1" +ROLLBACK; +BEGIN; +DECLARE c CURSOR FOR SELECT f1,count(*) FROM uctest GROUP BY f1; +DELETE FROM uctest WHERE CURRENT OF c; -- fail, cursor is on aggregation +ERROR: cursor "c" is not a simply updatable scan of table "uctest" +ROLLBACK; +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM uctest; +DELETE FROM uctest WHERE CURRENT OF c1; -- fail, no current row +ERROR: cursor "c1" is not positioned on a row +ROLLBACK; +BEGIN; +DECLARE c1 CURSOR FOR SELECT MIN(f1) FROM uctest FOR UPDATE; +ERROR: FOR UPDATE is not allowed with aggregate functions +ROLLBACK; +-- WHERE CURRENT OF may someday work with views, but today is not that day. +-- For now, just make sure it errors out cleanly. +CREATE TEMP VIEW ucview AS SELECT * FROM uctest; +CREATE RULE ucrule AS ON DELETE TO ucview DO INSTEAD + DELETE FROM uctest WHERE f1 = OLD.f1; +BEGIN; +DECLARE c1 CURSOR FOR SELECT * FROM ucview; +FETCH FROM c1; + f1 | f2 +----+------- + 13 | three +(1 row) + +DELETE FROM ucview WHERE CURRENT OF c1; -- fail, views not supported +ERROR: WHERE CURRENT OF on a view is not implemented +ROLLBACK; +-- Check WHERE CURRENT OF with an index-only scan +BEGIN; +EXPLAIN (costs off) +DECLARE c1 CURSOR FOR SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; + QUERY PLAN +--------------------------------------------- + Index Only Scan using onek_stringu1 on onek + Index Cond: (stringu1 = 'DZAAAA'::name) +(2 rows) + +DECLARE c1 CURSOR FOR SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; +FETCH FROM c1; + stringu1 +---------- + DZAAAA +(1 row) + +DELETE FROM onek WHERE CURRENT OF c1; +SELECT stringu1 FROM onek WHERE stringu1 = 'DZAAAA'; + stringu1 +---------- +(0 rows) + +ROLLBACK; +-- Check behavior with rewinding to a previous child scan node, +-- as per bug #15395 +BEGIN; +CREATE TABLE current_check (currentid int, payload text); +CREATE TABLE current_check_1 () INHERITS (current_check); +CREATE TABLE current_check_2 () INHERITS (current_check); +INSERT INTO current_check_1 SELECT i, 'p' || i FROM generate_series(1,9) i; +INSERT INTO current_check_2 SELECT i, 'P' || i FROM generate_series(10,19) i; +DECLARE c1 SCROLL CURSOR FOR SELECT * FROM current_check; +-- This tests the fetch-backwards code path +FETCH ABSOLUTE 12 FROM c1; + currentid | payload +-----------+--------- + 12 | P12 +(1 row) + +FETCH ABSOLUTE 8 FROM c1; + currentid | payload +-----------+--------- + 8 | p8 +(1 row) + +DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *; + currentid | payload +-----------+--------- + 8 | p8 +(1 row) + +-- This tests the ExecutorRewind code path +FETCH ABSOLUTE 13 FROM c1; + currentid | payload +-----------+--------- + 13 | P13 +(1 row) + +FETCH ABSOLUTE 1 FROM c1; + currentid | payload +-----------+--------- + 1 | p1 +(1 row) + +DELETE FROM current_check WHERE CURRENT OF c1 RETURNING *; + currentid | payload +-----------+--------- + 1 | p1 +(1 row) + +SELECT * FROM current_check; + currentid | payload +-----------+--------- + 2 | p2 + 3 | p3 + 4 | p4 + 5 | p5 + 6 | p6 + 7 | p7 + 9 | p9 + 10 | P10 + 11 | P11 + 12 | P12 + 13 | P13 + 14 | P14 + 15 | P15 + 16 | P16 + 17 | P17 + 18 | P18 + 19 | P19 +(17 rows) + +ROLLBACK; +-- Make sure snapshot 
management works okay, per bug report in +-- 235395b90909301035v7228ce63q392931f15aa74b31@mail.gmail.com +BEGIN; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +CREATE TABLE cursor (a int); +INSERT INTO cursor VALUES (1); +DECLARE c1 NO SCROLL CURSOR FOR SELECT * FROM cursor FOR UPDATE; +UPDATE cursor SET a = 2; +FETCH ALL FROM c1; + a +--- +(0 rows) + +COMMIT; +DROP TABLE cursor; +-- Check rewinding a cursor containing a stable function in LIMIT, +-- per bug report in 8336843.9833.1399385291498.JavaMail.root@quick +begin; +create function nochange(int) returns int + as 'select $1 limit 1' language sql stable; +declare c cursor for select * from int8_tbl limit nochange(3); +fetch all from c; + q1 | q2 +------------------+------------------ + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 +(3 rows) + +move backward all in c; +fetch all from c; + q1 | q2 +------------------+------------------ + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 +(3 rows) + +rollback; +-- Check handling of non-backwards-scan-capable plans with scroll cursors +begin; +explain (costs off) declare c1 cursor for select (select 42) as x; + QUERY PLAN +--------------------------- + Result + InitPlan 1 (returns $0) + -> Result +(3 rows) + +explain (costs off) declare c1 scroll cursor for select (select 42) as x; + QUERY PLAN +--------------------------- + Materialize + InitPlan 1 (returns $0) + -> Result + -> Result +(4 rows) + +declare c1 scroll cursor for select (select 42) as x; +fetch all in c1; + x +---- + 42 +(1 row) + +fetch backward all in c1; + x +---- + 42 +(1 row) + +rollback; +begin; +explain (costs off) declare c2 cursor for select generate_series(1,3) as g; + QUERY PLAN +-------------- + ProjectSet + -> Result +(2 rows) + +explain (costs off) declare c2 scroll cursor for select generate_series(1,3) as g; + QUERY PLAN +-------------------- + Materialize + -> ProjectSet + -> Result +(3 rows) + +declare c2 scroll cursor for select generate_series(1,3) as g; +fetch all in c2; + g +--- + 1 + 2 + 3 +(3 rows) + +fetch backward all in c2; + g +--- + 3 + 2 + 1 +(3 rows) + +rollback; +-- Check fetching of toasted datums via cursors. +begin; +-- Other compression algorithms may cause the compressed data to be stored +-- inline. Use pglz to ensure consistent results. 
+set default_toast_compression = 'pglz'; +create table toasted_data (f1 int[]); +insert into toasted_data + select array_agg(i) from generate_series(12345678, 12345678 + 1000) i; +declare local_portal cursor for select * from toasted_data; +fetch all in local_portal; + f1 +-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
{12345678,12345679,12345680,12345681,12345682,12345683,12345684,12345685,12345686,12345687,12345688,12345689,12345690,12345691,12345692,12345693,12345694,12345695,12345696,12345697,12345698,12345699,12345700,12345701,12345702,12345703,12345704,12345705,12345706,12345707,12345708,12345709,12345710,12345711,12345712,12345713,12345714,12345715,12345716,12345717,12345718,12345719,12345720,12345721,12345722,12345723,12345724,12345725,12345726,12345727,12345728,12345729,12345730,12345731,12345732,12345733,12345734,12345735,12345736,12345737,12345738,12345739,12345740,12345741,12345742,12345743,12345744,12345745,12345746,12345747,12345748,12345749,12345750,12345751,12345752,12345753,12345754,12345755,12345756,12345757,12345758,12345759,12345760,12345761,12345762,12345763,12345764,12345765,12345766,12345767,12345768,12345769,12345770,12345771,12345772,12345773,12345774,12345775,12345776,12345777,12345778,12345779,12345780,12345781,12345782,12345783,12345784,12345785,12345786,12345787,12345788,12345789,12345790,12345791,12345792,12345793,12345794,12345795,12345796,12345797,12345798,12345799,12345800,12345801,12345802,12345803,12345804,12345805,12345806,12345807,12345808,12345809,12345810,12345811,12345812,12345813,12345814,12345815,12345816,12345817,12345818,12345819,12345820,12345821,12345822,12345823,12345824,12345825,12345826,12345827,12345828,12345829,12345830,12345831,12345832,12345833,12345834,12345835,12345836,12345837,12345838,12345839,12345840,12345841,12345842,12345843,12345844,12345845,12345846,12345847,12345848,12345849,12345850,12345851,12345852,12345853,12345854,12345855,12345856,12345857,12345858,12345859,12345860,12345861,12345862,12345863,12345864,12345865,12345866,12345867,12345868,12345869,12345870,12345871,12345872,12345873,12345874,12345875,12345876,12345877,12345878,12345879,12345880,12345881,12345882,12345883,12345884,12345885,12345886,12345887,12345888,12345889,12345890,12345891,12345892,12345893,12345894,12345895,12345896,12345897,12345898,12345899,12345900,12345901,12345902,12345903,12345904,12345905,12345906,12345907,12345908,12345909,12345910,12345911,12345912,12345913,12345914,12345915,12345916,12345917,12345918,12345919,12345920,12345921,12345922,12345923,12345924,12345925,12345926,12345927,12345928,12345929,12345930,12345931,12345932,12345933,12345934,12345935,12345936,12345937,12345938,12345939,12345940,12345941,12345942,12345943,12345944,12345945,12345946,12345947,12345948,12345949,12345950,12345951,12345952,12345953,12345954,12345955,12345956,12345957,12345958,12345959,12345960,12345961,12345962,12345963,12345964,12345965,12345966,12345967,12345968,12345969,12345970,12345971,12345972,12345973,12345974,12345975,12345976,12345977,12345978,12345979,12345980,12345981,12345982,12345983,12345984,12345985,12345986,12345987,12345988,12345989,12345990,12345991,12345992,12345993,12345994,12345995,12345996,12345997,12345998,12345999,12346000,12346001,12346002,12346003,12346004,12346005,12346006,12346007,12346008,12346009,12346010,12346011,12346012,12346013,12346014,12346015,12346016,12346017,12346018,12346019,12346020,12346021,12346022,12346023,12346024,12346025,12346026,12346027,12346028,12346029,12346030,12346031,12346032,12346033,12346034,12346035,12346036,12346037,12346038,12346039,12346040,12346041,12346042,12346043,12346044,12346045,12346046,12346047,12346048,12346049,12346050,12346051,12346052,12346053,12346054,12346055,12346056,12346057,12346058,12346059,12346060,12346061,12346062,12346063,12346064,12346065,12346066,12346067,12346068,12346069,12346070,12346071,12346072
,12346073,12346074,12346075,12346076,12346077,12346078,12346079,12346080,12346081,12346082,12346083,12346084,12346085,12346086,12346087,12346088,12346089,12346090,12346091,12346092,12346093,12346094,12346095,12346096,12346097,12346098,12346099,12346100,12346101,12346102,12346103,12346104,12346105,12346106,12346107,12346108,12346109,12346110,12346111,12346112,12346113,12346114,12346115,12346116,12346117,12346118,12346119,12346120,12346121,12346122,12346123,12346124,12346125,12346126,12346127,12346128,12346129,12346130,12346131,12346132,12346133,12346134,12346135,12346136,12346137,12346138,12346139,12346140,12346141,12346142,12346143,12346144,12346145,12346146,12346147,12346148,12346149,12346150,12346151,12346152,12346153,12346154,12346155,12346156,12346157,12346158,12346159,12346160,12346161,12346162,12346163,12346164,12346165,12346166,12346167,12346168,12346169,12346170,12346171,12346172,12346173,12346174,12346175,12346176,12346177,12346178,12346179,12346180,12346181,12346182,12346183,12346184,12346185,12346186,12346187,12346188,12346189,12346190,12346191,12346192,12346193,12346194,12346195,12346196,12346197,12346198,12346199,12346200,12346201,12346202,12346203,12346204,12346205,12346206,12346207,12346208,12346209,12346210,12346211,12346212,12346213,12346214,12346215,12346216,12346217,12346218,12346219,12346220,12346221,12346222,12346223,12346224,12346225,12346226,12346227,12346228,12346229,12346230,12346231,12346232,12346233,12346234,12346235,12346236,12346237,12346238,12346239,12346240,12346241,12346242,12346243,12346244,12346245,12346246,12346247,12346248,12346249,12346250,12346251,12346252,12346253,12346254,12346255,12346256,12346257,12346258,12346259,12346260,12346261,12346262,12346263,12346264,12346265,12346266,12346267,12346268,12346269,12346270,12346271,12346272,12346273,12346274,12346275,12346276,12346277,12346278,12346279,12346280,12346281,12346282,12346283,12346284,12346285,12346286,12346287,12346288,12346289,12346290,12346291,12346292,12346293,12346294,12346295,12346296,12346297,12346298,12346299,12346300,12346301,12346302,12346303,12346304,12346305,12346306,12346307,12346308,12346309,12346310,12346311,12346312,12346313,12346314,12346315,12346316,12346317,12346318,12346319,12346320,12346321,12346322,12346323,12346324,12346325,12346326,12346327,12346328,12346329,12346330,12346331,12346332,12346333,12346334,12346335,12346336,12346337,12346338,12346339,12346340,12346341,12346342,12346343,12346344,12346345,12346346,12346347,12346348,12346349,12346350,12346351,12346352,12346353,12346354,12346355,12346356,12346357,12346358,12346359,12346360,12346361,12346362,12346363,12346364,12346365,12346366,12346367,12346368,12346369,12346370,12346371,12346372,12346373,12346374,12346375,12346376,12346377,12346378,12346379,12346380,12346381,12346382,12346383,12346384,12346385,12346386,12346387,12346388,12346389,12346390,12346391,12346392,12346393,12346394,12346395,12346396,12346397,12346398,12346399,12346400,12346401,12346402,12346403,12346404,12346405,12346406,12346407,12346408,12346409,12346410,12346411,12346412,12346413,12346414,12346415,12346416,12346417,12346418,12346419,12346420,12346421,12346422,12346423,12346424,12346425,12346426,12346427,12346428,12346429,12346430,12346431,12346432,12346433,12346434,12346435,12346436,12346437,12346438,12346439,12346440,12346441,12346442,12346443,12346444,12346445,12346446,12346447,12346448,12346449,12346450,12346451,12346452,12346453,12346454,12346455,12346456,12346457,12346458,12346459,12346460,12346461,12346462,12346463,12346464,12346465,12346466,12346467
,12346468,12346469,12346470,12346471,12346472,12346473,12346474,12346475,12346476,12346477,12346478,12346479,12346480,12346481,12346482,12346483,12346484,12346485,12346486,12346487,12346488,12346489,12346490,12346491,12346492,12346493,12346494,12346495,12346496,12346497,12346498,12346499,12346500,12346501,12346502,12346503,12346504,12346505,12346506,12346507,12346508,12346509,12346510,12346511,12346512,12346513,12346514,12346515,12346516,12346517,12346518,12346519,12346520,12346521,12346522,12346523,12346524,12346525,12346526,12346527,12346528,12346529,12346530,12346531,12346532,12346533,12346534,12346535,12346536,12346537,12346538,12346539,12346540,12346541,12346542,12346543,12346544,12346545,12346546,12346547,12346548,12346549,12346550,12346551,12346552,12346553,12346554,12346555,12346556,12346557,12346558,12346559,12346560,12346561,12346562,12346563,12346564,12346565,12346566,12346567,12346568,12346569,12346570,12346571,12346572,12346573,12346574,12346575,12346576,12346577,12346578,12346579,12346580,12346581,12346582,12346583,12346584,12346585,12346586,12346587,12346588,12346589,12346590,12346591,12346592,12346593,12346594,12346595,12346596,12346597,12346598,12346599,12346600,12346601,12346602,12346603,12346604,12346605,12346606,12346607,12346608,12346609,12346610,12346611,12346612,12346613,12346614,12346615,12346616,12346617,12346618,12346619,12346620,12346621,12346622,12346623,12346624,12346625,12346626,12346627,12346628,12346629,12346630,12346631,12346632,12346633,12346634,12346635,12346636,12346637,12346638,12346639,12346640,12346641,12346642,12346643,12346644,12346645,12346646,12346647,12346648,12346649,12346650,12346651,12346652,12346653,12346654,12346655,12346656,12346657,12346658,12346659,12346660,12346661,12346662,12346663,12346664,12346665,12346666,12346667,12346668,12346669,12346670,12346671,12346672,12346673,12346674,12346675,12346676,12346677,12346678} +(1 row) + +declare held_portal cursor with hold for select * from toasted_data; +commit; +drop table toasted_data; +fetch all in held_portal; + f1 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 
{12345678,12345679,12345680,12345681,12345682,12345683,12345684,12345685,12345686,12345687,12345688,12345689,12345690,12345691,12345692,12345693,12345694,12345695,12345696,12345697,12345698,12345699,12345700,12345701,12345702,12345703,12345704,12345705,12345706,12345707,12345708,12345709,12345710,12345711,12345712,12345713,12345714,12345715,12345716,12345717,12345718,12345719,12345720,12345721,12345722,12345723,12345724,12345725,12345726,12345727,12345728,12345729,12345730,12345731,12345732,12345733,12345734,12345735,12345736,12345737,12345738,12345739,12345740,12345741,12345742,12345743,12345744,12345745,12345746,12345747,12345748,12345749,12345750,12345751,12345752,12345753,12345754,12345755,12345756,12345757,12345758,12345759,12345760,12345761,12345762,12345763,12345764,12345765,12345766,12345767,12345768,12345769,12345770,12345771,12345772,12345773,12345774,12345775,12345776,12345777,12345778,12345779,12345780,12345781,12345782,12345783,12345784,12345785,12345786,12345787,12345788,12345789,12345790,12345791,12345792,12345793,12345794,12345795,12345796,12345797,12345798,12345799,12345800,12345801,12345802,12345803,12345804,12345805,12345806,12345807,12345808,12345809,12345810,12345811,12345812,12345813,12345814,12345815,12345816,12345817,12345818,12345819,12345820,12345821,12345822,12345823,12345824,12345825,12345826,12345827,12345828,12345829,12345830,12345831,12345832,12345833,12345834,12345835,12345836,12345837,12345838,12345839,12345840,12345841,12345842,12345843,12345844,12345845,12345846,12345847,12345848,12345849,12345850,12345851,12345852,12345853,12345854,12345855,12345856,12345857,12345858,12345859,12345860,12345861,12345862,12345863,12345864,12345865,12345866,12345867,12345868,12345869,12345870,12345871,12345872,12345873,12345874,12345875,12345876,12345877,12345878,12345879,12345880,12345881,12345882,12345883,12345884,12345885,12345886,12345887,12345888,12345889,12345890,12345891,12345892,12345893,12345894,12345895,12345896,12345897,12345898,12345899,12345900,12345901,12345902,12345903,12345904,12345905,12345906,12345907,12345908,12345909,12345910,12345911,12345912,12345913,12345914,12345915,12345916,12345917,12345918,12345919,12345920,12345921,12345922,12345923,12345924,12345925,12345926,12345927,12345928,12345929,12345930,12345931,12345932,12345933,12345934,12345935,12345936,12345937,12345938,12345939,12345940,12345941,12345942,12345943,12345944,12345945,12345946,12345947,12345948,12345949,12345950,12345951,12345952,12345953,12345954,12345955,12345956,12345957,12345958,12345959,12345960,12345961,12345962,12345963,12345964,12345965,12345966,12345967,12345968,12345969,12345970,12345971,12345972,12345973,12345974,12345975,12345976,12345977,12345978,12345979,12345980,12345981,12345982,12345983,12345984,12345985,12345986,12345987,12345988,12345989,12345990,12345991,12345992,12345993,12345994,12345995,12345996,12345997,12345998,12345999,12346000,12346001,12346002,12346003,12346004,12346005,12346006,12346007,12346008,12346009,12346010,12346011,12346012,12346013,12346014,12346015,12346016,12346017,12346018,12346019,12346020,12346021,12346022,12346023,12346024,12346025,12346026,12346027,12346028,12346029,12346030,12346031,12346032,12346033,12346034,12346035,12346036,12346037,12346038,12346039,12346040,12346041,12346042,12346043,12346044,12346045,12346046,12346047,12346048,12346049,12346050,12346051,12346052,12346053,12346054,12346055,12346056,12346057,12346058,12346059,12346060,12346061,12346062,12346063,12346064,12346065,12346066,12346067,12346068,12346069,12346070,12346071,12346072
,12346073,12346074,12346075,12346076,12346077,12346078,12346079,12346080,12346081,12346082,12346083,12346084,12346085,12346086,12346087,12346088,12346089,12346090,12346091,12346092,12346093,12346094,12346095,12346096,12346097,12346098,12346099,12346100,12346101,12346102,12346103,12346104,12346105,12346106,12346107,12346108,12346109,12346110,12346111,12346112,12346113,12346114,12346115,12346116,12346117,12346118,12346119,12346120,12346121,12346122,12346123,12346124,12346125,12346126,12346127,12346128,12346129,12346130,12346131,12346132,12346133,12346134,12346135,12346136,12346137,12346138,12346139,12346140,12346141,12346142,12346143,12346144,12346145,12346146,12346147,12346148,12346149,12346150,12346151,12346152,12346153,12346154,12346155,12346156,12346157,12346158,12346159,12346160,12346161,12346162,12346163,12346164,12346165,12346166,12346167,12346168,12346169,12346170,12346171,12346172,12346173,12346174,12346175,12346176,12346177,12346178,12346179,12346180,12346181,12346182,12346183,12346184,12346185,12346186,12346187,12346188,12346189,12346190,12346191,12346192,12346193,12346194,12346195,12346196,12346197,12346198,12346199,12346200,12346201,12346202,12346203,12346204,12346205,12346206,12346207,12346208,12346209,12346210,12346211,12346212,12346213,12346214,12346215,12346216,12346217,12346218,12346219,12346220,12346221,12346222,12346223,12346224,12346225,12346226,12346227,12346228,12346229,12346230,12346231,12346232,12346233,12346234,12346235,12346236,12346237,12346238,12346239,12346240,12346241,12346242,12346243,12346244,12346245,12346246,12346247,12346248,12346249,12346250,12346251,12346252,12346253,12346254,12346255,12346256,12346257,12346258,12346259,12346260,12346261,12346262,12346263,12346264,12346265,12346266,12346267,12346268,12346269,12346270,12346271,12346272,12346273,12346274,12346275,12346276,12346277,12346278,12346279,12346280,12346281,12346282,12346283,12346284,12346285,12346286,12346287,12346288,12346289,12346290,12346291,12346292,12346293,12346294,12346295,12346296,12346297,12346298,12346299,12346300,12346301,12346302,12346303,12346304,12346305,12346306,12346307,12346308,12346309,12346310,12346311,12346312,12346313,12346314,12346315,12346316,12346317,12346318,12346319,12346320,12346321,12346322,12346323,12346324,12346325,12346326,12346327,12346328,12346329,12346330,12346331,12346332,12346333,12346334,12346335,12346336,12346337,12346338,12346339,12346340,12346341,12346342,12346343,12346344,12346345,12346346,12346347,12346348,12346349,12346350,12346351,12346352,12346353,12346354,12346355,12346356,12346357,12346358,12346359,12346360,12346361,12346362,12346363,12346364,12346365,12346366,12346367,12346368,12346369,12346370,12346371,12346372,12346373,12346374,12346375,12346376,12346377,12346378,12346379,12346380,12346381,12346382,12346383,12346384,12346385,12346386,12346387,12346388,12346389,12346390,12346391,12346392,12346393,12346394,12346395,12346396,12346397,12346398,12346399,12346400,12346401,12346402,12346403,12346404,12346405,12346406,12346407,12346408,12346409,12346410,12346411,12346412,12346413,12346414,12346415,12346416,12346417,12346418,12346419,12346420,12346421,12346422,12346423,12346424,12346425,12346426,12346427,12346428,12346429,12346430,12346431,12346432,12346433,12346434,12346435,12346436,12346437,12346438,12346439,12346440,12346441,12346442,12346443,12346444,12346445,12346446,12346447,12346448,12346449,12346450,12346451,12346452,12346453,12346454,12346455,12346456,12346457,12346458,12346459,12346460,12346461,12346462,12346463,12346464,12346465,12346466,12346467
,12346468,12346469,12346470,12346471,12346472,12346473,12346474,12346475,12346476,12346477,12346478,12346479,12346480,12346481,12346482,12346483,12346484,12346485,12346486,12346487,12346488,12346489,12346490,12346491,12346492,12346493,12346494,12346495,12346496,12346497,12346498,12346499,12346500,12346501,12346502,12346503,12346504,12346505,12346506,12346507,12346508,12346509,12346510,12346511,12346512,12346513,12346514,12346515,12346516,12346517,12346518,12346519,12346520,12346521,12346522,12346523,12346524,12346525,12346526,12346527,12346528,12346529,12346530,12346531,12346532,12346533,12346534,12346535,12346536,12346537,12346538,12346539,12346540,12346541,12346542,12346543,12346544,12346545,12346546,12346547,12346548,12346549,12346550,12346551,12346552,12346553,12346554,12346555,12346556,12346557,12346558,12346559,12346560,12346561,12346562,12346563,12346564,12346565,12346566,12346567,12346568,12346569,12346570,12346571,12346572,12346573,12346574,12346575,12346576,12346577,12346578,12346579,12346580,12346581,12346582,12346583,12346584,12346585,12346586,12346587,12346588,12346589,12346590,12346591,12346592,12346593,12346594,12346595,12346596,12346597,12346598,12346599,12346600,12346601,12346602,12346603,12346604,12346605,12346606,12346607,12346608,12346609,12346610,12346611,12346612,12346613,12346614,12346615,12346616,12346617,12346618,12346619,12346620,12346621,12346622,12346623,12346624,12346625,12346626,12346627,12346628,12346629,12346630,12346631,12346632,12346633,12346634,12346635,12346636,12346637,12346638,12346639,12346640,12346641,12346642,12346643,12346644,12346645,12346646,12346647,12346648,12346649,12346650,12346651,12346652,12346653,12346654,12346655,12346656,12346657,12346658,12346659,12346660,12346661,12346662,12346663,12346664,12346665,12346666,12346667,12346668,12346669,12346670,12346671,12346672,12346673,12346674,12346675,12346676,12346677,12346678} +(1 row) + +reset default_toast_compression; diff --git a/src/test/regress/expected/portals_p2.out b/src/test/regress/expected/portals_p2.out new file mode 100644 index 0000000..1e2365a --- /dev/null +++ b/src/test/regress/expected/portals_p2.out @@ -0,0 +1,122 @@ +-- +-- PORTALS_P2 +-- +BEGIN; +DECLARE foo13 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 50; +DECLARE foo14 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 51; +DECLARE foo15 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 52; +DECLARE foo16 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 53; +DECLARE foo17 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 54; +DECLARE foo18 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 55; +DECLARE foo19 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 56; +DECLARE foo20 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 57; +DECLARE foo21 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 58; +DECLARE foo22 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 59; +DECLARE foo23 CURSOR FOR + SELECT * FROM onek WHERE unique1 = 60; +DECLARE foo24 CURSOR FOR + SELECT * FROM onek2 WHERE unique1 = 50; +DECLARE foo25 CURSOR FOR + SELECT * FROM onek2 WHERE unique1 = 60; +FETCH all in foo13; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 50 | 253 | 0 | 2 | 0 | 10 | 0 | 50 | 50 | 50 | 50 | 0 | 1 | YBAAAA | TJAAAA | HHHHxx +(1 row) + +FETCH all in foo14; + unique1 | unique2 | two | four | ten | 
twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 51 | 76 | 1 | 3 | 1 | 11 | 1 | 51 | 51 | 51 | 51 | 2 | 3 | ZBAAAA | YCAAAA | AAAAxx +(1 row) + +FETCH all in foo15; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 52 | 985 | 0 | 0 | 2 | 12 | 2 | 52 | 52 | 52 | 52 | 4 | 5 | ACAAAA | XLBAAA | HHHHxx +(1 row) + +FETCH all in foo16; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 53 | 196 | 1 | 1 | 3 | 13 | 3 | 53 | 53 | 53 | 53 | 6 | 7 | BCAAAA | OHAAAA | AAAAxx +(1 row) + +FETCH all in foo17; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 54 | 356 | 0 | 2 | 4 | 14 | 4 | 54 | 54 | 54 | 54 | 8 | 9 | CCAAAA | SNAAAA | AAAAxx +(1 row) + +FETCH all in foo18; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 55 | 627 | 1 | 3 | 5 | 15 | 5 | 55 | 55 | 55 | 55 | 10 | 11 | DCAAAA | DYAAAA | VVVVxx +(1 row) + +FETCH all in foo19; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 56 | 54 | 0 | 0 | 6 | 16 | 6 | 56 | 56 | 56 | 56 | 12 | 13 | ECAAAA | CCAAAA | OOOOxx +(1 row) + +FETCH all in foo20; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 57 | 942 | 1 | 1 | 7 | 17 | 7 | 57 | 57 | 57 | 57 | 14 | 15 | FCAAAA | GKBAAA | OOOOxx +(1 row) + +FETCH all in foo21; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 58 | 114 | 0 | 2 | 8 | 18 | 8 | 58 | 58 | 58 | 58 | 16 | 17 | GCAAAA | KEAAAA | OOOOxx +(1 row) + +FETCH all in foo22; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 59 | 593 | 1 | 3 | 9 | 19 | 9 | 59 | 59 | 59 | 59 | 18 | 19 | HCAAAA | VWAAAA | HHHHxx +(1 row) + +FETCH all in foo23; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 60 | 483 | 0 | 0 | 0 | 0 | 0 | 60 | 60 | 60 | 60 | 0 | 1 | ICAAAA | PSAAAA | VVVVxx +(1 row) + +FETCH all in foo24; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 50 | 253 | 0 | 2 | 0 | 10 | 0 | 50 | 50 | 50 | 50 | 0 | 1 | YBAAAA | TJAAAA | HHHHxx +(1 row) + +FETCH all in foo25; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 60 | 483 | 0 | 0 | 0 | 0 | 0 | 60 | 60 | 60 | 60 | 0 | 1 | ICAAAA | PSAAAA | VVVVxx +(1 row) + +CLOSE foo13; +CLOSE foo14; +CLOSE foo15; +CLOSE foo16; +CLOSE foo17; +CLOSE foo18; +CLOSE foo19; +CLOSE foo20; +CLOSE foo21; +CLOSE foo22; +CLOSE foo23; +CLOSE foo24; +CLOSE foo25; +END; diff --git a/src/test/regress/expected/prepare.out b/src/test/regress/expected/prepare.out new file mode 100644 index 0000000..5815e17 --- /dev/null +++ b/src/test/regress/expected/prepare.out @@ -0,0 +1,194 @@ +-- Regression tests for prepareable statements. We query the content +-- of the pg_prepared_statements view as prepared statements are +-- created and removed. +SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements; + name | statement | parameter_types | result_types +------+-----------+-----------------+-------------- +(0 rows) + +PREPARE q1 AS SELECT 1 AS a; +EXECUTE q1; + a +--- + 1 +(1 row) + +SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements; + name | statement | parameter_types | result_types +------+------------------------------+-----------------+-------------- + q1 | PREPARE q1 AS SELECT 1 AS a; | {} | {integer} +(1 row) + +-- should fail +PREPARE q1 AS SELECT 2; +ERROR: prepared statement "q1" already exists +-- should succeed +DEALLOCATE q1; +PREPARE q1 AS SELECT 2; +EXECUTE q1; + ?column? 
+---------- + 2 +(1 row) + +PREPARE q2 AS SELECT 2 AS b; +SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements; + name | statement | parameter_types | result_types +------+------------------------------+-----------------+-------------- + q1 | PREPARE q1 AS SELECT 2; | {} | {integer} + q2 | PREPARE q2 AS SELECT 2 AS b; | {} | {integer} +(2 rows) + +-- sql92 syntax +DEALLOCATE PREPARE q1; +SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements; + name | statement | parameter_types | result_types +------+------------------------------+-----------------+-------------- + q2 | PREPARE q2 AS SELECT 2 AS b; | {} | {integer} +(1 row) + +DEALLOCATE PREPARE q2; +-- the view should return the empty set again +SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements; + name | statement | parameter_types | result_types +------+-----------+-----------------+-------------- +(0 rows) + +-- parameterized queries +PREPARE q2(text) AS + SELECT datname, datistemplate, datallowconn + FROM pg_database WHERE datname = $1; +EXECUTE q2('postgres'); + datname | datistemplate | datallowconn +----------+---------------+-------------- + postgres | f | t +(1 row) + +PREPARE q3(text, int, float, boolean, smallint) AS + SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR + ten = $3::bigint OR true = $4 OR odd = $5::int) + ORDER BY unique1; +EXECUTE q3('AAAAxx', 5::smallint, 10.5::float, false, 4::bigint); + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 2 | 2716 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | MAEAAA | AAAAxx + 102 | 612 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 102 | 102 | 4 | 5 | YDAAAA | OXAAAA | AAAAxx + 802 | 2908 | 0 | 2 | 2 | 2 | 2 | 802 | 802 | 802 | 802 | 4 | 5 | WEAAAA | WHEAAA | AAAAxx + 902 | 1104 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 902 | 902 | 4 | 5 | SIAAAA | MQBAAA | AAAAxx + 1002 | 2580 | 0 | 2 | 2 | 2 | 2 | 2 | 1002 | 1002 | 1002 | 4 | 5 | OMAAAA | GVDAAA | AAAAxx + 1602 | 8148 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 1602 | 1602 | 4 | 5 | QJAAAA | KBMAAA | AAAAxx + 1702 | 7940 | 0 | 2 | 2 | 2 | 2 | 702 | 1702 | 1702 | 1702 | 4 | 5 | MNAAAA | KTLAAA | AAAAxx + 2102 | 6184 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 2102 | 2102 | 4 | 5 | WCAAAA | WDJAAA | AAAAxx + 2202 | 8028 | 0 | 2 | 2 | 2 | 2 | 202 | 202 | 2202 | 2202 | 4 | 5 | SGAAAA | UWLAAA | AAAAxx + 2302 | 7112 | 0 | 2 | 2 | 2 | 2 | 302 | 302 | 2302 | 2302 | 4 | 5 | OKAAAA | ONKAAA | AAAAxx + 2902 | 6816 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 2902 | 2902 | 4 | 5 | QHAAAA | ECKAAA | AAAAxx + 3202 | 7128 | 0 | 2 | 2 | 2 | 2 | 202 | 1202 | 3202 | 3202 | 4 | 5 | ETAAAA | EOKAAA | AAAAxx + 3902 | 9224 | 0 | 2 | 2 | 2 | 2 | 902 | 1902 | 3902 | 3902 | 4 | 5 | CUAAAA | UQNAAA | AAAAxx + 4102 | 7676 | 0 | 2 | 2 | 2 | 2 | 102 | 102 | 4102 | 4102 | 4 | 5 | UBAAAA | GJLAAA | AAAAxx + 4202 | 6628 | 0 | 2 | 2 | 2 | 2 | 202 | 202 | 4202 | 4202 | 4 | 5 | QFAAAA | YUJAAA | AAAAxx + 4502 | 412 | 0 | 2 | 2 | 2 | 2 | 502 | 502 | 4502 | 4502 | 4 | 5 | ERAAAA | WPAAAA | AAAAxx + 4702 | 2520 | 0 | 2 | 2 | 2 | 2 | 702 | 702 | 4702 | 4702 | 4 | 5 | WYAAAA | YSDAAA | AAAAxx + 4902 | 1600 | 0 | 2 | 2 | 2 | 2 | 902 | 902 | 4902 | 4902 | 4 | 5 | OGAAAA | OJCAAA | AAAAxx + 5602 | 8796 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 602 | 5602 | 4 | 5 | MHAAAA | 
IANAAA | AAAAxx + 6002 | 8932 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 1002 | 6002 | 4 | 5 | WWAAAA | OFNAAA | AAAAxx + 6402 | 3808 | 0 | 2 | 2 | 2 | 2 | 402 | 402 | 1402 | 6402 | 4 | 5 | GMAAAA | MQFAAA | AAAAxx + 7602 | 1040 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 2602 | 7602 | 4 | 5 | KGAAAA | AOBAAA | AAAAxx + 7802 | 7508 | 0 | 2 | 2 | 2 | 2 | 802 | 1802 | 2802 | 7802 | 4 | 5 | COAAAA | UCLAAA | AAAAxx + 8002 | 9980 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 3002 | 8002 | 4 | 5 | UVAAAA | WTOAAA | AAAAxx + 8302 | 7800 | 0 | 2 | 2 | 2 | 2 | 302 | 302 | 3302 | 8302 | 4 | 5 | IHAAAA | AOLAAA | AAAAxx + 8402 | 5708 | 0 | 2 | 2 | 2 | 2 | 402 | 402 | 3402 | 8402 | 4 | 5 | ELAAAA | OLIAAA | AAAAxx + 8602 | 5440 | 0 | 2 | 2 | 2 | 2 | 602 | 602 | 3602 | 8602 | 4 | 5 | WSAAAA | GBIAAA | AAAAxx + 9502 | 1812 | 0 | 2 | 2 | 2 | 2 | 502 | 1502 | 4502 | 9502 | 4 | 5 | MBAAAA | SRCAAA | AAAAxx + 9602 | 9972 | 0 | 2 | 2 | 2 | 2 | 602 | 1602 | 4602 | 9602 | 4 | 5 | IFAAAA | OTOAAA | AAAAxx +(29 rows) + +-- too few params +EXECUTE q3('bool'); +ERROR: wrong number of parameters for prepared statement "q3" +DETAIL: Expected 5 parameters but got 1. +-- too many params +EXECUTE q3('bytea', 5::smallint, 10.5::float, false, 4::bigint, true); +ERROR: wrong number of parameters for prepared statement "q3" +DETAIL: Expected 5 parameters but got 6. +-- wrong param types +EXECUTE q3(5::smallint, 10.5::float, false, 4::bigint, 'bytea'); +ERROR: parameter $3 of type boolean cannot be coerced to the expected type double precision +LINE 1: EXECUTE q3(5::smallint, 10.5::float, false, 4::bigint, 'byte... + ^ +HINT: You will need to rewrite or cast the expression. +-- invalid type +PREPARE q4(nonexistenttype) AS SELECT $1; +ERROR: type "nonexistenttype" does not exist +LINE 1: PREPARE q4(nonexistenttype) AS SELECT $1; + ^ +-- create table as execute +PREPARE q5(int, text) AS + SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2 + ORDER BY unique1; +CREATE TEMPORARY TABLE q5_prep_results AS EXECUTE q5(200, 'DTAAAA'); +SELECT * FROM q5_prep_results; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 200 | 9441 | 0 | 0 | 0 | 0 | 0 | 200 | 200 | 200 | 200 | 0 | 1 | SHAAAA | DZNAAA | HHHHxx + 497 | 9092 | 1 | 1 | 7 | 17 | 97 | 497 | 497 | 497 | 497 | 194 | 195 | DTAAAA | SLNAAA | AAAAxx + 1173 | 6699 | 1 | 1 | 3 | 13 | 73 | 173 | 1173 | 1173 | 1173 | 146 | 147 | DTAAAA | RXJAAA | VVVVxx + 1849 | 8143 | 1 | 1 | 9 | 9 | 49 | 849 | 1849 | 1849 | 1849 | 98 | 99 | DTAAAA | FBMAAA | VVVVxx + 2525 | 64 | 1 | 1 | 5 | 5 | 25 | 525 | 525 | 2525 | 2525 | 50 | 51 | DTAAAA | MCAAAA | AAAAxx + 3201 | 7309 | 1 | 1 | 1 | 1 | 1 | 201 | 1201 | 3201 | 3201 | 2 | 3 | DTAAAA | DVKAAA | HHHHxx + 3877 | 4060 | 1 | 1 | 7 | 17 | 77 | 877 | 1877 | 3877 | 3877 | 154 | 155 | DTAAAA | EAGAAA | AAAAxx + 4553 | 4113 | 1 | 1 | 3 | 13 | 53 | 553 | 553 | 4553 | 4553 | 106 | 107 | DTAAAA | FCGAAA | HHHHxx + 5229 | 6407 | 1 | 1 | 9 | 9 | 29 | 229 | 1229 | 229 | 5229 | 58 | 59 | DTAAAA | LMJAAA | VVVVxx + 5905 | 9537 | 1 | 1 | 5 | 5 | 5 | 905 | 1905 | 905 | 5905 | 10 | 11 | DTAAAA | VCOAAA | HHHHxx + 6581 | 4686 | 1 | 1 | 1 | 1 | 81 | 581 | 581 | 1581 | 6581 | 162 | 163 | DTAAAA | GYGAAA | OOOOxx + 7257 | 1895 | 1 | 1 | 7 | 17 | 57 | 257 | 1257 | 2257 | 7257 | 114 | 115 | DTAAAA | XUCAAA | VVVVxx + 7933 | 4514 | 1 | 1 | 3 | 13 | 33 | 933 
| 1933 | 2933 | 7933 | 66 | 67 | DTAAAA | QRGAAA | OOOOxx + 8609 | 5918 | 1 | 1 | 9 | 9 | 9 | 609 | 609 | 3609 | 8609 | 18 | 19 | DTAAAA | QTIAAA | OOOOxx + 9285 | 8469 | 1 | 1 | 5 | 5 | 85 | 285 | 1285 | 4285 | 9285 | 170 | 171 | DTAAAA | TNMAAA | HHHHxx + 9961 | 2058 | 1 | 1 | 1 | 1 | 61 | 961 | 1961 | 4961 | 9961 | 122 | 123 | DTAAAA | EBDAAA | OOOOxx +(16 rows) + +CREATE TEMPORARY TABLE q5_prep_nodata AS EXECUTE q5(200, 'DTAAAA') + WITH NO DATA; +SELECT * FROM q5_prep_nodata; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- +(0 rows) + +-- unknown or unspecified parameter types: should succeed +PREPARE q6 AS + SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2; +PREPARE q7(unknown) AS + SELECT * FROM road WHERE thepath = $1; +-- DML statements +PREPARE q8 AS + UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1; +SELECT name, statement, parameter_types, result_types FROM pg_prepared_statements + ORDER BY name; + name | statement | parameter_types | result_types +------+------------------------------------------------------------------+----------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------- + q2 | PREPARE q2(text) AS +| {text} | {name,boolean,boolean} + | SELECT datname, datistemplate, datallowconn +| | + | FROM pg_database WHERE datname = $1; | | + q3 | PREPARE q3(text, int, float, boolean, smallint) AS +| {text,integer,"double precision",boolean,smallint} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name} + | SELECT * FROM tenk1 WHERE string4 = $1 AND (four = $2 OR+| | + | ten = $3::bigint OR true = $4 OR odd = $5::int) +| | + | ORDER BY unique1; | | + q5 | PREPARE q5(int, text) AS +| {integer,text} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name} + | SELECT * FROM tenk1 WHERE unique1 = $1 OR stringu1 = $2 +| | + | ORDER BY unique1; | | + q6 | PREPARE q6 AS +| {integer,name} | {integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,integer,name,name,name} + | SELECT * FROM tenk1 WHERE unique1 = $1 AND stringu1 = $2; | | + q7 | PREPARE q7(unknown) AS +| {path} | {text,path} + | SELECT * FROM road WHERE thepath = $1; | | + q8 | PREPARE q8 AS +| {integer,name} | + | UPDATE tenk1 SET stringu1 = $2 WHERE unique1 = $1; | | +(6 rows) + +-- test DEALLOCATE ALL; +DEALLOCATE ALL; +SELECT name, statement, parameter_types FROM pg_prepared_statements + ORDER BY name; + name | statement | parameter_types +------+-----------+----------------- +(0 rows) + diff --git a/src/test/regress/expected/prepared_xacts.out b/src/test/regress/expected/prepared_xacts.out new file mode 100644 index 0000000..ba8e3cc --- /dev/null +++ b/src/test/regress/expected/prepared_xacts.out @@ -0,0 +1,270 @@ +-- +-- PREPARED TRANSACTIONS (two-phase commit) +-- +-- We can't readily test persistence of prepared xacts within the +-- regression script framework, unfortunately. Note that a crash +-- isn't really needed ... stopping and starting the postmaster would +-- be enough, but we can't even do that here. 
+-- create a simple table that we'll use in the tests +CREATE TABLE pxtest1 (foobar VARCHAR(10)); +INSERT INTO pxtest1 VALUES ('aaa'); +-- Test PREPARE TRANSACTION +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE pxtest1 SET foobar = 'bbb' WHERE foobar = 'aaa'; +SELECT * FROM pxtest1; + foobar +-------- + bbb +(1 row) + +PREPARE TRANSACTION 'foo1'; +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +-- Test pg_prepared_xacts system view +SELECT gid FROM pg_prepared_xacts; + gid +------ + foo1 +(1 row) + +-- Test ROLLBACK PREPARED +ROLLBACK PREPARED 'foo1'; +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Test COMMIT PREPARED +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +INSERT INTO pxtest1 VALUES ('ddd'); +SELECT * FROM pxtest1; + foobar +-------- + aaa + ddd +(2 rows) + +PREPARE TRANSACTION 'foo2'; +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +COMMIT PREPARED 'foo2'; +SELECT * FROM pxtest1; + foobar +-------- + aaa + ddd +(2 rows) + +-- Test duplicate gids +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE pxtest1 SET foobar = 'eee' WHERE foobar = 'ddd'; +SELECT * FROM pxtest1; + foobar +-------- + aaa + eee +(2 rows) + +PREPARE TRANSACTION 'foo3'; +SELECT gid FROM pg_prepared_xacts; + gid +------ + foo3 +(1 row) + +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +INSERT INTO pxtest1 VALUES ('fff'); +-- This should fail, because the gid foo3 is already in use +PREPARE TRANSACTION 'foo3'; +ERROR: transaction identifier "foo3" is already in use +SELECT * FROM pxtest1; + foobar +-------- + aaa + ddd +(2 rows) + +ROLLBACK PREPARED 'foo3'; +SELECT * FROM pxtest1; + foobar +-------- + aaa + ddd +(2 rows) + +-- Test serialization failure (SSI) +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE pxtest1 SET foobar = 'eee' WHERE foobar = 'ddd'; +SELECT * FROM pxtest1; + foobar +-------- + aaa + eee +(2 rows) + +PREPARE TRANSACTION 'foo4'; +SELECT gid FROM pg_prepared_xacts; + gid +------ + foo4 +(1 row) + +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +SELECT * FROM pxtest1; + foobar +-------- + aaa + ddd +(2 rows) + +-- This should fail, because the two transactions have a write-skew anomaly +INSERT INTO pxtest1 VALUES ('fff'); +ERROR: could not serialize access due to read/write dependencies among transactions +DETAIL: Reason code: Canceled on identification as a pivot, during write. +HINT: The transaction might succeed if retried. 
+PREPARE TRANSACTION 'foo5'; +SELECT gid FROM pg_prepared_xacts; + gid +------ + foo4 +(1 row) + +ROLLBACK PREPARED 'foo4'; +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Clean up +DROP TABLE pxtest1; +-- Test detection of session-level and xact-level locks on same object +BEGIN; +SELECT pg_advisory_lock(1); + pg_advisory_lock +------------------ + +(1 row) + +SELECT pg_advisory_xact_lock_shared(1); + pg_advisory_xact_lock_shared +------------------------------ + +(1 row) + +PREPARE TRANSACTION 'foo6'; -- fails +ERROR: cannot PREPARE while holding both session-level and transaction-level locks on the same object +-- Test subtransactions +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; + CREATE TABLE pxtest2 (a int); + INSERT INTO pxtest2 VALUES (1); + SAVEPOINT a; + INSERT INTO pxtest2 VALUES (2); + ROLLBACK TO a; + SAVEPOINT b; + INSERT INTO pxtest2 VALUES (3); +PREPARE TRANSACTION 'regress-one'; +CREATE TABLE pxtest3(fff int); +-- Test shared invalidation +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; + DROP TABLE pxtest3; + CREATE TABLE pxtest4 (a int); + INSERT INTO pxtest4 VALUES (1); + INSERT INTO pxtest4 VALUES (2); + DECLARE foo CURSOR FOR SELECT * FROM pxtest4; + -- Fetch 1 tuple, keeping the cursor open + FETCH 1 FROM foo; + a +--- + 1 +(1 row) + +PREPARE TRANSACTION 'regress-two'; +-- No such cursor +FETCH 1 FROM foo; +ERROR: cursor "foo" does not exist +-- Table doesn't exist, the creation hasn't been committed yet +SELECT * FROM pxtest2; +ERROR: relation "pxtest2" does not exist +LINE 1: SELECT * FROM pxtest2; + ^ +-- There should be two prepared transactions +SELECT gid FROM pg_prepared_xacts; + gid +------------- + regress-one + regress-two +(2 rows) + +-- pxtest3 should be locked because of the pending DROP +begin; +lock table pxtest3 in access share mode nowait; +ERROR: could not obtain lock on relation "pxtest3" +rollback; +-- Disconnect, we will continue testing in a different backend +\c - +-- There should still be two prepared transactions +SELECT gid FROM pg_prepared_xacts; + gid +------------- + regress-one + regress-two +(2 rows) + +-- pxtest3 should still be locked because of the pending DROP +begin; +lock table pxtest3 in access share mode nowait; +ERROR: could not obtain lock on relation "pxtest3" +rollback; +-- Commit table creation +COMMIT PREPARED 'regress-one'; +\d pxtest2 + Table "public.pxtest2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + +SELECT * FROM pxtest2; + a +--- + 1 + 3 +(2 rows) + +-- There should be one prepared transaction +SELECT gid FROM pg_prepared_xacts; + gid +------------- + regress-two +(1 row) + +-- Commit table drop +COMMIT PREPARED 'regress-two'; +SELECT * FROM pxtest3; +ERROR: relation "pxtest3" does not exist +LINE 1: SELECT * FROM pxtest3; + ^ +-- There should be no prepared transactions +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Clean up +DROP TABLE pxtest2; +DROP TABLE pxtest3; -- will still be there if prepared xacts are disabled +ERROR: table "pxtest3" does not exist +DROP TABLE pxtest4; diff --git a/src/test/regress/expected/prepared_xacts_1.out b/src/test/regress/expected/prepared_xacts_1.out new file mode 100644 index 0000000..2cd50ad --- /dev/null +++ b/src/test/regress/expected/prepared_xacts_1.out @@ -0,0 +1,266 @@ +-- +-- PREPARED TRANSACTIONS (two-phase commit) +-- +-- We can't readily test persistence of prepared xacts within the +-- regression script framework, unfortunately. 
Note that a crash +-- isn't really needed ... stopping and starting the postmaster would +-- be enough, but we can't even do that here. +-- create a simple table that we'll use in the tests +CREATE TABLE pxtest1 (foobar VARCHAR(10)); +INSERT INTO pxtest1 VALUES ('aaa'); +-- Test PREPARE TRANSACTION +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE pxtest1 SET foobar = 'bbb' WHERE foobar = 'aaa'; +SELECT * FROM pxtest1; + foobar +-------- + bbb +(1 row) + +PREPARE TRANSACTION 'foo1'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +-- Test pg_prepared_xacts system view +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Test ROLLBACK PREPARED +ROLLBACK PREPARED 'foo1'; +ERROR: prepared transaction with identifier "foo1" does not exist +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Test COMMIT PREPARED +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +INSERT INTO pxtest1 VALUES ('ddd'); +SELECT * FROM pxtest1; + foobar +-------- + aaa + ddd +(2 rows) + +PREPARE TRANSACTION 'foo2'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +COMMIT PREPARED 'foo2'; +ERROR: prepared transaction with identifier "foo2" does not exist +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +-- Test duplicate gids +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE pxtest1 SET foobar = 'eee' WHERE foobar = 'ddd'; +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +PREPARE TRANSACTION 'foo3'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +INSERT INTO pxtest1 VALUES ('fff'); +-- This should fail, because the gid foo3 is already in use +PREPARE TRANSACTION 'foo3'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +ROLLBACK PREPARED 'foo3'; +ERROR: prepared transaction with identifier "foo3" does not exist +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +-- Test serialization failure (SSI) +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE pxtest1 SET foobar = 'eee' WHERE foobar = 'ddd'; +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +PREPARE TRANSACTION 'foo4'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +SELECT * FROM pxtest1; + foobar +-------- + aaa +(1 row) + +-- This should fail, because the two transactions have a write-skew anomaly +INSERT INTO pxtest1 VALUES ('fff'); +PREPARE TRANSACTION 'foo5'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. 
+SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +ROLLBACK PREPARED 'foo4'; +ERROR: prepared transaction with identifier "foo4" does not exist +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Clean up +DROP TABLE pxtest1; +-- Test detection of session-level and xact-level locks on same object +BEGIN; +SELECT pg_advisory_lock(1); + pg_advisory_lock +------------------ + +(1 row) + +SELECT pg_advisory_xact_lock_shared(1); + pg_advisory_xact_lock_shared +------------------------------ + +(1 row) + +PREPARE TRANSACTION 'foo6'; -- fails +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +-- Test subtransactions +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; + CREATE TABLE pxtest2 (a int); + INSERT INTO pxtest2 VALUES (1); + SAVEPOINT a; + INSERT INTO pxtest2 VALUES (2); + ROLLBACK TO a; + SAVEPOINT b; + INSERT INTO pxtest2 VALUES (3); +PREPARE TRANSACTION 'regress-one'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +CREATE TABLE pxtest3(fff int); +-- Test shared invalidation +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; + DROP TABLE pxtest3; + CREATE TABLE pxtest4 (a int); + INSERT INTO pxtest4 VALUES (1); + INSERT INTO pxtest4 VALUES (2); + DECLARE foo CURSOR FOR SELECT * FROM pxtest4; + -- Fetch 1 tuple, keeping the cursor open + FETCH 1 FROM foo; + a +--- + 1 +(1 row) + +PREPARE TRANSACTION 'regress-two'; +ERROR: prepared transactions are disabled +HINT: Set max_prepared_transactions to a nonzero value. +-- No such cursor +FETCH 1 FROM foo; +ERROR: cursor "foo" does not exist +-- Table doesn't exist, the creation hasn't been committed yet +SELECT * FROM pxtest2; +ERROR: relation "pxtest2" does not exist +LINE 1: SELECT * FROM pxtest2; + ^ +-- There should be two prepared transactions +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- pxtest3 should be locked because of the pending DROP +begin; +lock table pxtest3 in access share mode nowait; +rollback; +-- Disconnect, we will continue testing in a different backend +\c - +-- There should still be two prepared transactions +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- pxtest3 should still be locked because of the pending DROP +begin; +lock table pxtest3 in access share mode nowait; +rollback; +-- Commit table creation +COMMIT PREPARED 'regress-one'; +ERROR: prepared transaction with identifier "regress-one" does not exist +\d pxtest2 +SELECT * FROM pxtest2; +ERROR: relation "pxtest2" does not exist +LINE 1: SELECT * FROM pxtest2; + ^ +-- There should be one prepared transaction +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Commit table drop +COMMIT PREPARED 'regress-two'; +ERROR: prepared transaction with identifier "regress-two" does not exist +SELECT * FROM pxtest3; + fff +----- +(0 rows) + +-- There should be no prepared transactions +SELECT gid FROM pg_prepared_xacts; + gid +----- +(0 rows) + +-- Clean up +DROP TABLE pxtest2; +ERROR: table "pxtest2" does not exist +DROP TABLE pxtest3; -- will still be there if prepared xacts are disabled +DROP TABLE pxtest4; +ERROR: table "pxtest4" does not exist diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out new file mode 100644 index 0000000..fbb0489 --- /dev/null +++ b/src/test/regress/expected/privileges.out @@ -0,0 +1,2915 @@ +-- +-- Test access privileges +-- +-- Clean up in case a prior regression run failed +-- Suppress NOTICE messages when users/groups don't 
exist +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_priv_group1; +DROP ROLE IF EXISTS regress_priv_group2; +DROP ROLE IF EXISTS regress_priv_user1; +DROP ROLE IF EXISTS regress_priv_user2; +DROP ROLE IF EXISTS regress_priv_user3; +DROP ROLE IF EXISTS regress_priv_user4; +DROP ROLE IF EXISTS regress_priv_user5; +DROP ROLE IF EXISTS regress_priv_user6; +DROP ROLE IF EXISTS regress_priv_user7; +SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; + lo_unlink +----------- +(0 rows) + +RESET client_min_messages; +-- test proper begins here +CREATE USER regress_priv_user1; +CREATE USER regress_priv_user2; +CREATE USER regress_priv_user3; +CREATE USER regress_priv_user4; +CREATE USER regress_priv_user5; +CREATE USER regress_priv_user5; -- duplicate +ERROR: role "regress_priv_user5" already exists +CREATE USER regress_priv_user6; +CREATE USER regress_priv_user7; +CREATE USER regress_priv_user8; +CREATE USER regress_priv_user9; +CREATE USER regress_priv_user10; +CREATE ROLE regress_priv_role; +-- circular ADMIN OPTION grants should be disallowed +GRANT regress_priv_user1 TO regress_priv_user2 WITH ADMIN OPTION; +GRANT regress_priv_user1 TO regress_priv_user3 WITH ADMIN OPTION GRANTED BY regress_priv_user2; +GRANT regress_priv_user1 TO regress_priv_user2 WITH ADMIN OPTION GRANTED BY regress_priv_user3; +ERROR: ADMIN option cannot be granted back to your own grantor +-- need CASCADE to revoke grant or admin option if dependent grants exist +REVOKE ADMIN OPTION FOR regress_priv_user1 FROM regress_priv_user2; -- fail +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. +REVOKE regress_priv_user1 FROM regress_priv_user2; -- fail +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. 
+SELECT member::regrole, admin_option FROM pg_auth_members WHERE roleid = 'regress_priv_user1'::regrole; + member | admin_option +--------------------+-------------- + regress_priv_user2 | t + regress_priv_user3 | t +(2 rows) + +BEGIN; +REVOKE ADMIN OPTION FOR regress_priv_user1 FROM regress_priv_user2 CASCADE; +SELECT member::regrole, admin_option FROM pg_auth_members WHERE roleid = 'regress_priv_user1'::regrole; + member | admin_option +--------------------+-------------- + regress_priv_user2 | f +(1 row) + +ROLLBACK; +REVOKE regress_priv_user1 FROM regress_priv_user2 CASCADE; +SELECT member::regrole, admin_option FROM pg_auth_members WHERE roleid = 'regress_priv_user1'::regrole; + member | admin_option +--------+-------------- +(0 rows) + +-- inferred grantor must be a role with ADMIN OPTION +GRANT regress_priv_user1 TO regress_priv_user2 WITH ADMIN OPTION; +GRANT regress_priv_user2 TO regress_priv_user3; +SET ROLE regress_priv_user3; +GRANT regress_priv_user1 TO regress_priv_user4; +SELECT grantor::regrole FROM pg_auth_members WHERE roleid = 'regress_priv_user1'::regrole and member = 'regress_priv_user4'::regrole; + grantor +-------------------- + regress_priv_user2 +(1 row) + +RESET ROLE; +REVOKE regress_priv_user2 FROM regress_priv_user3; +REVOKE regress_priv_user1 FROM regress_priv_user2 CASCADE; +-- test GRANTED BY with DROP OWNED and REASSIGN OWNED +GRANT regress_priv_user1 TO regress_priv_user2 WITH ADMIN OPTION; +GRANT regress_priv_user1 TO regress_priv_user3 GRANTED BY regress_priv_user2; +DROP ROLE regress_priv_user2; -- fail, dependency +ERROR: role "regress_priv_user2" cannot be dropped because some objects depend on it +DETAIL: privileges for membership of role regress_priv_user3 in role regress_priv_user1 +REASSIGN OWNED BY regress_priv_user2 TO regress_priv_user4; +DROP ROLE regress_priv_user2; -- still fail, REASSIGN OWNED doesn't help +ERROR: role "regress_priv_user2" cannot be dropped because some objects depend on it +DETAIL: privileges for membership of role regress_priv_user3 in role regress_priv_user1 +DROP OWNED BY regress_priv_user2; +DROP ROLE regress_priv_user2; -- ok now, DROP OWNED does the job +-- test that removing granted role or grantee role removes dependency +GRANT regress_priv_user1 TO regress_priv_user3 WITH ADMIN OPTION; +GRANT regress_priv_user1 TO regress_priv_user4 GRANTED BY regress_priv_user3; +DROP ROLE regress_priv_user3; -- should fail, dependency +ERROR: role "regress_priv_user3" cannot be dropped because some objects depend on it +DETAIL: privileges for membership of role regress_priv_user4 in role regress_priv_user1 +DROP ROLE regress_priv_user4; -- ok +DROP ROLE regress_priv_user3; -- ok now +GRANT regress_priv_user1 TO regress_priv_user5 WITH ADMIN OPTION; +GRANT regress_priv_user1 TO regress_priv_user6 GRANTED BY regress_priv_user5; +DROP ROLE regress_priv_user5; -- should fail, dependency +ERROR: role "regress_priv_user5" cannot be dropped because some objects depend on it +DETAIL: privileges for membership of role regress_priv_user6 in role regress_priv_user1 +DROP ROLE regress_priv_user1, regress_priv_user5; -- ok, despite order +-- recreate the roles we just dropped +CREATE USER regress_priv_user1; +CREATE USER regress_priv_user2; +CREATE USER regress_priv_user3; +CREATE USER regress_priv_user4; +CREATE USER regress_priv_user5; +GRANT pg_read_all_data TO regress_priv_user6; +GRANT pg_write_all_data TO regress_priv_user7; +GRANT pg_read_all_settings TO regress_priv_user8 WITH ADMIN OPTION; +GRANT regress_priv_user9 TO 
regress_priv_user8; +SET SESSION AUTHORIZATION regress_priv_user8; +GRANT pg_read_all_settings TO regress_priv_user9 WITH ADMIN OPTION; +SET SESSION AUTHORIZATION regress_priv_user9; +GRANT pg_read_all_settings TO regress_priv_user10; +SET SESSION AUTHORIZATION regress_priv_user8; +REVOKE pg_read_all_settings FROM regress_priv_user10 GRANTED BY regress_priv_user9; +REVOKE ADMIN OPTION FOR pg_read_all_settings FROM regress_priv_user9; +REVOKE pg_read_all_settings FROM regress_priv_user9; +RESET SESSION AUTHORIZATION; +REVOKE regress_priv_user9 FROM regress_priv_user8; +REVOKE ADMIN OPTION FOR pg_read_all_settings FROM regress_priv_user8; +SET SESSION AUTHORIZATION regress_priv_user8; +SET ROLE pg_read_all_settings; +RESET ROLE; +RESET SESSION AUTHORIZATION; +REVOKE SET OPTION FOR pg_read_all_settings FROM regress_priv_user8; +GRANT pg_read_all_stats TO regress_priv_user8 WITH SET FALSE; +SET SESSION AUTHORIZATION regress_priv_user8; +SET ROLE pg_read_all_settings; -- fail, no SET option any more +ERROR: permission denied to set role "pg_read_all_settings" +SET ROLE pg_read_all_stats; -- fail, granted without SET option +ERROR: permission denied to set role "pg_read_all_stats" +RESET ROLE; +RESET SESSION AUTHORIZATION; +REVOKE pg_read_all_settings FROM regress_priv_user8; +DROP USER regress_priv_user10; +DROP USER regress_priv_user9; +DROP USER regress_priv_user8; +CREATE GROUP regress_priv_group1; +CREATE GROUP regress_priv_group2 WITH ADMIN regress_priv_user1 USER regress_priv_user2; +ALTER GROUP regress_priv_group1 ADD USER regress_priv_user4; +GRANT regress_priv_group2 TO regress_priv_user2 GRANTED BY regress_priv_user1; +SET SESSION AUTHORIZATION regress_priv_user1; +ALTER GROUP regress_priv_group2 ADD USER regress_priv_user2; +NOTICE: role "regress_priv_user2" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user1" +ALTER GROUP regress_priv_group2 ADD USER regress_priv_user2; -- duplicate +NOTICE: role "regress_priv_user2" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user1" +ALTER GROUP regress_priv_group2 DROP USER regress_priv_user2; +ALTER USER regress_priv_user2 PASSWORD 'verysecret'; -- not permitted +ERROR: permission denied to alter role +DETAIL: To change another role's password, the current user must have the CREATEROLE attribute and the ADMIN option on the role. +RESET SESSION AUTHORIZATION; +ALTER GROUP regress_priv_group2 DROP USER regress_priv_user2; +REVOKE ADMIN OPTION FOR regress_priv_group2 FROM regress_priv_user1; +GRANT regress_priv_group2 TO regress_priv_user4 WITH ADMIN OPTION; +-- prepare non-leakproof function for later +CREATE FUNCTION leak(integer,integer) RETURNS boolean + AS 'int4lt' + LANGUAGE internal IMMUTABLE STRICT; -- but deliberately not LEAKPROOF +ALTER FUNCTION leak(integer,integer) OWNER TO regress_priv_user1; +-- test owner privileges +GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY regress_priv_role; -- error, doesn't have ADMIN OPTION +ERROR: permission denied to grant privileges as role "regress_priv_role" +DETAIL: The grantor must have the ADMIN option on role "regress_priv_role". 
+GRANT regress_priv_role TO regress_priv_user1 WITH ADMIN OPTION GRANTED BY CURRENT_ROLE; +REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY foo; -- error +ERROR: role "foo" does not exist +REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY regress_priv_user2; -- warning, noop +WARNING: role "regress_priv_user1" has not been granted membership in role "regress_priv_role" by role "regress_priv_user2" +REVOKE ADMIN OPTION FOR regress_priv_role FROM regress_priv_user1 GRANTED BY CURRENT_USER; +REVOKE regress_priv_role FROM regress_priv_user1 GRANTED BY CURRENT_ROLE; +DROP ROLE regress_priv_role; +SET SESSION AUTHORIZATION regress_priv_user1; +SELECT session_user, current_user; + session_user | current_user +--------------------+-------------------- + regress_priv_user1 | regress_priv_user1 +(1 row) + +CREATE TABLE atest1 ( a int, b text ); +SELECT * FROM atest1; + a | b +---+--- +(0 rows) + +INSERT INTO atest1 VALUES (1, 'one'); +DELETE FROM atest1; +UPDATE atest1 SET a = 1 WHERE b = 'blech'; +TRUNCATE atest1; +BEGIN; +LOCK atest1 IN ACCESS EXCLUSIVE MODE; +COMMIT; +REVOKE ALL ON atest1 FROM PUBLIC; +SELECT * FROM atest1; + a | b +---+--- +(0 rows) + +GRANT ALL ON atest1 TO regress_priv_user2; +GRANT SELECT ON atest1 TO regress_priv_user3, regress_priv_user4; +SELECT * FROM atest1; + a | b +---+--- +(0 rows) + +CREATE TABLE atest2 (col1 varchar(10), col2 boolean); +GRANT SELECT ON atest2 TO regress_priv_user2; +GRANT UPDATE ON atest2 TO regress_priv_user3; +GRANT INSERT ON atest2 TO regress_priv_user4 GRANTED BY CURRENT_USER; +GRANT TRUNCATE ON atest2 TO regress_priv_user5 GRANTED BY CURRENT_ROLE; +GRANT TRUNCATE ON atest2 TO regress_priv_user4 GRANTED BY regress_priv_user5; -- error +ERROR: grantor must be current user +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT session_user, current_user; + session_user | current_user +--------------------+-------------------- + regress_priv_user2 | regress_priv_user2 +(1 row) + +-- try various combinations of queries on atest1 and atest2 +SELECT * FROM atest1; -- ok + a | b +---+--- +(0 rows) + +SELECT * FROM atest2; -- ok + col1 | col2 +------+------ +(0 rows) + +INSERT INTO atest1 VALUES (2, 'two'); -- ok +INSERT INTO atest2 VALUES ('foo', true); -- fail +ERROR: permission denied for table atest2 +INSERT INTO atest1 SELECT 1, b FROM atest1; -- ok +UPDATE atest1 SET a = 1 WHERE a = 2; -- ok +UPDATE atest2 SET col2 = NOT col2; -- fail +ERROR: permission denied for table atest2 +SELECT * FROM atest1 FOR UPDATE; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +SELECT * FROM atest2 FOR UPDATE; -- fail +ERROR: permission denied for table atest2 +DELETE FROM atest2; -- fail +ERROR: permission denied for table atest2 +TRUNCATE atest2; -- fail +ERROR: permission denied for table atest2 +BEGIN; +LOCK atest2 IN ACCESS EXCLUSIVE MODE; -- fail +ERROR: permission denied for table atest2 +COMMIT; +COPY atest2 FROM stdin; -- fail +ERROR: permission denied for table atest2 +GRANT ALL ON atest1 TO PUBLIC; -- fail +WARNING: no privileges were granted for "atest1" +-- checks in subquery, both ok +SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); + a | b +---+--- +(0 rows) + +SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); + col1 | col2 +------+------ +(0 rows) + +SET SESSION AUTHORIZATION regress_priv_user6; +SELECT * FROM atest1; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +SELECT * FROM atest2; -- ok + col1 | col2 +------+------ +(0 rows) + +INSERT INTO 
atest2 VALUES ('foo', true); -- fail +ERROR: permission denied for table atest2 +SET SESSION AUTHORIZATION regress_priv_user7; +SELECT * FROM atest1; -- fail +ERROR: permission denied for table atest1 +SELECT * FROM atest2; -- fail +ERROR: permission denied for table atest2 +INSERT INTO atest2 VALUES ('foo', true); -- ok +UPDATE atest2 SET col2 = true; -- ok +DELETE FROM atest2; -- ok +-- Make sure we are not able to modify system catalogs +UPDATE pg_catalog.pg_class SET relname = '123'; -- fail +ERROR: permission denied for table pg_class +DELETE FROM pg_catalog.pg_class; -- fail +ERROR: permission denied for table pg_class +UPDATE pg_toast.pg_toast_1213 SET chunk_id = 1; -- fail +ERROR: permission denied for table pg_toast_1213 +SET SESSION AUTHORIZATION regress_priv_user3; +SELECT session_user, current_user; + session_user | current_user +--------------------+-------------------- + regress_priv_user3 | regress_priv_user3 +(1 row) + +SELECT * FROM atest1; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +SELECT * FROM atest2; -- fail +ERROR: permission denied for table atest2 +INSERT INTO atest1 VALUES (2, 'two'); -- fail +ERROR: permission denied for table atest1 +INSERT INTO atest2 VALUES ('foo', true); -- fail +ERROR: permission denied for table atest2 +INSERT INTO atest1 SELECT 1, b FROM atest1; -- fail +ERROR: permission denied for table atest1 +UPDATE atest1 SET a = 1 WHERE a = 2; -- fail +ERROR: permission denied for table atest1 +UPDATE atest2 SET col2 = NULL; -- ok +UPDATE atest2 SET col2 = NOT col2; -- fails; requires SELECT on atest2 +ERROR: permission denied for table atest2 +UPDATE atest2 SET col2 = true FROM atest1 WHERE atest1.a = 5; -- ok +SELECT * FROM atest1 FOR UPDATE; -- fail +ERROR: permission denied for table atest1 +SELECT * FROM atest2 FOR UPDATE; -- fail +ERROR: permission denied for table atest2 +DELETE FROM atest2; -- fail +ERROR: permission denied for table atest2 +TRUNCATE atest2; -- fail +ERROR: permission denied for table atest2 +BEGIN; +LOCK atest2 IN ACCESS EXCLUSIVE MODE; -- ok +COMMIT; +COPY atest2 FROM stdin; -- fail +ERROR: permission denied for table atest2 +-- checks in subquery, both fail +SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); +ERROR: permission denied for table atest2 +SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); +ERROR: permission denied for table atest2 +SET SESSION AUTHORIZATION regress_priv_user4; +COPY atest2 FROM stdin; -- ok +SELECT * FROM atest1; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +-- test leaky-function protections in selfuncs +-- regress_priv_user1 will own a table and provide views for it. +SET SESSION AUTHORIZATION regress_priv_user1; +CREATE TABLE atest12 as + SELECT x AS a, 10001 - x AS b FROM generate_series(1,10000) x; +CREATE INDEX ON atest12 (a); +CREATE INDEX ON atest12 (abs(a)); +-- results below depend on having quite accurate stats for atest12, so... +ALTER TABLE atest12 SET (autovacuum_enabled = off); +SET default_statistics_target = 10000; +VACUUM ANALYZE atest12; +RESET default_statistics_target; +CREATE OPERATOR <<< (procedure = leak, leftarg = integer, rightarg = integer, + restrict = scalarltsel); +-- views with leaky operator +CREATE VIEW atest12v AS + SELECT * FROM atest12 WHERE b <<< 5; +CREATE VIEW atest12sbv WITH (security_barrier=true) AS + SELECT * FROM atest12 WHERE b <<< 5; +GRANT SELECT ON atest12v TO PUBLIC; +GRANT SELECT ON atest12sbv TO PUBLIC; +-- This plan should use nestloop, knowing that few rows will be selected. 
+EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Seq Scan on atest12 atest12_1 + Filter: (b <<< 5) + -> Index Scan using atest12_a_idx on atest12 + Index Cond: (a = atest12_1.b) + Filter: (b <<< 5) +(6 rows) + +-- And this one. +EXPLAIN (COSTS OFF) SELECT * FROM atest12 x, atest12 y + WHERE x.a = y.b and abs(y.a) <<< 5; + QUERY PLAN +--------------------------------------------------- + Nested Loop + -> Seq Scan on atest12 y + Filter: (abs(a) <<< 5) + -> Index Scan using atest12_a_idx on atest12 x + Index Cond: (a = y.b) +(5 rows) + +-- This should also be a nestloop, but the security barrier forces the inner +-- scan to be materialized +EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv x, atest12sbv y WHERE x.a = y.b; + QUERY PLAN +------------------------------------------- + Nested Loop + Join Filter: (atest12.a = atest12_1.b) + -> Seq Scan on atest12 + Filter: (b <<< 5) + -> Materialize + -> Seq Scan on atest12 atest12_1 + Filter: (b <<< 5) +(7 rows) + +-- Check if regress_priv_user2 can break security. +SET SESSION AUTHORIZATION regress_priv_user2; +CREATE FUNCTION leak2(integer,integer) RETURNS boolean + AS $$begin raise notice 'leak % %', $1, $2; return $1 > $2; end$$ + LANGUAGE plpgsql immutable; +CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer, + restrict = scalargtsel); +-- This should not show any "leak" notices before failing. +EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; +ERROR: permission denied for table atest12 +-- These plans should continue to use a nestloop, since they execute with the +-- privileges of the view owner. +EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Seq Scan on atest12 atest12_1 + Filter: (b <<< 5) + -> Index Scan using atest12_a_idx on atest12 + Index Cond: (a = atest12_1.b) + Filter: (b <<< 5) +(6 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv x, atest12sbv y WHERE x.a = y.b; + QUERY PLAN +------------------------------------------- + Nested Loop + Join Filter: (atest12.a = atest12_1.b) + -> Seq Scan on atest12 + Filter: (b <<< 5) + -> Materialize + -> Seq Scan on atest12 atest12_1 + Filter: (b <<< 5) +(7 rows) + +-- A non-security barrier view does not guard against information leakage. +EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y + WHERE x.a = y.b and abs(y.a) <<< 5; + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Seq Scan on atest12 atest12_1 + Filter: ((b <<< 5) AND (abs(a) <<< 5)) + -> Index Scan using atest12_a_idx on atest12 + Index Cond: (a = atest12_1.b) + Filter: (b <<< 5) +(6 rows) + +-- But a security barrier view isolates the leaky operator. +EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv x, atest12sbv y + WHERE x.a = y.b and abs(y.a) <<< 5; + QUERY PLAN +------------------------------------- + Nested Loop + Join Filter: (atest12_1.a = y.b) + -> Subquery Scan on y + Filter: (abs(y.a) <<< 5) + -> Seq Scan on atest12 + Filter: (b <<< 5) + -> Seq Scan on atest12 atest12_1 + Filter: (b <<< 5) +(8 rows) + +-- Now regress_priv_user1 grants sufficient access to regress_priv_user2. +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (a, b) ON atest12 TO PUBLIC; +SET SESSION AUTHORIZATION regress_priv_user2; +-- regress_priv_user2 should continue to get a good row estimate. 
+EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; + QUERY PLAN +------------------------------------------------- + Nested Loop + -> Seq Scan on atest12 atest12_1 + Filter: (b <<< 5) + -> Index Scan using atest12_a_idx on atest12 + Index Cond: (a = atest12_1.b) + Filter: (b <<< 5) +(6 rows) + +-- But not for this, due to lack of table-wide permissions needed +-- to make use of the expression index's statistics. +EXPLAIN (COSTS OFF) SELECT * FROM atest12 x, atest12 y + WHERE x.a = y.b and abs(y.a) <<< 5; + QUERY PLAN +-------------------------------------- + Hash Join + Hash Cond: (x.a = y.b) + -> Seq Scan on atest12 x + -> Hash + -> Seq Scan on atest12 y + Filter: (abs(a) <<< 5) +(6 rows) + +-- clean up (regress_priv_user1's objects are all dropped later) +DROP FUNCTION leak2(integer, integer) CASCADE; +NOTICE: drop cascades to operator >>>(integer,integer) +-- groups +SET SESSION AUTHORIZATION regress_priv_user3; +CREATE TABLE atest3 (one int, two int, three int); +GRANT DELETE ON atest3 TO GROUP regress_priv_group2; +SET SESSION AUTHORIZATION regress_priv_user1; +SELECT * FROM atest3; -- fail +ERROR: permission denied for table atest3 +DELETE FROM atest3; -- ok +BEGIN; +RESET SESSION AUTHORIZATION; +ALTER ROLE regress_priv_user1 NOINHERIT; +SET SESSION AUTHORIZATION regress_priv_user1; +SAVEPOINT s1; +DELETE FROM atest3; -- ok because grant-level option is unchanged +ROLLBACK TO s1; +RESET SESSION AUTHORIZATION; +GRANT regress_priv_group2 TO regress_priv_user1 WITH INHERIT FALSE; +SET SESSION AUTHORIZATION regress_priv_user1; +DELETE FROM atest3; -- fail +ERROR: permission denied for table atest3 +ROLLBACK TO s1; +RESET SESSION AUTHORIZATION; +REVOKE INHERIT OPTION FOR regress_priv_group2 FROM regress_priv_user1; +SET SESSION AUTHORIZATION regress_priv_user1; +DELETE FROM atest3; -- also fail +ERROR: permission denied for table atest3 +ROLLBACK; +-- views +SET SESSION AUTHORIZATION regress_priv_user3; +CREATE VIEW atestv1 AS SELECT * FROM atest1; -- ok +/* The next *should* fail, but it's not implemented that way yet. */ +CREATE VIEW atestv2 AS SELECT * FROM atest2; +CREATE VIEW atestv3 AS SELECT * FROM atest3; -- ok +/* Empty view is a corner case that failed in 9.2. */ +CREATE VIEW atestv0 AS SELECT 0 as x WHERE false; -- ok +SELECT * FROM atestv1; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +SELECT * FROM atestv2; -- fail +ERROR: permission denied for table atest2 +GRANT SELECT ON atestv1, atestv3 TO regress_priv_user4; +GRANT SELECT ON atestv2 TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT * FROM atestv1; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +SELECT * FROM atestv2; -- fail +ERROR: permission denied for view atestv2 +SELECT * FROM atestv3; -- ok + one | two | three +-----+-----+------- +(0 rows) + +SELECT * FROM atestv0; -- fail +ERROR: permission denied for view atestv0 +-- Appendrels excluded by constraints failed to check permissions in 8.4-9.2. 
+select * from + ((select a.q1 as x from int8_tbl a offset 0) + union all + (select b.q2 as x from int8_tbl b offset 0)) ss +where false; +ERROR: permission denied for table int8_tbl +set constraint_exclusion = on; +select * from + ((select a.q1 as x, random() from int8_tbl a where q1 > 0) + union all + (select b.q2 as x, random() from int8_tbl b where q2 > 0)) ss +where x < 0; +ERROR: permission denied for table int8_tbl +reset constraint_exclusion; +CREATE VIEW atestv4 AS SELECT * FROM atestv3; -- nested view +SELECT * FROM atestv4; -- ok + one | two | three +-----+-----+------- +(0 rows) + +GRANT SELECT ON atestv4 TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; +-- Two complex cases: +SELECT * FROM atestv3; -- fail +ERROR: permission denied for view atestv3 +SELECT * FROM atestv4; -- ok (even though regress_priv_user2 cannot access underlying atestv3) + one | two | three +-----+-----+------- +(0 rows) + +SELECT * FROM atest2; -- ok + col1 | col2 +------+------ + bar | t +(1 row) + +SELECT * FROM atestv2; -- fail (even though regress_priv_user2 can access underlying atest2) +ERROR: permission denied for table atest2 +-- Test column level permissions +SET SESSION AUTHORIZATION regress_priv_user1; +CREATE TABLE atest5 (one int, two int unique, three int, four int unique); +CREATE TABLE atest6 (one int, two int, blue int); +GRANT SELECT (one), INSERT (two), UPDATE (three) ON atest5 TO regress_priv_user4; +GRANT ALL (one) ON atest5 TO regress_priv_user3; +INSERT INTO atest5 VALUES (1,2,3); +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT * FROM atest5; -- fail +ERROR: permission denied for table atest5 +SELECT one FROM atest5; -- ok + one +----- + 1 +(1 row) + +COPY atest5 (one) TO stdout; -- ok +1 +SELECT two FROM atest5; -- fail +ERROR: permission denied for table atest5 +COPY atest5 (two) TO stdout; -- fail +ERROR: permission denied for table atest5 +SELECT atest5 FROM atest5; -- fail +ERROR: permission denied for table atest5 +COPY atest5 (one,two) TO stdout; -- fail +ERROR: permission denied for table atest5 +SELECT 1 FROM atest5; -- ok + ?column? +---------- + 1 +(1 row) + +SELECT 1 FROM atest5 a JOIN atest5 b USING (one); -- ok + ?column? 
+---------- + 1 +(1 row) + +SELECT 1 FROM atest5 a JOIN atest5 b USING (two); -- fail +ERROR: permission denied for table atest5 +SELECT 1 FROM atest5 a NATURAL JOIN atest5 b; -- fail +ERROR: permission denied for table atest5 +SELECT * FROM (atest5 a JOIN atest5 b USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT j.* FROM (atest5 a JOIN atest5 b USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT (j.*) IS NULL FROM (atest5 a JOIN atest5 b USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT one FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)) j; -- ok + one +----- + 1 +(1 row) + +SELECT j.one FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)) j; -- ok + one +----- + 1 +(1 row) + +SELECT two FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT j.two FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT y FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT j.y FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)) j; -- fail +ERROR: permission denied for table atest5 +SELECT * FROM (atest5 a JOIN atest5 b USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT a.* FROM (atest5 a JOIN atest5 b USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT (a.*) IS NULL FROM (atest5 a JOIN atest5 b USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT two FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT a.two FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT y FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT b.y FROM (atest5 a JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT y FROM (atest5 a LEFT JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT b.y FROM (atest5 a LEFT JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT y FROM (atest5 a FULL JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT b.y FROM (atest5 a FULL JOIN atest5 b(one,x,y,z) USING (one)); -- fail +ERROR: permission denied for table atest5 +SELECT 1 FROM atest5 WHERE two = 2; -- fail +ERROR: permission denied for table atest5 +SELECT * FROM atest1, atest5; -- fail +ERROR: permission denied for table atest5 +SELECT atest1.* FROM atest1, atest5; -- ok + a | b +---+----- + 1 | two + 1 | two +(2 rows) + +SELECT atest1.*,atest5.one FROM atest1, atest5; -- ok + a | b | one +---+-----+----- + 1 | two | 1 + 1 | two | 1 +(2 rows) + +SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.two); -- fail +ERROR: permission denied for table atest5 +SELECT atest1.*,atest5.one FROM atest1 JOIN atest5 ON (atest1.a = atest5.one); -- ok + a | b | one +---+-----+----- + 1 | two | 1 + 1 | two | 1 +(2 rows) + +SELECT one, two FROM atest5; -- fail +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (one,two) ON atest6 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT one, two FROM atest5 NATURAL JOIN atest6; -- fail still +ERROR: permission denied for table atest5 +SET SESSION 
AUTHORIZATION regress_priv_user1; +GRANT SELECT (two) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now + one | two +-----+----- +(0 rows) + +-- test column-level privileges for INSERT and UPDATE +INSERT INTO atest5 (two) VALUES (3); -- ok +COPY atest5 FROM stdin; -- fail +ERROR: permission denied for table atest5 +COPY atest5 (two) FROM stdin; -- ok +INSERT INTO atest5 (three) VALUES (4); -- fail +ERROR: permission denied for table atest5 +INSERT INTO atest5 VALUES (5,5,5); -- fail +ERROR: permission denied for table atest5 +UPDATE atest5 SET three = 10; -- ok +UPDATE atest5 SET one = 8; -- fail +ERROR: permission denied for table atest5 +UPDATE atest5 SET three = 5, one = 2; -- fail +ERROR: permission denied for table atest5 +-- Check that column level privs are enforced in RETURNING +-- Ok. +INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10; +-- Error. No SELECT on column three. +INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.three; +ERROR: permission denied for table atest5 +-- Ok. May SELECT on column "one": +INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = 10 RETURNING atest5.one; + one +----- + +(1 row) + +-- Check that column level privileges are enforced for EXCLUDED +-- Ok. we may select one +INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.one; +-- Error. No select rights on three +INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set three = EXCLUDED.three; +ERROR: permission denied for table atest5 +INSERT INTO atest5(two) VALUES (6) ON CONFLICT (two) DO UPDATE set one = 8; -- fails (due to UPDATE) +ERROR: permission denied for table atest5 +INSERT INTO atest5(three) VALUES (4) ON CONFLICT (two) DO UPDATE set three = 10; -- fails (due to INSERT) +ERROR: permission denied for table atest5 +-- Check that the columns in the inference require select privileges +INSERT INTO atest5(four) VALUES (4); -- fail +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT INSERT (four) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- fails (due to SELECT) +ERROR: permission denied for table atest5 +INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- fails (due to SELECT) +ERROR: permission denied for table atest5 +INSERT INTO atest5(four) VALUES (4); -- ok +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (four) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +INSERT INTO atest5(four) VALUES (4) ON CONFLICT (four) DO UPDATE set three = 3; -- ok +INSERT INTO atest5(four) VALUES (4) ON CONFLICT ON CONSTRAINT atest5_four_key DO UPDATE set three = 3; -- ok +SET SESSION AUTHORIZATION regress_priv_user1; +REVOKE ALL (one) ON atest5 FROM regress_priv_user4; +GRANT SELECT (one,two,blue) ON atest6 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT one FROM atest5; -- fail +ERROR: permission denied for table atest5 +UPDATE atest5 SET one = 1; -- fail +ERROR: permission denied for table atest5 +SELECT atest6 FROM atest6; -- ok + atest6 +-------- +(0 rows) + +COPY atest6 TO stdout; -- ok +-- test column privileges with MERGE +SET SESSION AUTHORIZATION regress_priv_user1; +CREATE TABLE mtarget 
(a int, b text); +CREATE TABLE msource (a int, b text); +INSERT INTO mtarget VALUES (1, 'init1'), (2, 'init2'); +INSERT INTO msource VALUES (1, 'source1'), (2, 'source2'), (3, 'source3'); +GRANT SELECT (a) ON msource TO regress_priv_user4; +GRANT SELECT (a) ON mtarget TO regress_priv_user4; +GRANT INSERT (a,b) ON mtarget TO regress_priv_user4; +GRANT UPDATE (b) ON mtarget TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +-- +-- test source privileges +-- +-- fail (no SELECT priv on s.b) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = s.b +WHEN NOT MATCHED THEN + INSERT VALUES (a, NULL); +ERROR: permission denied for table msource +-- fail (s.b used in the INSERTed values) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = 'x' +WHEN NOT MATCHED THEN + INSERT VALUES (a, b); +ERROR: permission denied for table msource +-- fail (s.b used in the WHEN quals) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED AND s.b = 'x' THEN + UPDATE SET b = 'x' +WHEN NOT MATCHED THEN + INSERT VALUES (a, NULL); +ERROR: permission denied for table msource +-- this should be ok since only s.a is accessed +BEGIN; +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = 'ok' +WHEN NOT MATCHED THEN + INSERT VALUES (a, NULL); +ROLLBACK; +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT (b) ON msource TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +-- should now be ok +BEGIN; +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = s.b +WHEN NOT MATCHED THEN + INSERT VALUES (a, b); +ROLLBACK; +-- +-- test target privileges +-- +-- fail (no SELECT priv on t.b) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = t.b +WHEN NOT MATCHED THEN + INSERT VALUES (a, NULL); +ERROR: permission denied for table mtarget +-- fail (no UPDATE on t.a) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = s.b, a = t.a + 1 +WHEN NOT MATCHED THEN + INSERT VALUES (a, b); +ERROR: permission denied for table mtarget +-- fail (no SELECT on t.b) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED AND t.b IS NOT NULL THEN + UPDATE SET b = s.b +WHEN NOT MATCHED THEN + INSERT VALUES (a, b); +ERROR: permission denied for table mtarget +-- ok +BEGIN; +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED THEN + UPDATE SET b = s.b; +ROLLBACK; +-- fail (no DELETE) +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED AND t.b IS NOT NULL THEN + DELETE; +ERROR: permission denied for table mtarget +-- grant delete privileges +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT DELETE ON mtarget TO regress_priv_user4; +-- should be ok now +BEGIN; +MERGE INTO mtarget t USING msource s ON t.a = s.a +WHEN MATCHED AND t.b IS NOT NULL THEN + DELETE; +ROLLBACK; +-- check error reporting with column privs +SET SESSION AUTHORIZATION regress_priv_user1; +CREATE TABLE t1 (c1 int, c2 int, c3 int check (c3 < 5), primary key (c1, c2)); +GRANT SELECT (c1) ON t1 TO regress_priv_user2; +GRANT INSERT (c1, c2, c3) ON t1 TO regress_priv_user2; +GRANT UPDATE (c1, c2, c3) ON t1 TO regress_priv_user2; +-- seed data +INSERT INTO t1 VALUES (1, 1, 1); +INSERT INTO t1 VALUES (1, 2, 1); +INSERT INTO t1 VALUES (2, 1, 2); +INSERT INTO t1 VALUES (2, 2, 2); +INSERT INTO t1 VALUES (3, 1, 3); +SET SESSION AUTHORIZATION regress_priv_user2; +INSERT INTO t1 (c1, c2) VALUES (1, 1); -- 
fail, but row not shown +ERROR: duplicate key value violates unique constraint "t1_pkey" +UPDATE t1 SET c2 = 1; -- fail, but row not shown +ERROR: duplicate key value violates unique constraint "t1_pkey" +INSERT INTO t1 (c1, c2) VALUES (null, null); -- fail, but see columns being inserted +ERROR: null value in column "c1" of relation "t1" violates not-null constraint +DETAIL: Failing row contains (c1, c2) = (null, null). +INSERT INTO t1 (c3) VALUES (null); -- fail, but see columns being inserted or have SELECT +ERROR: null value in column "c1" of relation "t1" violates not-null constraint +DETAIL: Failing row contains (c1, c3) = (null, null). +INSERT INTO t1 (c1) VALUES (5); -- fail, but see columns being inserted or have SELECT +ERROR: null value in column "c2" of relation "t1" violates not-null constraint +DETAIL: Failing row contains (c1) = (5). +UPDATE t1 SET c3 = 10; -- fail, but see columns with SELECT rights, or being modified +ERROR: new row for relation "t1" violates check constraint "t1_c3_check" +DETAIL: Failing row contains (c1, c3) = (1, 10). +SET SESSION AUTHORIZATION regress_priv_user1; +DROP TABLE t1; +-- check error reporting with column privs on a partitioned table +CREATE TABLE errtst(a text, b text NOT NULL, c text, secret1 text, secret2 text) PARTITION BY LIST (a); +CREATE TABLE errtst_part_1(secret2 text, c text, a text, b text NOT NULL, secret1 text); +CREATE TABLE errtst_part_2(secret1 text, secret2 text, a text, c text, b text NOT NULL); +ALTER TABLE errtst ATTACH PARTITION errtst_part_1 FOR VALUES IN ('aaa'); +ALTER TABLE errtst ATTACH PARTITION errtst_part_2 FOR VALUES IN ('aaaa'); +GRANT SELECT (a, b, c) ON TABLE errtst TO regress_priv_user2; +GRANT UPDATE (a, b, c) ON TABLE errtst TO regress_priv_user2; +GRANT INSERT (a, b, c) ON TABLE errtst TO regress_priv_user2; +INSERT INTO errtst_part_1 (a, b, c, secret1, secret2) +VALUES ('aaa', 'bbb', 'ccc', 'the body', 'is in the attic'); +SET SESSION AUTHORIZATION regress_priv_user2; +-- Perform a few updates that violate the NOT NULL constraint. Make sure +-- the error messages don't leak the secret fields. +-- simple insert. +INSERT INTO errtst (a, b) VALUES ('aaa', NULL); +ERROR: null value in column "b" of relation "errtst_part_1" violates not-null constraint +DETAIL: Failing row contains (a, b, c) = (aaa, null, null). +-- simple update. +UPDATE errtst SET b = NULL; +ERROR: null value in column "b" of relation "errtst_part_1" violates not-null constraint +DETAIL: Failing row contains (a, b, c) = (aaa, null, ccc). +-- partitioning key is updated, doesn't move the row. +UPDATE errtst SET a = 'aaa', b = NULL; +ERROR: null value in column "b" of relation "errtst_part_1" violates not-null constraint +DETAIL: Failing row contains (a, b, c) = (aaa, null, ccc). +-- row is moved to another partition. +UPDATE errtst SET a = 'aaaa', b = NULL; +ERROR: null value in column "b" of relation "errtst_part_2" violates not-null constraint +DETAIL: Failing row contains (a, b, c) = (aaaa, null, ccc). +-- row is moved to another partition. This differs from the previous case in +-- that the new partition is excluded by constraint exclusion, so its +-- ResultRelInfo is not created at ExecInitModifyTable, but needs to be +-- constructed on the fly when the updated tuple is routed to it. +UPDATE errtst SET a = 'aaaa', b = NULL WHERE a = 'aaa'; +ERROR: null value in column "b" of relation "errtst_part_2" violates not-null constraint +DETAIL: Failing row contains (a, b, c) = (aaaa, null, ccc). 
+SET SESSION AUTHORIZATION regress_priv_user1; +DROP TABLE errtst; +-- test column-level privileges when involved with DELETE +SET SESSION AUTHORIZATION regress_priv_user1; +ALTER TABLE atest6 ADD COLUMN three integer; +GRANT DELETE ON atest5 TO regress_priv_user3; +GRANT SELECT (two) ON atest5 TO regress_priv_user3; +REVOKE ALL (one) ON atest5 FROM regress_priv_user3; +GRANT SELECT (one) ON atest5 TO regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT atest6 FROM atest6; -- fail +ERROR: permission denied for table atest6 +SELECT one FROM atest5 NATURAL JOIN atest6; -- fail +ERROR: permission denied for table atest5 +SET SESSION AUTHORIZATION regress_priv_user1; +ALTER TABLE atest6 DROP COLUMN three; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT atest6 FROM atest6; -- ok + atest6 +-------- +(0 rows) + +SELECT one FROM atest5 NATURAL JOIN atest6; -- ok + one +----- +(0 rows) + +SET SESSION AUTHORIZATION regress_priv_user1; +ALTER TABLE atest6 DROP COLUMN two; +REVOKE SELECT (one,blue) ON atest6 FROM regress_priv_user4; +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT * FROM atest6; -- fail +ERROR: permission denied for table atest6 +SELECT 1 FROM atest6; -- fail +ERROR: permission denied for table atest6 +SET SESSION AUTHORIZATION regress_priv_user3; +DELETE FROM atest5 WHERE one = 1; -- fail +ERROR: permission denied for table atest5 +DELETE FROM atest5 WHERE two = 2; -- ok +-- check inheritance cases +SET SESSION AUTHORIZATION regress_priv_user1; +CREATE TABLE atestp1 (f1 int, f2 int); +CREATE TABLE atestp2 (fx int, fy int); +CREATE TABLE atestc (fz int) INHERITS (atestp1, atestp2); +GRANT SELECT(fx,fy,tableoid) ON atestp2 TO regress_priv_user2; +GRANT SELECT(fx) ON atestc TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT fx FROM atestp2; -- ok + fx +---- +(0 rows) + +SELECT fy FROM atestp2; -- ok + fy +---- +(0 rows) + +SELECT atestp2 FROM atestp2; -- ok + atestp2 +--------- +(0 rows) + +SELECT tableoid FROM atestp2; -- ok + tableoid +---------- +(0 rows) + +SELECT fy FROM atestc; -- fail +ERROR: permission denied for table atestc +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT(fy,tableoid) ON atestc TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT fx FROM atestp2; -- still ok + fx +---- +(0 rows) + +SELECT fy FROM atestp2; -- ok + fy +---- +(0 rows) + +SELECT atestp2 FROM atestp2; -- ok + atestp2 +--------- +(0 rows) + +SELECT tableoid FROM atestp2; -- ok + tableoid +---------- +(0 rows) + +-- child's permissions do not apply when operating on parent +SET SESSION AUTHORIZATION regress_priv_user1; +REVOKE ALL ON atestc FROM regress_priv_user2; +GRANT ALL ON atestp1 TO regress_priv_user2; +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT f2 FROM atestp1; -- ok + f2 +---- +(0 rows) + +SELECT f2 FROM atestc; -- fail +ERROR: permission denied for table atestc +DELETE FROM atestp1; -- ok +DELETE FROM atestc; -- fail +ERROR: permission denied for table atestc +UPDATE atestp1 SET f1 = 1; -- ok +UPDATE atestc SET f1 = 1; -- fail +ERROR: permission denied for table atestc +TRUNCATE atestp1; -- ok +TRUNCATE atestc; -- fail +ERROR: permission denied for table atestc +BEGIN; +LOCK atestp1; +END; +BEGIN; +LOCK atestc; +ERROR: permission denied for table atestc +END; +-- privileges on functions, languages +-- switch to superuser +\c - +REVOKE ALL PRIVILEGES ON LANGUAGE sql FROM PUBLIC; +GRANT USAGE ON LANGUAGE sql TO regress_priv_user1; -- ok +GRANT USAGE ON LANGUAGE c TO PUBLIC; -- fail 
+ERROR: language "c" is not trusted +DETAIL: GRANT and REVOKE are not allowed on untrusted languages, because only superusers can use untrusted languages. +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT USAGE ON LANGUAGE sql TO regress_priv_user2; -- fail +WARNING: no privileges were granted for "sql" +CREATE FUNCTION priv_testfunc1(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; +CREATE FUNCTION priv_testfunc2(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE AGGREGATE priv_testagg1(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE priv_testproc1(int) AS 'select $1;' LANGUAGE sql; +REVOKE ALL ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) FROM PUBLIC; +GRANT EXECUTE ON FUNCTION priv_testfunc1(int), priv_testfunc2(int), priv_testagg1(int) TO regress_priv_user2; +REVOKE ALL ON FUNCTION priv_testproc1(int) FROM PUBLIC; -- fail, not a function +ERROR: priv_testproc1(integer) is not a function +REVOKE ALL ON PROCEDURE priv_testproc1(int) FROM PUBLIC; +GRANT EXECUTE ON PROCEDURE priv_testproc1(int) TO regress_priv_user2; +GRANT USAGE ON FUNCTION priv_testfunc1(int) TO regress_priv_user3; -- semantic error +ERROR: invalid privilege type USAGE for function +GRANT USAGE ON FUNCTION priv_testagg1(int) TO regress_priv_user3; -- semantic error +ERROR: invalid privilege type USAGE for function +GRANT USAGE ON PROCEDURE priv_testproc1(int) TO regress_priv_user3; -- semantic error +ERROR: invalid privilege type USAGE for procedure +GRANT ALL PRIVILEGES ON FUNCTION priv_testfunc1(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON FUNCTION priv_testfunc_nosuch(int) TO regress_priv_user4; +ERROR: function priv_testfunc_nosuch(integer) does not exist +GRANT ALL PRIVILEGES ON FUNCTION priv_testagg1(int) TO regress_priv_user4; +GRANT ALL PRIVILEGES ON PROCEDURE priv_testproc1(int) TO regress_priv_user4; +CREATE FUNCTION priv_testfunc4(boolean) RETURNS text + AS 'select col1 from atest2 where col2 = $1;' + LANGUAGE sql SECURITY DEFINER; +GRANT EXECUTE ON FUNCTION priv_testfunc4(boolean) TO regress_priv_user3; +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT priv_testfunc1(5), priv_testfunc2(5); -- ok + priv_testfunc1 | priv_testfunc2 +----------------+---------------- + 10 | 15 +(1 row) + +CREATE FUNCTION priv_testfunc3(int) RETURNS int AS 'select 2 * $1;' LANGUAGE sql; -- fail +ERROR: permission denied for language sql +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok + priv_testagg1 +--------------- + 6 +(1 row) + +CALL priv_testproc1(6); -- ok +SET SESSION AUTHORIZATION regress_priv_user3; +SELECT priv_testfunc1(5); -- fail +ERROR: permission denied for function priv_testfunc1 +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- fail +ERROR: permission denied for aggregate priv_testagg1 +CALL priv_testproc1(6); -- fail +ERROR: permission denied for procedure priv_testproc1 +SELECT col1 FROM atest2 WHERE col2 = true; -- fail +ERROR: permission denied for table atest2 +SELECT priv_testfunc4(true); -- ok + priv_testfunc4 +---------------- + bar +(1 row) + +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT priv_testfunc1(5); -- ok + priv_testfunc1 +---------------- + 10 +(1 row) + +SELECT priv_testagg1(x) FROM (VALUES (1), (2), (3)) _(x); -- ok + priv_testagg1 +--------------- + 6 +(1 row) + +CALL priv_testproc1(6); -- ok +DROP FUNCTION priv_testfunc1(int); -- fail +ERROR: must be owner of function priv_testfunc1 +DROP AGGREGATE priv_testagg1(int); -- fail +ERROR: must be owner of aggregate priv_testagg1 +DROP PROCEDURE 
priv_testproc1(int); -- fail +ERROR: must be owner of procedure priv_testproc1 +\c - +DROP FUNCTION priv_testfunc1(int); -- ok +-- restore to sanity +GRANT ALL PRIVILEGES ON LANGUAGE sql TO PUBLIC; +-- verify privilege checks on array-element coercions +BEGIN; +SELECT '{1}'::int4[]::int8[]; + int8 +------ + {1} +(1 row) + +REVOKE ALL ON FUNCTION int8(integer) FROM PUBLIC; +SELECT '{1}'::int4[]::int8[]; --superuser, succeed + int8 +------ + {1} +(1 row) + +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT '{1}'::int4[]::int8[]; --other user, fail +ERROR: permission denied for function int8 +ROLLBACK; +-- privileges on types +-- switch to superuser +\c - +CREATE TYPE priv_testtype1 AS (a int, b text); +REVOKE USAGE ON TYPE priv_testtype1 FROM PUBLIC; +GRANT USAGE ON TYPE priv_testtype1 TO regress_priv_user2; +GRANT USAGE ON TYPE _priv_testtype1 TO regress_priv_user2; -- fail +ERROR: cannot set privileges of array types +HINT: Set the privileges of the element type instead. +GRANT USAGE ON DOMAIN priv_testtype1 TO regress_priv_user2; -- fail +ERROR: "priv_testtype1" is not a domain +CREATE DOMAIN priv_testdomain1 AS int; +REVOKE USAGE on DOMAIN priv_testdomain1 FROM PUBLIC; +GRANT USAGE ON DOMAIN priv_testdomain1 TO regress_priv_user2; +GRANT USAGE ON TYPE priv_testdomain1 TO regress_priv_user2; -- ok +SET SESSION AUTHORIZATION regress_priv_user1; +-- commands that should fail +CREATE AGGREGATE priv_testagg1a(priv_testdomain1) (sfunc = int4_sum, stype = bigint); +ERROR: permission denied for type priv_testdomain1 +CREATE DOMAIN priv_testdomain2a AS priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +CREATE DOMAIN priv_testdomain3a AS int; +CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3a AS $$ SELECT $1::priv_testdomain3a $$ LANGUAGE SQL; +CREATE CAST (priv_testdomain1 AS priv_testdomain3a) WITH FUNCTION castfunc(int); +ERROR: permission denied for type priv_testdomain1 +DROP FUNCTION castfunc(int) CASCADE; +DROP DOMAIN priv_testdomain3a; +CREATE FUNCTION priv_testfunc5a(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; +ERROR: permission denied for type priv_testdomain1 +CREATE FUNCTION priv_testfunc6a(b int) RETURNS priv_testdomain1 LANGUAGE SQL AS $$ SELECT $1::priv_testdomain1 $$; +ERROR: permission denied for type priv_testdomain1 +CREATE OPERATOR !+! 
(PROCEDURE = int4pl, LEFTARG = priv_testdomain1, RIGHTARG = priv_testdomain1); +ERROR: permission denied for type priv_testdomain1 +CREATE TABLE test5a (a int, b priv_testdomain1); +ERROR: permission denied for type priv_testdomain1 +CREATE TABLE test6a OF priv_testtype1; +ERROR: permission denied for type priv_testtype1 +CREATE TABLE test10a (a int[], b priv_testtype1[]); +ERROR: permission denied for type priv_testtype1 +CREATE TABLE test9a (a int, b int); +ALTER TABLE test9a ADD COLUMN c priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +ALTER TABLE test9a ALTER COLUMN b TYPE priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +CREATE TYPE test7a AS (a int, b priv_testdomain1); +ERROR: permission denied for type priv_testdomain1 +CREATE TYPE test8a AS (a int, b int); +ALTER TYPE test8a ADD ATTRIBUTE c priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +ALTER TYPE test8a ALTER ATTRIBUTE b TYPE priv_testdomain1; +ERROR: permission denied for type priv_testdomain1 +CREATE TABLE test11a AS (SELECT 1::priv_testdomain1 AS a); +ERROR: permission denied for type priv_testdomain1 +REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; +ERROR: permission denied for type priv_testtype1 +SET SESSION AUTHORIZATION regress_priv_user2; +-- commands that should succeed +CREATE AGGREGATE priv_testagg1b(priv_testdomain1) (sfunc = int4_sum, stype = bigint); +CREATE DOMAIN priv_testdomain2b AS priv_testdomain1; +CREATE DOMAIN priv_testdomain3b AS int; +CREATE FUNCTION castfunc(int) RETURNS priv_testdomain3b AS $$ SELECT $1::priv_testdomain3b $$ LANGUAGE SQL; +CREATE CAST (priv_testdomain1 AS priv_testdomain3b) WITH FUNCTION castfunc(int); +WARNING: cast will be ignored because the source data type is a domain +CREATE FUNCTION priv_testfunc5b(a priv_testdomain1) RETURNS int LANGUAGE SQL AS $$ SELECT $1 $$; +CREATE FUNCTION priv_testfunc6b(b int) RETURNS priv_testdomain1 LANGUAGE SQL AS $$ SELECT $1::priv_testdomain1 $$; +CREATE OPERATOR !! (PROCEDURE = priv_testfunc5b, RIGHTARG = priv_testdomain1); +CREATE TABLE test5b (a int, b priv_testdomain1); +CREATE TABLE test6b OF priv_testtype1; +CREATE TABLE test10b (a int[], b priv_testtype1[]); +CREATE TABLE test9b (a int, b int); +ALTER TABLE test9b ADD COLUMN c priv_testdomain1; +ALTER TABLE test9b ALTER COLUMN b TYPE priv_testdomain1; +CREATE TYPE test7b AS (a int, b priv_testdomain1); +CREATE TYPE test8b AS (a int, b int); +ALTER TYPE test8b ADD ATTRIBUTE c priv_testdomain1; +ALTER TYPE test8b ALTER ATTRIBUTE b TYPE priv_testdomain1; +CREATE TABLE test11b AS (SELECT 1::priv_testdomain1 AS a); +REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; +WARNING: no privileges could be revoked for "priv_testtype1" +\c - +DROP AGGREGATE priv_testagg1b(priv_testdomain1); +DROP DOMAIN priv_testdomain2b; +DROP OPERATOR !! 
(NONE, priv_testdomain1); +DROP FUNCTION priv_testfunc5b(a priv_testdomain1); +DROP FUNCTION priv_testfunc6b(b int); +DROP TABLE test5b; +DROP TABLE test6b; +DROP TABLE test9b; +DROP TABLE test10b; +DROP TYPE test7b; +DROP TYPE test8b; +DROP CAST (priv_testdomain1 AS priv_testdomain3b); +DROP FUNCTION castfunc(int) CASCADE; +DROP DOMAIN priv_testdomain3b; +DROP TABLE test11b; +DROP TYPE priv_testtype1; -- ok +DROP DOMAIN priv_testdomain1; -- ok +-- truncate +SET SESSION AUTHORIZATION regress_priv_user5; +TRUNCATE atest2; -- ok +TRUNCATE atest3; -- fail +ERROR: permission denied for table atest3 +-- has_table_privilege function +-- bad-input checks +select has_table_privilege(NULL,'pg_authid','select'); + has_table_privilege +--------------------- + +(1 row) + +select has_table_privilege('pg_shad','select'); +ERROR: relation "pg_shad" does not exist +select has_table_privilege('nosuchuser','pg_authid','select'); +ERROR: role "nosuchuser" does not exist +select has_table_privilege('pg_authid','sel'); +ERROR: unrecognized privilege type: "sel" +select has_table_privilege(-999999,'pg_authid','update'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(1,'select'); + has_table_privilege +--------------------- + +(1 row) + +-- superuser +\c - +select has_table_privilege(current_user,'pg_authid','select'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(current_user,'pg_authid','insert'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t2.oid,'pg_authid','update') +from (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t2.oid,'pg_authid','delete') +from (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + t +(1 row) + +-- 'rule' privilege no longer exists, but for backwards compatibility +-- has_table_privilege still recognizes the keyword and says FALSE +select has_table_privilege(current_user,t1.oid,'rule') +from (select oid from pg_class where relname = 'pg_authid') as t1; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(current_user,t1.oid,'references') +from (select oid from pg_class where relname = 'pg_authid') as t1; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t2.oid,t1.oid,'select') +from (select oid from pg_class where relname = 'pg_authid') as t1, + (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t2.oid,t1.oid,'insert') +from (select oid from pg_class where relname = 'pg_authid') as t1, + (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege('pg_authid','update'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege('pg_authid','delete'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege('pg_authid','truncate'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t1.oid,'select') +from (select oid from pg_class where relname = 'pg_authid') as t1; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t1.oid,'trigger') +from (select oid from pg_class where relname = 'pg_authid') as t1; + 
has_table_privilege +--------------------- + t +(1 row) + +-- non-superuser +SET SESSION AUTHORIZATION regress_priv_user3; +select has_table_privilege(current_user,'pg_class','select'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(current_user,'pg_class','insert'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t2.oid,'pg_class','update') +from (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t2.oid,'pg_class','delete') +from (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(current_user,t1.oid,'references') +from (select oid from pg_class where relname = 'pg_class') as t1; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t2.oid,t1.oid,'select') +from (select oid from pg_class where relname = 'pg_class') as t1, + (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t2.oid,t1.oid,'insert') +from (select oid from pg_class where relname = 'pg_class') as t1, + (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege('pg_class','update'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege('pg_class','delete'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege('pg_class','truncate'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t1.oid,'select') +from (select oid from pg_class where relname = 'pg_class') as t1; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t1.oid,'trigger') +from (select oid from pg_class where relname = 'pg_class') as t1; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(current_user,'atest1','select'); + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(current_user,'atest1','insert'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t2.oid,'atest1','update') +from (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t2.oid,'atest1','delete') +from (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(current_user,t1.oid,'references') +from (select oid from pg_class where relname = 'atest1') as t1; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t2.oid,t1.oid,'select') +from (select oid from pg_class where relname = 'atest1') as t1, + (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t2.oid,t1.oid,'insert') +from (select oid from pg_class where relname = 'atest1') as t1, + (select oid from pg_roles where rolname = current_user) as t2; + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege('atest1','update'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege('atest1','delete'); + has_table_privilege 
+--------------------- + f +(1 row) + +select has_table_privilege('atest1','truncate'); + has_table_privilege +--------------------- + f +(1 row) + +select has_table_privilege(t1.oid,'select') +from (select oid from pg_class where relname = 'atest1') as t1; + has_table_privilege +--------------------- + t +(1 row) + +select has_table_privilege(t1.oid,'trigger') +from (select oid from pg_class where relname = 'atest1') as t1; + has_table_privilege +--------------------- + f +(1 row) + +-- has_column_privilege function +-- bad-input checks (as non-super-user) +select has_column_privilege('pg_authid',NULL,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('pg_authid','nosuchcol','select'); +ERROR: column "nosuchcol" of relation "pg_authid" does not exist +select has_column_privilege(9999,'nosuchcol','select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege(9999,99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('pg_authid',99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege(9999,99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +create temp table mytable(f1 int, f2 int, f3 int); +alter table mytable drop column f2; +select has_column_privilege('mytable','f2','select'); +ERROR: column "f2" of relation "mytable" does not exist +select has_column_privilege('mytable','........pg.dropped.2........','select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('mytable',2::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('mytable',99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +revoke select on table mytable from regress_priv_user3; +select has_column_privilege('mytable',2::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +select has_column_privilege('mytable',99::int2,'select'); + has_column_privilege +---------------------- + +(1 row) + +drop table mytable; +-- Grant options +SET SESSION AUTHORIZATION regress_priv_user1; +CREATE TABLE atest4 (a int); +GRANT SELECT ON atest4 TO regress_priv_user2 WITH GRANT OPTION; +GRANT UPDATE ON atest4 TO regress_priv_user2; +GRANT SELECT ON atest4 TO GROUP regress_priv_group1 WITH GRANT OPTION; +SET SESSION AUTHORIZATION regress_priv_user2; +GRANT SELECT ON atest4 TO regress_priv_user3; +GRANT UPDATE ON atest4 TO regress_priv_user3; -- fail +WARNING: no privileges were granted for "atest4" +SET SESSION AUTHORIZATION regress_priv_user1; +REVOKE SELECT ON atest4 FROM regress_priv_user3; -- does nothing +SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- true + has_table_privilege +--------------------- + t +(1 row) + +REVOKE SELECT ON atest4 FROM regress_priv_user2; -- fail +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. 
+REVOKE GRANT OPTION FOR SELECT ON atest4 FROM regress_priv_user2 CASCADE; -- ok +SELECT has_table_privilege('regress_priv_user2', 'atest4', 'SELECT'); -- true + has_table_privilege +--------------------- + t +(1 row) + +SELECT has_table_privilege('regress_priv_user3', 'atest4', 'SELECT'); -- false + has_table_privilege +--------------------- + f +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OPTION'); -- true + has_table_privilege +--------------------- + t +(1 row) + +-- security-restricted operations +\c - +CREATE ROLE regress_sro_user; +-- Check that index expressions and predicates are run as the table's owner +-- A dummy index function checking current_user +CREATE FUNCTION sro_ifun(int) RETURNS int AS $$ +BEGIN + -- Below we set the table's owner to regress_sro_user + ASSERT current_user = 'regress_sro_user', + format('sro_ifun(%s) called by %s', $1, current_user); + RETURN $1; +END; +$$ LANGUAGE plpgsql IMMUTABLE; +-- Create a table owned by regress_sro_user +CREATE TABLE sro_tab (a int); +ALTER TABLE sro_tab OWNER TO regress_sro_user; +INSERT INTO sro_tab VALUES (1), (2), (3); +-- Create an expression index with a predicate +CREATE INDEX sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))) + WHERE sro_ifun(a + 10) > sro_ifun(10); +DROP INDEX sro_idx; +-- Do the same concurrently +CREATE INDEX CONCURRENTLY sro_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))) + WHERE sro_ifun(a + 10) > sro_ifun(10); +-- REINDEX +REINDEX TABLE sro_tab; +REINDEX INDEX sro_idx; +REINDEX TABLE CONCURRENTLY sro_tab; +DROP INDEX sro_idx; +-- CLUSTER +CREATE INDEX sro_cluster_idx ON sro_tab ((sro_ifun(a) + sro_ifun(0))); +CLUSTER sro_tab USING sro_cluster_idx; +DROP INDEX sro_cluster_idx; +-- BRIN index +CREATE INDEX sro_brin ON sro_tab USING brin ((sro_ifun(a) + sro_ifun(0))); +SELECT brin_desummarize_range('sro_brin', 0); + brin_desummarize_range +------------------------ + +(1 row) + +SELECT brin_summarize_range('sro_brin', 0); + brin_summarize_range +---------------------- + 1 +(1 row) + +DROP TABLE sro_tab; +-- Check with a partitioned table +CREATE TABLE sro_ptab (a int) PARTITION BY RANGE (a); +ALTER TABLE sro_ptab OWNER TO regress_sro_user; +CREATE TABLE sro_part PARTITION OF sro_ptab FOR VALUES FROM (1) TO (10); +ALTER TABLE sro_part OWNER TO regress_sro_user; +INSERT INTO sro_ptab VALUES (1), (2), (3); +CREATE INDEX sro_pidx ON sro_ptab ((sro_ifun(a) + sro_ifun(0))) + WHERE sro_ifun(a + 10) > sro_ifun(10); +REINDEX TABLE sro_ptab; +REINDEX INDEX CONCURRENTLY sro_pidx; +SET SESSION AUTHORIZATION regress_sro_user; +CREATE FUNCTION unwanted_grant() RETURNS void LANGUAGE sql AS + 'GRANT regress_priv_group2 TO regress_sro_user'; +CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS + 'DECLARE c CURSOR WITH HOLD FOR SELECT unwanted_grant(); SELECT true'; +-- REFRESH of this MV will queue a GRANT at end of transaction +CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA; +REFRESH MATERIALIZED VIEW sro_mv; +ERROR: cannot create a cursor WITH HOLD within security-restricted operation +CONTEXT: SQL function "mv_action" statement 1 +\c - +REFRESH MATERIALIZED VIEW sro_mv; +ERROR: cannot create a cursor WITH HOLD within security-restricted operation +CONTEXT: SQL function "mv_action" statement 1 +SET SESSION AUTHORIZATION regress_sro_user; +-- INSERT to this table will queue a GRANT at end of transaction +CREATE TABLE sro_trojan_table (); +CREATE FUNCTION sro_trojan() RETURNS trigger LANGUAGE plpgsql AS + 'BEGIN PERFORM unwanted_grant(); RETURN NULL; 
END'; +CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table + INITIALLY DEFERRED FOR EACH ROW EXECUTE PROCEDURE sro_trojan(); +-- Now, REFRESH will issue such an INSERT, queueing the GRANT +CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS + 'INSERT INTO sro_trojan_table DEFAULT VALUES; SELECT true'; +REFRESH MATERIALIZED VIEW sro_mv; +ERROR: cannot fire deferred trigger within security-restricted operation +CONTEXT: SQL function "mv_action" statement 1 +\c - +REFRESH MATERIALIZED VIEW sro_mv; +ERROR: cannot fire deferred trigger within security-restricted operation +CONTEXT: SQL function "mv_action" statement 1 +BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT; +ERROR: permission denied to grant role "regress_priv_group2" +DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. +CONTEXT: SQL function "unwanted_grant" statement 1 +SQL statement "SELECT unwanted_grant()" +PL/pgSQL function sro_trojan() line 1 at PERFORM +SQL function "mv_action" statement 1 +-- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions() +SET SESSION AUTHORIZATION regress_sro_user; +CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int + IMMUTABLE LANGUAGE plpgsql AS $$ +BEGIN + PERFORM unwanted_grant(); + RAISE WARNING 'owned'; + RETURN 1; +EXCEPTION WHEN OTHERS THEN + RETURN 2; +END$$; +CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c; +CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0; +\c - +REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv; +REFRESH MATERIALIZED VIEW sro_index_mv; +DROP OWNED BY regress_sro_user; +DROP ROLE regress_sro_user; +-- Admin options +SET SESSION AUTHORIZATION regress_priv_user4; +CREATE FUNCTION dogrant_ok() RETURNS void LANGUAGE sql SECURITY DEFINER AS + 'GRANT regress_priv_group2 TO regress_priv_user5'; +GRANT regress_priv_group2 TO regress_priv_user5; -- ok: had ADMIN OPTION +SET ROLE regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE suspended privilege +ERROR: permission denied to grant role "regress_priv_group2" +DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. +SET SESSION AUTHORIZATION regress_priv_user1; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no ADMIN OPTION +ERROR: permission denied to grant role "regress_priv_group2" +DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. +SELECT dogrant_ok(); -- ok: SECURITY DEFINER conveys ADMIN +NOTICE: role "regress_priv_user5" has already been granted membership in role "regress_priv_group2" by role "regress_priv_user4" + dogrant_ok +------------ + +(1 row) + +SET ROLE regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: SET ROLE did not help +ERROR: permission denied to grant role "regress_priv_group2" +DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. +SET SESSION AUTHORIZATION regress_priv_group2; +GRANT regress_priv_group2 TO regress_priv_user5; -- fails: no self-admin +ERROR: permission denied to grant role "regress_priv_group2" +DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role. 
+SET SESSION AUTHORIZATION regress_priv_user4; +DROP FUNCTION dogrant_ok(); +REVOKE regress_priv_group2 FROM regress_priv_user5; +-- has_sequence_privilege tests +\c - +CREATE SEQUENCE x_seq; +GRANT USAGE on x_seq to regress_priv_user2; +SELECT has_sequence_privilege('regress_priv_user1', 'atest1', 'SELECT'); +ERROR: "atest1" is not a sequence +SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'INSERT'); +ERROR: unrecognized privilege type: "INSERT" +SELECT has_sequence_privilege('regress_priv_user1', 'x_seq', 'SELECT'); + has_sequence_privilege +------------------------ + f +(1 row) + +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT has_sequence_privilege('x_seq', 'USAGE'); + has_sequence_privilege +------------------------ + t +(1 row) + +-- largeobject privilege tests +\c - +SET SESSION AUTHORIZATION regress_priv_user1; +SELECT lo_create(1001); + lo_create +----------- + 1001 +(1 row) + +SELECT lo_create(1002); + lo_create +----------- + 1002 +(1 row) + +SELECT lo_create(1003); + lo_create +----------- + 1003 +(1 row) + +SELECT lo_create(1004); + lo_create +----------- + 1004 +(1 row) + +SELECT lo_create(1005); + lo_create +----------- + 1005 +(1 row) + +GRANT ALL ON LARGE OBJECT 1001 TO PUBLIC; +GRANT SELECT ON LARGE OBJECT 1003 TO regress_priv_user2; +GRANT SELECT,UPDATE ON LARGE OBJECT 1004 TO regress_priv_user2; +GRANT ALL ON LARGE OBJECT 1005 TO regress_priv_user2; +GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user2 WITH GRANT OPTION; +GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed +ERROR: invalid privilege type INSERT for large object +GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO nosuchuser; -- to be failed +ERROR: role "nosuchuser" does not exist +GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed +ERROR: large object 999 does not exist +\c - +SET SESSION AUTHORIZATION regress_priv_user2; +SELECT lo_create(2001); + lo_create +----------- + 2001 +(1 row) + +SELECT lo_create(2002); + lo_create +----------- + 2002 +(1 row) + +SELECT loread(lo_open(1001, x'20000'::int), 32); -- allowed, for now + loread +-------- + \x +(1 row) + +SELECT lowrite(lo_open(1001, x'40000'::int), 'abcd'); -- fail, wrong mode +ERROR: large object descriptor 0 was not opened for writing +SELECT loread(lo_open(1001, x'40000'::int), 32); + loread +-------- + \x +(1 row) + +SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied +ERROR: permission denied for large object 1002 +SELECT loread(lo_open(1003, x'40000'::int), 32); + loread +-------- + \x +(1 row) + +SELECT loread(lo_open(1004, x'40000'::int), 32); + loread +-------- + \x +(1 row) + +SELECT lowrite(lo_open(1001, x'20000'::int), 'abcd'); + lowrite +--------- + 4 +(1 row) + +SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied +ERROR: permission denied for large object 1002 +SELECT lowrite(lo_open(1003, x'20000'::int), 'abcd'); -- to be denied +ERROR: permission denied for large object 1003 +SELECT lowrite(lo_open(1004, x'20000'::int), 'abcd'); + lowrite +--------- + 4 +(1 row) + +GRANT SELECT ON LARGE OBJECT 1005 TO regress_priv_user3; +GRANT UPDATE ON LARGE OBJECT 1006 TO regress_priv_user3; -- to be denied +ERROR: large object 1006 does not exist +REVOKE ALL ON LARGE OBJECT 2001, 2002 FROM PUBLIC; +GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3; +SELECT lo_unlink(1001); -- to be denied +ERROR: must be owner of large object 1001 +SELECT lo_unlink(2002); + lo_unlink +----------- + 1 +(1 row) + +\c - +-- confirm ACL setting +SELECT oid, pg_get_userbyid(lomowner) 
ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; + oid | ownername | lomacl +------+--------------------+------------------------------------------------------------------------------------------------------------------------------ + 1001 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,=rw/regress_priv_user1} + 1002 | regress_priv_user1 | + 1003 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r/regress_priv_user1} + 1004 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=rw/regress_priv_user1} + 1005 | regress_priv_user1 | {regress_priv_user1=rw/regress_priv_user1,regress_priv_user2=r*w/regress_priv_user1,regress_priv_user3=r/regress_priv_user2} + 2001 | regress_priv_user2 | {regress_priv_user2=rw/regress_priv_user2,regress_priv_user3=rw/regress_priv_user2} +(6 rows) + +SET SESSION AUTHORIZATION regress_priv_user3; +SELECT loread(lo_open(1001, x'40000'::int), 32); + loread +------------ + \x61626364 +(1 row) + +SELECT loread(lo_open(1003, x'40000'::int), 32); -- to be denied +ERROR: permission denied for large object 1003 +SELECT loread(lo_open(1005, x'40000'::int), 32); + loread +-------- + \x +(1 row) + +SELECT lo_truncate(lo_open(1005, x'20000'::int), 10); -- to be denied +ERROR: permission denied for large object 1005 +SELECT lo_truncate(lo_open(2001, x'20000'::int), 10); + lo_truncate +------------- + 0 +(1 row) + +-- compatibility mode in largeobject permission +\c - +SET lo_compat_privileges = false; -- default setting +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT loread(lo_open(1002, x'40000'::int), 32); -- to be denied +ERROR: permission denied for large object 1002 +SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); -- to be denied +ERROR: permission denied for large object 1002 +SELECT lo_truncate(lo_open(1002, x'20000'::int), 10); -- to be denied +ERROR: permission denied for large object 1002 +SELECT lo_put(1002, 1, 'abcd'); -- to be denied +ERROR: permission denied for large object 1002 +SELECT lo_unlink(1002); -- to be denied +ERROR: must be owner of large object 1002 +SELECT lo_export(1001, '/dev/null'); -- to be denied +ERROR: permission denied for function lo_export +SELECT lo_import('/dev/null'); -- to be denied +ERROR: permission denied for function lo_import +SELECT lo_import('/dev/null', 2003); -- to be denied +ERROR: permission denied for function lo_import +\c - +SET lo_compat_privileges = true; -- compatibility mode +SET SESSION AUTHORIZATION regress_priv_user4; +SELECT loread(lo_open(1002, x'40000'::int), 32); + loread +-------- + \x +(1 row) + +SELECT lowrite(lo_open(1002, x'20000'::int), 'abcd'); + lowrite +--------- + 4 +(1 row) + +SELECT lo_truncate(lo_open(1002, x'20000'::int), 10); + lo_truncate +------------- + 0 +(1 row) + +SELECT lo_unlink(1002); + lo_unlink +----------- + 1 +(1 row) + +SELECT lo_export(1001, '/dev/null'); -- to be denied +ERROR: permission denied for function lo_export +-- don't allow unpriv users to access pg_largeobject contents +\c - +SELECT * FROM pg_largeobject LIMIT 0; + loid | pageno | data +------+--------+------ +(0 rows) + +SET SESSION AUTHORIZATION regress_priv_user1; +SELECT * FROM pg_largeobject LIMIT 0; -- to be denied +ERROR: permission denied for table pg_largeobject +-- pg_signal_backend can't signal superusers +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE OR REPLACE FUNCTION terminate_nothrow(pid int) RETURNS bool + LANGUAGE plpgsql SECURITY DEFINER SET client_min_messages = 
error AS $$ +BEGIN + RETURN pg_terminate_backend($1); +EXCEPTION WHEN OTHERS THEN + RETURN false; +END$$; +ALTER FUNCTION terminate_nothrow OWNER TO pg_signal_backend; +SELECT backend_type FROM pg_stat_activity +WHERE CASE WHEN COALESCE(usesysid, 10) = 10 THEN terminate_nothrow(pid) END; + backend_type +-------------- +(0 rows) + +ROLLBACK; +-- test pg_database_owner +RESET SESSION AUTHORIZATION; +GRANT pg_database_owner TO regress_priv_user1; +ERROR: role "pg_database_owner" cannot have explicit members +GRANT regress_priv_user1 TO pg_database_owner; +ERROR: role "pg_database_owner" cannot be a member of any role +CREATE TABLE datdba_only (); +ALTER TABLE datdba_only OWNER TO pg_database_owner; +REVOKE DELETE ON datdba_only FROM pg_database_owner; +SELECT + pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv, + pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem, + pg_has_role('regress_priv_user1', 'pg_database_owner', + 'MEMBER WITH ADMIN OPTION') as admin; + priv | mem | admin +------+-----+------- + f | f | f +(1 row) + +BEGIN; +DO $$BEGIN EXECUTE format( + 'ALTER DATABASE %I OWNER TO regress_priv_group2', current_catalog); END$$; +SELECT + pg_has_role('regress_priv_user1', 'pg_database_owner', 'USAGE') as priv, + pg_has_role('regress_priv_user1', 'pg_database_owner', 'MEMBER') as mem, + pg_has_role('regress_priv_user1', 'pg_database_owner', + 'MEMBER WITH ADMIN OPTION') as admin; + priv | mem | admin +------+-----+------- + t | t | f +(1 row) + +SET SESSION AUTHORIZATION regress_priv_user1; +TABLE information_schema.enabled_roles ORDER BY role_name COLLATE "C"; + role_name +--------------------- + pg_database_owner + regress_priv_group2 + regress_priv_user1 +(3 rows) + +TABLE information_schema.applicable_roles ORDER BY role_name COLLATE "C"; + grantee | role_name | is_grantable +---------------------+---------------------+-------------- + regress_priv_group2 | pg_database_owner | NO + regress_priv_user1 | regress_priv_group2 | NO +(2 rows) + +INSERT INTO datdba_only DEFAULT VALUES; +SAVEPOINT q; DELETE FROM datdba_only; ROLLBACK TO q; +ERROR: permission denied for table datdba_only +SET SESSION AUTHORIZATION regress_priv_user2; +TABLE information_schema.enabled_roles; + role_name +-------------------- + regress_priv_user2 +(1 row) + +INSERT INTO datdba_only DEFAULT VALUES; +ERROR: permission denied for table datdba_only +ROLLBACK; +-- test default ACLs +\c - +CREATE SCHEMA testns; +GRANT ALL ON SCHEMA testns TO regress_priv_user1; +CREATE TABLE testns.acltest1 (x int); +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +-- placeholder for test with duplicated schema and role names +ALTER DEFAULT PRIVILEGES IN SCHEMA testns,testns GRANT SELECT ON TABLES TO public,public; +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +DROP TABLE testns.acltest1; +CREATE TABLE testns.acltest1 (x int); +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes + has_table_privilege +--------------------- + t +(1 row) + +SELECT 
has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT INSERT ON TABLES TO regress_priv_user1; +DROP TABLE testns.acltest1; +CREATE TABLE testns.acltest1 (x int); +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes + has_table_privilege +--------------------- + t +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- yes + has_table_privilege +--------------------- + t +(1 row) + +ALTER DEFAULT PRIVILEGES IN SCHEMA testns REVOKE INSERT ON TABLES FROM regress_priv_user1; +DROP TABLE testns.acltest1; +CREATE TABLE testns.acltest1 (x int); +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'SELECT'); -- yes + has_table_privilege +--------------------- + t +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'testns.acltest1', 'INSERT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE EXECUTE ON FUNCTIONS FROM public; +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON SCHEMAS TO regress_priv_user2; -- error +ERROR: cannot use IN SCHEMA clause when using GRANT/REVOKE ON SCHEMAS +-- Test makeaclitem() +SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, + 'SELECT', TRUE); -- single privilege + makeaclitem +------------------------------------------ + regress_priv_user1=r*/regress_priv_user2 +(1 row) + +SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, + 'SELECT, INSERT, UPDATE , DELETE ', FALSE); -- multiple privileges + makeaclitem +-------------------------------------------- + regress_priv_user1=arwd/regress_priv_user2 +(1 row) + +SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, + 'SELECT, fake_privilege', FALSE); -- error +ERROR: unrecognized privilege type: "fake_privilege" +-- Test non-throwing aclitem I/O +SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('regress_priv_user1=r/', 'aclitem'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('regress_priv_user1=r/', 'aclitem'); + message | detail | hint | sql_error_code +---------------------------------+--------+------+---------------- + a name must follow the "/" sign | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('regress_priv_user1=r/regress_no_such_user', 'aclitem'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('regress_priv_user1=r/regress_no_such_user', 'aclitem'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + role "regress_no_such_user" does not exist | | | 42704 +(1 row) + +SELECT pg_input_is_valid('regress_priv_user1=rY', 'aclitem'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('regress_priv_user1=rY', 'aclitem'); + message | detail | hint | sql_error_code +---------------------------------------------------------+--------+------+---------------- + invalid mode character: must be one of "arwdDxtXUCTcsA" | | | 22P02 +(1 row) + +-- +-- Testing blanket default grants is very hazardous since it might change +-- the privileges attached to objects created by concurrent regression tests. 
+-- To avoid that, be sure to revoke the privileges again before committing. +-- +BEGIN; +ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS TO regress_priv_user2; +CREATE SCHEMA testns2; +SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'USAGE'); -- yes + has_schema_privilege +---------------------- + t +(1 row) + +SELECT has_schema_privilege('regress_priv_user6', 'testns2', 'USAGE'); -- yes + has_schema_privilege +---------------------- + t +(1 row) + +SELECT has_schema_privilege('regress_priv_user2', 'testns2', 'CREATE'); -- no + has_schema_privilege +---------------------- + f +(1 row) + +ALTER DEFAULT PRIVILEGES REVOKE USAGE ON SCHEMAS FROM regress_priv_user2; +CREATE SCHEMA testns3; +SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'USAGE'); -- no + has_schema_privilege +---------------------- + f +(1 row) + +SELECT has_schema_privilege('regress_priv_user2', 'testns3', 'CREATE'); -- no + has_schema_privilege +---------------------- + f +(1 row) + +ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2; +CREATE SCHEMA testns4; +SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'USAGE'); -- yes + has_schema_privilege +---------------------- + t +(1 row) + +SELECT has_schema_privilege('regress_priv_user2', 'testns4', 'CREATE'); -- yes + has_schema_privilege +---------------------- + t +(1 row) + +ALTER DEFAULT PRIVILEGES REVOKE ALL ON SCHEMAS FROM regress_priv_user2; +COMMIT; +-- Test for DROP OWNED BY with shared dependencies. This is done in a +-- separate, rollbacked, transaction to avoid any trouble with other +-- regression sessions. +BEGIN; +ALTER DEFAULT PRIVILEGES GRANT ALL ON FUNCTIONS TO regress_priv_user2; +ALTER DEFAULT PRIVILEGES GRANT ALL ON SCHEMAS TO regress_priv_user2; +ALTER DEFAULT PRIVILEGES GRANT ALL ON SEQUENCES TO regress_priv_user2; +ALTER DEFAULT PRIVILEGES GRANT ALL ON TABLES TO regress_priv_user2; +ALTER DEFAULT PRIVILEGES GRANT ALL ON TYPES TO regress_priv_user2; +SELECT count(*) FROM pg_shdepend + WHERE deptype = 'a' AND + refobjid = 'regress_priv_user2'::regrole AND + classid = 'pg_default_acl'::regclass; + count +------- + 5 +(1 row) + +DROP OWNED BY regress_priv_user2, regress_priv_user2; +SELECT count(*) FROM pg_shdepend + WHERE deptype = 'a' AND + refobjid = 'regress_priv_user2'::regrole AND + classid = 'pg_default_acl'::regclass; + count +------- + 0 +(1 row) + +ROLLBACK; +CREATE SCHEMA testns5; +SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'USAGE'); -- no + has_schema_privilege +---------------------- + f +(1 row) + +SELECT has_schema_privilege('regress_priv_user2', 'testns5', 'CREATE'); -- no + has_schema_privilege +---------------------- + f +(1 row) + +SET ROLE regress_priv_user1; +CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' LANGUAGE sql; +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; +SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- no + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- no + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- no + has_function_privilege +------------------------ + f +(1 row) + +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT EXECUTE ON ROUTINES to public; +DROP FUNCTION testns.foo(); +CREATE FUNCTION testns.foo() RETURNS int AS 'select 1' 
LANGUAGE sql; +DROP AGGREGATE testns.agg1(int); +CREATE AGGREGATE testns.agg1(int) (sfunc = int4pl, stype = int4); +DROP PROCEDURE testns.bar(); +CREATE PROCEDURE testns.bar() AS 'select 1' LANGUAGE sql; +SELECT has_function_privilege('regress_priv_user2', 'testns.foo()', 'EXECUTE'); -- yes + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.agg1(int)', 'EXECUTE'); -- yes + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user2', 'testns.bar()', 'EXECUTE'); -- yes (counts as function here) + has_function_privilege +------------------------ + t +(1 row) + +DROP FUNCTION testns.foo(); +DROP AGGREGATE testns.agg1(int); +DROP PROCEDURE testns.bar(); +ALTER DEFAULT PRIVILEGES FOR ROLE regress_priv_user1 REVOKE USAGE ON TYPES FROM public; +CREATE DOMAIN testns.priv_testdomain1 AS int; +SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- no + has_type_privilege +-------------------- + f +(1 row) + +ALTER DEFAULT PRIVILEGES IN SCHEMA testns GRANT USAGE ON TYPES to public; +DROP DOMAIN testns.priv_testdomain1; +CREATE DOMAIN testns.priv_testdomain1 AS int; +SELECT has_type_privilege('regress_priv_user2', 'testns.priv_testdomain1', 'USAGE'); -- yes + has_type_privilege +-------------------- + t +(1 row) + +DROP DOMAIN testns.priv_testdomain1; +RESET ROLE; +SELECT count(*) + FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid + WHERE nspname = 'testns'; + count +------- + 3 +(1 row) + +DROP SCHEMA testns CASCADE; +NOTICE: drop cascades to table testns.acltest1 +DROP SCHEMA testns2 CASCADE; +DROP SCHEMA testns3 CASCADE; +DROP SCHEMA testns4 CASCADE; +DROP SCHEMA testns5 CASCADE; +SELECT d.* -- check that entries went away + FROM pg_default_acl d LEFT JOIN pg_namespace n ON defaclnamespace = n.oid + WHERE nspname IS NULL AND defaclnamespace != 0; + oid | defaclrole | defaclnamespace | defaclobjtype | defaclacl +-----+------------+-----------------+---------------+----------- +(0 rows) + +-- Grant on all objects of given type in a schema +\c - +CREATE SCHEMA testns; +CREATE TABLE testns.t1 (f1 int); +CREATE TABLE testns.t2 (f1 int); +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false + has_table_privilege +--------------------- + f +(1 row) + +GRANT ALL ON ALL TABLES IN SCHEMA testns TO regress_priv_user1; +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- true + has_table_privilege +--------------------- + t +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- true + has_table_privilege +--------------------- + t +(1 row) + +REVOKE ALL ON ALL TABLES IN SCHEMA testns FROM regress_priv_user1; +SELECT has_table_privilege('regress_priv_user1', 'testns.t1', 'SELECT'); -- false + has_table_privilege +--------------------- + f +(1 row) + +SELECT has_table_privilege('regress_priv_user1', 'testns.t2', 'SELECT'); -- false + has_table_privilege +--------------------- + f +(1 row) + +CREATE FUNCTION testns.priv_testfunc(int) RETURNS int AS 'select 3 * $1;' LANGUAGE sql; +CREATE AGGREGATE testns.priv_testagg(int) (sfunc = int4pl, stype = int4); +CREATE PROCEDURE testns.priv_testproc(int) AS 'select 3' LANGUAGE sql; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true by default + has_function_privilege +------------------------ + t +(1 row) + +SELECT 
has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true by default + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true by default + has_function_privilege +------------------------ + t +(1 row) + +REVOKE ALL ON ALL FUNCTIONS IN SCHEMA testns FROM PUBLIC; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- false + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- false + has_function_privilege +------------------------ + f +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- still true, not a function + has_function_privilege +------------------------ + t +(1 row) + +REVOKE ALL ON ALL PROCEDURES IN SCHEMA testns FROM PUBLIC; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- now false + has_function_privilege +------------------------ + f +(1 row) + +GRANT ALL ON ALL ROUTINES IN SCHEMA testns TO PUBLIC; +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testfunc(int)', 'EXECUTE'); -- true + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testagg(int)', 'EXECUTE'); -- true + has_function_privilege +------------------------ + t +(1 row) + +SELECT has_function_privilege('regress_priv_user1', 'testns.priv_testproc(int)', 'EXECUTE'); -- true + has_function_privilege +------------------------ + t +(1 row) + +DROP SCHEMA testns CASCADE; +NOTICE: drop cascades to 5 other objects +DETAIL: drop cascades to table testns.t1 +drop cascades to table testns.t2 +drop cascades to function testns.priv_testfunc(integer) +drop cascades to function testns.priv_testagg(integer) +drop cascades to function testns.priv_testproc(integer) +-- Change owner of the schema & and rename of new schema owner +\c - +CREATE ROLE regress_schemauser1 superuser login; +CREATE ROLE regress_schemauser2 superuser login; +SET SESSION ROLE regress_schemauser1; +CREATE SCHEMA testns; +SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid; + nspname | rolname +---------+--------------------- + testns | regress_schemauser1 +(1 row) + +ALTER SCHEMA testns OWNER TO regress_schemauser2; +ALTER ROLE regress_schemauser2 RENAME TO regress_schemauser_renamed; +SELECT nspname, rolname FROM pg_namespace, pg_roles WHERE pg_namespace.nspname = 'testns' AND pg_namespace.nspowner = pg_roles.oid; + nspname | rolname +---------+---------------------------- + testns | regress_schemauser_renamed +(1 row) + +set session role regress_schemauser_renamed; +DROP SCHEMA testns CASCADE; +-- clean up +\c - +DROP ROLE regress_schemauser1; +DROP ROLE regress_schemauser_renamed; +-- test that dependent privileges are revoked (or not) properly +\c - +set session role regress_priv_user1; +create table dep_priv_test (a int); +grant select on dep_priv_test to regress_priv_user2 with grant option; +grant select on dep_priv_test to regress_priv_user3 with grant option; +set session role regress_priv_user2; +grant select on dep_priv_test to regress_priv_user4 with grant option; +set session role regress_priv_user3; +grant select on dep_priv_test to regress_priv_user4 with grant option; +set session 
role regress_priv_user4; +grant select on dep_priv_test to regress_priv_user5; +\dp dep_priv_test + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+---------------+-------+-----------------------------------------------+-------------------+---------- + public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+| | + | | | regress_priv_user2=r*/regress_priv_user1 +| | + | | | regress_priv_user3=r*/regress_priv_user1 +| | + | | | regress_priv_user4=r*/regress_priv_user2 +| | + | | | regress_priv_user4=r*/regress_priv_user3 +| | + | | | regress_priv_user5=r/regress_priv_user4 | | +(1 row) + +set session role regress_priv_user2; +revoke select on dep_priv_test from regress_priv_user4 cascade; +\dp dep_priv_test + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+---------------+-------+-----------------------------------------------+-------------------+---------- + public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+| | + | | | regress_priv_user2=r*/regress_priv_user1 +| | + | | | regress_priv_user3=r*/regress_priv_user1 +| | + | | | regress_priv_user4=r*/regress_priv_user3 +| | + | | | regress_priv_user5=r/regress_priv_user4 | | +(1 row) + +set session role regress_priv_user3; +revoke select on dep_priv_test from regress_priv_user4 cascade; +\dp dep_priv_test + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+---------------+-------+-----------------------------------------------+-------------------+---------- + public | dep_priv_test | table | regress_priv_user1=arwdDxt/regress_priv_user1+| | + | | | regress_priv_user2=r*/regress_priv_user1 +| | + | | | regress_priv_user3=r*/regress_priv_user1 | | +(1 row) + +set session role regress_priv_user1; +drop table dep_priv_test; +-- clean up +\c +drop sequence x_seq; +DROP AGGREGATE priv_testagg1(int); +DROP FUNCTION priv_testfunc2(int); +DROP FUNCTION priv_testfunc4(boolean); +DROP PROCEDURE priv_testproc1(int); +DROP VIEW atestv0; +DROP VIEW atestv1; +DROP VIEW atestv2; +-- this should cascade to drop atestv4 +DROP VIEW atestv3 CASCADE; +NOTICE: drop cascades to view atestv4 +-- this should complain "does not exist" +DROP VIEW atestv4; +ERROR: view "atestv4" does not exist +DROP TABLE atest1; +DROP TABLE atest2; +DROP TABLE atest3; +DROP TABLE atest4; +DROP TABLE atest5; +DROP TABLE atest6; +DROP TABLE atestc; +DROP TABLE atestp1; +DROP TABLE atestp2; +SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; + lo_unlink +----------- + 1 + 1 + 1 + 1 + 1 +(5 rows) + +DROP GROUP regress_priv_group1; +DROP GROUP regress_priv_group2; +-- these are needed to clean up permissions +REVOKE USAGE ON LANGUAGE sql FROM regress_priv_user1; +DROP OWNED BY regress_priv_user1; +DROP USER regress_priv_user1; +DROP USER regress_priv_user2; +DROP USER regress_priv_user3; +DROP USER regress_priv_user4; +DROP USER regress_priv_user5; +DROP USER regress_priv_user6; +DROP USER regress_priv_user7; +DROP USER regress_priv_user8; -- does not exist +ERROR: role "regress_priv_user8" does not exist +-- permissions with LOCK TABLE +CREATE USER regress_locktable_user; +CREATE TABLE lock_table (a int); +-- LOCK TABLE and SELECT permission +GRANT SELECT ON lock_table TO regress_locktable_user; +SET SESSION AUTHORIZATION regress_locktable_user; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +COMMIT; +BEGIN; +LOCK 
TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail +ERROR: permission denied for table lock_table +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail +ERROR: permission denied for table lock_table +ROLLBACK; +\c +REVOKE SELECT ON lock_table FROM regress_locktable_user; +-- LOCK TABLE and INSERT permission +GRANT INSERT ON lock_table TO regress_locktable_user; +SET SESSION AUTHORIZATION regress_locktable_user; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail +ERROR: permission denied for table lock_table +ROLLBACK; +\c +REVOKE INSERT ON lock_table FROM regress_locktable_user; +-- LOCK TABLE and UPDATE permission +GRANT UPDATE ON lock_table TO regress_locktable_user; +SET SESSION AUTHORIZATION regress_locktable_user; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE UPDATE ON lock_table FROM regress_locktable_user; +-- LOCK TABLE and DELETE permission +GRANT DELETE ON lock_table TO regress_locktable_user; +SET SESSION AUTHORIZATION regress_locktable_user; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE DELETE ON lock_table FROM regress_locktable_user; +-- LOCK TABLE and TRUNCATE permission +GRANT TRUNCATE ON lock_table TO regress_locktable_user; +SET SESSION AUTHORIZATION regress_locktable_user; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE TRUNCATE ON lock_table FROM regress_locktable_user; +-- clean up +DROP TABLE lock_table; +DROP USER regress_locktable_user; +-- test to check privileges of system views pg_shmem_allocations and +-- pg_backend_memory_contexts. 
+-- switch to superuser +\c - +CREATE ROLE regress_readallstats; +SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no + has_table_privilege +--------------------- + f +(1 row) + +GRANT pg_read_all_stats TO regress_readallstats; +SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- yes + has_table_privilege +--------------------- + t +(1 row) + +SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes + has_table_privilege +--------------------- + t +(1 row) + +-- run query to ensure that functions within views can be executed +SET ROLE regress_readallstats; +SELECT COUNT(*) >= 0 AS ok FROM pg_backend_memory_contexts; + ok +---- + t +(1 row) + +SELECT COUNT(*) >= 0 AS ok FROM pg_shmem_allocations; + ok +---- + t +(1 row) + +RESET ROLE; +-- clean up +DROP ROLE regress_readallstats; +-- test role grantor machinery +CREATE ROLE regress_group; +CREATE ROLE regress_group_direct_manager; +CREATE ROLE regress_group_indirect_manager; +CREATE ROLE regress_group_member; +GRANT regress_group TO regress_group_direct_manager WITH INHERIT FALSE, ADMIN TRUE; +GRANT regress_group_direct_manager TO regress_group_indirect_manager; +SET SESSION AUTHORIZATION regress_group_direct_manager; +GRANT regress_group TO regress_group_member; +SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2; + member | grantor +------------------------------+------------------------------ + regress_group_direct_manager | BOOTSTRAP SUPERUSER + regress_group_member | regress_group_direct_manager +(2 rows) + +REVOKE regress_group FROM regress_group_member; +SET SESSION AUTHORIZATION regress_group_indirect_manager; +GRANT regress_group TO regress_group_member; +SELECT member::regrole::text, CASE WHEN grantor = 10 THEN 'BOOTSTRAP SUPERUSER' ELSE grantor::regrole::text END FROM pg_auth_members WHERE roleid = 'regress_group'::regrole ORDER BY 1, 2; + member | grantor +------------------------------+------------------------------ + regress_group_direct_manager | BOOTSTRAP SUPERUSER + regress_group_member | regress_group_direct_manager +(2 rows) + +REVOKE regress_group FROM regress_group_member; +RESET SESSION AUTHORIZATION; +DROP ROLE regress_group; +DROP ROLE regress_group_direct_manager; +DROP ROLE regress_group_indirect_manager; +DROP ROLE regress_group_member; +-- test SET and INHERIT options with object ownership changes +CREATE ROLE regress_roleoption_protagonist; +CREATE ROLE regress_roleoption_donor; +CREATE ROLE regress_roleoption_recipient; +CREATE SCHEMA regress_roleoption; +GRANT CREATE, USAGE ON SCHEMA regress_roleoption TO PUBLIC; +GRANT regress_roleoption_donor TO regress_roleoption_protagonist WITH INHERIT TRUE, SET FALSE; +GRANT regress_roleoption_recipient TO regress_roleoption_protagonist WITH INHERIT FALSE, SET TRUE; +SET SESSION AUTHORIZATION regress_roleoption_protagonist; +CREATE TABLE regress_roleoption.t1 (a int); +CREATE TABLE regress_roleoption.t2 (a int); +SET SESSION AUTHORIZATION regress_roleoption_donor; +CREATE TABLE regress_roleoption.t3 (a int); +SET SESSION AUTHORIZATION regress_roleoption_recipient; +CREATE TABLE regress_roleoption.t4 (a int); +SET SESSION AUTHORIZATION regress_roleoption_protagonist; +ALTER 
TABLE regress_roleoption.t1 OWNER TO regress_roleoption_donor; -- fails, can't be come donor +ERROR: must be able to SET ROLE "regress_roleoption_donor" +ALTER TABLE regress_roleoption.t2 OWNER TO regress_roleoption_recipient; -- works +ALTER TABLE regress_roleoption.t3 OWNER TO regress_roleoption_protagonist; -- works +ALTER TABLE regress_roleoption.t4 OWNER TO regress_roleoption_protagonist; -- fails, we don't inherit from recipient +ERROR: must be owner of table t4 +RESET SESSION AUTHORIZATION; +DROP TABLE regress_roleoption.t1; +DROP TABLE regress_roleoption.t2; +DROP TABLE regress_roleoption.t3; +DROP TABLE regress_roleoption.t4; +DROP SCHEMA regress_roleoption; +DROP ROLE regress_roleoption_protagonist; +DROP ROLE regress_roleoption_donor; +DROP ROLE regress_roleoption_recipient; diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out new file mode 100644 index 0000000..7cd0c27 --- /dev/null +++ b/src/test/regress/expected/psql.out @@ -0,0 +1,6660 @@ +-- +-- Tests for psql features that aren't closely connected to any +-- specific server features +-- +-- \set +-- fail: invalid name +\set invalid/name foo +invalid variable name: "invalid/name" +-- fail: invalid value for special variable +\set AUTOCOMMIT foo +unrecognized value "foo" for "AUTOCOMMIT": Boolean expected +\set FETCH_COUNT foo +invalid value "foo" for "FETCH_COUNT": integer expected +-- check handling of built-in boolean variable +\echo :ON_ERROR_ROLLBACK +off +\set ON_ERROR_ROLLBACK +\echo :ON_ERROR_ROLLBACK +on +\set ON_ERROR_ROLLBACK foo +unrecognized value "foo" for "ON_ERROR_ROLLBACK" +Available values are: on, off, interactive. +\echo :ON_ERROR_ROLLBACK +on +\set ON_ERROR_ROLLBACK on +\echo :ON_ERROR_ROLLBACK +on +\unset ON_ERROR_ROLLBACK +\echo :ON_ERROR_ROLLBACK +off +-- \g and \gx +SELECT 1 as one, 2 as two \g + one | two +-----+----- + 1 | 2 +(1 row) + +\gx +-[ RECORD 1 ] +one | 1 +two | 2 + +SELECT 3 as three, 4 as four \gx +-[ RECORD 1 ] +three | 3 +four | 4 + +\g + three | four +-------+------ + 3 | 4 +(1 row) + +-- \gx should work in FETCH_COUNT mode too +\set FETCH_COUNT 1 +SELECT 1 as one, 2 as two \g + one | two +-----+----- + 1 | 2 +(1 row) + +\gx +-[ RECORD 1 ] +one | 1 +two | 2 + +SELECT 3 as three, 4 as four \gx +-[ RECORD 1 ] +three | 3 +four | 4 + +\g + three | four +-------+------ + 3 | 4 +(1 row) + +\unset FETCH_COUNT +-- \g/\gx with pset options +SELECT 1 as one, 2 as two \g (format=csv csv_fieldsep='\t') +one two +1 2 +\g + one | two +-----+----- + 1 | 2 +(1 row) + +SELECT 1 as one, 2 as two \gx (title='foo bar') +foo bar +-[ RECORD 1 ] +one | 1 +two | 2 + +\g + one | two +-----+----- + 1 | 2 +(1 row) + +-- \bind (extended query protocol) +SELECT 1 \bind \g + ?column? +---------- + 1 +(1 row) + +SELECT $1 \bind 'foo' \g + ?column? +---------- + foo +(1 row) + +SELECT $1, $2 \bind 'foo' 'bar' \g + ?column? | ?column? 
+----------+---------- + foo | bar +(1 row) + +-- errors +-- parse error +SELECT foo \bind \g +ERROR: column "foo" does not exist +LINE 1: SELECT foo + ^ +-- tcop error +SELECT 1 \; SELECT 2 \bind \g +ERROR: cannot insert multiple commands into a prepared statement +-- bind error +SELECT $1, $2 \bind 'foo' \g +ERROR: bind message supplies 1 parameters, but prepared statement "" requires 2 +-- \gset +select 10 as test01, 20 as test02, 'Hello' as test03 \gset pref01_ +\echo :pref01_test01 :pref01_test02 :pref01_test03 +10 20 Hello +-- should fail: bad variable name +select 10 as "bad name" +\gset +invalid variable name: "bad name" +select 97 as "EOF", 'ok' as _foo \gset IGNORE +attempt to \gset into specially treated variable "IGNOREEOF" ignored +\echo :IGNORE_foo :IGNOREEOF +ok 0 +-- multiple backslash commands in one line +select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x +1 +select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y +3 +4 +select 5 as x, 6 as y \gset pref01_ \\ \g \echo :pref01_x :pref01_y + x | y +---+--- + 5 | 6 +(1 row) + +5 6 +select 7 as x, 8 as y \g \gset pref01_ \echo :pref01_x :pref01_y + x | y +---+--- + 7 | 8 +(1 row) + +7 8 +-- NULL should unset the variable +\set var2 xyz +select 1 as var1, NULL as var2, 3 as var3 \gset +\echo :var1 :var2 :var3 +1 :var2 3 +-- \gset requires just one tuple +select 10 as test01, 20 as test02 from generate_series(1,3) \gset +more than one row returned for \gset +select 10 as test01, 20 as test02 from generate_series(1,0) \gset +no rows returned for \gset +-- \gset should work in FETCH_COUNT mode too +\set FETCH_COUNT 1 +select 1 as x, 2 as y \gset pref01_ \\ \echo :pref01_x +1 +select 3 as x, 4 as y \gset pref01_ \echo :pref01_x \echo :pref01_y +3 +4 +select 10 as test01, 20 as test02 from generate_series(1,3) \gset +more than one row returned for \gset +select 10 as test01, 20 as test02 from generate_series(1,0) \gset +no rows returned for \gset +\unset FETCH_COUNT +-- \gdesc +SELECT + NULL AS zero, + 1 AS one, + 2.0 AS two, + 'three' AS three, + $1 AS four, + sin($2) as five, + 'foo'::varchar(4) as six, + CURRENT_DATE AS now +\gdesc + Column | Type +--------+---------------------- + zero | text + one | integer + two | numeric + three | text + four | text + five | double precision + six | character varying(4) + now | date +(8 rows) + +-- should work with tuple-returning utilities, such as EXECUTE +PREPARE test AS SELECT 1 AS first, 2 AS second; +EXECUTE test \gdesc + Column | Type +--------+--------- + first | integer + second | integer +(2 rows) + +EXPLAIN EXECUTE test \gdesc + Column | Type +------------+------ + QUERY PLAN | text +(1 row) + +-- should fail cleanly - syntax error +SELECT 1 + \gdesc +ERROR: syntax error at end of input +LINE 1: SELECT 1 + + ^ +-- check behavior with empty results +SELECT \gdesc +The command has no result, or the result has no columns. +CREATE TABLE bububu(a int) \gdesc +The command has no result, or the result has no columns. +-- subject command should not have executed +TABLE bububu; -- fail +ERROR: relation "bububu" does not exist +LINE 1: TABLE bububu; + ^ +-- query buffer should remain unchanged +SELECT 1 AS x, 'Hello', 2 AS y, true AS "dirty\name" +\gdesc + Column | Type +------------+--------- + x | integer + ?column? | text + y | integer + dirty\name | boolean +(4 rows) + +\g + x | ?column? 
| y | dirty\name +---+----------+---+------------ + 1 | Hello | 2 | t +(1 row) + +-- all on one line +SELECT 3 AS x, 'Hello', 4 AS y, true AS "dirty\name" \gdesc \g + Column | Type +------------+--------- + x | integer + ?column? | text + y | integer + dirty\name | boolean +(4 rows) + + x | ?column? | y | dirty\name +---+----------+---+------------ + 3 | Hello | 4 | t +(1 row) + +-- test for server bug #17983 with empty statement in aborted transaction +set search_path = default; +begin; +bogus; +ERROR: syntax error at or near "bogus" +LINE 1: bogus; + ^ +; +\gdesc +The command has no result, or the result has no columns. +rollback; +-- \gexec +create temporary table gexec_test(a int, b text, c date, d float); +select format('create index on gexec_test(%I)', attname) +from pg_attribute +where attrelid = 'gexec_test'::regclass and attnum > 0 +order by attnum +\gexec +create index on gexec_test(a) +create index on gexec_test(b) +create index on gexec_test(c) +create index on gexec_test(d) +-- \gexec should work in FETCH_COUNT mode too +-- (though the fetch limit applies to the executed queries not the meta query) +\set FETCH_COUNT 1 +select 'select 1 as ones', 'select x.y, x.y*2 as double from generate_series(1,4) as x(y)' +union all +select 'drop table gexec_test', NULL +union all +select 'drop table gexec_test', 'select ''2000-01-01''::date as party_over' +\gexec +select 1 as ones + ones +------ + 1 +(1 row) + +select x.y, x.y*2 as double from generate_series(1,4) as x(y) + y | double +---+-------- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 +(4 rows) + +drop table gexec_test +drop table gexec_test +ERROR: table "gexec_test" does not exist +select '2000-01-01'::date as party_over + party_over +------------ + 01-01-2000 +(1 row) + +\unset FETCH_COUNT +-- \setenv, \getenv +-- ensure MYVAR isn't set +\setenv MYVAR +-- in which case, reading it doesn't change the target +\getenv res MYVAR +\echo :res +:res +-- now set it +\setenv MYVAR 'environment value' +\getenv res MYVAR +\echo :res +environment value +-- show all pset options +\pset +border 1 +columns 0 +csv_fieldsep ',' +expanded off +fieldsep '|' +fieldsep_zero off +footer on +format aligned +linestyle ascii +null '' +numericlocale off +pager 1 +pager_min_lines 0 +recordsep '\n' +recordsep_zero off +tableattr +title +tuples_only off +unicode_border_linestyle single +unicode_column_linestyle single +unicode_header_linestyle single +xheader_width full +-- test multi-line headers, wrapping, and newline indicators +-- in aligned, unaligned, and wrapped formats +prepare q as select array_to_string(array_agg(repeat('x',2*n)),E'\n') as "ab + +c", array_to_string(array_agg(repeat('y',20-2*n)),E'\n') as "a +bc" from generate_series(1,10) as n(n) group by n>1 order by n>1; +\pset linestyle ascii +\pset expanded off +\pset columns 40 +\pset border 0 +\pset format unaligned +execute q; +ab + +c|a +bc +xx|yyyyyyyyyyyyyyyyyy +xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +(2 rows) +\pset format aligned +execute q; + ab + a + + + bc + c +-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx +yyyyyyyyyyyyyyyy + +xxxxxx +yyyyyyyyyyyyyy + +xxxxxxxx +yyyyyyyyyyyy + +xxxxxxxxxx +yyyyyyyyyy + +xxxxxxxxxxxx +yyyyyyyy + +xxxxxxxxxxxxxx +yyyyyy + +xxxxxxxxxxxxxxxx +yyyy + +xxxxxxxxxxxxxxxxxx +yy + +xxxxxxxxxxxxxxxxxxxx +(2 rows) + +\pset format wrapped +execute q; + ab + a + + + bc + c 
+-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx +yyyyyyyyyyyyyyyy + +xxxxxx +yyyyyyyyyyyyyy + +xxxxxxxx +yyyyyyyyyyyy + +xxxxxxxxxx +yyyyyyyyyy + +xxxxxxxxxxxx +yyyyyyyy + +xxxxxxxxxxxxxx +yyyyyy + +xxxxxxxxxxxxxxxx +yyyy + +xxxxxxxxxxxxxxxxxx +yy + +xxxxxxxxxxxxxxxxxxxx +(2 rows) + +\pset border 1 +\pset format unaligned +execute q; +ab + +c|a +bc +xx|yyyyyyyyyyyyyyyyyy +xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +(2 rows) +\pset format aligned +execute q; + ab +| a + + +| bc + c | +----------------------+-------------------- + xx | yyyyyyyyyyyyyyyyyy + xxxx +| yyyyyyyyyyyyyyyy + + xxxxxx +| yyyyyyyyyyyyyy + + xxxxxxxx +| yyyyyyyyyyyy + + xxxxxxxxxx +| yyyyyyyyyy + + xxxxxxxxxxxx +| yyyyyyyy + + xxxxxxxxxxxxxx +| yyyyyy + + xxxxxxxxxxxxxxxx +| yyyy + + xxxxxxxxxxxxxxxxxx +| yy + + xxxxxxxxxxxxxxxxxxxx | +(2 rows) + +\pset format wrapped +execute q; + ab +| a + + +| bc + c | +-------------------+-------------------- + xx | yyyyyyyyyyyyyyyyyy + xxxx +| yyyyyyyyyyyyyyyy + + xxxxxx +| yyyyyyyyyyyyyy + + xxxxxxxx +| yyyyyyyyyyyy + + xxxxxxxxxx +| yyyyyyyyyy + + xxxxxxxxxxxx +| yyyyyyyy + + xxxxxxxxxxxxxx +| yyyyyy + + xxxxxxxxxxxxxxxx +| yyyy + + xxxxxxxxxxxxxxxxx.| yy + +.x +| + xxxxxxxxxxxxxxxxx.| +.xxx | +(2 rows) + +\pset border 2 +\pset format unaligned +execute q; +ab + +c|a +bc +xx|yyyyyyyyyyyyyyyyyy +xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +(2 rows) +\pset format aligned +execute q; ++----------------------+--------------------+ +| ab +| a +| +| +| bc | +| c | | ++----------------------+--------------------+ +| xx | yyyyyyyyyyyyyyyyyy | +| xxxx +| yyyyyyyyyyyyyyyy +| +| xxxxxx +| yyyyyyyyyyyyyy +| +| xxxxxxxx +| yyyyyyyyyyyy +| +| xxxxxxxxxx +| yyyyyyyyyy +| +| xxxxxxxxxxxx +| yyyyyyyy +| +| xxxxxxxxxxxxxx +| yyyyyy +| +| xxxxxxxxxxxxxxxx +| yyyy +| +| xxxxxxxxxxxxxxxxxx +| yy +| +| xxxxxxxxxxxxxxxxxxxx | | ++----------------------+--------------------+ +(2 rows) + +\pset format wrapped +execute q; ++-----------------+--------------------+ +| ab +| a +| +| +| bc | +| c | | ++-----------------+--------------------+ +| xx | yyyyyyyyyyyyyyyyyy | +| xxxx +| yyyyyyyyyyyyyyyy +| +| xxxxxx +| yyyyyyyyyyyyyy +| +| xxxxxxxx +| yyyyyyyyyyyy +| +| xxxxxxxxxx +| yyyyyyyyyy +| +| xxxxxxxxxxxx +| yyyyyyyy +| +| xxxxxxxxxxxxxx +| yyyyyy +| +| xxxxxxxxxxxxxxx.| yyyy +| +|.x +| yy +| +| xxxxxxxxxxxxxxx.| | +|.xxx +| | +| xxxxxxxxxxxxxxx.| | +|.xxxxx | | ++-----------------+--------------------+ +(2 rows) + +\pset expanded on +\pset columns 20 +\pset border 0 +\pset format unaligned +execute q; +ab + +c|xx +a +bc|yyyyyyyyyyyyyyyyyy + +ab + +c|xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx +a +bc|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +\pset format aligned +execute q; +* Record 1 +ab+ xx + + +c +a + yyyyyyyyyyyyyyyyyy +bc +* Record 2 +ab+ xxxx + + + xxxxxx + +c xxxxxxxx + + xxxxxxxxxx + + xxxxxxxxxxxx + + xxxxxxxxxxxxxx + + xxxxxxxxxxxxxxxx + + xxxxxxxxxxxxxxxxxx + + xxxxxxxxxxxxxxxxxxxx +a + yyyyyyyyyyyyyyyy + +bc yyyyyyyyyyyyyy + + yyyyyyyyyyyy + + yyyyyyyyyy + + yyyyyyyy + + yyyyyy + + yyyy + + yy + + + +\pset 
format wrapped +execute q; +* Record 1 +ab+ xx + + +c +a + yyyyyyyyyyyyyyy. +bc .yyy +* Record 2 +ab+ xxxx + + + xxxxxx + +c xxxxxxxx + + xxxxxxxxxx + + xxxxxxxxxxxx + + xxxxxxxxxxxxxx + + xxxxxxxxxxxxxxx. + .x + + xxxxxxxxxxxxxxx. + .xxx + + xxxxxxxxxxxxxxx. + .xxxxx +a + yyyyyyyyyyyyyyy. +bc .y + + yyyyyyyyyyyyyy + + yyyyyyyyyyyy + + yyyyyyyyyy + + yyyyyyyy + + yyyyyy + + yyyy + + yy + + + +\pset border 1 +\pset format unaligned +execute q; +ab + +c|xx +a +bc|yyyyyyyyyyyyyyyyyy + +ab + +c|xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx +a +bc|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +\pset format aligned +execute q; +-[ RECORD 1 ]------------ +ab+| xx + +| +c | +a +| yyyyyyyyyyyyyyyyyy +bc | +-[ RECORD 2 ]------------ +ab+| xxxx + + +| xxxxxx + +c | xxxxxxxx + + | xxxxxxxxxx + + | xxxxxxxxxxxx + + | xxxxxxxxxxxxxx + + | xxxxxxxxxxxxxxxx + + | xxxxxxxxxxxxxxxxxx + + | xxxxxxxxxxxxxxxxxxxx +a +| yyyyyyyyyyyyyyyy + +bc | yyyyyyyyyyyyyy + + | yyyyyyyyyyyy + + | yyyyyyyyyy + + | yyyyyyyy + + | yyyyyy + + | yyyy + + | yy + + | + +\pset format wrapped +execute q; +-[ RECORD 1 ]------ +ab+| xx + +| +c | +a +| yyyyyyyyyyyyyy. +bc |.yyyy +-[ RECORD 2 ]------ +ab+| xxxx + + +| xxxxxx + +c | xxxxxxxx + + | xxxxxxxxxx + + | xxxxxxxxxxxx + + | xxxxxxxxxxxxxx+ + | xxxxxxxxxxxxxx. + |.xx + + | xxxxxxxxxxxxxx. + |.xxxx + + | xxxxxxxxxxxxxx. + |.xxxxxx +a +| yyyyyyyyyyyyyy. +bc |.yy + + | yyyyyyyyyyyyyy+ + | yyyyyyyyyyyy + + | yyyyyyyyyy + + | yyyyyyyy + + | yyyyyy + + | yyyy + + | yy + + | + +\pset border 2 +\pset format unaligned +execute q; +ab + +c|xx +a +bc|yyyyyyyyyyyyyyyyyy + +ab + +c|xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx +a +bc|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +\pset format aligned +execute q; ++-[ RECORD 1 ]--------------+ +| ab+| xx | +| +| | +| c | | +| a +| yyyyyyyyyyyyyyyyyy | +| bc | | ++-[ RECORD 2 ]--------------+ +| ab+| xxxx +| +| +| xxxxxx +| +| c | xxxxxxxx +| +| | xxxxxxxxxx +| +| | xxxxxxxxxxxx +| +| | xxxxxxxxxxxxxx +| +| | xxxxxxxxxxxxxxxx +| +| | xxxxxxxxxxxxxxxxxx +| +| | xxxxxxxxxxxxxxxxxxxx | +| a +| yyyyyyyyyyyyyyyy +| +| bc | yyyyyyyyyyyyyy +| +| | yyyyyyyyyyyy +| +| | yyyyyyyyyy +| +| | yyyyyyyy +| +| | yyyyyy +| +| | yyyy +| +| | yy +| +| | | ++----+----------------------+ + +\pset format wrapped +execute q; ++-[ RECORD 1 ]-----+ +| ab+| xx | +| +| | +| c | | +| a +| yyyyyyyyyyy.| +| bc |.yyyyyyy | ++-[ RECORD 2 ]-----+ +| ab+| xxxx +| +| +| xxxxxx +| +| c | xxxxxxxx +| +| | xxxxxxxxxx +| +| | xxxxxxxxxxx.| +| |.x +| +| | xxxxxxxxxxx.| +| |.xxx +| +| | xxxxxxxxxxx.| +| |.xxxxx +| +| | xxxxxxxxxxx.| +| |.xxxxxxx +| +| | xxxxxxxxxxx.| +| |.xxxxxxxxx | +| a +| yyyyyyyyyyy.| +| bc |.yyyyy +| +| | yyyyyyyyyyy.| +| |.yyy +| +| | yyyyyyyyyyy.| +| |.y +| +| | yyyyyyyyyy +| +| | yyyyyyyy +| +| | yyyyyy +| +| | yyyy +| +| | yy +| +| | | ++----+-------------+ + +\pset linestyle old-ascii +\pset expanded off +\pset columns 40 +\pset border 0 +\pset format unaligned +execute q; +ab + +c|a +bc +xx|yyyyyyyyyyyyyyyyyy +xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +(2 rows) +\pset format aligned +execute q; + ab a + + bc + c + +-------------------- 
------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx yyyyyyyyyyyyyyyy +xxxxxx yyyyyyyyyyyyyy +xxxxxxxx yyyyyyyyyyyy +xxxxxxxxxx yyyyyyyyyy +xxxxxxxxxxxx yyyyyyyy +xxxxxxxxxxxxxx yyyyyy +xxxxxxxxxxxxxxxx yyyy +xxxxxxxxxxxxxxxxxx yy +xxxxxxxxxxxxxxxxxxxx +(2 rows) + +\pset format wrapped +execute q; + ab a + + bc + c + +-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx yyyyyyyyyyyyyyyy +xxxxxx yyyyyyyyyyyyyy +xxxxxxxx yyyyyyyyyyyy +xxxxxxxxxx yyyyyyyyyy +xxxxxxxxxxxx yyyyyyyy +xxxxxxxxxxxxxx yyyyyy +xxxxxxxxxxxxxxxx yyyy +xxxxxxxxxxxxxxxxxx yy +xxxxxxxxxxxxxxxxxxxx +(2 rows) + +\pset border 1 +\pset format unaligned +execute q; +ab + +c|a +bc +xx|yyyyyyyyyyyyyyyyyy +xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +(2 rows) +\pset format aligned +execute q; + ab | a ++ |+ bc ++ c |+ +----------------------+-------------------- + xx | yyyyyyyyyyyyyyyyyy + xxxx | yyyyyyyyyyyyyyyy + xxxxxx : yyyyyyyyyyyyyy + xxxxxxxx : yyyyyyyyyyyy + xxxxxxxxxx : yyyyyyyyyy + xxxxxxxxxxxx : yyyyyyyy + xxxxxxxxxxxxxx : yyyyyy + xxxxxxxxxxxxxxxx : yyyy + xxxxxxxxxxxxxxxxxx : yy + xxxxxxxxxxxxxxxxxxxx : +(2 rows) + +\pset format wrapped +execute q; + ab | a ++ |+ bc ++ c |+ +-------------------+-------------------- + xx | yyyyyyyyyyyyyyyyyy + xxxx | yyyyyyyyyyyyyyyy + xxxxxx : yyyyyyyyyyyyyy + xxxxxxxx : yyyyyyyyyyyy + xxxxxxxxxx : yyyyyyyyyy + xxxxxxxxxxxx : yyyyyyyy + xxxxxxxxxxxxxx : yyyyyy + xxxxxxxxxxxxxxxx : yyyy + xxxxxxxxxxxxxxxxx : yy + x : + xxxxxxxxxxxxxxxxx + xxx +(2 rows) + +\pset border 2 +\pset format unaligned +execute q; +ab + +c|a +bc +xx|yyyyyyyyyyyyyyyyyy +xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +(2 rows) +\pset format aligned +execute q; ++----------------------+--------------------+ +| ab | a | +|+ |+ bc | +|+ c |+ | ++----------------------+--------------------+ +| xx | yyyyyyyyyyyyyyyyyy | +| xxxx | yyyyyyyyyyyyyyyy | +| xxxxxx : yyyyyyyyyyyyyy | +| xxxxxxxx : yyyyyyyyyyyy | +| xxxxxxxxxx : yyyyyyyyyy | +| xxxxxxxxxxxx : yyyyyyyy | +| xxxxxxxxxxxxxx : yyyyyy | +| xxxxxxxxxxxxxxxx : yyyy | +| xxxxxxxxxxxxxxxxxx : yy | +| xxxxxxxxxxxxxxxxxxxx : | ++----------------------+--------------------+ +(2 rows) + +\pset format wrapped +execute q; ++-----------------+--------------------+ +| ab | a | +|+ |+ bc | +|+ c |+ | ++-----------------+--------------------+ +| xx | yyyyyyyyyyyyyyyyyy | +| xxxx | yyyyyyyyyyyyyyyy | +| xxxxxx : yyyyyyyyyyyyyy | +| xxxxxxxx : yyyyyyyyyyyy | +| xxxxxxxxxx : yyyyyyyyyy | +| xxxxxxxxxxxx : yyyyyyyy | +| xxxxxxxxxxxxxx : yyyyyy | +| xxxxxxxxxxxxxxx : yyyy | +| x : yy | +| xxxxxxxxxxxxxxx : | +| xxx | +| xxxxxxxxxxxxxxx | +| xxxxx | ++-----------------+--------------------+ +(2 rows) + +\pset expanded on +\pset columns 20 +\pset border 0 +\pset format unaligned +execute q; +ab + +c|xx +a +bc|yyyyyyyyyyyyyyyyyy + +ab + +c|xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx +a +bc|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +\pset format aligned +execute q; +* Record 1 + ab xx ++ ++c + a yyyyyyyyyyyyyyyyyy ++bc +* Record 2 + ab xxxx ++ xxxxxx ++c xxxxxxxx + xxxxxxxxxx + xxxxxxxxxxxx + xxxxxxxxxxxxxx + 
xxxxxxxxxxxxxxxx + xxxxxxxxxxxxxxxxxx + xxxxxxxxxxxxxxxxxxxx + a yyyyyyyyyyyyyyyy ++bc yyyyyyyyyyyyyy + yyyyyyyyyyyy + yyyyyyyyyy + yyyyyyyy + yyyyyy + yyyy + yy + + +\pset format wrapped +execute q; +* Record 1 + ab xx ++ ++c + a yyyyyyyyyyyyyyyy ++bc yy +* Record 2 + ab xxxx ++ xxxxxx ++c xxxxxxxx + xxxxxxxxxx + xxxxxxxxxxxx + xxxxxxxxxxxxxx + xxxxxxxxxxxxxxxx + xxxxxxxxxxxxxxxx + xx + xxxxxxxxxxxxxxxx + xxxx + a yyyyyyyyyyyyyyyy ++bc yyyyyyyyyyyyyy + yyyyyyyyyyyy + yyyyyyyyyy + yyyyyyyy + yyyyyy + yyyy + yy + + +\pset border 1 +\pset format unaligned +execute q; +ab + +c|xx +a +bc|yyyyyyyyyyyyyyyyyy + +ab + +c|xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx +a +bc|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +\pset format aligned +execute q; +-[ RECORD 1 ]------------- + ab | xx ++ ; ++c ; + a | yyyyyyyyyyyyyyyyyy ++bc ; +-[ RECORD 2 ]------------- + ab | xxxx ++ : xxxxxx ++c : xxxxxxxx + : xxxxxxxxxx + : xxxxxxxxxxxx + : xxxxxxxxxxxxxx + : xxxxxxxxxxxxxxxx + : xxxxxxxxxxxxxxxxxx + : xxxxxxxxxxxxxxxxxxxx + a | yyyyyyyyyyyyyyyy ++bc : yyyyyyyyyyyyyy + : yyyyyyyyyyyy + : yyyyyyyyyy + : yyyyyyyy + : yyyyyy + : yyyy + : yy + : + +\pset format wrapped +execute q; +-[ RECORD 1 ]------- + ab | xx ++ ; ++c ; + a | yyyyyyyyyyyyyy ++bc ; yyyy +-[ RECORD 2 ]------- + ab | xxxx ++ : xxxxxx ++c : xxxxxxxx + : xxxxxxxxxx + : xxxxxxxxxxxx + : xxxxxxxxxxxxxx + : xxxxxxxxxxxxxx + ; xx + : xxxxxxxxxxxxxx + ; xxxx + : xxxxxxxxxxxxxx + ; xxxxxx + a | yyyyyyyyyyyyyy ++bc ; yy + : yyyyyyyyyyyyyy + : yyyyyyyyyyyy + : yyyyyyyyyy + : yyyyyyyy + : yyyyyy + : yyyy + : yy + : + +\pset border 2 +\pset format unaligned +execute q; +ab + +c|xx +a +bc|yyyyyyyyyyyyyyyyyy + +ab + +c|xxxx +xxxxxx +xxxxxxxx +xxxxxxxxxx +xxxxxxxxxxxx +xxxxxxxxxxxxxx +xxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxx +a +bc|yyyyyyyyyyyyyyyy +yyyyyyyyyyyyyy +yyyyyyyyyyyy +yyyyyyyyyy +yyyyyyyy +yyyyyy +yyyy +yy + +\pset format aligned +execute q; ++-[ RECORD 1 ]--------------+ +| ab | xx | +|+ ; | +|+c ; | +| a | yyyyyyyyyyyyyyyyyy | +|+bc ; | ++-[ RECORD 2 ]--------------+ +| ab | xxxx | +|+ : xxxxxx | +|+c : xxxxxxxx | +| : xxxxxxxxxx | +| : xxxxxxxxxxxx | +| : xxxxxxxxxxxxxx | +| : xxxxxxxxxxxxxxxx | +| : xxxxxxxxxxxxxxxxxx | +| : xxxxxxxxxxxxxxxxxxxx | +| a | yyyyyyyyyyyyyyyy | +|+bc : yyyyyyyyyyyyyy | +| : yyyyyyyyyyyy | +| : yyyyyyyyyy | +| : yyyyyyyy | +| : yyyyyy | +| : yyyy | +| : yy | +| : | ++----+----------------------+ + +\pset format wrapped +execute q; ++-[ RECORD 1 ]-----+ +| ab | xx | +|+ ; | +|+c ; | +| a | yyyyyyyyyyy | +|+bc ; yyyyyyy | ++-[ RECORD 2 ]-----+ +| ab | xxxx | +|+ : xxxxxx | +|+c : xxxxxxxx | +| : xxxxxxxxxx | +| : xxxxxxxxxxx | +| ; x | +| : xxxxxxxxxxx | +| ; xxx | +| : xxxxxxxxxxx | +| ; xxxxx | +| : xxxxxxxxxxx | +| ; xxxxxxx | +| : xxxxxxxxxxx | +| ; xxxxxxxxx | +| a | yyyyyyyyyyy | +|+bc ; yyyyy | +| : yyyyyyyyyyy | +| ; yyy | +| : yyyyyyyyyyy | +| ; y | +| : yyyyyyyyyy | +| : yyyyyyyy | +| : yyyyyy | +| : yyyy | +| : yy | +| : | ++----+-------------+ + +deallocate q; +-- test single-line header and data +prepare q as select repeat('x',2*n) as "0123456789abcdef", repeat('y',20-2*n) as "0123456789" from generate_series(1,10) as n; +\pset linestyle ascii +\pset expanded off +\pset columns 40 +\pset border 0 +\pset format unaligned +execute q; +0123456789abcdef|0123456789 +xx|yyyyyyyyyyyyyyyyyy +xxxx|yyyyyyyyyyyyyyyy +xxxxxx|yyyyyyyyyyyyyy +xxxxxxxx|yyyyyyyyyyyy 
+xxxxxxxxxx|yyyyyyyyyy +xxxxxxxxxxxx|yyyyyyyy +xxxxxxxxxxxxxx|yyyyyy +xxxxxxxxxxxxxxxx|yyyy +xxxxxxxxxxxxxxxxxx|yy +xxxxxxxxxxxxxxxxxxxx| +(10 rows) +\pset format aligned +execute q; + 0123456789abcdef 0123456789 +-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx yyyyyyyyyyyyyyyy +xxxxxx yyyyyyyyyyyyyy +xxxxxxxx yyyyyyyyyyyy +xxxxxxxxxx yyyyyyyyyy +xxxxxxxxxxxx yyyyyyyy +xxxxxxxxxxxxxx yyyyyy +xxxxxxxxxxxxxxxx yyyy +xxxxxxxxxxxxxxxxxx yy +xxxxxxxxxxxxxxxxxxxx +(10 rows) + +\pset format wrapped +execute q; + 0123456789abcdef 0123456789 +-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx yyyyyyyyyyyyyyyy +xxxxxx yyyyyyyyyyyyyy +xxxxxxxx yyyyyyyyyyyy +xxxxxxxxxx yyyyyyyyyy +xxxxxxxxxxxx yyyyyyyy +xxxxxxxxxxxxxx yyyyyy +xxxxxxxxxxxxxxxx yyyy +xxxxxxxxxxxxxxxxxx yy +xxxxxxxxxxxxxxxxxxxx +(10 rows) + +\pset border 1 +\pset format unaligned +execute q; +0123456789abcdef|0123456789 +xx|yyyyyyyyyyyyyyyyyy +xxxx|yyyyyyyyyyyyyyyy +xxxxxx|yyyyyyyyyyyyyy +xxxxxxxx|yyyyyyyyyyyy +xxxxxxxxxx|yyyyyyyyyy +xxxxxxxxxxxx|yyyyyyyy +xxxxxxxxxxxxxx|yyyyyy +xxxxxxxxxxxxxxxx|yyyy +xxxxxxxxxxxxxxxxxx|yy +xxxxxxxxxxxxxxxxxxxx| +(10 rows) +\pset format aligned +execute q; + 0123456789abcdef | 0123456789 +----------------------+-------------------- + xx | yyyyyyyyyyyyyyyyyy + xxxx | yyyyyyyyyyyyyyyy + xxxxxx | yyyyyyyyyyyyyy + xxxxxxxx | yyyyyyyyyyyy + xxxxxxxxxx | yyyyyyyyyy + xxxxxxxxxxxx | yyyyyyyy + xxxxxxxxxxxxxx | yyyyyy + xxxxxxxxxxxxxxxx | yyyy + xxxxxxxxxxxxxxxxxx | yy + xxxxxxxxxxxxxxxxxxxx | +(10 rows) + +\pset format wrapped +execute q; + 0123456789abcdef | 0123456789 +---------------------+------------------ + xx | yyyyyyyyyyyyyyyy. + |.yy + xxxx | yyyyyyyyyyyyyyyy + xxxxxx | yyyyyyyyyyyyyy + xxxxxxxx | yyyyyyyyyyyy + xxxxxxxxxx | yyyyyyyyyy + xxxxxxxxxxxx | yyyyyyyy + xxxxxxxxxxxxxx | yyyyyy + xxxxxxxxxxxxxxxx | yyyy + xxxxxxxxxxxxxxxxxx | yy + xxxxxxxxxxxxxxxxxxx.| +.x | +(10 rows) + +\pset border 2 +\pset format unaligned +execute q; +0123456789abcdef|0123456789 +xx|yyyyyyyyyyyyyyyyyy +xxxx|yyyyyyyyyyyyyyyy +xxxxxx|yyyyyyyyyyyyyy +xxxxxxxx|yyyyyyyyyyyy +xxxxxxxxxx|yyyyyyyyyy +xxxxxxxxxxxx|yyyyyyyy +xxxxxxxxxxxxxx|yyyyyy +xxxxxxxxxxxxxxxx|yyyy +xxxxxxxxxxxxxxxxxx|yy +xxxxxxxxxxxxxxxxxxxx| +(10 rows) +\pset format aligned +execute q; ++----------------------+--------------------+ +| 0123456789abcdef | 0123456789 | ++----------------------+--------------------+ +| xx | yyyyyyyyyyyyyyyyyy | +| xxxx | yyyyyyyyyyyyyyyy | +| xxxxxx | yyyyyyyyyyyyyy | +| xxxxxxxx | yyyyyyyyyyyy | +| xxxxxxxxxx | yyyyyyyyyy | +| xxxxxxxxxxxx | yyyyyyyy | +| xxxxxxxxxxxxxx | yyyyyy | +| xxxxxxxxxxxxxxxx | yyyy | +| xxxxxxxxxxxxxxxxxx | yy | +| xxxxxxxxxxxxxxxxxxxx | | ++----------------------+--------------------+ +(10 rows) + +\pset format wrapped +execute q; ++--------------------+-----------------+ +| 0123456789abcdef | 0123456789 | ++--------------------+-----------------+ +| xx | yyyyyyyyyyyyyyy.| +| |.yyy | +| xxxx | yyyyyyyyyyyyyyy.| +| |.y | +| xxxxxx | yyyyyyyyyyyyyy | +| xxxxxxxx | yyyyyyyyyyyy | +| xxxxxxxxxx | yyyyyyyyyy | +| xxxxxxxxxxxx | yyyyyyyy | +| xxxxxxxxxxxxxx | yyyyyy | +| xxxxxxxxxxxxxxxx | yyyy | +| xxxxxxxxxxxxxxxxxx | yy | +| xxxxxxxxxxxxxxxxxx.| | +|.xx | | ++--------------------+-----------------+ +(10 rows) + +\pset expanded on +\pset columns 30 +\pset border 0 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + 
+0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; +* Record 1 +0123456789abcdef xx +0123456789 yyyyyyyyyyyyyyyyyy +* Record 2 +0123456789abcdef xxxx +0123456789 yyyyyyyyyyyyyyyy +* Record 3 +0123456789abcdef xxxxxx +0123456789 yyyyyyyyyyyyyy +* Record 4 +0123456789abcdef xxxxxxxx +0123456789 yyyyyyyyyyyy +* Record 5 +0123456789abcdef xxxxxxxxxx +0123456789 yyyyyyyyyy +* Record 6 +0123456789abcdef xxxxxxxxxxxx +0123456789 yyyyyyyy +* Record 7 +0123456789abcdef xxxxxxxxxxxxxx +0123456789 yyyyyy +* Record 8 +0123456789abcdef xxxxxxxxxxxxxxxx +0123456789 yyyy +* Record 9 +0123456789abcdef xxxxxxxxxxxxxxxxxx +0123456789 yy +* Record 10 +0123456789abcdef xxxxxxxxxxxxxxxxxxxx +0123456789 + +\pset format wrapped +execute q; +* Record 1 +0123456789abcdef xx +0123456789 yyyyyyyyyyyy. + .yyyyyy +* Record 2 +0123456789abcdef xxxx +0123456789 yyyyyyyyyyyy. + .yyyy +* Record 3 +0123456789abcdef xxxxxx +0123456789 yyyyyyyyyyyy. + .yy +* Record 4 +0123456789abcdef xxxxxxxx +0123456789 yyyyyyyyyyyy +* Record 5 +0123456789abcdef xxxxxxxxxx +0123456789 yyyyyyyyyy +* Record 6 +0123456789abcdef xxxxxxxxxxxx +0123456789 yyyyyyyy +* Record 7 +0123456789abcdef xxxxxxxxxxxx. + .xx +0123456789 yyyyyy +* Record 8 +0123456789abcdef xxxxxxxxxxxx. + .xxxx +0123456789 yyyy +* Record 9 +0123456789abcdef xxxxxxxxxxxx. + .xxxxxx +0123456789 yy +* Record 10 +0123456789abcdef xxxxxxxxxxxx. + .xxxxxxxx +0123456789 + +\pset border 1 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; +-[ RECORD 1 ]----+--------------------- +0123456789abcdef | xx +0123456789 | yyyyyyyyyyyyyyyyyy +-[ RECORD 2 ]----+--------------------- +0123456789abcdef | xxxx +0123456789 | yyyyyyyyyyyyyyyy +-[ RECORD 3 ]----+--------------------- +0123456789abcdef | xxxxxx +0123456789 | yyyyyyyyyyyyyy +-[ RECORD 4 ]----+--------------------- +0123456789abcdef | xxxxxxxx +0123456789 | yyyyyyyyyyyy +-[ RECORD 5 ]----+--------------------- +0123456789abcdef | xxxxxxxxxx +0123456789 | yyyyyyyyyy +-[ RECORD 6 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxx +0123456789 | yyyyyyyy +-[ RECORD 7 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxx +0123456789 | yyyyyy +-[ RECORD 8 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxx +0123456789 | yyyy +-[ RECORD 9 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxx +0123456789 | yy +-[ RECORD 10 ]---+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxxxx +0123456789 | + +\pset format wrapped +execute q; +-[ RECORD 1 ]----+----------- +0123456789abcdef | xx +0123456789 | yyyyyyyyyy. 
+ |.yyyyyyyy +-[ RECORD 2 ]----+----------- +0123456789abcdef | xxxx +0123456789 | yyyyyyyyyy. + |.yyyyyy +-[ RECORD 3 ]----+----------- +0123456789abcdef | xxxxxx +0123456789 | yyyyyyyyyy. + |.yyyy +-[ RECORD 4 ]----+----------- +0123456789abcdef | xxxxxxxx +0123456789 | yyyyyyyyyy. + |.yy +-[ RECORD 5 ]----+----------- +0123456789abcdef | xxxxxxxxxx +0123456789 | yyyyyyyyyy +-[ RECORD 6 ]----+----------- +0123456789abcdef | xxxxxxxxxx. + |.xx +0123456789 | yyyyyyyy +-[ RECORD 7 ]----+----------- +0123456789abcdef | xxxxxxxxxx. + |.xxxx +0123456789 | yyyyyy +-[ RECORD 8 ]----+----------- +0123456789abcdef | xxxxxxxxxx. + |.xxxxxx +0123456789 | yyyy +-[ RECORD 9 ]----+----------- +0123456789abcdef | xxxxxxxxxx. + |.xxxxxxxx +0123456789 | yy +-[ RECORD 10 ]---+----------- +0123456789abcdef | xxxxxxxxxx. + |.xxxxxxxxxx +0123456789 | + +\pset border 2 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; ++-[ RECORD 1 ]-----+----------------------+ +| 0123456789abcdef | xx | +| 0123456789 | yyyyyyyyyyyyyyyyyy | ++-[ RECORD 2 ]-----+----------------------+ +| 0123456789abcdef | xxxx | +| 0123456789 | yyyyyyyyyyyyyyyy | ++-[ RECORD 3 ]-----+----------------------+ +| 0123456789abcdef | xxxxxx | +| 0123456789 | yyyyyyyyyyyyyy | ++-[ RECORD 4 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxx | +| 0123456789 | yyyyyyyyyyyy | ++-[ RECORD 5 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxx | +| 0123456789 | yyyyyyyyyy | ++-[ RECORD 6 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxx | +| 0123456789 | yyyyyyyy | ++-[ RECORD 7 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxx | +| 0123456789 | yyyyyy | ++-[ RECORD 8 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxx | +| 0123456789 | yyyy | ++-[ RECORD 9 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | +| 0123456789 | yy | ++-[ RECORD 10 ]----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | +| 0123456789 | | ++------------------+----------------------+ + +\pset format wrapped +execute q; ++-[ RECORD 1 ]-----+---------+ +| 0123456789abcdef | xx | +| 0123456789 | yyyyyyy.| +| |.yyyyyyy.| +| |.yyyy | ++-[ RECORD 2 ]-----+---------+ +| 0123456789abcdef | xxxx | +| 0123456789 | yyyyyyy.| +| |.yyyyyyy.| +| |.yy | ++-[ RECORD 3 ]-----+---------+ +| 0123456789abcdef | xxxxxx | +| 0123456789 | yyyyyyy.| +| |.yyyyyyy | ++-[ RECORD 4 ]-----+---------+ +| 0123456789abcdef | xxxxxxx.| +| |.x | +| 0123456789 | yyyyyyy.| +| |.yyyyy | ++-[ RECORD 5 ]-----+---------+ +| 0123456789abcdef | xxxxxxx.| +| |.xxx | +| 0123456789 | yyyyyyy.| +| |.yyy | ++-[ RECORD 6 ]-----+---------+ +| 0123456789abcdef | xxxxxxx.| +| |.xxxxx | +| 0123456789 | yyyyyyy.| +| |.y | ++-[ RECORD 7 ]-----+---------+ +| 0123456789abcdef | xxxxxxx.| +| |.xxxxxxx | +| 0123456789 | yyyyyy | ++-[ RECORD 8 ]-----+---------+ +| 0123456789abcdef | xxxxxxx.| +| |.xxxxxxx.| +| |.xx | +| 0123456789 | yyyy | ++-[ RECORD 9 ]-----+---------+ +| 
0123456789abcdef | xxxxxxx.| +| |.xxxxxxx.| +| |.xxxx | +| 0123456789 | yy | ++-[ RECORD 10 ]----+---------+ +| 0123456789abcdef | xxxxxxx.| +| |.xxxxxxx.| +| |.xxxxxx | +| 0123456789 | | ++------------------+---------+ + +\pset expanded on +\pset columns 20 +\pset border 0 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; +* Record 1 +0123456789abcdef xx +0123456789 yyyyyyyyyyyyyyyyyy +* Record 2 +0123456789abcdef xxxx +0123456789 yyyyyyyyyyyyyyyy +* Record 3 +0123456789abcdef xxxxxx +0123456789 yyyyyyyyyyyyyy +* Record 4 +0123456789abcdef xxxxxxxx +0123456789 yyyyyyyyyyyy +* Record 5 +0123456789abcdef xxxxxxxxxx +0123456789 yyyyyyyyyy +* Record 6 +0123456789abcdef xxxxxxxxxxxx +0123456789 yyyyyyyy +* Record 7 +0123456789abcdef xxxxxxxxxxxxxx +0123456789 yyyyyy +* Record 8 +0123456789abcdef xxxxxxxxxxxxxxxx +0123456789 yyyy +* Record 9 +0123456789abcdef xxxxxxxxxxxxxxxxxx +0123456789 yy +* Record 10 +0123456789abcdef xxxxxxxxxxxxxxxxxxxx +0123456789 + +\pset format wrapped +execute q; +* Record 1 +0123456789abcdef xx +0123456789 yyy. + .yyy. + .yyy. + .yyy. + .yyy. + .yyy +* Record 2 +0123456789abcdef xxx. + .x +0123456789 yyy. + .yyy. + .yyy. + .yyy. + .yyy. + .y +* Record 3 +0123456789abcdef xxx. + .xxx +0123456789 yyy. + .yyy. + .yyy. + .yyy. + .yy +* Record 4 +0123456789abcdef xxx. + .xxx. + .xx +0123456789 yyy. + .yyy. + .yyy. + .yyy +* Record 5 +0123456789abcdef xxx. + .xxx. + .xxx. + .x +0123456789 yyy. + .yyy. + .yyy. + .y +* Record 6 +0123456789abcdef xxx. + .xxx. + .xxx. + .xxx +0123456789 yyy. + .yyy. + .yy +* Record 7 +0123456789abcdef xxx. + .xxx. + .xxx. + .xxx. + .xx +0123456789 yyy. + .yyy +* Record 8 +0123456789abcdef xxx. + .xxx. + .xxx. + .xxx. + .xxx. + .x +0123456789 yyy. + .y +* Record 9 +0123456789abcdef xxx. + .xxx. + .xxx. + .xxx. + .xxx. + .xxx +0123456789 yy +* Record 10 +0123456789abcdef xxx. + .xxx. + .xxx. + .xxx. + .xxx. + .xxx. 
+ .xx +0123456789 + +\pset border 1 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; +-[ RECORD 1 ]----+--------------------- +0123456789abcdef | xx +0123456789 | yyyyyyyyyyyyyyyyyy +-[ RECORD 2 ]----+--------------------- +0123456789abcdef | xxxx +0123456789 | yyyyyyyyyyyyyyyy +-[ RECORD 3 ]----+--------------------- +0123456789abcdef | xxxxxx +0123456789 | yyyyyyyyyyyyyy +-[ RECORD 4 ]----+--------------------- +0123456789abcdef | xxxxxxxx +0123456789 | yyyyyyyyyyyy +-[ RECORD 5 ]----+--------------------- +0123456789abcdef | xxxxxxxxxx +0123456789 | yyyyyyyyyy +-[ RECORD 6 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxx +0123456789 | yyyyyyyy +-[ RECORD 7 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxx +0123456789 | yyyyyy +-[ RECORD 8 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxx +0123456789 | yyyy +-[ RECORD 9 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxx +0123456789 | yy +-[ RECORD 10 ]---+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxxxx +0123456789 | + +\pset format wrapped +execute q; +-[ RECORD 1 ]----+---- +0123456789abcdef | xx +0123456789 | yyy. + |.yyy. + |.yyy. + |.yyy. + |.yyy. + |.yyy +-[ RECORD 2 ]----+---- +0123456789abcdef | xxx. + |.x +0123456789 | yyy. + |.yyy. + |.yyy. + |.yyy. + |.yyy. + |.y +-[ RECORD 3 ]----+---- +0123456789abcdef | xxx. + |.xxx +0123456789 | yyy. + |.yyy. + |.yyy. + |.yyy. + |.yy +-[ RECORD 4 ]----+---- +0123456789abcdef | xxx. + |.xxx. + |.xx +0123456789 | yyy. + |.yyy. + |.yyy. + |.yyy +-[ RECORD 5 ]----+---- +0123456789abcdef | xxx. + |.xxx. + |.xxx. + |.x +0123456789 | yyy. + |.yyy. + |.yyy. + |.y +-[ RECORD 6 ]----+---- +0123456789abcdef | xxx. + |.xxx. + |.xxx. + |.xxx +0123456789 | yyy. + |.yyy. + |.yy +-[ RECORD 7 ]----+---- +0123456789abcdef | xxx. + |.xxx. + |.xxx. + |.xxx. + |.xx +0123456789 | yyy. + |.yyy +-[ RECORD 8 ]----+---- +0123456789abcdef | xxx. + |.xxx. + |.xxx. + |.xxx. + |.xxx. + |.x +0123456789 | yyy. + |.y +-[ RECORD 9 ]----+---- +0123456789abcdef | xxx. + |.xxx. + |.xxx. + |.xxx. + |.xxx. + |.xxx +0123456789 | yy +-[ RECORD 10 ]---+---- +0123456789abcdef | xxx. + |.xxx. + |.xxx. + |.xxx. + |.xxx. + |.xxx. 
+ |.xx +0123456789 | + +\pset border 2 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; ++-[ RECORD 1 ]-----+----------------------+ +| 0123456789abcdef | xx | +| 0123456789 | yyyyyyyyyyyyyyyyyy | ++-[ RECORD 2 ]-----+----------------------+ +| 0123456789abcdef | xxxx | +| 0123456789 | yyyyyyyyyyyyyyyy | ++-[ RECORD 3 ]-----+----------------------+ +| 0123456789abcdef | xxxxxx | +| 0123456789 | yyyyyyyyyyyyyy | ++-[ RECORD 4 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxx | +| 0123456789 | yyyyyyyyyyyy | ++-[ RECORD 5 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxx | +| 0123456789 | yyyyyyyyyy | ++-[ RECORD 6 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxx | +| 0123456789 | yyyyyyyy | ++-[ RECORD 7 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxx | +| 0123456789 | yyyyyy | ++-[ RECORD 8 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxx | +| 0123456789 | yyyy | ++-[ RECORD 9 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | +| 0123456789 | yy | ++-[ RECORD 10 ]----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | +| 0123456789 | | ++------------------+----------------------+ + +\pset format wrapped +execute q; ++-[ RECORD 1 ]-----+-----+ +| 0123456789abcdef | xx | +| 0123456789 | yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy | ++-[ RECORD 2 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.x | +| 0123456789 | yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy.| +| |.y | ++-[ RECORD 3 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx | +| 0123456789 | yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy.| +| |.yy | ++-[ RECORD 4 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xx | +| 0123456789 | yyy.| +| |.yyy.| +| |.yyy.| +| |.yyy | ++-[ RECORD 5 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xxx.| +| |.x | +| 0123456789 | yyy.| +| |.yyy.| +| |.yyy.| +| |.y | ++-[ RECORD 6 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx | +| 0123456789 | yyy.| +| |.yyy.| +| |.yy | ++-[ RECORD 7 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xx | +| 0123456789 | yyy.| +| |.yyy | ++-[ RECORD 8 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.x | +| 0123456789 | yyy.| +| |.y | ++-[ RECORD 9 ]-----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx | +| 0123456789 | yy | ++-[ RECORD 10 ]----+-----+ +| 0123456789abcdef | xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xxx.| +| |.xx | +| 0123456789 | | ++------------------+-----+ + +\pset linestyle old-ascii +\pset expanded off +\pset columns 40 +\pset border 0 +\pset format unaligned +execute q; +0123456789abcdef|0123456789 +xx|yyyyyyyyyyyyyyyyyy +xxxx|yyyyyyyyyyyyyyyy +xxxxxx|yyyyyyyyyyyyyy +xxxxxxxx|yyyyyyyyyyyy +xxxxxxxxxx|yyyyyyyyyy +xxxxxxxxxxxx|yyyyyyyy +xxxxxxxxxxxxxx|yyyyyy +xxxxxxxxxxxxxxxx|yyyy +xxxxxxxxxxxxxxxxxx|yy 
+xxxxxxxxxxxxxxxxxxxx| +(10 rows) +\pset format aligned +execute q; + 0123456789abcdef 0123456789 +-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx yyyyyyyyyyyyyyyy +xxxxxx yyyyyyyyyyyyyy +xxxxxxxx yyyyyyyyyyyy +xxxxxxxxxx yyyyyyyyyy +xxxxxxxxxxxx yyyyyyyy +xxxxxxxxxxxxxx yyyyyy +xxxxxxxxxxxxxxxx yyyy +xxxxxxxxxxxxxxxxxx yy +xxxxxxxxxxxxxxxxxxxx +(10 rows) + +\pset format wrapped +execute q; + 0123456789abcdef 0123456789 +-------------------- ------------------ +xx yyyyyyyyyyyyyyyyyy +xxxx yyyyyyyyyyyyyyyy +xxxxxx yyyyyyyyyyyyyy +xxxxxxxx yyyyyyyyyyyy +xxxxxxxxxx yyyyyyyyyy +xxxxxxxxxxxx yyyyyyyy +xxxxxxxxxxxxxx yyyyyy +xxxxxxxxxxxxxxxx yyyy +xxxxxxxxxxxxxxxxxx yy +xxxxxxxxxxxxxxxxxxxx +(10 rows) + +\pset border 1 +\pset format unaligned +execute q; +0123456789abcdef|0123456789 +xx|yyyyyyyyyyyyyyyyyy +xxxx|yyyyyyyyyyyyyyyy +xxxxxx|yyyyyyyyyyyyyy +xxxxxxxx|yyyyyyyyyyyy +xxxxxxxxxx|yyyyyyyyyy +xxxxxxxxxxxx|yyyyyyyy +xxxxxxxxxxxxxx|yyyyyy +xxxxxxxxxxxxxxxx|yyyy +xxxxxxxxxxxxxxxxxx|yy +xxxxxxxxxxxxxxxxxxxx| +(10 rows) +\pset format aligned +execute q; + 0123456789abcdef | 0123456789 +----------------------+-------------------- + xx | yyyyyyyyyyyyyyyyyy + xxxx | yyyyyyyyyyyyyyyy + xxxxxx | yyyyyyyyyyyyyy + xxxxxxxx | yyyyyyyyyyyy + xxxxxxxxxx | yyyyyyyyyy + xxxxxxxxxxxx | yyyyyyyy + xxxxxxxxxxxxxx | yyyyyy + xxxxxxxxxxxxxxxx | yyyy + xxxxxxxxxxxxxxxxxx | yy + xxxxxxxxxxxxxxxxxxxx | +(10 rows) + +\pset format wrapped +execute q; + 0123456789abcdef | 0123456789 +---------------------+------------------ + xx | yyyyyyyyyyyyyyyy + ; yy + xxxx | yyyyyyyyyyyyyyyy + xxxxxx | yyyyyyyyyyyyyy + xxxxxxxx | yyyyyyyyyyyy + xxxxxxxxxx | yyyyyyyyyy + xxxxxxxxxxxx | yyyyyyyy + xxxxxxxxxxxxxx | yyyyyy + xxxxxxxxxxxxxxxx | yyyy + xxxxxxxxxxxxxxxxxx | yy + xxxxxxxxxxxxxxxxxxx | + x +(10 rows) + +\pset border 2 +\pset format unaligned +execute q; +0123456789abcdef|0123456789 +xx|yyyyyyyyyyyyyyyyyy +xxxx|yyyyyyyyyyyyyyyy +xxxxxx|yyyyyyyyyyyyyy +xxxxxxxx|yyyyyyyyyyyy +xxxxxxxxxx|yyyyyyyyyy +xxxxxxxxxxxx|yyyyyyyy +xxxxxxxxxxxxxx|yyyyyy +xxxxxxxxxxxxxxxx|yyyy +xxxxxxxxxxxxxxxxxx|yy +xxxxxxxxxxxxxxxxxxxx| +(10 rows) +\pset format aligned +execute q; ++----------------------+--------------------+ +| 0123456789abcdef | 0123456789 | ++----------------------+--------------------+ +| xx | yyyyyyyyyyyyyyyyyy | +| xxxx | yyyyyyyyyyyyyyyy | +| xxxxxx | yyyyyyyyyyyyyy | +| xxxxxxxx | yyyyyyyyyyyy | +| xxxxxxxxxx | yyyyyyyyyy | +| xxxxxxxxxxxx | yyyyyyyy | +| xxxxxxxxxxxxxx | yyyyyy | +| xxxxxxxxxxxxxxxx | yyyy | +| xxxxxxxxxxxxxxxxxx | yy | +| xxxxxxxxxxxxxxxxxxxx | | ++----------------------+--------------------+ +(10 rows) + +\pset format wrapped +execute q; ++--------------------+-----------------+ +| 0123456789abcdef | 0123456789 | ++--------------------+-----------------+ +| xx | yyyyyyyyyyyyyyy | +| ; yyy | +| xxxx | yyyyyyyyyyyyyyy | +| ; y | +| xxxxxx | yyyyyyyyyyyyyy | +| xxxxxxxx | yyyyyyyyyyyy | +| xxxxxxxxxx | yyyyyyyyyy | +| xxxxxxxxxxxx | yyyyyyyy | +| xxxxxxxxxxxxxx | yyyyyy | +| xxxxxxxxxxxxxxxx | yyyy | +| xxxxxxxxxxxxxxxxxx | yy | +| xxxxxxxxxxxxxxxxxx | | +| xx | ++--------------------+-----------------+ +(10 rows) + +\pset expanded on +\pset border 0 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx 
+0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; +* Record 1 +0123456789abcdef xx +0123456789 yyyyyyyyyyyyyyyyyy +* Record 2 +0123456789abcdef xxxx +0123456789 yyyyyyyyyyyyyyyy +* Record 3 +0123456789abcdef xxxxxx +0123456789 yyyyyyyyyyyyyy +* Record 4 +0123456789abcdef xxxxxxxx +0123456789 yyyyyyyyyyyy +* Record 5 +0123456789abcdef xxxxxxxxxx +0123456789 yyyyyyyyyy +* Record 6 +0123456789abcdef xxxxxxxxxxxx +0123456789 yyyyyyyy +* Record 7 +0123456789abcdef xxxxxxxxxxxxxx +0123456789 yyyyyy +* Record 8 +0123456789abcdef xxxxxxxxxxxxxxxx +0123456789 yyyy +* Record 9 +0123456789abcdef xxxxxxxxxxxxxxxxxx +0123456789 yy +* Record 10 +0123456789abcdef xxxxxxxxxxxxxxxxxxxx +0123456789 + +\pset format wrapped +execute q; +* Record 1 +0123456789abcdef xx +0123456789 yyyyyyyyyyyyyyyyyy +* Record 2 +0123456789abcdef xxxx +0123456789 yyyyyyyyyyyyyyyy +* Record 3 +0123456789abcdef xxxxxx +0123456789 yyyyyyyyyyyyyy +* Record 4 +0123456789abcdef xxxxxxxx +0123456789 yyyyyyyyyyyy +* Record 5 +0123456789abcdef xxxxxxxxxx +0123456789 yyyyyyyyyy +* Record 6 +0123456789abcdef xxxxxxxxxxxx +0123456789 yyyyyyyy +* Record 7 +0123456789abcdef xxxxxxxxxxxxxx +0123456789 yyyyyy +* Record 8 +0123456789abcdef xxxxxxxxxxxxxxxx +0123456789 yyyy +* Record 9 +0123456789abcdef xxxxxxxxxxxxxxxxxx +0123456789 yy +* Record 10 +0123456789abcdef xxxxxxxxxxxxxxxxxxxx +0123456789 + +\pset border 1 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; +-[ RECORD 1 ]----+--------------------- +0123456789abcdef | xx +0123456789 | yyyyyyyyyyyyyyyyyy +-[ RECORD 2 ]----+--------------------- +0123456789abcdef | xxxx +0123456789 | yyyyyyyyyyyyyyyy +-[ RECORD 3 ]----+--------------------- +0123456789abcdef | xxxxxx +0123456789 | yyyyyyyyyyyyyy +-[ RECORD 4 ]----+--------------------- +0123456789abcdef | xxxxxxxx +0123456789 | yyyyyyyyyyyy +-[ RECORD 5 ]----+--------------------- +0123456789abcdef | xxxxxxxxxx +0123456789 | yyyyyyyyyy +-[ RECORD 6 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxx +0123456789 | yyyyyyyy +-[ RECORD 7 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxx +0123456789 | yyyyyy +-[ RECORD 8 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxx +0123456789 | yyyy +-[ RECORD 9 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxx +0123456789 | yy +-[ RECORD 10 ]---+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxxxx +0123456789 | + +\pset format wrapped +execute q; +-[ RECORD 1 ]----+--------------------- +0123456789abcdef | xx +0123456789 | yyyyyyyyyyyyyyyyyy +-[ RECORD 2 ]----+--------------------- +0123456789abcdef | xxxx +0123456789 | yyyyyyyyyyyyyyyy +-[ RECORD 3 ]----+--------------------- +0123456789abcdef | xxxxxx +0123456789 | yyyyyyyyyyyyyy +-[ RECORD 4 ]----+--------------------- 
+0123456789abcdef | xxxxxxxx +0123456789 | yyyyyyyyyyyy +-[ RECORD 5 ]----+--------------------- +0123456789abcdef | xxxxxxxxxx +0123456789 | yyyyyyyyyy +-[ RECORD 6 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxx +0123456789 | yyyyyyyy +-[ RECORD 7 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxx +0123456789 | yyyyyy +-[ RECORD 8 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxx +0123456789 | yyyy +-[ RECORD 9 ]----+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxx +0123456789 | yy +-[ RECORD 10 ]---+--------------------- +0123456789abcdef | xxxxxxxxxxxxxxxxxxxx +0123456789 | + +\pset border 2 +\pset format unaligned +execute q; +0123456789abcdef|xx +0123456789|yyyyyyyyyyyyyyyyyy + +0123456789abcdef|xxxx +0123456789|yyyyyyyyyyyyyyyy + +0123456789abcdef|xxxxxx +0123456789|yyyyyyyyyyyyyy + +0123456789abcdef|xxxxxxxx +0123456789|yyyyyyyyyyyy + +0123456789abcdef|xxxxxxxxxx +0123456789|yyyyyyyyyy + +0123456789abcdef|xxxxxxxxxxxx +0123456789|yyyyyyyy + +0123456789abcdef|xxxxxxxxxxxxxx +0123456789|yyyyyy + +0123456789abcdef|xxxxxxxxxxxxxxxx +0123456789|yyyy + +0123456789abcdef|xxxxxxxxxxxxxxxxxx +0123456789|yy + +0123456789abcdef|xxxxxxxxxxxxxxxxxxxx +0123456789| +\pset format aligned +execute q; ++-[ RECORD 1 ]-----+----------------------+ +| 0123456789abcdef | xx | +| 0123456789 | yyyyyyyyyyyyyyyyyy | ++-[ RECORD 2 ]-----+----------------------+ +| 0123456789abcdef | xxxx | +| 0123456789 | yyyyyyyyyyyyyyyy | ++-[ RECORD 3 ]-----+----------------------+ +| 0123456789abcdef | xxxxxx | +| 0123456789 | yyyyyyyyyyyyyy | ++-[ RECORD 4 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxx | +| 0123456789 | yyyyyyyyyyyy | ++-[ RECORD 5 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxx | +| 0123456789 | yyyyyyyyyy | ++-[ RECORD 6 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxx | +| 0123456789 | yyyyyyyy | ++-[ RECORD 7 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxx | +| 0123456789 | yyyyyy | ++-[ RECORD 8 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxx | +| 0123456789 | yyyy | ++-[ RECORD 9 ]-----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxxx | +| 0123456789 | yy | ++-[ RECORD 10 ]----+----------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxxxxx | +| 0123456789 | | ++------------------+----------------------+ + +\pset format wrapped +execute q; ++-[ RECORD 1 ]-----+-------------------+ +| 0123456789abcdef | xx | +| 0123456789 | yyyyyyyyyyyyyyyyy | +| ; y | ++-[ RECORD 2 ]-----+-------------------+ +| 0123456789abcdef | xxxx | +| 0123456789 | yyyyyyyyyyyyyyyy | ++-[ RECORD 3 ]-----+-------------------+ +| 0123456789abcdef | xxxxxx | +| 0123456789 | yyyyyyyyyyyyyy | ++-[ RECORD 4 ]-----+-------------------+ +| 0123456789abcdef | xxxxxxxx | +| 0123456789 | yyyyyyyyyyyy | ++-[ RECORD 5 ]-----+-------------------+ +| 0123456789abcdef | xxxxxxxxxx | +| 0123456789 | yyyyyyyyyy | ++-[ RECORD 6 ]-----+-------------------+ +| 0123456789abcdef | xxxxxxxxxxxx | +| 0123456789 | yyyyyyyy | ++-[ RECORD 7 ]-----+-------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxx | +| 0123456789 | yyyyyy | ++-[ RECORD 8 ]-----+-------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxx | +| 0123456789 | yyyy | ++-[ RECORD 9 ]-----+-------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxx | +| ; x | +| 0123456789 | yy | ++-[ RECORD 10 ]----+-------------------+ +| 0123456789abcdef | xxxxxxxxxxxxxxxxx | +| ; xxx | +| 0123456789 | | 
++------------------+-------------------+ + +deallocate q; +\pset linestyle ascii +\pset border 1 +-- support table for output-format tests (useful to create a footer) +create table psql_serial_tab (id serial); +-- test header/footer/tuples_only behavior in aligned/unaligned/wrapped cases +\pset format aligned +\pset expanded off +\d psql_serial_tab_id_seq + Sequence "public.psql_serial_tab_id_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Owned by: public.psql_serial_tab.id + +\pset tuples_only true +\df exp + pg_catalog | exp | double precision | double precision | func + pg_catalog | exp | numeric | numeric | func + +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +Sequence "public.psql_serial_tab_id_seq" +-[ RECORD 1 ]--------- +Type | integer +Start | 1 +Minimum | 1 +Maximum | 2147483647 +Increment | 1 +Cycles? | no +Cache | 1 + +Owned by: public.psql_serial_tab.id + +\pset tuples_only true +\df exp +Schema | pg_catalog +Name | exp +Result data type | double precision +Argument data types | double precision +Type | func +--------------------+----------------- +Schema | pg_catalog +Name | exp +Result data type | numeric +Argument data types | numeric +Type | func + +\pset tuples_only false +-- empty table is a special case for this format +select 1 where false; +(0 rows) + +\pset format unaligned +\pset expanded off +\d psql_serial_tab_id_seq +Sequence "public.psql_serial_tab_id_seq" +Type|Start|Minimum|Maximum|Increment|Cycles?|Cache +integer|1|1|2147483647|1|no|1 +Owned by: public.psql_serial_tab.id +\pset tuples_only true +\df exp +pg_catalog|exp|double precision|double precision|func +pg_catalog|exp|numeric|numeric|func +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +Sequence "public.psql_serial_tab_id_seq" + +Type|integer +Start|1 +Minimum|1 +Maximum|2147483647 +Increment|1 +Cycles?|no +Cache|1 + +Owned by: public.psql_serial_tab.id +\pset tuples_only true +\df exp +Schema|pg_catalog +Name|exp +Result data type|double precision +Argument data types|double precision +Type|func + +Schema|pg_catalog +Name|exp +Result data type|numeric +Argument data types|numeric +Type|func +\pset tuples_only false +\pset format wrapped +\pset expanded off +\d psql_serial_tab_id_seq + Sequence "public.psql_serial_tab_id_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Owned by: public.psql_serial_tab.id + +\pset tuples_only true +\df exp + pg_catalog | exp | double precision | double precision | func + pg_catalog | exp | numeric | numeric | func + +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +Sequence "public.psql_serial_tab_id_seq" +-[ RECORD 1 ]--------- +Type | integer +Start | 1 +Minimum | 1 +Maximum | 2147483647 +Increment | 1 +Cycles? 
| no +Cache | 1 + +Owned by: public.psql_serial_tab.id + +\pset tuples_only true +\df exp +Schema | pg_catalog +Name | exp +Result data type | double precision +Argument data types | double precision +Type | func +--------------------+----------------- +Schema | pg_catalog +Name | exp +Result data type | numeric +Argument data types | numeric +Type | func + +\pset tuples_only false +-- check conditional am display +\pset expanded off +CREATE SCHEMA tableam_display; +CREATE ROLE regress_display_role; +ALTER SCHEMA tableam_display OWNER TO regress_display_role; +SET search_path TO tableam_display; +CREATE ACCESS METHOD heap_psql TYPE TABLE HANDLER heap_tableam_handler; +SET ROLE TO regress_display_role; +-- Use only relations with a physical size of zero. +CREATE TABLE tbl_heap_psql(f1 int, f2 char(100)) using heap_psql; +CREATE TABLE tbl_heap(f1 int, f2 char(100)) using heap; +CREATE VIEW view_heap_psql AS SELECT f1 from tbl_heap_psql; +CREATE MATERIALIZED VIEW mat_view_heap_psql USING heap_psql AS SELECT f1 from tbl_heap_psql; +\d+ tbl_heap_psql + Table "tableam_display.tbl_heap_psql" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+----------------+-----------+----------+---------+----------+--------------+------------- + f1 | integer | | | | plain | | + f2 | character(100) | | | | extended | | + +\d+ tbl_heap + Table "tableam_display.tbl_heap" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+----------------+-----------+----------+---------+----------+--------------+------------- + f1 | integer | | | | plain | | + f2 | character(100) | | | | extended | | + +\set HIDE_TABLEAM off +\d+ tbl_heap_psql + Table "tableam_display.tbl_heap_psql" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+----------------+-----------+----------+---------+----------+--------------+------------- + f1 | integer | | | | plain | | + f2 | character(100) | | | | extended | | +Access method: heap_psql + +\d+ tbl_heap + Table "tableam_display.tbl_heap" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+----------------+-----------+----------+---------+----------+--------------+------------- + f1 | integer | | | | plain | | + f2 | character(100) | | | | extended | | +Access method: heap + +-- AM is displayed for tables, indexes and materialized views. 
+\d+ + List of relations + Schema | Name | Type | Owner | Persistence | Access method | Size | Description +-----------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- + tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | + tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | + tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | + tableam_display | view_heap_psql | view | regress_display_role | permanent | | 0 bytes | +(4 rows) + +\dt+ + List of relations + Schema | Name | Type | Owner | Persistence | Access method | Size | Description +-----------------+---------------+-------+----------------------+-------------+---------------+---------+------------- + tableam_display | tbl_heap | table | regress_display_role | permanent | heap | 0 bytes | + tableam_display | tbl_heap_psql | table | regress_display_role | permanent | heap_psql | 0 bytes | +(2 rows) + +\dm+ + List of relations + Schema | Name | Type | Owner | Persistence | Access method | Size | Description +-----------------+--------------------+-------------------+----------------------+-------------+---------------+---------+------------- + tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | heap_psql | 0 bytes | +(1 row) + +-- But not for views and sequences. +\dv+ + List of relations + Schema | Name | Type | Owner | Persistence | Size | Description +-----------------+----------------+------+----------------------+-------------+---------+------------- + tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | +(1 row) + +\set HIDE_TABLEAM on +\d+ + List of relations + Schema | Name | Type | Owner | Persistence | Size | Description +-----------------+--------------------+-------------------+----------------------+-------------+---------+------------- + tableam_display | mat_view_heap_psql | materialized view | regress_display_role | permanent | 0 bytes | + tableam_display | tbl_heap | table | regress_display_role | permanent | 0 bytes | + tableam_display | tbl_heap_psql | table | regress_display_role | permanent | 0 bytes | + tableam_display | view_heap_psql | view | regress_display_role | permanent | 0 bytes | +(4 rows) + +RESET ROLE; +RESET search_path; +DROP SCHEMA tableam_display CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table tableam_display.tbl_heap_psql +drop cascades to table tableam_display.tbl_heap +drop cascades to view tableam_display.view_heap_psql +drop cascades to materialized view tableam_display.mat_view_heap_psql +DROP ACCESS METHOD heap_psql; +DROP ROLE regress_display_role; +-- test numericlocale (as best we can without control of psql's locale) +\pset format aligned +\pset expanded off +\pset numericlocale true +select n, -n as m, n * 111 as x, '1e90'::float8 as f +from generate_series(0,3) n; + n | m | x | f +---+----+-----+------- + 0 | 0 | 0 | 1e+90 + 1 | -1 | 111 | 1e+90 + 2 | -2 | 222 | 1e+90 + 3 | -3 | 333 | 1e+90 +(4 rows) + +\pset numericlocale false +-- test asciidoc output format +\pset format asciidoc +\pset border 1 +\pset expanded off +\d psql_serial_tab_id_seq + +.Sequence "public.psql_serial_tab_id_seq" +[options="header",cols="l,>l,>l,>l,l",frame="none"] +|==== +^l|Type ^l|Start ^l|Minimum ^l|Maximum ^l|Increment ^l|Cycles? 
^l|Cache +|integer |1 |1 |2147483647 |1 |no |1 +|==== + +.... +Owned by: public.psql_serial_tab.id +.... +\pset tuples_only true +\df exp + +[cols="l|1 +l|1 +l|2147483647 +l|1 +l|1 +|==== + +.... +Owned by: public.psql_serial_tab.id +.... +\pset tuples_only true +\df exp + +[cols="h,l",frame="none"] +|==== +2+| +l|1 +2+^|Record 2 +l|2 +|==== +\pset border 1 +execute q; + +[cols="h,l",frame="none"] +|==== +2+^|Record 1 +l|1 +2+^|Record 2 +l|2 +|==== +\pset border 2 +execute q; + +[cols="h,l",frame="all",grid="all"] +|==== +2+^|Record 1 +l|1 +2+^|Record 2 +l|2 +|==== +deallocate q; +-- test csv output format +\pset format csv +\pset border 1 +\pset expanded off +\d psql_serial_tab_id_seq +Type,Start,Minimum,Maximum,Increment,Cycles?,Cache +integer,1,1,2147483647,1,no,1 +\pset tuples_only true +\df exp +pg_catalog,exp,double precision,double precision,func +pg_catalog,exp,numeric,numeric,func +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +Type,integer +Start,1 +Minimum,1 +Maximum,2147483647 +Increment,1 +Cycles?,no +Cache,1 +\pset tuples_only true +\df exp +Schema,pg_catalog +Name,exp +Result data type,double precision +Argument data types,double precision +Type,func +Schema,pg_catalog +Name,exp +Result data type,numeric +Argument data types,numeric +Type,func +\pset tuples_only false +prepare q as + select 'some"text' as "a""title", E' \n' as "junk", + ' ' as "empty", n as int + from generate_series(1,2) as n; +\pset expanded off +execute q; +"a""title",junk,empty,int +"some""text"," +", ,1 +"some""text"," +", ,2 +\pset expanded on +execute q; +"a""title","some""text" +junk," +" +empty, +int,1 +"a""title","some""text" +junk," +" +empty, +int,2 +deallocate q; +-- special cases +\pset expanded off +select 'comma,comma' as comma, 'semi;semi' as semi; +comma,semi +"comma,comma",semi;semi +\pset csv_fieldsep ';' +select 'comma,comma' as comma, 'semi;semi' as semi; +comma;semi +comma,comma;"semi;semi" +select '\.' as data; +data +"\." +\pset csv_fieldsep '.' +select '\' as d1, '' as d2; +"d1"."d2" +"\"."" +-- illegal csv separators +\pset csv_fieldsep '' +\pset: csv_fieldsep must be a single one-byte character +\pset csv_fieldsep '\0' +\pset: csv_fieldsep must be a single one-byte character +\pset csv_fieldsep '\n' +\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return +\pset csv_fieldsep '\r' +\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return +\pset csv_fieldsep '"' +\pset: csv_fieldsep cannot be a double quote, a newline, or a carriage return +\pset csv_fieldsep ',,' +\pset: csv_fieldsep must be a single one-byte character +\pset csv_fieldsep ',' +-- test html output format +\pset format html +\pset border 1 +\pset expanded off +\d psql_serial_tab_id_seq +
+ + + + + + + + + + + + + + + + + + + +
Sequence "public.psql_serial_tab_id_seq"
TypeStartMinimumMaximumIncrementCycles?Cache
integer1121474836471no1
+

Owned by: public.psql_serial_tab.id
+

+\pset tuples_only true +\df exp + + + + + + + + + + + + + + + +
pg_catalogexpdouble precisiondouble precisionfunc
pg_catalogexpnumericnumericfunc
+ +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Sequence "public.psql_serial_tab_id_seq"
Record 1
Typeinteger
Start1
Minimum1
Maximum2147483647
Increment1
Cycles?no
Cache1
+

Owned by: public.psql_serial_tab.id
+

+\pset tuples_only true +\df exp + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
Schemapg_catalog
Nameexp
Result data typedouble precision
Argument data typesdouble precision
Typefunc
 
Schemapg_catalog
Nameexp
Result data typenumeric
Argument data typesnumeric
Typefunc
+ +\pset tuples_only false +prepare q as + select 'some"text' as "a&title", E' \n' as "junk", + ' ' as "empty", n as int + from generate_series(1,2) as n; +\pset expanded off +\pset border 0 +execute q; + + + + + + + + + + + + + + + + + + + +
a&titlejunkemptyint
some"text  <foo>
+<bar>
  1
some"text  <foo>
+<bar>
  2
+

(2 rows)
+

+\pset border 1 +execute q; + + + + + + + + + + + + + + + + + + + +
a&titlejunkemptyint
some"text  <foo>
+<bar>
  1
some"text  <foo>
+<bar>
  2
+

(2 rows)
+

+\pset tableattr foobar +execute q; + + + + + + + + + + + + + + + + + + + +
a&titlejunkemptyint
some"text  <foo>
+<bar>
  1
some"text  <foo>
+<bar>
  2
+

(2 rows)
+

+\pset tableattr +\pset expanded on +\pset border 0 +execute q; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Record 1
a&titlesome"text
junk  <foo>
+<bar>
empty 
int1
Record 2
a&titlesome"text
junk  <foo>
+<bar>
empty 
int2
+ +\pset border 1 +execute q; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Record 1
a&titlesome"text
junk  <foo>
+<bar>
empty 
int1
Record 2
a&titlesome"text
junk  <foo>
+<bar>
empty 
int2
+ +\pset tableattr foobar +execute q; + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Record 1
a&titlesome"text
junk  <foo>
+<bar>
empty 
int1
Record 2
a&titlesome"text
junk  <foo>
+<bar>
empty 
int2
+ +\pset tableattr +deallocate q; +-- test latex output format +\pset format latex +\pset border 1 +\pset expanded off +\d psql_serial_tab_id_seq +\begin{center} +Sequence "public.psql\_serial\_tab\_id\_seq" +\end{center} + +\begin{tabular}{l | r | r | r | r | l | r} +\textit{Type} & \textit{Start} & \textit{Minimum} & \textit{Maximum} & \textit{Increment} & \textit{Cycles?} & \textit{Cache} \\ +\hline +integer & 1 & 1 & 2147483647 & 1 & no & 1 \\ +\end{tabular} + +\noindent Owned by: public.psql\_serial\_tab.id \\ + +\pset tuples_only true +\df exp +\begin{tabular}{l | l | l | l | l} +pg\_catalog & exp & double precision & double precision & func \\ +pg\_catalog & exp & numeric & numeric & func \\ +\end{tabular} + +\noindent +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +\begin{center} +Sequence "public.psql\_serial\_tab\_id\_seq" +\end{center} + +\begin{tabular}{c|l} +\multicolumn{2}{c}{\textit{Record 1}} \\ +\hline +Type & integer \\ +Start & 1 \\ +Minimum & 1 \\ +Maximum & 2147483647 \\ +Increment & 1 \\ +Cycles? & no \\ +Cache & 1 \\ +\end{tabular} + +\noindent Owned by: public.psql\_serial\_tab.id \\ + +\pset tuples_only true +\df exp +\begin{tabular}{c|l} +\hline +Schema & pg\_catalog \\ +Name & exp \\ +Result data type & double precision \\ +Argument data types & double precision \\ +Type & func \\ +\hline +Schema & pg\_catalog \\ +Name & exp \\ +Result data type & numeric \\ +Argument data types & numeric \\ +Type & func \\ +\end{tabular} + +\noindent +\pset tuples_only false +prepare q as + select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk", + ' ' as "empty", n as int + from generate_series(1,2) as n; +\pset expanded off +\pset border 0 +execute q; +\begin{tabular}{lllr} +\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ +\hline +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ +\end{tabular} + +\noindent (2 rows) \\ + +\pset border 1 +execute q; +\begin{tabular}{l | l | l | r} +\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ +\hline +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ +\end{tabular} + +\noindent (2 rows) \\ + +\pset border 2 +execute q; +\begin{tabular}{| l | l | l | r |} +\hline +\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ +\hline +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ +\hline +\end{tabular} + +\noindent (2 rows) \\ + +\pset border 3 +execute q; +\begin{tabular}{| l | l | l | r |} +\hline +\textit{a\$title} & \textit{junk} & \textit{empty} & \textit{int} \\ +\hline +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 1 \\ +\hline +some\textbackslash{}more\_text & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} & & 2 \\ +\hline +\end{tabular} + +\noindent (2 rows) \\ + +\pset expanded on +\pset border 0 +execute q; +\begin{tabular}{cl} +\multicolumn{2}{c}{\textit{Record 1}} \\ +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ 
+\multicolumn{2}{c}{\textit{Record 2}} \\ +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\end{tabular} + +\noindent +\pset border 1 +execute q; +\begin{tabular}{c|l} +\multicolumn{2}{c}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\multicolumn{2}{c}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\end{tabular} + +\noindent +\pset border 2 +execute q; +\begin{tabular}{|c|l|} +\hline +\multicolumn{2}{|c|}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\hline +\multicolumn{2}{|c|}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\hline +\end{tabular} + +\noindent +\pset border 3 +execute q; +\begin{tabular}{|c|l|} +\hline +\multicolumn{2}{|c|}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\hline +\multicolumn{2}{|c|}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\hline +\end{tabular} + +\noindent +deallocate q; +-- test latex-longtable output format +\pset format latex-longtable +\pset border 1 +\pset expanded off +\d psql_serial_tab_id_seq +\begin{longtable}{l | r | r | r | r | l | r} +\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ +\midrule +\endfirsthead +\small\textbf{\textit{Type}} & \small\textbf{\textit{Start}} & \small\textbf{\textit{Minimum}} & \small\textbf{\textit{Maximum}} & \small\textbf{\textit{Increment}} & \small\textbf{\textit{Cycles?}} & \small\textbf{\textit{Cache}} \\ +\midrule +\endhead +\caption[Sequence "public.psql\_serial\_tab\_id\_seq" (Continued)]{Sequence "public.psql\_serial\_tab\_id\_seq"} +\endfoot +\caption[Sequence "public.psql\_serial\_tab\_id\_seq"]{Sequence "public.psql\_serial\_tab\_id\_seq"} +\endlastfoot +\raggedright{integer} +& +\raggedright{1} +& +\raggedright{1} +& +\raggedright{2147483647} +& +\raggedright{1} +& +\raggedright{no} +& +\raggedright{1} \tabularnewline +\end{longtable} +\pset tuples_only true +\df exp +\begin{longtable}{l | l | l | l | l} +\raggedright{pg\_catalog} +& +\raggedright{exp} +& +\raggedright{double precision} +& +\raggedright{double precision} +& +\raggedright{func} \tabularnewline +\raggedright{pg\_catalog} +& +\raggedright{exp} +& +\raggedright{numeric} +& +\raggedright{numeric} +& +\raggedright{func} \tabularnewline +\end{longtable} +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +\begin{center} +Sequence "public.psql\_serial\_tab\_id\_seq" +\end{center} + +\begin{tabular}{c|l} +\multicolumn{2}{c}{\textit{Record 1}} \\ +\hline +Type & integer \\ +Start & 1 \\ +Minimum & 1 \\ +Maximum & 2147483647 \\ +Increment & 1 \\ +Cycles? 
& no \\ +Cache & 1 \\ +\end{tabular} + +\noindent Owned by: public.psql\_serial\_tab.id \\ + +\pset tuples_only true +\df exp +\begin{tabular}{c|l} +\hline +Schema & pg\_catalog \\ +Name & exp \\ +Result data type & double precision \\ +Argument data types & double precision \\ +Type & func \\ +\hline +Schema & pg\_catalog \\ +Name & exp \\ +Result data type & numeric \\ +Argument data types & numeric \\ +Type & func \\ +\end{tabular} + +\noindent +\pset tuples_only false +prepare q as + select 'some\more_text' as "a$title", E' #%&^~|\n{bar}' as "junk", + ' ' as "empty", n as int + from generate_series(1,2) as n; +\pset expanded off +\pset border 0 +execute q; +\begin{longtable}{lllr} +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endfirsthead +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endhead +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{1} \tabularnewline +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{2} \tabularnewline +\end{longtable} +\pset border 1 +execute q; +\begin{longtable}{l | l | l | r} +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endfirsthead +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endhead +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{1} \tabularnewline +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{2} \tabularnewline +\end{longtable} +\pset border 2 +execute q; +\begin{longtable}{| l | l | l | r |} +\toprule +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endfirsthead +\toprule +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endhead +\bottomrule +\endfoot +\bottomrule +\endlastfoot +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{1} \tabularnewline +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{2} \tabularnewline +\end{longtable} +\pset border 3 +execute q; +\begin{longtable}{| l | l | l | r |} +\toprule +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endfirsthead +\toprule +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\endhead +\bottomrule +\endfoot +\bottomrule +\endlastfoot +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{1} 
\tabularnewline + \hline +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{2} \tabularnewline + \hline +\end{longtable} +\pset tableattr lr +execute q; +\begin{longtable}{| p{lr\textwidth} | p{lr\textwidth} | p{lr\textwidth} | r |} +\toprule +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\midrule +\endfirsthead +\toprule +\small\textbf{\textit{a\$title}} & \small\textbf{\textit{junk}} & \small\textbf{\textit{empty}} & \small\textbf{\textit{int}} \\ +\endhead +\bottomrule +\endfoot +\bottomrule +\endlastfoot +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{1} \tabularnewline + \hline +\raggedright{some\textbackslash{}more\_text} +& +\raggedright{ \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\}} +& +\raggedright{ } +& +\raggedright{2} \tabularnewline + \hline +\end{longtable} +\pset tableattr +\pset expanded on +\pset border 0 +execute q; +\begin{tabular}{cl} +\multicolumn{2}{c}{\textit{Record 1}} \\ +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\multicolumn{2}{c}{\textit{Record 2}} \\ +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\end{tabular} + +\noindent +\pset border 1 +execute q; +\begin{tabular}{c|l} +\multicolumn{2}{c}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\multicolumn{2}{c}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\end{tabular} + +\noindent +\pset border 2 +execute q; +\begin{tabular}{|c|l|} +\hline +\multicolumn{2}{|c|}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\hline +\multicolumn{2}{|c|}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\hline +\end{tabular} + +\noindent +\pset border 3 +execute q; +\begin{tabular}{|c|l|} +\hline +\multicolumn{2}{|c|}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\hline +\multicolumn{2}{|c|}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\hline +\end{tabular} + +\noindent +\pset tableattr lr +execute q; +\begin{tabular}{|c|l|} +\hline +\multicolumn{2}{|c|}{\textit{Record 1}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 1 \\ +\hline +\multicolumn{2}{|c|}{\textit{Record 2}} \\ +\hline +a\$title & some\textbackslash{}more\_text \\ +junk & \#\textless{}foo\textgreater{}\%\&\^{}\~{}\textbar{}\\\{bar\} \\ +empty & \\ +int & 2 \\ +\hline +\end{tabular} + +\noindent +\pset 
tableattr +deallocate q; +-- test troff-ms output format +\pset format troff-ms +\pset border 1 +\pset expanded off +\d psql_serial_tab_id_seq +.LP +.DS C +Sequence "public.psql_serial_tab_id_seq" +.DE +.LP +.TS +center; +l | r | r | r | r | l | r. +\fIType\fP \fIStart\fP \fIMinimum\fP \fIMaximum\fP \fIIncrement\fP \fICycles?\fP \fICache\fP +_ +integer 1 1 2147483647 1 no 1 +.TE +.DS L +Owned by: public.psql_serial_tab.id +.DE +\pset tuples_only true +\df exp +.LP +.TS +center; +l | l | l | l | l. +pg_catalog exp double precision double precision func +pg_catalog exp numeric numeric func +.TE +.DS L +.DE +\pset tuples_only false +\pset expanded on +\d psql_serial_tab_id_seq +.LP +.DS C +Sequence "public.psql_serial_tab_id_seq" +.DE +.LP +.TS +center; +c s. +\fIRecord 1\fP +_ +.T& +c | l. +Type integer +Start 1 +Minimum 1 +Maximum 2147483647 +Increment 1 +Cycles? no +Cache 1 +.TE +.DS L +Owned by: public.psql_serial_tab.id +.DE +\pset tuples_only true +\df exp +.LP +.TS +center; +c l; +_ +Schema pg_catalog +Name exp +Result data type double precision +Argument data types double precision +Type func +_ +Schema pg_catalog +Name exp +Result data type numeric +Argument data types numeric +Type func +.TE +.DS L +.DE +\pset tuples_only false +prepare q as + select 'some\text' as "a\title", E' \n' as "junk", + ' ' as "empty", n as int + from generate_series(1,2) as n; +\pset expanded off +\pset border 0 +execute q; +.LP +.TS +center; +lllr. +\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP +_ +some\(rstext + 1 +some\(rstext + 2 +.TE +.DS L +(2 rows) +.DE +\pset border 1 +execute q; +.LP +.TS +center; +l | l | l | r. +\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP +_ +some\(rstext + 1 +some\(rstext + 2 +.TE +.DS L +(2 rows) +.DE +\pset border 2 +execute q; +.LP +.TS +center box; +l | l | l | r. +\fIa\(rstitle\fP \fIjunk\fP \fIempty\fP \fIint\fP +_ +some\(rstext + 1 +some\(rstext + 2 +.TE +.DS L +(2 rows) +.DE +\pset expanded on +\pset border 0 +execute q; +.LP +.TS +center; +c s. +\fIRecord 1\fP +.T& +c l. +a\(rstitle some\(rstext +junk + +empty +int 1 +.T& +c s. +\fIRecord 2\fP +.T& +c l. +a\(rstitle some\(rstext +junk + +empty +int 2 +.TE +.DS L +.DE +\pset border 1 +execute q; +.LP +.TS +center; +c s. +\fIRecord 1\fP +_ +.T& +c | l. +a\(rstitle some\(rstext +junk + +empty +int 1 +.T& +c s. +\fIRecord 2\fP +_ +.T& +c | l. +a\(rstitle some\(rstext +junk + +empty +int 2 +.TE +.DS L +.DE +\pset border 2 +execute q; +.LP +.TS +center box; +c s. +\fIRecord 1\fP +_ +.T& +c l. +a\(rstitle some\(rstext +junk + +empty +int 1 +_ +.T& +c s. +\fIRecord 2\fP +_ +.T& +c l. +a\(rstitle some\(rstext +junk + +empty +int 2 +.TE +.DS L +.DE +deallocate q; +-- check ambiguous format requests +\pset format a +\pset: ambiguous abbreviation "a" matches both "aligned" and "asciidoc" +\pset format l +-- clean up after output format tests +drop table psql_serial_tab; +\pset format aligned +\pset expanded off +\pset border 1 +-- \echo and allied features +\echo this is a test +this is a test +\echo -n without newline +without newline\echo with -n newline +with -n newline +\echo '-n' with newline +-n with newline +\set foo bar +\echo foo = :foo +foo = bar +\qecho this is a test +this is a test +\qecho foo = :foo +foo = bar +\warn this is a test +this is a test +\warn foo = :foo +foo = bar +-- tests for \if ... \endif +\if true + select 'okay'; + ?column? +---------- + okay +(1 row) + + select 'still okay'; + ?column? 
+------------ + still okay +(1 row) + +\else + not okay; + still not okay +\endif +-- at this point query buffer should still have last valid line +\g + ?column? +------------ + still okay +(1 row) + +-- \if should work okay on part of a query +select + \if true + 42 + \else + (bogus + \endif + forty_two; + forty_two +----------- + 42 +(1 row) + +select \if false \\ (bogus \else \\ 42 \endif \\ forty_two; + forty_two +----------- + 42 +(1 row) + +-- test a large nested if using a variety of true-equivalents +\if true + \if 1 + \if yes + \if on + \echo 'all true' +all true + \else + \echo 'should not print #1-1' + \endif + \else + \echo 'should not print #1-2' + \endif + \else + \echo 'should not print #1-3' + \endif +\else + \echo 'should not print #1-4' +\endif +-- test a variety of false-equivalents in an if/elif/else structure +\if false + \echo 'should not print #2-1' +\elif 0 + \echo 'should not print #2-2' +\elif no + \echo 'should not print #2-3' +\elif off + \echo 'should not print #2-4' +\else + \echo 'all false' +all false +\endif +-- test true-false elif after initial true branch +\if true + \echo 'should print #2-5' +should print #2-5 +\elif true + \echo 'should not print #2-6' +\elif false + \echo 'should not print #2-7' +\else + \echo 'should not print #2-8' +\endif +-- test simple true-then-else +\if true + \echo 'first thing true' +first thing true +\else + \echo 'should not print #3-1' +\endif +-- test simple false-true-else +\if false + \echo 'should not print #4-1' +\elif true + \echo 'second thing true' +second thing true +\else + \echo 'should not print #5-1' +\endif +-- invalid boolean expressions are false +\if invalid boolean expression +unrecognized value "invalid boolean expression" for "\if expression": Boolean expected + \echo 'will not print #6-1' +\else + \echo 'will print anyway #6-2' +will print anyway #6-2 +\endif +-- test un-matched endif +\endif +\endif: no matching \if +-- test un-matched else +\else +\else: no matching \if +-- test un-matched elif +\elif +\elif: no matching \if +-- test double-else error +\if true +\else +\else +\else: cannot occur after \else +\endif +-- test elif out-of-order +\if false +\else +\elif +\elif: cannot occur after \else +\endif +-- test if-endif matching in a false branch +\if false + \if false + \echo 'should not print #7-1' + \else + \echo 'should not print #7-2' + \endif + \echo 'should not print #7-3' +\else + \echo 'should print #7-4' +should print #7-4 +\endif +-- show that vars and backticks are not expanded when ignoring extra args +\set foo bar +\echo :foo :'foo' :"foo" +bar 'bar' "bar" +\pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" +\pset: extra argument "nosuchcommand" ignored +\pset: extra argument ":foo" ignored +\pset: extra argument ":'foo'" ignored +\pset: extra argument ":"foo"" ignored +-- show that vars and backticks are not expanded and commands are ignored +-- when in a false if-branch +\set try_to_quit '\\q' +\if false + :try_to_quit + \echo `nosuchcommand` :foo :'foo' :"foo" + \pset fieldsep | `nosuchcommand` :foo :'foo' :"foo" + \a + \C arg1 + \c arg1 arg2 arg3 arg4 + \cd arg1 + \conninfo + \copy arg1 arg2 arg3 arg4 arg5 arg6 + \copyright + SELECT 1 as one, 2, 3 \crosstabview + \dt arg1 + \e arg1 arg2 + \ef whole_line + \ev whole_line + \echo arg1 arg2 arg3 arg4 arg5 + \echo arg1 + \encoding arg1 + \errverbose + \f arg1 + \g arg1 + \gx arg1 + \gexec + SELECT 1 AS one \gset + \h + \? 
+ \html + \i arg1 + \ir arg1 + \l arg1 + \lo arg1 arg2 +invalid command \lo + \lo_list + \o arg1 + \p + \password arg1 + \prompt arg1 arg2 + \pset arg1 arg2 + \q + \reset + \s arg1 + \set arg1 arg2 arg3 arg4 arg5 arg6 arg7 + \setenv arg1 arg2 + \sf whole_line + \sv whole_line + \t arg1 + \T arg1 + \timing arg1 + \unset arg1 + \w arg1 + \watch arg1 arg2 + \x arg1 + -- \else here is eaten as part of OT_FILEPIPE argument + \w |/no/such/file \else + -- \endif here is eaten as part of whole-line argument + \! whole_line \endif + \z +\else + \echo 'should print #8-1' +should print #8-1 +\endif +-- :{?...} defined variable test +\set i 1 +\if :{?i} + \echo '#9-1 ok, variable i is defined' +#9-1 ok, variable i is defined +\else + \echo 'should not print #9-2' +\endif +\if :{?no_such_variable} + \echo 'should not print #10-1' +\else + \echo '#10-2 ok, variable no_such_variable is not defined' +#10-2 ok, variable no_such_variable is not defined +\endif +SELECT :{?i} AS i_is_defined; + i_is_defined +-------------- + t +(1 row) + +SELECT NOT :{?no_such_var} AS no_such_var_is_not_defined; + no_such_var_is_not_defined +---------------------------- + t +(1 row) + +-- SHOW_CONTEXT +\set SHOW_CONTEXT never +do $$ +begin + raise notice 'foo'; + raise exception 'bar'; +end $$; +NOTICE: foo +ERROR: bar +\set SHOW_CONTEXT errors +do $$ +begin + raise notice 'foo'; + raise exception 'bar'; +end $$; +NOTICE: foo +ERROR: bar +CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE +\set SHOW_CONTEXT always +do $$ +begin + raise notice 'foo'; + raise exception 'bar'; +end $$; +NOTICE: foo +CONTEXT: PL/pgSQL function inline_code_block line 3 at RAISE +ERROR: bar +CONTEXT: PL/pgSQL function inline_code_block line 4 at RAISE +-- test printing and clearing the query buffer +SELECT 1; + ?column? +---------- + 1 +(1 row) + +\p +SELECT 1; +SELECT 2 \r +\p +SELECT 1; +SELECT 3 \p +SELECT 3 +UNION SELECT 4 \p +SELECT 3 +UNION SELECT 4 +UNION SELECT 5 +ORDER BY 1; + ?column? 
+---------- + 3 + 4 + 5 +(3 rows) + +\r +\p +SELECT 3 +UNION SELECT 4 +UNION SELECT 5 +ORDER BY 1; +-- tests for special result variables +-- working query, 2 rows selected +SELECT 1 AS stuff UNION SELECT 2; + stuff +------- + 1 + 2 +(2 rows) + +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 2 +-- syntax error +SELECT 1 UNION; +ERROR: syntax error at or near ";" +LINE 1: SELECT 1 UNION; + ^ +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42601 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at or near ";" +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42601 +-- empty query +; +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +-- must have kept previous values +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at or near ";" +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42601 +-- other query error +DROP TABLE this_table_does_not_exist; +ERROR: table "this_table_does_not_exist" does not exist +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42P01 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: table "this_table_does_not_exist" does not exist +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42P01 +-- nondefault verbosity error settings (except verbose, which is too unstable) +\set VERBOSITY terse +SELECT 1 UNION; +ERROR: syntax error at or near ";" at character 15 +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42601 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at or near ";" +\set VERBOSITY sqlstate +SELECT 1/0; +ERROR: 22012 +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 22012 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: division by zero +\set VERBOSITY default +-- working \gdesc +SELECT 3 AS three, 4 AS four \gdesc + Column | Type +--------+--------- + three | integer + four | integer +(2 rows) + +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 2 +-- \gdesc with an error +SELECT 4 AS \gdesc +ERROR: syntax error at end of input +LINE 1: SELECT 4 AS + ^ +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 42601 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: syntax error at end of input +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 42601 +-- check row count for a cursor-fetched query +\set FETCH_COUNT 10 +select unique2 from tenk1 order by unique2 limit 19; + unique2 +--------- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 +(19 rows) + +\echo 'error:' :ERROR +error: false +\echo 'error code:' :SQLSTATE +error code: 00000 +\echo 'number of rows:' :ROW_COUNT +number of rows: 19 +-- cursor-fetched query with an error after the first group +select 1/(15-unique2) from tenk1 order by unique2 limit 19; + ?column? 
+---------- + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 +ERROR: division by zero +\echo 'error:' :ERROR +error: true +\echo 'error code:' :SQLSTATE +error code: 22012 +\echo 'number of rows:' :ROW_COUNT +number of rows: 0 +\echo 'last error message:' :LAST_ERROR_MESSAGE +last error message: division by zero +\echo 'last error code:' :LAST_ERROR_SQLSTATE +last error code: 22012 +\unset FETCH_COUNT +create schema testpart; +create role regress_partitioning_role; +alter schema testpart owner to regress_partitioning_role; +set role to regress_partitioning_role; +-- run test inside own schema and hide other partitions +set search_path to testpart; +create table testtable_apple(logdate date); +create table testtable_orange(logdate date); +create index testtable_apple_index on testtable_apple(logdate); +create index testtable_orange_index on testtable_orange(logdate); +create table testpart_apple(logdate date) partition by range(logdate); +create table testpart_orange(logdate date) partition by range(logdate); +create index testpart_apple_index on testpart_apple(logdate); +create index testpart_orange_index on testpart_orange(logdate); +-- only partition related object should be displayed +\dP test*apple* + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table +----------+----------------------+---------------------------+-------------------+-------------+---------------- + testpart | testpart_apple | regress_partitioning_role | partitioned table | | + testpart | testpart_apple_index | regress_partitioning_role | partitioned index | | testpart_apple +(2 rows) + +\dPt test*apple* + List of partitioned tables + Schema | Name | Owner | Parent name +----------+----------------+---------------------------+------------- + testpart | testpart_apple | regress_partitioning_role | +(1 row) + +\dPi test*apple* + List of partitioned indexes + Schema | Name | Owner | Parent name | Table +----------+----------------------+---------------------------+-------------+---------------- + testpart | testpart_apple_index | regress_partitioning_role | | testpart_apple +(1 row) + +drop table testtable_apple; +drop table testtable_orange; +drop table testpart_apple; +drop table testpart_orange; +create table parent_tab (id int) partition by range (id); +create index parent_index on parent_tab (id); +create table child_0_10 partition of parent_tab + for values from (0) to (10); +create table child_10_20 partition of parent_tab + for values from (10) to (20); +create table child_20_30 partition of parent_tab + for values from (20) to (30); +insert into parent_tab values (generate_series(0,29)); +create table child_30_40 partition of parent_tab +for values from (30) to (40) + partition by range(id); +create table child_30_35 partition of child_30_40 + for values from (30) to (35); +create table child_35_40 partition of child_30_40 + for values from (35) to (40); +insert into parent_tab values (generate_series(30,39)); +\dPt + List of partitioned tables + Schema | Name | Owner +----------+------------+--------------------------- + testpart | parent_tab | regress_partitioning_role +(1 row) + +\dPi + List of partitioned indexes + Schema | Name | Owner | Table +----------+--------------+---------------------------+------------ + testpart | parent_index | regress_partitioning_role | parent_tab +(1 row) + +\dP testpart.* + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table 
+----------+--------------------+---------------------------+-------------------+--------------+------------- + testpart | parent_tab | regress_partitioning_role | partitioned table | | + testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | + testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab + testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 +(4 rows) + +\dP + List of partitioned relations + Schema | Name | Owner | Type | Table +----------+--------------+---------------------------+-------------------+------------ + testpart | parent_tab | regress_partitioning_role | partitioned table | + testpart | parent_index | regress_partitioning_role | partitioned index | parent_tab +(2 rows) + +\dPtn + List of partitioned tables + Schema | Name | Owner | Parent name +----------+-------------+---------------------------+------------- + testpart | parent_tab | regress_partitioning_role | + testpart | child_30_40 | regress_partitioning_role | parent_tab +(2 rows) + +\dPin + List of partitioned indexes + Schema | Name | Owner | Parent name | Table +----------+--------------------+---------------------------+--------------+------------- + testpart | parent_index | regress_partitioning_role | | parent_tab + testpart | child_30_40_id_idx | regress_partitioning_role | parent_index | child_30_40 +(2 rows) + +\dPn + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table +----------+--------------------+---------------------------+-------------------+--------------+------------- + testpart | parent_tab | regress_partitioning_role | partitioned table | | + testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | + testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab + testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 +(4 rows) + +\dPn testpart.* + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table +----------+--------------------+---------------------------+-------------------+--------------+------------- + testpart | parent_tab | regress_partitioning_role | partitioned table | | + testpart | child_30_40 | regress_partitioning_role | partitioned table | parent_tab | + testpart | parent_index | regress_partitioning_role | partitioned index | | parent_tab + testpart | child_30_40_id_idx | regress_partitioning_role | partitioned index | parent_index | child_30_40 +(4 rows) + +drop table parent_tab cascade; +drop schema testpart; +set search_path to default; +set role to default; +drop role regress_partitioning_role; +-- \d on toast table (use pg_statistic's toast table, which has a known name) +\d pg_toast.pg_toast_2619 +TOAST table "pg_toast.pg_toast_2619" + Column | Type +------------+--------- + chunk_id | oid + chunk_seq | integer + chunk_data | bytea +Owning table: "pg_catalog.pg_statistic" +Indexes: + "pg_toast_2619_index" PRIMARY KEY, btree (chunk_id, chunk_seq) + +-- check printing info about access methods +\dA +List of access methods + Name | Type +--------+------- + brin | Index + btree | Index + gin | Index + gist | Index + hash | Index + heap | Table + heap2 | Table + spgist | Index +(8 rows) + +\dA * +List of access methods + Name | Type +--------+------- + brin | Index + btree | Index + gin | Index + gist | Index + hash | Index + heap | Table + heap2 | Table + spgist | Index +(8 rows) + +\dA h* 
+List of access methods + Name | Type +-------+------- + hash | Index + heap | Table + heap2 | Table +(3 rows) + +\dA foo +List of access methods + Name | Type +------+------ +(0 rows) + +\dA foo bar +List of access methods + Name | Type +------+------ +(0 rows) + +\dA: extra argument "bar" ignored +\dA+ + List of access methods + Name | Type | Handler | Description +--------+-------+----------------------+---------------------------------------- + brin | Index | brinhandler | block range index (BRIN) access method + btree | Index | bthandler | b-tree index access method + gin | Index | ginhandler | GIN index access method + gist | Index | gisthandler | GiST index access method + hash | Index | hashhandler | hash index access method + heap | Table | heap_tableam_handler | heap table access method + heap2 | Table | heap_tableam_handler | + spgist | Index | spghandler | SP-GiST index access method +(8 rows) + +\dA+ * + List of access methods + Name | Type | Handler | Description +--------+-------+----------------------+---------------------------------------- + brin | Index | brinhandler | block range index (BRIN) access method + btree | Index | bthandler | b-tree index access method + gin | Index | ginhandler | GIN index access method + gist | Index | gisthandler | GiST index access method + hash | Index | hashhandler | hash index access method + heap | Table | heap_tableam_handler | heap table access method + heap2 | Table | heap_tableam_handler | + spgist | Index | spghandler | SP-GiST index access method +(8 rows) + +\dA+ h* + List of access methods + Name | Type | Handler | Description +-------+-------+----------------------+-------------------------- + hash | Index | hashhandler | hash index access method + heap | Table | heap_tableam_handler | heap table access method + heap2 | Table | heap_tableam_handler | +(3 rows) + +\dA+ foo + List of access methods + Name | Type | Handler | Description +------+------+---------+------------- +(0 rows) + +\dAc brin pg*.oid* + List of operator classes + AM | Input type | Storage type | Operator class | Default? 
+------+------------+--------------+----------------------+---------- + brin | oid | | oid_bloom_ops | no + brin | oid | | oid_minmax_multi_ops | no + brin | oid | | oid_minmax_ops | yes +(3 rows) + +\dAf spgist + List of operator families + AM | Operator family | Applicable types +--------+-----------------+------------------ + spgist | box_ops | box + spgist | kd_point_ops | point + spgist | network_ops | inet + spgist | poly_ops | polygon + spgist | quad_point_ops | point + spgist | range_ops | anyrange + spgist | text_ops | text +(7 rows) + +\dAf btree int4 + List of operator families + AM | Operator family | Applicable types +-------+-----------------+--------------------------- + btree | integer_ops | smallint, integer, bigint +(1 row) + +\dAo+ btree float_ops + List of operators of operator families + AM | Operator family | Operator | Strategy | Purpose | Sort opfamily +-------+-----------------+---------------------------------------+----------+---------+--------------- + btree | float_ops | <(double precision,double precision) | 1 | search | + btree | float_ops | <=(double precision,double precision) | 2 | search | + btree | float_ops | =(double precision,double precision) | 3 | search | + btree | float_ops | >=(double precision,double precision) | 4 | search | + btree | float_ops | >(double precision,double precision) | 5 | search | + btree | float_ops | <(real,real) | 1 | search | + btree | float_ops | <=(real,real) | 2 | search | + btree | float_ops | =(real,real) | 3 | search | + btree | float_ops | >=(real,real) | 4 | search | + btree | float_ops | >(real,real) | 5 | search | + btree | float_ops | <(double precision,real) | 1 | search | + btree | float_ops | <=(double precision,real) | 2 | search | + btree | float_ops | =(double precision,real) | 3 | search | + btree | float_ops | >=(double precision,real) | 4 | search | + btree | float_ops | >(double precision,real) | 5 | search | + btree | float_ops | <(real,double precision) | 1 | search | + btree | float_ops | <=(real,double precision) | 2 | search | + btree | float_ops | =(real,double precision) | 3 | search | + btree | float_ops | >=(real,double precision) | 4 | search | + btree | float_ops | >(real,double precision) | 5 | search | +(20 rows) + +\dAo * pg_catalog.jsonb_path_ops + List of operators of operator families + AM | Operator family | Operator | Strategy | Purpose +-----+-----------------+--------------------+----------+--------- + gin | jsonb_path_ops | @>(jsonb,jsonb) | 7 | search + gin | jsonb_path_ops | @?(jsonb,jsonpath) | 15 | search + gin | jsonb_path_ops | @@(jsonb,jsonpath) | 16 | search +(3 rows) + +\dAp+ btree float_ops + List of support functions of operator families + AM | Operator family | Registered left type | Registered right type | Number | Function +-------+-----------------+----------------------+-----------------------+--------+------------------------------------------------------------------------------ + btree | float_ops | double precision | double precision | 1 | btfloat8cmp(double precision,double precision) + btree | float_ops | double precision | double precision | 2 | btfloat8sortsupport(internal) + btree | float_ops | double precision | double precision | 3 | in_range(double precision,double precision,double precision,boolean,boolean) + btree | float_ops | real | real | 1 | btfloat4cmp(real,real) + btree | float_ops | real | real | 2 | btfloat4sortsupport(internal) + btree | float_ops | double precision | real | 1 | btfloat84cmp(double precision,real) + btree | float_ops | real | 
double precision | 1 | btfloat48cmp(real,double precision) + btree | float_ops | real | double precision | 3 | in_range(real,real,double precision,boolean,boolean) +(8 rows) + +\dAp * pg_catalog.uuid_ops + List of support functions of operator families + AM | Operator family | Registered left type | Registered right type | Number | Function +-------+-----------------+----------------------+-----------------------+--------+-------------------- + btree | uuid_ops | uuid | uuid | 1 | uuid_cmp + btree | uuid_ops | uuid | uuid | 2 | uuid_sortsupport + btree | uuid_ops | uuid | uuid | 4 | btequalimage + hash | uuid_ops | uuid | uuid | 1 | uuid_hash + hash | uuid_ops | uuid | uuid | 2 | uuid_hash_extended +(5 rows) + +-- check \dconfig +set work_mem = 10240; +\dconfig work_mem +List of configuration parameters + Parameter | Value +-----------+------- + work_mem | 10MB +(1 row) + +\dconfig+ work* + List of configuration parameters + Parameter | Value | Type | Context | Access privileges +-----------+-------+---------+---------+------------------- + work_mem | 10MB | integer | user | +(1 row) + +reset work_mem; +-- check \df, \do with argument specifications +\df *sqrt + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+--------------+------------------+---------------------+------ + pg_catalog | dsqrt | double precision | double precision | func + pg_catalog | numeric_sqrt | numeric | numeric | func + pg_catalog | sqrt | double precision | double precision | func + pg_catalog | sqrt | numeric | numeric | func +(4 rows) + +\df *sqrt num* + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+--------------+------------------+---------------------+------ + pg_catalog | numeric_sqrt | numeric | numeric | func + pg_catalog | sqrt | numeric | numeric | func +(2 rows) + +\df int*pl + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+-------------+------------------+---------------------+------ + pg_catalog | int24pl | integer | smallint, integer | func + pg_catalog | int28pl | bigint | smallint, bigint | func + pg_catalog | int2pl | smallint | smallint, smallint | func + pg_catalog | int42pl | integer | integer, smallint | func + pg_catalog | int48pl | bigint | integer, bigint | func + pg_catalog | int4pl | integer | integer, integer | func + pg_catalog | int82pl | bigint | bigint, smallint | func + pg_catalog | int84pl | bigint | bigint, integer | func + pg_catalog | int8pl | bigint | bigint, bigint | func + pg_catalog | interval_pl | interval | interval, interval | func +(10 rows) + +\df int*pl int4 + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+---------+------------------+---------------------+------ + pg_catalog | int42pl | integer | integer, smallint | func + pg_catalog | int48pl | bigint | integer, bigint | func + pg_catalog | int4pl | integer | integer, integer | func +(3 rows) + +\df int*pl * pg_catalog.int8 + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+---------+------------------+---------------------+------ + pg_catalog | int28pl | bigint | smallint, bigint | func + pg_catalog | int48pl | bigint | integer, bigint | func + pg_catalog | int8pl | bigint | bigint, bigint | func +(3 rows) + +\df acl* aclitem[] + List of functions + Schema | Name | Result data type | Argument data types | Type 
+------------+-------------+------------------+----------------------------------------------------------------------------------------------------+------ + pg_catalog | aclcontains | boolean | aclitem[], aclitem | func + pg_catalog | aclexplode | SETOF record | acl aclitem[], OUT grantor oid, OUT grantee oid, OUT privilege_type text, OUT is_grantable boolean | func + pg_catalog | aclinsert | aclitem[] | aclitem[], aclitem | func + pg_catalog | aclremove | aclitem[] | aclitem[], aclitem | func +(4 rows) + +\df has_database_privilege oid text + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+------------------------+------------------+---------------------+------ + pg_catalog | has_database_privilege | boolean | oid, text | func + pg_catalog | has_database_privilege | boolean | oid, text, text | func +(2 rows) + +\df has_database_privilege oid text - + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+------------------------+------------------+---------------------+------ + pg_catalog | has_database_privilege | boolean | oid, text | func +(1 row) + +\dfa bit* small* + List of functions + Schema | Name | Result data type | Argument data types | Type +------------+---------+------------------+---------------------+------ + pg_catalog | bit_and | smallint | smallint | agg + pg_catalog | bit_or | smallint | smallint | agg + pg_catalog | bit_xor | smallint | smallint | agg +(3 rows) + +\df *._pg_expandarray + List of functions + Schema | Name | Result data type | Argument data types | Type +--------------------+-----------------+------------------+-------------------------------------------+------ + information_schema | _pg_expandarray | SETOF record | anyarray, OUT x anyelement, OUT n integer | func +(1 row) + +\do - pg_catalog.int4 + List of operators + Schema | Name | Left arg type | Right arg type | Result type | Description +------------+------+---------------+----------------+-------------+------------- + pg_catalog | - | | integer | integer | negate +(1 row) + +\do && anyarray * + List of operators + Schema | Name | Left arg type | Right arg type | Result type | Description +------------+------+---------------+----------------+-------------+------------- + pg_catalog | && | anyarray | anyarray | boolean | overlaps +(1 row) + +-- check \df+ +-- we have to use functions with a predictable owner name, so make a role +create role regress_psql_user superuser; +begin; +set session authorization regress_psql_user; +create function psql_df_internal (float8) + returns float8 + language internal immutable parallel safe strict + as 'dsin'; +create function psql_df_sql (x integer) + returns integer + security definer + begin atomic select x + 1; end; +create function psql_df_plpgsql () + returns void + language plpgsql + as $$ begin return; end; $$; +comment on function psql_df_plpgsql () is 'some comment'; +\df+ psql_df_* + List of functions + Schema | Name | Result data type | Argument data types | Type | Volatility | Parallel | Owner | Security | Access privileges | Language | Internal name | Description +--------+------------------+------------------+---------------------+------+------------+----------+-------------------+----------+-------------------+----------+---------------+-------------- + public | psql_df_internal | double precision | double precision | func | immutable | safe | regress_psql_user | invoker | | internal | dsin | + public | psql_df_plpgsql | void | | func | volatile | unsafe | 
regress_psql_user | invoker | | plpgsql | | some comment + public | psql_df_sql | integer | x integer | func | volatile | unsafe | regress_psql_user | definer | | sql | | +(3 rows) + +rollback; +drop role regress_psql_user; +-- check \sf +\sf information_schema._pg_expandarray +CREATE OR REPLACE FUNCTION information_schema._pg_expandarray(anyarray, OUT x anyelement, OUT n integer) + RETURNS SETOF record + LANGUAGE sql + IMMUTABLE PARALLEL SAFE STRICT +AS $function$select $1[s], + s operator(pg_catalog.-) pg_catalog.array_lower($1,1) operator(pg_catalog.+) 1 + from pg_catalog.generate_series(pg_catalog.array_lower($1,1), + pg_catalog.array_upper($1,1), + 1) as g(s)$function$ +\sf+ information_schema._pg_expandarray + CREATE OR REPLACE FUNCTION information_schema._pg_expandarray(anyarray, OUT x anyelement, OUT n integer) + RETURNS SETOF record + LANGUAGE sql + IMMUTABLE PARALLEL SAFE STRICT +1 AS $function$select $1[s], +2 s operator(pg_catalog.-) pg_catalog.array_lower($1,1) operator(pg_catalog.+) 1 +3 from pg_catalog.generate_series(pg_catalog.array_lower($1,1), +4 pg_catalog.array_upper($1,1), +5 1) as g(s)$function$ +\sf+ interval_pl_time + CREATE OR REPLACE FUNCTION pg_catalog.interval_pl_time(interval, time without time zone) + RETURNS time without time zone + LANGUAGE sql + IMMUTABLE PARALLEL SAFE STRICT COST 1 +1 RETURN ($2 + $1) +\sf ts_debug(text) +CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + RETURNS SETOF record + LANGUAGE sql + STABLE PARALLEL SAFE STRICT +BEGIN ATOMIC + SELECT ts_debug.alias, + ts_debug.description, + ts_debug.token, + ts_debug.dictionaries, + ts_debug.dictionary, + ts_debug.lexemes + FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); +END +\sf+ ts_debug(text) + CREATE OR REPLACE FUNCTION pg_catalog.ts_debug(document text, OUT alias text, OUT description text, OUT token text, OUT dictionaries regdictionary[], OUT dictionary regdictionary, OUT lexemes text[]) + RETURNS SETOF record + LANGUAGE sql + STABLE PARALLEL SAFE STRICT +1 BEGIN ATOMIC +2 SELECT ts_debug.alias, +3 ts_debug.description, +4 ts_debug.token, +5 ts_debug.dictionaries, +6 ts_debug.dictionary, +7 ts_debug.lexemes +8 FROM ts_debug(get_current_ts_config(), ts_debug.document) ts_debug(alias, description, token, dictionaries, dictionary, lexemes); +9 END +-- AUTOCOMMIT +CREATE TABLE ac_test (a int); +\set AUTOCOMMIT off +INSERT INTO ac_test VALUES (1); +COMMIT; +SELECT * FROM ac_test; + a +--- + 1 +(1 row) + +COMMIT; +INSERT INTO ac_test VALUES (2); +ROLLBACK; +SELECT * FROM ac_test; + a +--- + 1 +(1 row) + +COMMIT; +BEGIN; +INSERT INTO ac_test VALUES (3); +COMMIT; +SELECT * FROM ac_test; + a +--- + 1 + 3 +(2 rows) + +COMMIT; +BEGIN; +INSERT INTO ac_test VALUES (4); +ROLLBACK; +SELECT * FROM ac_test; + a +--- + 1 + 3 +(2 rows) + +COMMIT; +\set AUTOCOMMIT on +DROP TABLE ac_test; +SELECT * FROM ac_test; -- should be gone now +ERROR: relation "ac_test" does not exist +LINE 1: SELECT * FROM ac_test; + ^ +-- ON_ERROR_ROLLBACK +\set ON_ERROR_ROLLBACK on +CREATE TABLE oer_test (a int); +BEGIN; +INSERT INTO oer_test VALUES (1); +INSERT INTO oer_test VALUES ('foo'); +ERROR: invalid input syntax for type integer: "foo" +LINE 1: INSERT INTO oer_test VALUES ('foo'); + ^ +INSERT INTO oer_test VALUES (3); +COMMIT; +SELECT * FROM oer_test; + a +--- + 1 + 3 +(2 rows) + +BEGIN; +INSERT 
INTO oer_test VALUES (4); +ROLLBACK; +SELECT * FROM oer_test; + a +--- + 1 + 3 +(2 rows) + +BEGIN; +INSERT INTO oer_test VALUES (5); +COMMIT AND CHAIN; +INSERT INTO oer_test VALUES (6); +COMMIT; +SELECT * FROM oer_test; + a +--- + 1 + 3 + 5 + 6 +(4 rows) + +DROP TABLE oer_test; +\set ON_ERROR_ROLLBACK off +-- ECHO errors +\set ECHO errors +ERROR: relation "notexists" does not exist +LINE 1: SELECT * FROM notexists; + ^ +STATEMENT: SELECT * FROM notexists; +-- +-- combined queries +-- +CREATE FUNCTION warn(msg TEXT) RETURNS BOOLEAN LANGUAGE plpgsql +AS $$ + BEGIN RAISE NOTICE 'warn %', msg ; RETURN TRUE ; END +$$; +-- show both +SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; +NOTICE: warn 1.5 +CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE + one +----- + 1 +(1 row) + + warn +------ + t +(1 row) + + two +----- + 2 +(1 row) + +-- \gset applies to last query only +SELECT 3 AS three \; SELECT warn('3.5') \; SELECT 4 AS four \gset +NOTICE: warn 3.5 +CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE + three +------- + 3 +(1 row) + + warn +------ + t +(1 row) + +\echo :three :four +:three 4 +-- syntax error stops all processing +SELECT 5 \; SELECT 6 + \; SELECT warn('6.5') \; SELECT 7 ; +ERROR: syntax error at or near ";" +LINE 1: SELECT 5 ; SELECT 6 + ; SELECT warn('6.5') ; SELECT 7 ; + ^ +-- with aborted transaction, stop on first error +BEGIN \; SELECT 8 AS eight \; SELECT 9/0 AS nine \; ROLLBACK \; SELECT 10 AS ten ; + eight +------- + 8 +(1 row) + +ERROR: division by zero +-- close previously aborted transaction +ROLLBACK; +-- miscellaneous SQL commands +-- (non SELECT output is sent to stderr, thus is not shown in expected results) +SELECT 'ok' AS "begin" \; +CREATE TABLE psql_comics(s TEXT) \; +INSERT INTO psql_comics VALUES ('Calvin'), ('hobbes') \; +COPY psql_comics FROM STDIN \; +UPDATE psql_comics SET s = 'Hobbes' WHERE s = 'hobbes' \; +DELETE FROM psql_comics WHERE s = 'Moe' \; +COPY psql_comics TO STDOUT \; +TRUNCATE psql_comics \; +DROP TABLE psql_comics \; +SELECT 'ok' AS "done" ; + begin +------- + ok +(1 row) + +Calvin +Susie +Hobbes + done +------ + ok +(1 row) + +\set SHOW_ALL_RESULTS off +SELECT 1 AS one \; SELECT warn('1.5') \; SELECT 2 AS two ; +NOTICE: warn 1.5 +CONTEXT: PL/pgSQL function warn(text) line 2 at RAISE + two +----- + 2 +(1 row) + +\set SHOW_ALL_RESULTS on +DROP FUNCTION warn(TEXT); +-- +-- \g with file +-- +\getenv abs_builddir PG_ABS_BUILDDIR +\set g_out_file :abs_builddir '/results/psql-output1' +CREATE TEMPORARY TABLE reload_output( + lineno int NOT NULL GENERATED ALWAYS AS IDENTITY, + line text +); +SELECT 1 AS a \g :g_out_file +COPY reload_output(line) FROM :'g_out_file'; +SELECT 2 AS b\; SELECT 3 AS c\; SELECT 4 AS d \g :g_out_file +COPY reload_output(line) FROM :'g_out_file'; +COPY (SELECT 'foo') TO STDOUT \; COPY (SELECT 'bar') TO STDOUT \g :g_out_file +COPY reload_output(line) FROM :'g_out_file'; +SELECT line FROM reload_output ORDER BY lineno; + line +--------- + a + --- + 1 + (1 row) + + b + --- + 2 + (1 row) + + c + --- + 3 + (1 row) + + d + --- + 4 + (1 row) + + foo + bar +(22 rows) + +TRUNCATE TABLE reload_output; +-- +-- \o with file +-- +\set o_out_file :abs_builddir '/results/psql-output2' +\o :o_out_file +SELECT max(unique1) FROM onek; +SELECT 1 AS a\; SELECT 2 AS b\; SELECT 3 AS c; +-- COPY TO file +-- The data goes to :g_out_file and the status to :o_out_file +\set QUIET false +COPY (SELECT unique1 FROM onek ORDER BY unique1 LIMIT 10) TO :'g_out_file'; +-- DML command status +UPDATE onek SET unique1 = unique1 WHERE 
false; +\set QUIET true +\o +-- Check the contents of the files generated. +COPY reload_output(line) FROM :'g_out_file'; +SELECT line FROM reload_output ORDER BY lineno; + line +------ + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + +TRUNCATE TABLE reload_output; +COPY reload_output(line) FROM :'o_out_file'; +SELECT line FROM reload_output ORDER BY lineno; + line +---------- + max + ----- + 999 + (1 row) + + a + --- + 1 + (1 row) + + b + --- + 2 + (1 row) + + c + --- + 3 + (1 row) + + COPY 10 + UPDATE 0 +(22 rows) + +TRUNCATE TABLE reload_output; +-- Multiple COPY TO STDOUT with output file +\o :o_out_file +-- The data goes to :o_out_file with no status generated. +COPY (SELECT 'foo1') TO STDOUT \; COPY (SELECT 'bar1') TO STDOUT; +-- Combination of \o and \g file with multiple COPY queries. +COPY (SELECT 'foo2') TO STDOUT \; COPY (SELECT 'bar2') TO STDOUT \g :g_out_file +\o +-- Check the contents of the files generated. +COPY reload_output(line) FROM :'g_out_file'; +SELECT line FROM reload_output ORDER BY lineno; + line +------ + foo2 + bar2 +(2 rows) + +TRUNCATE TABLE reload_output; +COPY reload_output(line) FROM :'o_out_file'; +SELECT line FROM reload_output ORDER BY lineno; + line +------ + foo1 + bar1 +(2 rows) + +DROP TABLE reload_output; +-- +-- AUTOCOMMIT and combined queries +-- +\set AUTOCOMMIT off +\echo '# AUTOCOMMIT:' :AUTOCOMMIT +# AUTOCOMMIT: off +-- BEGIN is now implicit +CREATE TABLE foo(s TEXT) \; +ROLLBACK; +CREATE TABLE foo(s TEXT) \; +INSERT INTO foo(s) VALUES ('hello'), ('world') \; +COMMIT; +DROP TABLE foo \; +ROLLBACK; +-- table foo is still there +SELECT * FROM foo ORDER BY 1 \; +DROP TABLE foo \; +COMMIT; + s +------- + hello + world +(2 rows) + +\set AUTOCOMMIT on +\echo '# AUTOCOMMIT:' :AUTOCOMMIT +# AUTOCOMMIT: on +-- BEGIN now explicit for multi-statement transactions +BEGIN \; +CREATE TABLE foo(s TEXT) \; +INSERT INTO foo(s) VALUES ('hello'), ('world') \; +COMMIT; +BEGIN \; +DROP TABLE foo \; +ROLLBACK \; +-- implicit transactions +SELECT * FROM foo ORDER BY 1 \; +DROP TABLE foo; + s +------- + hello + world +(2 rows) + +-- +-- test ON_ERROR_ROLLBACK and combined queries +-- +CREATE FUNCTION psql_error(msg TEXT) RETURNS BOOLEAN AS $$ + BEGIN + RAISE EXCEPTION 'error %', msg; + END; +$$ LANGUAGE plpgsql; +\set ON_ERROR_ROLLBACK on +\echo '# ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK +# ON_ERROR_ROLLBACK: on +\echo '# AUTOCOMMIT:' :AUTOCOMMIT +# AUTOCOMMIT: on +BEGIN; +CREATE TABLE bla(s NO_SUCH_TYPE); -- fails +ERROR: type "no_such_type" does not exist +LINE 1: CREATE TABLE bla(s NO_SUCH_TYPE); + ^ +CREATE TABLE bla(s TEXT); -- succeeds +SELECT psql_error('oops!'); -- fails +ERROR: error oops! +CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE +INSERT INTO bla VALUES ('Calvin'), ('Hobbes'); +COMMIT; +SELECT * FROM bla ORDER BY 1; + s +-------- + Calvin + Hobbes +(2 rows) + +BEGIN; +INSERT INTO bla VALUES ('Susie'); -- succeeds +-- now with combined queries +INSERT INTO bla VALUES ('Rosalyn') \; -- will rollback +SELECT 'before error' AS show \; -- will show nevertheless! + SELECT psql_error('boum!') \; -- failure + SELECT 'after error' AS noshow; -- hidden by preceding error + show +-------------- + before error +(1 row) + +ERROR: error boum! +CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE +INSERT INTO bla(s) VALUES ('Moe') \; -- will rollback + SELECT psql_error('bam!'); +ERROR: error bam! 
+CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE +INSERT INTO bla VALUES ('Miss Wormwood'); -- succeeds +COMMIT; +SELECT * FROM bla ORDER BY 1; + s +--------------- + Calvin + Hobbes + Miss Wormwood + Susie +(4 rows) + +-- some with autocommit off +\set AUTOCOMMIT off +\echo '# AUTOCOMMIT:' :AUTOCOMMIT +# AUTOCOMMIT: off +-- implicit BEGIN +INSERT INTO bla VALUES ('Dad'); -- succeeds +SELECT psql_error('bad!'); -- implicit partial rollback +ERROR: error bad! +CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE +INSERT INTO bla VALUES ('Mum') \; -- will rollback +SELECT COUNT(*) AS "#mum" +FROM bla WHERE s = 'Mum' \; -- but be counted here +SELECT psql_error('bad!'); -- implicit partial rollback + #mum +------ + 1 +(1 row) + +ERROR: error bad! +CONTEXT: PL/pgSQL function psql_error(text) line 3 at RAISE +COMMIT; +SELECT COUNT(*) AS "#mum" +FROM bla WHERE s = 'Mum' \; -- no mum here +SELECT * FROM bla ORDER BY 1; + #mum +------ + 0 +(1 row) + + s +--------------- + Calvin + Dad + Hobbes + Miss Wormwood + Susie +(5 rows) + +-- reset all +\set AUTOCOMMIT on +\set ON_ERROR_ROLLBACK off +\echo '# final ON_ERROR_ROLLBACK:' :ON_ERROR_ROLLBACK +# final ON_ERROR_ROLLBACK: off +DROP TABLE bla; +DROP FUNCTION psql_error; +-- check describing invalid multipart names +\dA regression.heap +improper qualified name (too many dotted names): regression.heap +\dA nonesuch.heap +improper qualified name (too many dotted names): nonesuch.heap +\dt host.regression.pg_catalog.pg_class +improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class +\dt |.pg_catalog.pg_class +cross-database references are not implemented: |.pg_catalog.pg_class +\dt nonesuch.pg_catalog.pg_class +cross-database references are not implemented: nonesuch.pg_catalog.pg_class +\da host.regression.pg_catalog.sum +improper qualified name (too many dotted names): host.regression.pg_catalog.sum +\da +.pg_catalog.sum +cross-database references are not implemented: +.pg_catalog.sum +\da nonesuch.pg_catalog.sum +cross-database references are not implemented: nonesuch.pg_catalog.sum +\dAc nonesuch.brin +improper qualified name (too many dotted names): nonesuch.brin +\dAc regression.brin +improper qualified name (too many dotted names): regression.brin +\dAf nonesuch.brin +improper qualified name (too many dotted names): nonesuch.brin +\dAf regression.brin +improper qualified name (too many dotted names): regression.brin +\dAo nonesuch.brin +improper qualified name (too many dotted names): nonesuch.brin +\dAo regression.brin +improper qualified name (too many dotted names): regression.brin +\dAp nonesuch.brin +improper qualified name (too many dotted names): nonesuch.brin +\dAp regression.brin +improper qualified name (too many dotted names): regression.brin +\db nonesuch.pg_default +improper qualified name (too many dotted names): nonesuch.pg_default +\db regression.pg_default +improper qualified name (too many dotted names): regression.pg_default +\dc host.regression.public.conversion +improper qualified name (too many dotted names): host.regression.public.conversion +\dc (.public.conversion +cross-database references are not implemented: (.public.conversion +\dc nonesuch.public.conversion +cross-database references are not implemented: nonesuch.public.conversion +\dC host.regression.pg_catalog.int8 +improper qualified name (too many dotted names): host.regression.pg_catalog.int8 +\dC ).pg_catalog.int8 +cross-database references are not implemented: ).pg_catalog.int8 +\dC nonesuch.pg_catalog.int8 
+cross-database references are not implemented: nonesuch.pg_catalog.int8 +\dd host.regression.pg_catalog.pg_class +improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class +\dd [.pg_catalog.pg_class +cross-database references are not implemented: [.pg_catalog.pg_class +\dd nonesuch.pg_catalog.pg_class +cross-database references are not implemented: nonesuch.pg_catalog.pg_class +\dD host.regression.public.gtestdomain1 +improper qualified name (too many dotted names): host.regression.public.gtestdomain1 +\dD ].public.gtestdomain1 +cross-database references are not implemented: ].public.gtestdomain1 +\dD nonesuch.public.gtestdomain1 +cross-database references are not implemented: nonesuch.public.gtestdomain1 +\ddp host.regression.pg_catalog.pg_class +improper qualified name (too many dotted names): host.regression.pg_catalog.pg_class +\ddp {.pg_catalog.pg_class +cross-database references are not implemented: {.pg_catalog.pg_class +\ddp nonesuch.pg_catalog.pg_class +cross-database references are not implemented: nonesuch.pg_catalog.pg_class +\dE host.regression.public.ft +improper qualified name (too many dotted names): host.regression.public.ft +\dE }.public.ft +cross-database references are not implemented: }.public.ft +\dE nonesuch.public.ft +cross-database references are not implemented: nonesuch.public.ft +\di host.regression.public.tenk1_hundred +improper qualified name (too many dotted names): host.regression.public.tenk1_hundred +\di ..public.tenk1_hundred +improper qualified name (too many dotted names): ..public.tenk1_hundred +\di nonesuch.public.tenk1_hundred +cross-database references are not implemented: nonesuch.public.tenk1_hundred +\dm host.regression.public.mvtest_bb +improper qualified name (too many dotted names): host.regression.public.mvtest_bb +\dm ^.public.mvtest_bb +cross-database references are not implemented: ^.public.mvtest_bb +\dm nonesuch.public.mvtest_bb +cross-database references are not implemented: nonesuch.public.mvtest_bb +\ds host.regression.public.check_seq +improper qualified name (too many dotted names): host.regression.public.check_seq +\ds regression|mydb.public.check_seq +cross-database references are not implemented: regression|mydb.public.check_seq +\ds nonesuch.public.check_seq +cross-database references are not implemented: nonesuch.public.check_seq +\dt host.regression.public.b_star +improper qualified name (too many dotted names): host.regression.public.b_star +\dt regres+ion.public.b_star +cross-database references are not implemented: regres+ion.public.b_star +\dt nonesuch.public.b_star +cross-database references are not implemented: nonesuch.public.b_star +\dv host.regression.public.shoe +improper qualified name (too many dotted names): host.regression.public.shoe +\dv regress(ion).public.shoe +cross-database references are not implemented: regress(ion).public.shoe +\dv nonesuch.public.shoe +cross-database references are not implemented: nonesuch.public.shoe +\des nonesuch.server +improper qualified name (too many dotted names): nonesuch.server +\des regression.server +improper qualified name (too many dotted names): regression.server +\des nonesuch.server +improper qualified name (too many dotted names): nonesuch.server +\des regression.server +improper qualified name (too many dotted names): regression.server +\des nonesuch.username +improper qualified name (too many dotted names): nonesuch.username +\des regression.username +improper qualified name (too many dotted names): regression.username +\dew nonesuch.fdw 
+improper qualified name (too many dotted names): nonesuch.fdw +\dew regression.fdw +improper qualified name (too many dotted names): regression.fdw +\df host.regression.public.namelen +improper qualified name (too many dotted names): host.regression.public.namelen +\df regres[qrstuv]ion.public.namelen +cross-database references are not implemented: regres[qrstuv]ion.public.namelen +\df nonesuch.public.namelen +cross-database references are not implemented: nonesuch.public.namelen +\dF host.regression.pg_catalog.arabic +improper qualified name (too many dotted names): host.regression.pg_catalog.arabic +\dF regres{1,2}ion.pg_catalog.arabic +cross-database references are not implemented: regres{1,2}ion.pg_catalog.arabic +\dF nonesuch.pg_catalog.arabic +cross-database references are not implemented: nonesuch.pg_catalog.arabic +\dFd host.regression.pg_catalog.arabic_stem +improper qualified name (too many dotted names): host.regression.pg_catalog.arabic_stem +\dFd regres?ion.pg_catalog.arabic_stem +cross-database references are not implemented: regres?ion.pg_catalog.arabic_stem +\dFd nonesuch.pg_catalog.arabic_stem +cross-database references are not implemented: nonesuch.pg_catalog.arabic_stem +\dFp host.regression.pg_catalog.default +improper qualified name (too many dotted names): host.regression.pg_catalog.default +\dFp ^regression.pg_catalog.default +cross-database references are not implemented: ^regression.pg_catalog.default +\dFp nonesuch.pg_catalog.default +cross-database references are not implemented: nonesuch.pg_catalog.default +\dFt host.regression.pg_catalog.ispell +improper qualified name (too many dotted names): host.regression.pg_catalog.ispell +\dFt regression$.pg_catalog.ispell +cross-database references are not implemented: regression$.pg_catalog.ispell +\dFt nonesuch.pg_catalog.ispell +cross-database references are not implemented: nonesuch.pg_catalog.ispell +\dg nonesuch.pg_database_owner +improper qualified name (too many dotted names): nonesuch.pg_database_owner +\dg regression.pg_database_owner +improper qualified name (too many dotted names): regression.pg_database_owner +\dL host.regression.plpgsql +improper qualified name (too many dotted names): host.regression.plpgsql +\dL *.plpgsql +cross-database references are not implemented: *.plpgsql +\dL nonesuch.plpgsql +cross-database references are not implemented: nonesuch.plpgsql +\dn host.regression.public +improper qualified name (too many dotted names): host.regression.public +\dn """".public +cross-database references are not implemented: """".public +\dn nonesuch.public +cross-database references are not implemented: nonesuch.public +\do host.regression.public.!=- +improper qualified name (too many dotted names): host.regression.public.!=- +\do "regression|mydb".public.!=- +cross-database references are not implemented: "regression|mydb".public.!=- +\do nonesuch.public.!=- +cross-database references are not implemented: nonesuch.public.!=- +\dO host.regression.pg_catalog.POSIX +improper qualified name (too many dotted names): host.regression.pg_catalog.POSIX +\dO .pg_catalog.POSIX +cross-database references are not implemented: .pg_catalog.POSIX +\dO nonesuch.pg_catalog.POSIX +cross-database references are not implemented: nonesuch.pg_catalog.POSIX +\dp host.regression.public.a_star +improper qualified name (too many dotted names): host.regression.public.a_star +\dp "regres+ion".public.a_star +cross-database references are not implemented: "regres+ion".public.a_star +\dp nonesuch.public.a_star +cross-database 
references are not implemented: nonesuch.public.a_star +\dP host.regression.public.mlparted +improper qualified name (too many dotted names): host.regression.public.mlparted +\dP "regres(sion)".public.mlparted +cross-database references are not implemented: "regres(sion)".public.mlparted +\dP nonesuch.public.mlparted +cross-database references are not implemented: nonesuch.public.mlparted +\drds nonesuch.lc_messages +improper qualified name (too many dotted names): nonesuch.lc_messages +\drds regression.lc_messages +improper qualified name (too many dotted names): regression.lc_messages +\dRp public.mypub +improper qualified name (too many dotted names): public.mypub +\dRp regression.mypub +improper qualified name (too many dotted names): regression.mypub +\dRs public.mysub +improper qualified name (too many dotted names): public.mysub +\dRs regression.mysub +improper qualified name (too many dotted names): regression.mysub +\dT host.regression.public.widget +improper qualified name (too many dotted names): host.regression.public.widget +\dT "regression{1,2}".public.widget +cross-database references are not implemented: "regression{1,2}".public.widget +\dT nonesuch.public.widget +cross-database references are not implemented: nonesuch.public.widget +\dx regression.plpgsql +improper qualified name (too many dotted names): regression.plpgsql +\dx nonesuch.plpgsql +improper qualified name (too many dotted names): nonesuch.plpgsql +\dX host.regression.public.func_deps_stat +improper qualified name (too many dotted names): host.regression.public.func_deps_stat +\dX "^regression$".public.func_deps_stat +cross-database references are not implemented: "^regression$".public.func_deps_stat +\dX nonesuch.public.func_deps_stat +cross-database references are not implemented: nonesuch.public.func_deps_stat +\dy regression.myevt +improper qualified name (too many dotted names): regression.myevt +\dy nonesuch.myevt +improper qualified name (too many dotted names): nonesuch.myevt +-- check that dots within quoted name segments are not counted +\dA "no.such.access.method" +List of access methods + Name | Type +------+------ +(0 rows) + +\dt "no.such.table.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\da "no.such.aggregate.function" + List of aggregate functions + Schema | Name | Result data type | Argument data types | Description +--------+------+------------------+---------------------+------------- +(0 rows) + +\dAc "no.such.operator.class" + List of operator classes + AM | Input type | Storage type | Operator class | Default? 
+----+------------+--------------+----------------+---------- +(0 rows) + +\dAf "no.such.operator.family" + List of operator families + AM | Operator family | Applicable types +----+-----------------+------------------ +(0 rows) + +\dAo "no.such.operator.of.operator.family" + List of operators of operator families + AM | Operator family | Operator | Strategy | Purpose +----+-----------------+----------+----------+--------- +(0 rows) + +\dAp "no.such.operator.support.function.of.operator.family" + List of support functions of operator families + AM | Operator family | Registered left type | Registered right type | Number | Function +----+-----------------+----------------------+-----------------------+--------+---------- +(0 rows) + +\db "no.such.tablespace" + List of tablespaces + Name | Owner | Location +------+-------+---------- +(0 rows) + +\dc "no.such.conversion" + List of conversions + Schema | Name | Source | Destination | Default? +--------+------+--------+-------------+---------- +(0 rows) + +\dC "no.such.cast" + List of casts + Source type | Target type | Function | Implicit? +-------------+-------------+----------+----------- +(0 rows) + +\dd "no.such.object.description" + Object descriptions + Schema | Name | Object | Description +--------+------+--------+------------- +(0 rows) + +\dD "no.such.domain" + List of domains + Schema | Name | Type | Collation | Nullable | Default | Check +--------+------+------+-----------+----------+---------+------- +(0 rows) + +\ddp "no.such.default.access.privilege" + Default access privileges + Owner | Schema | Type | Access privileges +-------+--------+------+------------------- +(0 rows) + +\di "no.such.index.relation" + List of relations + Schema | Name | Type | Owner | Table +--------+------+------+-------+------- +(0 rows) + +\dm "no.such.materialized.view" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\ds "no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\dt "no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\dv "no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\des "no.such.foreign.server" + List of foreign servers + Name | Owner | Foreign-data wrapper +------+-------+---------------------- +(0 rows) + +\dew "no.such.foreign.data.wrapper" + List of foreign-data wrappers + Name | Owner | Handler | Validator +------+-------+---------+----------- +(0 rows) + +\df "no.such.function" + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+------+------------------+---------------------+------ +(0 rows) + +\dF "no.such.text.search.configuration" +List of text search configurations + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFd "no.such.text.search.dictionary" +List of text search dictionaries + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFp "no.such.text.search.parser" + List of text search parsers + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFt "no.such.text.search.template" +List of text search templates + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dg "no.such.role" + List of roles + Role name | Attributes +-----------+------------ + +\dL "no.such.language" + List of languages + Name | Owner | Trusted | Description 
+------+-------+---------+------------- +(0 rows) + +\dn "no.such.schema" +List of schemas + Name | Owner +------+------- +(0 rows) + +\do "no.such.operator" + List of operators + Schema | Name | Left arg type | Right arg type | Result type | Description +--------+------+---------------+----------------+-------------+------------- +(0 rows) + +\dO "no.such.collation" + List of collations + Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? +--------+------+----------+---------+-------+------------+-----------+---------------- +(0 rows) + +\dp "no.such.access.privilege" + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------+------+-------------------+-------------------+---------- +(0 rows) + +\dP "no.such.partitioned.relation" + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table +--------+------+-------+------+-------------+------- +(0 rows) + +\drds "no.such.setting" + List of settings + Role | Database | Settings +------+----------+---------- +(0 rows) + +\dRp "no.such.publication" + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +------+-------+------------+---------+---------+---------+-----------+---------- +(0 rows) + +\dRs "no.such.subscription" + List of subscriptions + Name | Owner | Enabled | Publication +------+-------+---------+------------- +(0 rows) + +\dT "no.such.data.type" + List of data types + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dx "no.such.installed.extension" + List of installed extensions + Name | Version | Schema | Description +------+---------+--------+------------- +(0 rows) + +\dX "no.such.extended.statistics" + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+------+------------+-----------+--------------+----- +(0 rows) + +\dy "no.such.event.trigger" + List of event triggers + Name | Event | Owner | Enabled | Function | Tags +------+-------+-------+---------+----------+------ +(0 rows) + +-- again, but with dotted schema qualifications. 
+\dA "no.such.schema"."no.such.access.method" +improper qualified name (too many dotted names): "no.such.schema"."no.such.access.method" +\dt "no.such.schema"."no.such.table.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\da "no.such.schema"."no.such.aggregate.function" + List of aggregate functions + Schema | Name | Result data type | Argument data types | Description +--------+------+------------------+---------------------+------------- +(0 rows) + +\dAc "no.such.schema"."no.such.operator.class" +improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.class" +\dAf "no.such.schema"."no.such.operator.family" +improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.family" +\dAo "no.such.schema"."no.such.operator.of.operator.family" +improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.of.operator.family" +\dAp "no.such.schema"."no.such.operator.support.function.of.operator.family" +improper qualified name (too many dotted names): "no.such.schema"."no.such.operator.support.function.of.operator.family" +\db "no.such.schema"."no.such.tablespace" +improper qualified name (too many dotted names): "no.such.schema"."no.such.tablespace" +\dc "no.such.schema"."no.such.conversion" + List of conversions + Schema | Name | Source | Destination | Default? +--------+------+--------+-------------+---------- +(0 rows) + +\dC "no.such.schema"."no.such.cast" + List of casts + Source type | Target type | Function | Implicit? +-------------+-------------+----------+----------- +(0 rows) + +\dd "no.such.schema"."no.such.object.description" + Object descriptions + Schema | Name | Object | Description +--------+------+--------+------------- +(0 rows) + +\dD "no.such.schema"."no.such.domain" + List of domains + Schema | Name | Type | Collation | Nullable | Default | Check +--------+------+------+-----------+----------+---------+------- +(0 rows) + +\ddp "no.such.schema"."no.such.default.access.privilege" + Default access privileges + Owner | Schema | Type | Access privileges +-------+--------+------+------------------- +(0 rows) + +\di "no.such.schema"."no.such.index.relation" + List of relations + Schema | Name | Type | Owner | Table +--------+------+------+-------+------- +(0 rows) + +\dm "no.such.schema"."no.such.materialized.view" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\ds "no.such.schema"."no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\dt "no.such.schema"."no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\dv "no.such.schema"."no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\des "no.such.schema"."no.such.foreign.server" +improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.server" +\dew "no.such.schema"."no.such.foreign.data.wrapper" +improper qualified name (too many dotted names): "no.such.schema"."no.such.foreign.data.wrapper" +\df "no.such.schema"."no.such.function" + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+------+------------------+---------------------+------ +(0 rows) + +\dF "no.such.schema"."no.such.text.search.configuration" +List of text search configurations + Schema | Name | Description +--------+------+------------- +(0 
rows) + +\dFd "no.such.schema"."no.such.text.search.dictionary" +List of text search dictionaries + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFp "no.such.schema"."no.such.text.search.parser" + List of text search parsers + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFt "no.such.schema"."no.such.text.search.template" +List of text search templates + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dg "no.such.schema"."no.such.role" +improper qualified name (too many dotted names): "no.such.schema"."no.such.role" +\dL "no.such.schema"."no.such.language" +cross-database references are not implemented: "no.such.schema"."no.such.language" +\do "no.such.schema"."no.such.operator" + List of operators + Schema | Name | Left arg type | Right arg type | Result type | Description +--------+------+---------------+----------------+-------------+------------- +(0 rows) + +\dO "no.such.schema"."no.such.collation" + List of collations + Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? +--------+------+----------+---------+-------+------------+-----------+---------------- +(0 rows) + +\dp "no.such.schema"."no.such.access.privilege" + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------+------+-------------------+-------------------+---------- +(0 rows) + +\dP "no.such.schema"."no.such.partitioned.relation" + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table +--------+------+-------+------+-------------+------- +(0 rows) + +\drds "no.such.schema"."no.such.setting" +improper qualified name (too many dotted names): "no.such.schema"."no.such.setting" +\dRp "no.such.schema"."no.such.publication" +improper qualified name (too many dotted names): "no.such.schema"."no.such.publication" +\dRs "no.such.schema"."no.such.subscription" +improper qualified name (too many dotted names): "no.such.schema"."no.such.subscription" +\dT "no.such.schema"."no.such.data.type" + List of data types + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dx "no.such.schema"."no.such.installed.extension" +improper qualified name (too many dotted names): "no.such.schema"."no.such.installed.extension" +\dX "no.such.schema"."no.such.extended.statistics" + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+------+------------+-----------+--------------+----- +(0 rows) + +\dy "no.such.schema"."no.such.event.trigger" +improper qualified name (too many dotted names): "no.such.schema"."no.such.event.trigger" +-- again, but with current database and dotted schema qualifications. +\dt regression."no.such.schema"."no.such.table.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\da regression."no.such.schema"."no.such.aggregate.function" + List of aggregate functions + Schema | Name | Result data type | Argument data types | Description +--------+------+------------------+---------------------+------------- +(0 rows) + +\dc regression."no.such.schema"."no.such.conversion" + List of conversions + Schema | Name | Source | Destination | Default? +--------+------+--------+-------------+---------- +(0 rows) + +\dC regression."no.such.schema"."no.such.cast" + List of casts + Source type | Target type | Function | Implicit? 
+-------------+-------------+----------+----------- +(0 rows) + +\dd regression."no.such.schema"."no.such.object.description" + Object descriptions + Schema | Name | Object | Description +--------+------+--------+------------- +(0 rows) + +\dD regression."no.such.schema"."no.such.domain" + List of domains + Schema | Name | Type | Collation | Nullable | Default | Check +--------+------+------+-----------+----------+---------+------- +(0 rows) + +\di regression."no.such.schema"."no.such.index.relation" + List of relations + Schema | Name | Type | Owner | Table +--------+------+------+-------+------- +(0 rows) + +\dm regression."no.such.schema"."no.such.materialized.view" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\ds regression."no.such.schema"."no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\dt regression."no.such.schema"."no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\dv regression."no.such.schema"."no.such.relation" + List of relations + Schema | Name | Type | Owner +--------+------+------+------- +(0 rows) + +\df regression."no.such.schema"."no.such.function" + List of functions + Schema | Name | Result data type | Argument data types | Type +--------+------+------------------+---------------------+------ +(0 rows) + +\dF regression."no.such.schema"."no.such.text.search.configuration" +List of text search configurations + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFd regression."no.such.schema"."no.such.text.search.dictionary" +List of text search dictionaries + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFp regression."no.such.schema"."no.such.text.search.parser" + List of text search parsers + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dFt regression."no.such.schema"."no.such.text.search.template" +List of text search templates + Schema | Name | Description +--------+------+------------- +(0 rows) + +\do regression."no.such.schema"."no.such.operator" + List of operators + Schema | Name | Left arg type | Right arg type | Result type | Description +--------+------+---------------+----------------+-------------+------------- +(0 rows) + +\dO regression."no.such.schema"."no.such.collation" + List of collations + Schema | Name | Provider | Collate | Ctype | ICU Locale | ICU Rules | Deterministic? +--------+------+----------+---------+-------+------------+-----------+---------------- +(0 rows) + +\dp regression."no.such.schema"."no.such.access.privilege" + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------+------+-------------------+-------------------+---------- +(0 rows) + +\dP regression."no.such.schema"."no.such.partitioned.relation" + List of partitioned relations + Schema | Name | Owner | Type | Parent name | Table +--------+------+-------+------+-------------+------- +(0 rows) + +\dT regression."no.such.schema"."no.such.data.type" + List of data types + Schema | Name | Description +--------+------+------------- +(0 rows) + +\dX regression."no.such.schema"."no.such.extended.statistics" + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+------+------------+-----------+--------------+----- +(0 rows) + +-- again, but with dotted database and dotted schema qualifications. 
+\dt "no.such.database"."no.such.schema"."no.such.table.relation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.table.relation" +\da "no.such.database"."no.such.schema"."no.such.aggregate.function" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.aggregate.function" +\dc "no.such.database"."no.such.schema"."no.such.conversion" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.conversion" +\dC "no.such.database"."no.such.schema"."no.such.cast" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.cast" +\dd "no.such.database"."no.such.schema"."no.such.object.description" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.object.description" +\dD "no.such.database"."no.such.schema"."no.such.domain" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.domain" +\ddp "no.such.database"."no.such.schema"."no.such.default.access.privilege" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.default.access.privilege" +\di "no.such.database"."no.such.schema"."no.such.index.relation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.index.relation" +\dm "no.such.database"."no.such.schema"."no.such.materialized.view" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.materialized.view" +\ds "no.such.database"."no.such.schema"."no.such.relation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation" +\dt "no.such.database"."no.such.schema"."no.such.relation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation" +\dv "no.such.database"."no.such.schema"."no.such.relation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.relation" +\df "no.such.database"."no.such.schema"."no.such.function" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.function" +\dF "no.such.database"."no.such.schema"."no.such.text.search.configuration" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.configuration" +\dFd "no.such.database"."no.such.schema"."no.such.text.search.dictionary" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.dictionary" +\dFp "no.such.database"."no.such.schema"."no.such.text.search.parser" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.parser" +\dFt "no.such.database"."no.such.schema"."no.such.text.search.template" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.text.search.template" +\do "no.such.database"."no.such.schema"."no.such.operator" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.operator" +\dO "no.such.database"."no.such.schema"."no.such.collation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.collation" +\dp "no.such.database"."no.such.schema"."no.such.access.privilege" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.access.privilege" +\dP 
"no.such.database"."no.such.schema"."no.such.partitioned.relation" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.partitioned.relation" +\dT "no.such.database"."no.such.schema"."no.such.data.type" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.data.type" +\dX "no.such.database"."no.such.schema"."no.such.extended.statistics" +cross-database references are not implemented: "no.such.database"."no.such.schema"."no.such.extended.statistics" +-- check \drg and \du +CREATE ROLE regress_du_role0; +CREATE ROLE regress_du_role1; +CREATE ROLE regress_du_role2; +CREATE ROLE regress_du_admin; +GRANT regress_du_role0 TO regress_du_admin WITH ADMIN TRUE; +GRANT regress_du_role1 TO regress_du_admin WITH ADMIN TRUE; +GRANT regress_du_role2 TO regress_du_admin WITH ADMIN TRUE; +GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN TRUE, INHERIT TRUE, SET TRUE GRANTED BY regress_du_admin; +GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_admin; +GRANT regress_du_role1 TO regress_du_role2 WITH ADMIN TRUE , INHERIT FALSE, SET TRUE GRANTED BY regress_du_admin; +GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT TRUE, SET FALSE GRANTED BY regress_du_role1; +GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT TRUE , SET TRUE GRANTED BY regress_du_role1; +GRANT regress_du_role0 TO regress_du_role1 WITH ADMIN FALSE, INHERIT FALSE, SET TRUE GRANTED BY regress_du_role2; +GRANT regress_du_role0 TO regress_du_role2 WITH ADMIN FALSE, INHERIT FALSE, SET FALSE GRANTED BY regress_du_role2; +\drg regress_du_role* + List of role grants + Role name | Member of | Options | Grantor +------------------+------------------+---------------------+------------------ + regress_du_role1 | regress_du_role0 | ADMIN, INHERIT, SET | regress_du_admin + regress_du_role1 | regress_du_role0 | INHERIT | regress_du_role1 + regress_du_role1 | regress_du_role0 | SET | regress_du_role2 + regress_du_role2 | regress_du_role0 | ADMIN | regress_du_admin + regress_du_role2 | regress_du_role0 | INHERIT, SET | regress_du_role1 + regress_du_role2 | regress_du_role0 | | regress_du_role2 + regress_du_role2 | regress_du_role1 | ADMIN, SET | regress_du_admin +(7 rows) + +\du regress_du_role* + List of roles + Role name | Attributes +------------------+-------------- + regress_du_role0 | Cannot login + regress_du_role1 | Cannot login + regress_du_role2 | Cannot login + +DROP ROLE regress_du_role0; +DROP ROLE regress_du_role1; +DROP ROLE regress_du_role2; +DROP ROLE regress_du_admin; diff --git a/src/test/regress/expected/psql_crosstab.out b/src/test/regress/expected/psql_crosstab.out new file mode 100644 index 0000000..e09e331 --- /dev/null +++ b/src/test/regress/expected/psql_crosstab.out @@ -0,0 +1,216 @@ +-- +-- \crosstabview +-- +CREATE TABLE ctv_data (v, h, c, i, d) AS +VALUES + ('v1','h2','foo', 3, '2015-04-01'::date), + ('v2','h1','bar', 3, '2015-01-02'), + ('v1','h0','baz', NULL, '2015-07-12'), + ('v0','h4','qux', 4, '2015-07-15'), + ('v0','h4','dbl', -3, '2014-12-15'), + ('v0',NULL,'qux', 5, '2014-07-15'), + ('v1','h2','quux',7, '2015-04-04'); +-- make plans more stable +ANALYZE ctv_data; +-- running \crosstabview after query uses query in buffer +SELECT v, EXTRACT(year FROM d), count(*) + FROM ctv_data + GROUP BY 1, 2 + ORDER BY 1, 2; + v | extract | count +----+---------+------- + v0 | 2014 | 2 + v0 | 2015 | 1 + v1 | 2015 | 3 + v2 | 2015 | 1 +(4 rows) + +-- basic usage 
with 3 columns + \crosstabview + v | 2014 | 2015 +----+------+------ + v0 | 2 | 1 + v1 | | 3 + v2 | | 1 +(3 rows) + +-- ordered months in horizontal header, quoted column name +SELECT v, to_char(d, 'Mon') AS "month name", EXTRACT(month FROM d) AS num, + count(*) FROM ctv_data GROUP BY 1,2,3 ORDER BY 1 + \crosstabview v "month name" 4 num + v | Jan | Apr | Jul | Dec +----+-----+-----+-----+----- + v0 | | | 2 | 1 + v1 | | 2 | 1 | + v2 | 1 | | | +(3 rows) + +-- ordered months in vertical header, ordered years in horizontal header +SELECT EXTRACT(year FROM d) AS year, to_char(d,'Mon') AS """month"" name", + EXTRACT(month FROM d) AS month, + format('sum=%s avg=%s', sum(i), avg(i)::numeric(2,1)) + FROM ctv_data + GROUP BY EXTRACT(year FROM d), to_char(d,'Mon'), EXTRACT(month FROM d) +ORDER BY month +\crosstabview """month"" name" year format year + "month" name | 2014 | 2015 +--------------+-----------------+---------------- + Jan | | sum=3 avg=3.0 + Apr | | sum=10 avg=5.0 + Jul | sum=5 avg=5.0 | sum=4 avg=4.0 + Dec | sum=-3 avg=-3.0 | +(4 rows) + +-- combine contents vertically into the same cell (V/H duplicates) +SELECT v, h, string_agg(c, E'\n') FROM ctv_data GROUP BY v, h ORDER BY 1,2,3 + \crosstabview 1 2 3 + v | h4 | | h0 | h2 | h1 +----+-----+-----+-----+------+----- + v0 | qux+| qux | | | + | dbl | | | | + v1 | | | baz | foo +| + | | | | quux | + v2 | | | | | bar +(3 rows) + +-- horizontal ASC order from window function +SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h) AS r +FROM ctv_data GROUP BY v, h ORDER BY 1,3,2 + \crosstabview v h c r + v | h0 | h1 | h2 | h4 | +----+-----+-----+------+-----+----- + v0 | | | | qux+| qux + | | | | dbl | + v1 | baz | | foo +| | + | | | quux | | + v2 | | bar | | | +(3 rows) + +-- horizontal DESC order from window function +SELECT v, h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h DESC) AS r +FROM ctv_data GROUP BY v, h ORDER BY 1,3,2 + \crosstabview v h c r + v | | h4 | h2 | h1 | h0 +----+-----+-----+------+-----+----- + v0 | qux | qux+| | | + | | dbl | | | + v1 | | | foo +| | baz + | | | quux | | + v2 | | | | bar | +(3 rows) + +-- horizontal ASC order from window function, NULLs pushed rightmost +SELECT v,h, string_agg(c, E'\n') AS c, row_number() OVER(ORDER BY h NULLS LAST) AS r +FROM ctv_data GROUP BY v, h ORDER BY 1,3,2 + \crosstabview v h c r + v | h0 | h1 | h2 | h4 | +----+-----+-----+------+-----+----- + v0 | | | | qux+| qux + | | | | dbl | + v1 | baz | | foo +| | + | | | quux | | + v2 | | bar | | | +(3 rows) + +-- only null, no column name, 2 columns: error +SELECT null,null \crosstabview +\crosstabview: query must return at least three columns +-- only null, no column name, 3 columns: works +SELECT null,null,null \crosstabview + ?column? 
| +----------+-- + | +(1 row) + +-- null display +\pset null '#null#' +SELECT v,h, string_agg(i::text, E'\n') AS i FROM ctv_data +GROUP BY v, h ORDER BY h,v + \crosstabview v h i + v | h0 | h1 | h2 | h4 | #null# +----+--------+----+----+----+-------- + v1 | #null# | | 3 +| | + | | | 7 | | + v2 | | 3 | | | + v0 | | | | 4 +| 5 + | | | | -3 | +(3 rows) + +\pset null '' +-- refer to columns by position +SELECT v,h,string_agg(i::text, E'\n'), string_agg(c, E'\n') +FROM ctv_data GROUP BY v, h ORDER BY h,v + \crosstabview 2 1 4 + h | v1 | v2 | v0 +----+------+-----+----- + h0 | baz | | + h1 | | bar | + h2 | foo +| | + | quux | | + h4 | | | qux+ + | | | dbl + | | | qux +(5 rows) + +-- refer to columns by positions and names mixed +SELECT v,h, string_agg(i::text, E'\n') AS i, string_agg(c, E'\n') AS c +FROM ctv_data GROUP BY v, h ORDER BY h,v + \crosstabview 1 "h" 4 + v | h0 | h1 | h2 | h4 | +----+-----+-----+------+-----+----- + v1 | baz | | foo +| | + | | | quux | | + v2 | | bar | | | + v0 | | | | qux+| qux + | | | | dbl | +(3 rows) + +-- refer to columns by quoted names, check downcasing of unquoted name +SELECT 1 as "22", 2 as b, 3 as "Foo" + \crosstabview "22" B "Foo" + 22 | 2 +----+--- + 1 | 3 +(1 row) + +-- error: bad column name +SELECT v,h,c,i FROM ctv_data + \crosstabview v h j +\crosstabview: column name not found: "j" +-- error: need to quote name +SELECT 1 as "22", 2 as b, 3 as "Foo" + \crosstabview 1 2 Foo +\crosstabview: column name not found: "foo" +-- error: need to not quote name +SELECT 1 as "22", 2 as b, 3 as "Foo" + \crosstabview 1 "B" "Foo" +\crosstabview: column name not found: "B" +-- error: bad column number +SELECT v,h,i,c FROM ctv_data + \crosstabview 2 1 5 +\crosstabview: column number 5 is out of range 1..4 +-- error: same H and V columns +SELECT v,h,i,c FROM ctv_data + \crosstabview 2 h 4 +\crosstabview: vertical and horizontal headers must be different columns +-- error: too many columns +SELECT a,a,1 FROM generate_series(1,3000) AS a + \crosstabview +\crosstabview: maximum number of columns (1600) exceeded +-- error: only one column +SELECT 1 \crosstabview +\crosstabview: query must return at least three columns +DROP TABLE ctv_data; +-- check error reporting (bug #14476) +CREATE TABLE ctv_data (x int, y int, v text); +INSERT INTO ctv_data SELECT 1, x, '*' || x FROM generate_series(1,10) x; +SELECT * FROM ctv_data \crosstabview + x | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 +---+----+----+----+----+----+----+----+----+----+----- + 1 | *1 | *2 | *3 | *4 | *5 | *6 | *7 | *8 | *9 | *10 +(1 row) + +INSERT INTO ctv_data VALUES (1, 10, '*'); -- duplicate data to cause error +SELECT * FROM ctv_data \crosstabview +\crosstabview: query result contains multiple data values for row "1", column "10" +DROP TABLE ctv_data; diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out new file mode 100644 index 0000000..69dc6cf --- /dev/null +++ b/src/test/regress/expected/publication.out @@ -0,0 +1,1737 @@ +-- +-- PUBLICATION +-- +CREATE ROLE regress_publication_user LOGIN SUPERUSER; +CREATE ROLE regress_publication_user2; +CREATE ROLE regress_publication_user_dummy LOGIN NOSUPERUSER; +SET SESSION AUTHORIZATION 'regress_publication_user'; +-- suppress warning that depends on wal_level +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_default; +RESET client_min_messages; +COMMENT ON PUBLICATION testpub_default IS 'test publication'; +SELECT obj_description(p.oid, 'pg_publication') FROM pg_publication p; + obj_description 
+------------------ + test publication +(1 row) + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpib_ins_trunct WITH (publish = insert); +RESET client_min_messages; +ALTER PUBLICATION testpub_default SET (publish = update); +-- error cases +CREATE PUBLICATION testpub_xxx WITH (foo); +ERROR: unrecognized publication parameter: "foo" +CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum'); +ERROR: unrecognized value for publication option "publish": "cluster" +CREATE PUBLICATION testpub_xxx WITH (publish_via_partition_root = 'true', publish_via_partition_root = '0'); +ERROR: conflicting or redundant options +LINE 1: ...ub_xxx WITH (publish_via_partition_root = 'true', publish_vi... + ^ +\dRp + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------+--------------------------+------------+---------+---------+---------+-----------+---------- + testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f + testpub_default | regress_publication_user | f | f | t | f | f | f +(2 rows) + +ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete'); +\dRp + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------+--------------------------+------------+---------+---------+---------+-----------+---------- + testpib_ins_trunct | regress_publication_user | f | t | f | f | f | f + testpub_default | regress_publication_user | f | t | t | t | f | f +(2 rows) + +--- adding tables +CREATE SCHEMA pub_test; +CREATE TABLE testpub_tbl1 (id serial primary key, data text); +CREATE TABLE pub_test.testpub_nopk (foo int, bar int); +CREATE VIEW testpub_view AS SELECT 1; +CREATE TABLE testpub_parted (a int) PARTITION BY LIST (a); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_foralltables FOR ALL TABLES WITH (publish = 'insert'); +RESET client_min_messages; +ALTER PUBLICATION testpub_foralltables SET (publish = 'insert, update'); +CREATE TABLE testpub_tbl2 (id serial primary key, data text); +-- fail - can't add to for all tables publication +ALTER PUBLICATION testpub_foralltables ADD TABLE testpub_tbl2; +ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES +DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. +-- fail - can't drop from all tables publication +ALTER PUBLICATION testpub_foralltables DROP TABLE testpub_tbl2; +ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES +DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. +-- fail - can't add to for all tables publication +ALTER PUBLICATION testpub_foralltables SET TABLE pub_test.testpub_nopk; +ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES +DETAIL: Tables cannot be added to or dropped from FOR ALL TABLES publications. +-- fail - can't add schema to 'FOR ALL TABLES' publication +ALTER PUBLICATION testpub_foralltables ADD TABLES IN SCHEMA pub_test; +ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES +DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications. +-- fail - can't drop schema from 'FOR ALL TABLES' publication +ALTER PUBLICATION testpub_foralltables DROP TABLES IN SCHEMA pub_test; +ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES +DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications. 
+-- fail - can't set schema to 'FOR ALL TABLES' publication +ALTER PUBLICATION testpub_foralltables SET TABLES IN SCHEMA pub_test; +ERROR: publication "testpub_foralltables" is defined as FOR ALL TABLES +DETAIL: Schemas cannot be added to or dropped from FOR ALL TABLES publications. +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1; +RESET client_min_messages; +-- should be able to add schema to 'FOR TABLE' publication +ALTER PUBLICATION testpub_fortable ADD TABLES IN SCHEMA pub_test; +\dRp+ testpub_fortable + Publication testpub_fortable + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "public.testpub_tbl1" +Tables from schemas: + "pub_test" + +-- should be able to drop schema from 'FOR TABLE' publication +ALTER PUBLICATION testpub_fortable DROP TABLES IN SCHEMA pub_test; +\dRp+ testpub_fortable + Publication testpub_fortable + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "public.testpub_tbl1" + +-- should be able to set schema to 'FOR TABLE' publication +ALTER PUBLICATION testpub_fortable SET TABLES IN SCHEMA pub_test; +\dRp+ testpub_fortable + Publication testpub_fortable + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test" + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pub_test; +-- should be able to create publication with schema and table of the same +-- schema +CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA pub_test, TABLE pub_test.testpub_nopk; +RESET client_min_messages; +\dRp+ testpub_for_tbl_schema + Publication testpub_for_tbl_schema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "pub_test.testpub_nopk" +Tables from schemas: + "pub_test" + +-- weird parser corner case +CREATE PUBLICATION testpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SCHEMA; +ERROR: invalid table name +LINE 1: ...estpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SC... 
+ ^ +CREATE PUBLICATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo; +ERROR: invalid schema name +LINE 1: ...CATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo; + ^ +-- should be able to add a table of the same schema to the schema publication +ALTER PUBLICATION testpub_forschema ADD TABLE pub_test.testpub_nopk; +\dRp+ testpub_forschema + Publication testpub_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "pub_test.testpub_nopk" +Tables from schemas: + "pub_test" + +-- should be able to drop the table +ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; +\dRp+ testpub_forschema + Publication testpub_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test" + +-- fail - can't drop a table from the schema publication which isn't in the +-- publication +ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; +ERROR: relation "testpub_nopk" is not part of the publication +-- should be able to set table to schema publication +ALTER PUBLICATION testpub_forschema SET TABLE pub_test.testpub_nopk; +\dRp+ testpub_forschema + Publication testpub_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "pub_test.testpub_nopk" + +SELECT pubname, puballtables FROM pg_publication WHERE pubname = 'testpub_foralltables'; + pubname | puballtables +----------------------+-------------- + testpub_foralltables | t +(1 row) + +\d+ testpub_tbl2 + Table "public.testpub_tbl2" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+------------------------------------------+----------+--------------+------------- + id | integer | | not null | nextval('testpub_tbl2_id_seq'::regclass) | plain | | + data | text | | | | extended | | +Indexes: + "testpub_tbl2_pkey" PRIMARY KEY, btree (id) +Publications: + "testpub_foralltables" + +\dRp+ testpub_foralltables + Publication testpub_foralltables + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | t | t | t | f | f | f +(1 row) + +DROP TABLE testpub_tbl2; +DROP PUBLICATION testpub_foralltables, testpub_fortable, testpub_forschema, testpub_for_tbl_schema; +CREATE TABLE testpub_tbl3 (a int); +CREATE TABLE testpub_tbl3a (b text) INHERITS (testpub_tbl3); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3; +CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3; +RESET client_min_messages; +\dRp+ testpub3 + Publication testpub3 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "public.testpub_tbl3" + "public.testpub_tbl3a" + +\dRp+ testpub4 + Publication testpub4 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via 
root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "public.testpub_tbl3" + +DROP TABLE testpub_tbl3, testpub_tbl3a; +DROP PUBLICATION testpub3, testpub4; +-- Tests for partitioned tables +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_forparted; +CREATE PUBLICATION testpub_forparted1; +RESET client_min_messages; +CREATE TABLE testpub_parted1 (LIKE testpub_parted); +CREATE TABLE testpub_parted2 (LIKE testpub_parted); +ALTER PUBLICATION testpub_forparted1 SET (publish='insert'); +ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted1 FOR VALUES IN (1); +ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted2 FOR VALUES IN (2); +-- works despite missing REPLICA IDENTITY, because updates are not replicated +UPDATE testpub_parted1 SET a = 1; +-- only parent is listed as being in publication, not the partition +ALTER PUBLICATION testpub_forparted ADD TABLE testpub_parted; +\dRp+ testpub_forparted + Publication testpub_forparted + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "public.testpub_parted" + +-- works despite missing REPLICA IDENTITY, because no actual update happened +UPDATE testpub_parted SET a = 1 WHERE false; +-- should now fail, because parent's publication replicates updates +UPDATE testpub_parted1 SET a = 1; +ERROR: cannot update table "testpub_parted1" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +ALTER TABLE testpub_parted DETACH PARTITION testpub_parted1; +-- works again, because parent's publication is no longer considered +UPDATE testpub_parted1 SET a = 1; +ALTER PUBLICATION testpub_forparted SET (publish_via_partition_root = true); +\dRp+ testpub_forparted + Publication testpub_forparted + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | t +Tables: + "public.testpub_parted" + +-- still fail, because parent's publication replicates updates +UPDATE testpub_parted2 SET a = 2; +ERROR: cannot update table "testpub_parted2" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +ALTER PUBLICATION testpub_forparted DROP TABLE testpub_parted; +-- works again, because update is no longer replicated +UPDATE testpub_parted2 SET a = 2; +DROP TABLE testpub_parted1, testpub_parted2; +DROP PUBLICATION testpub_forparted, testpub_forparted1; +-- Tests for row filters +CREATE TABLE testpub_rf_tbl1 (a integer, b text); +CREATE TABLE testpub_rf_tbl2 (c text, d integer); +CREATE TABLE testpub_rf_tbl3 (e integer); +CREATE TABLE testpub_rf_tbl4 (g text); +CREATE TABLE testpub_rf_tbl5 (a xml); +CREATE SCHEMA testpub_rf_schema1; +CREATE TABLE testpub_rf_schema1.testpub_rf_tbl5 (h integer); +CREATE SCHEMA testpub_rf_schema2; +CREATE TABLE testpub_rf_schema2.testpub_rf_tbl6 (i integer); +SET client_min_messages = 'ERROR'; +-- Firstly, test using the option publish='insert' because the row filter +-- validation of referenced columns is less strict than for delete/update. 
+CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) + +\d testpub_rf_tbl3 + Table "public.testpub_rf_tbl3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + e | integer | | | + +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl2" WHERE ((c <> 'test'::text) AND (d < 5)) + "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) + +\d testpub_rf_tbl3 + Table "public.testpub_rf_tbl3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + e | integer | | | +Publications: + "testpub5" WHERE ((e > 1000) AND (e < 2000)) + +ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl3" WHERE ((e > 1000) AND (e < 2000)) + +-- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); +\dRp+ testpub5 + Publication testpub5 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl3" WHERE ((e > 300) AND (e < 500)) + +\d testpub_rf_tbl3 + Table "public.testpub_rf_tbl3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + e | integer | | | +Publications: + "testpub5" WHERE ((e > 300) AND (e < 500)) + +-- test \d (now it displays filter information) +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); +CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1; +RESET client_min_messages; +\d testpub_rf_tbl1 + Table "public.testpub_rf_tbl1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | text | | | +Publications: + "testpub_rf_no" + "testpub_rf_yes" WHERE (a > 1) + +DROP PUBLICATION testpub_rf_yes, testpub_rf_no; +-- some more syntax tests to exercise other parser pathways +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub_syntax1 + Publication testpub_syntax1 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
+--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "public.testpub_rf_tbl3" WHERE (e < 999) + +DROP PUBLICATION testpub_syntax1; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); +RESET client_min_messages; +\dRp+ testpub_syntax2 + Publication testpub_syntax2 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | f | f +Tables: + "public.testpub_rf_tbl1" + "testpub_rf_schema1.testpub_rf_tbl5" WHERE (h < 999) + +DROP PUBLICATION testpub_syntax2; +-- fail - schemas don't allow WHERE clause +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a = 123); +ERROR: syntax error at or near "WHERE" +LINE 1: ...b_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1 WHERE (a =... + ^ +CREATE PUBLICATION testpub_syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf_schema1 WHERE (a = 123); +ERROR: WHERE clause not allowed for schema +LINE 1: ..._syntax3 FOR TABLES IN SCHEMA testpub_rf_schema1, testpub_rf... + ^ +RESET client_min_messages; +-- fail - duplicate tables are not allowed if that table has any WHERE clause +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1 WHERE (a = 1), testpub_rf_tbl1 WITH (publish = 'insert'); +ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" +CREATE PUBLICATION testpub_dups FOR TABLE testpub_rf_tbl1, testpub_rf_tbl1 WHERE (a = 2) WITH (publish = 'insert'); +ERROR: conflicting or redundant WHERE clauses for table "testpub_rf_tbl1" +RESET client_min_messages; +-- fail - publication WHERE clause must be boolean +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); +ERROR: argument of PUBLICATION WHERE must be type boolean, not type integer +LINE 1: ...PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (1234); + ^ +-- fail - aggregate functions not allowed in WHERE clause +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); +ERROR: aggregate functions are not allowed in WHERE +LINE 1: ...ATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e < AVG(e)); + ^ +-- fail - user-defined operators are not allowed +CREATE FUNCTION testpub_rf_func1(integer, integer) RETURNS boolean AS $$ SELECT hashint4($1) > $2 $$ LANGUAGE SQL; +CREATE OPERATOR =#> (PROCEDURE = testpub_rf_func1, LEFTARG = integer, RIGHTARG = integer); +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); +ERROR: invalid publication WHERE expression +LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl3 WHERE (e =#> 27); + ^ +DETAIL: User-defined operators are not allowed. +-- fail - user-defined functions are not allowed +CREATE FUNCTION testpub_rf_func2() RETURNS integer AS $$ BEGIN RETURN 123; END; $$ LANGUAGE plpgsql; +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf_func2()); +ERROR: invalid publication WHERE expression +LINE 1: ...ON testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a >= testpub_rf... + ^ +DETAIL: User-defined or built-in mutable functions are not allowed. +-- fail - non-immutable functions are not allowed. random() is volatile. 
+ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); +ERROR: invalid publication WHERE expression +LINE 1: ...ION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (a < random()); + ^ +DETAIL: User-defined or built-in mutable functions are not allowed. +-- fail - user-defined collations are not allowed +CREATE COLLATION user_collation FROM "C"; +ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' COLLATE user_collation); +ERROR: invalid publication WHERE expression +LINE 1: ...ICATION testpub5 ADD TABLE testpub_rf_tbl1 WHERE (b < '2' CO... + ^ +DETAIL: User-defined collations are not allowed. +-- ok - NULLIF is allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1,2) = a); +-- ok - built-in operators are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS NULL); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a > 5) IS FALSE); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IS DISTINCT FROM 5); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE ((a, a + 1) < (2, 3)); +-- ok - built-in type coercions between two binary compatible datatypes are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (b::varchar < '2'); +-- ok - immutable built-in functions are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl4 WHERE (length(g) < 6); +-- fail - user-defined types are not allowed +CREATE TYPE rf_bug_status AS ENUM ('new', 'open', 'closed'); +CREATE TABLE rf_bug (id serial, description text, status rf_bug_status); +CREATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = 'open') WITH (publish = 'insert'); +ERROR: invalid publication WHERE expression +LINE 1: ...EATE PUBLICATION testpub6 FOR TABLE rf_bug WHERE (status = '... + ^ +DETAIL: User-defined types are not allowed. +DROP TABLE rf_bug; +DROP TYPE rf_bug_status; +-- fail - row filter expression is not simple +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELECT generate_series(1,5))); +ERROR: invalid publication WHERE expression +LINE 1: ...ICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE (a IN (SELE... + ^ +DETAIL: Only columns, constants, built-in operators, built-in data types, built-in collations, and immutable built-in functions are allowed. +-- fail - system columns are not allowed +CREATE PUBLICATION testpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); +ERROR: invalid publication WHERE expression +LINE 1: ...tpub6 FOR TABLE testpub_rf_tbl1 WHERE ('(0,1)'::tid = ctid); + ^ +DETAIL: System columns are not allowed. 
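-- Editorial aside (not part of the expected regression output above): the
-- failures just shown all come from the same validation rule -- a row filter
-- may reference only the table's own non-system columns, using built-in,
-- immutable operators and functions; user-defined functions, operators,
-- types, collations, and sub-selects are rejected.  A minimal sketch with
-- hypothetical object names, following the file's own convention of
-- suppressing the wal_level warning:
SET client_min_messages = 'ERROR';
CREATE TABLE rf_demo (id int PRIMARY KEY, price int);
CREATE PUBLICATION rf_demo_pub FOR TABLE rf_demo WHERE (price > 100)
    WITH (publish = 'insert');   -- accepted: built-in ">" on a plain column
RESET client_min_messages;
-- By contrast, WHERE (price > random()) or a filter calling a user-defined
-- function would fail with "invalid publication WHERE expression".
DROP PUBLICATION rf_demo_pub;
DROP TABLE rf_demo;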
+-- ok - conditional expressions are allowed +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (a IS DOCUMENT); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl5 WHERE (xmlexists('//foo[text() = ''bar'']' PASSING BY VALUE a)); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (NULLIF(1, 2) = a); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (CASE a WHEN 5 THEN true ELSE false END); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (COALESCE(b, 'foo') = 'foo'); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (GREATEST(a, 10) > 10); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (a IN (2, 4, 6)); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ARRAY[a] <@ ARRAY[2, 4, 6]); +ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl1 WHERE (ROW(a, 2) IS NULL); +-- fail - WHERE not allowed in DROP +ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl1 WHERE (e < 27); +ERROR: cannot use a WHERE clause when removing a table from a publication +-- fail - cannot ALTER SET table which is a member of a pre-existing schema +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR TABLES IN SCHEMA testpub_rf_schema2; +-- should be able to set publication with schema and table of the same schema +ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); +RESET client_min_messages; +\dRp+ testpub6 + Publication testpub6 + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "testpub_rf_schema2.testpub_rf_tbl6" WHERE (i < 99) +Tables from schemas: + "testpub_rf_schema2" + +DROP TABLE testpub_rf_tbl1; +DROP TABLE testpub_rf_tbl2; +DROP TABLE testpub_rf_tbl3; +DROP TABLE testpub_rf_tbl4; +DROP TABLE testpub_rf_tbl5; +DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; +DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; +DROP SCHEMA testpub_rf_schema1; +DROP SCHEMA testpub_rf_schema2; +DROP PUBLICATION testpub5; +DROP PUBLICATION testpub6; +DROP OPERATOR =#>(integer, integer); +DROP FUNCTION testpub_rf_func1(integer, integer); +DROP FUNCTION testpub_rf_func2(); +DROP COLLATION user_collation; +-- ====================================================== +-- More row filter tests for validating column references +CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); +CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); +CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); +CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); +ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); +-- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) +-- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. 
+SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk WHERE (a > 99); +RESET client_min_messages; +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (b > 99); +-- ok - "b" is a PK col +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- fail - "c" is not part of the PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (d > 99); +-- fail - "d" is not part of the PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- 1b. REPLICA IDENTITY is DEFAULT and table has no PK +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not part of REPLICA IDENTITY +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- Case 2. REPLICA IDENTITY FULL +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- ok - "c" is in REPLICA IDENTITY now even though not in PK +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- ok - "a" is in REPLICA IDENTITY now +UPDATE rf_tbl_abcd_nopk SET a = 1; +-- Case 3. REPLICA IDENTITY NOTHING +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); +-- fail - "a" is in PK but it is not part of REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- fail - "c" is not in PK and not in REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not in REPLICA IDENTITY NOTHING +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- Case 4. REPLICA IDENTITY INDEX +ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c); +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c; +ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c); +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (a > 99); +-- fail - "a" is in PK but it is not part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. 
+ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk WHERE (c > 99); +-- ok - "c" is not in PK but it is part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (a > 99); +-- fail - "a" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk WHERE (c > 99); +-- ok - "c" is part of REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_nopk SET a = 1; +-- Tests for partitioned table +-- set PUBLISH_VIA_PARTITION_ROOT to false and test row filter for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - cannot use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); +ERROR: cannot use publication WHERE clause for relation "rf_tbl_abcd_part_pk" +DETAIL: WHERE clause cannot be used for a partitioned table when publish_via_partition_root is false. +-- ok - can use row filter for partition +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (a > 99); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- set PUBLISH_VIA_PARTITION_ROOT to true and test row filter for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (a > 99); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any row filter is +-- used for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +ERROR: cannot set parameter "publish_via_partition_root" to false for publication "testpub6" +DETAIL: The publication contains a WHERE clause for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false. +-- remove partitioned table's row filter +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk; +-- ok - we don't have row filter for partitioned table. +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- Now change the root filter to use a column "b" +-- (which is not in the replica identity) +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 WHERE (b > 99); +-- ok - we don't have row filter for partitioned table. +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - "b" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_part_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_part_pk_1" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. +-- set PUBLISH_VIA_PARTITION_ROOT to true +-- can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use row filter for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk WHERE (b > 99); +-- fail - "b" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_part_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_part_pk_1" +DETAIL: Column used in the publication WHERE expression is not part of the replica identity. 
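-- Editorial aside (not part of the expected regression output above): the
-- partitioned-table cases reduce to one rule -- a row filter may be attached
-- to the partitioned (root) table only while publish_via_partition_root is
-- true; with the option false, filters can only go on the leaf partitions,
-- and the option cannot be switched back to false while a root-level filter
-- exists.  A minimal sketch with hypothetical object names:
SET client_min_messages = 'ERROR';
CREATE TABLE pvpr_demo (a int PRIMARY KEY) PARTITION BY RANGE (a);
CREATE TABLE pvpr_demo_1 PARTITION OF pvpr_demo FOR VALUES FROM (1) TO (100);
CREATE PUBLICATION pvpr_demo_pub FOR TABLE pvpr_demo WHERE (a > 10)
    WITH (publish_via_partition_root = true);
RESET client_min_messages;
-- ALTER PUBLICATION pvpr_demo_pub SET (publish_via_partition_root = false)
-- would now fail, because the root table still carries a row filter.
DROP PUBLICATION pvpr_demo_pub;
DROP TABLE pvpr_demo;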
+DROP PUBLICATION testpub6; +DROP TABLE rf_tbl_abcd_pk; +DROP TABLE rf_tbl_abcd_nopk; +DROP TABLE rf_tbl_abcd_part_pk; +-- ====================================================== +-- fail - duplicate tables are not allowed if that table has any column lists +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1 (a), testpub_tbl1 WITH (publish = 'insert'); +ERROR: conflicting or redundant column lists for table "testpub_tbl1" +CREATE PUBLICATION testpub_dups FOR TABLE testpub_tbl1, testpub_tbl1 (a) WITH (publish = 'insert'); +ERROR: conflicting or redundant column lists for table "testpub_tbl1" +RESET client_min_messages; +-- test for column lists +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1; +CREATE PUBLICATION testpub_fortable_insert WITH (publish = 'insert'); +RESET client_min_messages; +CREATE TABLE testpub_tbl5 (a int PRIMARY KEY, b text, c text, + d int generated always as (a + length(b)) stored); +-- error: column "x" does not exist +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, x); +ERROR: column "x" of relation "testpub_tbl5" does not exist +-- error: replica identity "a" not included in the column list +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c); +UPDATE testpub_tbl5 SET a = 1; +ERROR: cannot update table "testpub_tbl5" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5; +-- error: generated column "d" can't be in list +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, d); +ERROR: cannot use generated column "d" in publication column list +-- error: system attributes "ctid" not allowed in column list +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, ctid); +ERROR: cannot use system column "ctid" in publication column list +-- ok +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c); +ALTER TABLE testpub_tbl5 DROP COLUMN c; -- no dice +ERROR: cannot drop column c of table testpub_tbl5 because other objects depend on it +DETAIL: publication of table testpub_tbl5 in publication testpub_fortable depends on column c of table testpub_tbl5 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- ok: for insert-only publication, any column list is acceptable +ALTER PUBLICATION testpub_fortable_insert ADD TABLE testpub_tbl5 (b, c); +/* not all replica identities are good enough */ +CREATE UNIQUE INDEX testpub_tbl5_b_key ON testpub_tbl5 (b, c); +ALTER TABLE testpub_tbl5 ALTER b SET NOT NULL, ALTER c SET NOT NULL; +ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key; +-- error: replica identity (b,c) is not covered by column list (a, c) +UPDATE testpub_tbl5 SET a = 1; +ERROR: cannot update table "testpub_tbl5" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl5; +-- error: change the replica identity to "b", and column list to (a, c) +-- then update fails, because (a, c) does not cover replica identity +ALTER TABLE testpub_tbl5 REPLICA IDENTITY USING INDEX testpub_tbl5_b_key; +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c); +UPDATE testpub_tbl5 SET a = 1; +ERROR: cannot update table "testpub_tbl5" +DETAIL: Column list used by the publication does not cover the replica identity. 
+/* But if upd/del are not published, it works OK */ +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_table_ins WITH (publish = 'insert, truncate'); +RESET client_min_messages; +ALTER PUBLICATION testpub_table_ins ADD TABLE testpub_tbl5 (a); -- ok +\dRp+ testpub_table_ins + Publication testpub_table_ins + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | f | f | t | f +Tables: + "public.testpub_tbl5" (a) + +-- tests with REPLICA IDENTITY FULL +CREATE TABLE testpub_tbl6 (a int, b text, c text); +ALTER TABLE testpub_tbl6 REPLICA IDENTITY FULL; +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6 (a, b, c); +UPDATE testpub_tbl6 SET a = 1; +ERROR: cannot update table "testpub_tbl6" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub_fortable DROP TABLE testpub_tbl6; +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl6; -- ok +UPDATE testpub_tbl6 SET a = 1; +-- make sure changing the column list is propagated to the catalog +CREATE TABLE testpub_tbl7 (a int primary key, b text, c text); +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl7 (a, b); +\d+ testpub_tbl7 + Table "public.testpub_tbl7" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | not null | | plain | | + b | text | | | | extended | | + c | text | | | | extended | | +Indexes: + "testpub_tbl7_pkey" PRIMARY KEY, btree (a) +Publications: + "testpub_fortable" (a, b) + +-- ok: the column list is the same, we should skip this table (or at least not fail) +ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, b); +\d+ testpub_tbl7 + Table "public.testpub_tbl7" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | not null | | plain | | + b | text | | | | extended | | + c | text | | | | extended | | +Indexes: + "testpub_tbl7_pkey" PRIMARY KEY, btree (a) +Publications: + "testpub_fortable" (a, b) + +-- ok: the column list changes, make sure the catalog gets updated +ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, c); +\d+ testpub_tbl7 + Table "public.testpub_tbl7" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+----------+--------------+------------- + a | integer | | not null | | plain | | + b | text | | | | extended | | + c | text | | | | extended | | +Indexes: + "testpub_tbl7_pkey" PRIMARY KEY, btree (a) +Publications: + "testpub_fortable" (a, c) + +-- column list for partitioned tables has to cover replica identities for +-- all child relations +CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a); +-- first partition has replica identity "a" +CREATE TABLE testpub_tbl8_0 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 0); +ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a); +ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey; +-- second partition has replica identity "b" +CREATE TABLE testpub_tbl8_1 PARTITION OF testpub_tbl8 FOR VALUES WITH (modulus 2, remainder 1); +ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (b); +ALTER TABLE testpub_tbl8_1 
REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; +-- ok: column list covers both "a" and "b" +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_col_list FOR TABLE testpub_tbl8 (a, b) WITH (publish_via_partition_root = 'true'); +RESET client_min_messages; +-- ok: the same thing, but try plain ADD TABLE +ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; +ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b); +UPDATE testpub_tbl8 SET a = 1; +-- failure: column list does not cover replica identity for the second partition +ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; +ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c); +UPDATE testpub_tbl8 SET a = 1; +ERROR: cannot update table "testpub_tbl8_1" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; +-- failure: one of the partitions has REPLICA IDENTITY FULL +ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL; +ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, c); +UPDATE testpub_tbl8 SET a = 1; +ERROR: cannot update table "testpub_tbl8_1" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub_col_list DROP TABLE testpub_tbl8; +-- add table and then try changing replica identity +ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; +ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b); +-- failure: replica identity full can't be used with a column list +ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY FULL; +UPDATE testpub_tbl8 SET a = 1; +ERROR: cannot update table "testpub_tbl8_1" +DETAIL: Column list used by the publication does not cover the replica identity. +-- failure: replica identity has to be covered by the column list +ALTER TABLE testpub_tbl8_1 DROP CONSTRAINT testpub_tbl8_1_pkey; +ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c); +ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; +UPDATE testpub_tbl8 SET a = 1; +ERROR: cannot update table "testpub_tbl8_1" +DETAIL: Column list used by the publication does not cover the replica identity. +DROP TABLE testpub_tbl8; +-- column list for partitioned tables has to cover replica identities for +-- all child relations +CREATE TABLE testpub_tbl8 (a int, b text, c text) PARTITION BY HASH (a); +ALTER PUBLICATION testpub_col_list ADD TABLE testpub_tbl8 (a, b); +-- first partition has replica identity "a" +CREATE TABLE testpub_tbl8_0 (a int, b text, c text); +ALTER TABLE testpub_tbl8_0 ADD PRIMARY KEY (a); +ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY USING INDEX testpub_tbl8_0_pkey; +-- second partition has replica identity "b" +CREATE TABLE testpub_tbl8_1 (a int, b text, c text); +ALTER TABLE testpub_tbl8_1 ADD PRIMARY KEY (c); +ALTER TABLE testpub_tbl8_1 REPLICA IDENTITY USING INDEX testpub_tbl8_1_pkey; +-- ok: attaching first partition works, because (a) is in column list +ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_0 FOR VALUES WITH (modulus 2, remainder 0); +-- failure: second partition has replica identity (c), which si not in column list +ALTER TABLE testpub_tbl8 ATTACH PARTITION testpub_tbl8_1 FOR VALUES WITH (modulus 2, remainder 1); +UPDATE testpub_tbl8 SET a = 1; +ERROR: cannot update table "testpub_tbl8_1" +DETAIL: Column list used by the publication does not cover the replica identity. 
+-- failure: changing replica identity to FULL for partition fails, because +-- of the column list on the parent +ALTER TABLE testpub_tbl8_0 REPLICA IDENTITY FULL; +UPDATE testpub_tbl8 SET a = 1; +ERROR: cannot update table "testpub_tbl8_0" +DETAIL: Column list used by the publication does not cover the replica identity. +-- test that using column list for table is disallowed if any schemas are +-- part of the publication +SET client_min_messages = 'ERROR'; +-- failure - cannot use column list and schema together +CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a); +ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" +DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. +-- ok - only publish schema +CREATE PUBLICATION testpub_tbl9 FOR TABLES IN SCHEMA public; +-- failure - add a table with column list when there is already a schema in the +-- publication +ALTER PUBLICATION testpub_tbl9 ADD TABLE public.testpub_tbl7(a); +ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" +DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. +-- ok - only publish table with column list +ALTER PUBLICATION testpub_tbl9 SET TABLE public.testpub_tbl7(a); +-- failure - specify a schema when there is already a column list in the +-- publication +ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public; +ERROR: cannot add schema to publication "testpub_tbl9" +DETAIL: Schemas cannot be added if any tables that specify a column list are already part of the publication. +-- failure - cannot SET column list and schema together +ALTER PUBLICATION testpub_tbl9 SET TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a); +ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" +DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. +-- ok - drop table +ALTER PUBLICATION testpub_tbl9 DROP TABLE public.testpub_tbl7; +-- failure - cannot ADD column list and schema together +ALTER PUBLICATION testpub_tbl9 ADD TABLES IN SCHEMA public, TABLE public.testpub_tbl7(a); +ERROR: cannot use column list for relation "public.testpub_tbl7" in publication "testpub_tbl9" +DETAIL: Column lists cannot be specified in publications containing FOR TABLES IN SCHEMA elements. 
+RESET client_min_messages; +DROP TABLE testpub_tbl5, testpub_tbl6, testpub_tbl7, testpub_tbl8, testpub_tbl8_1; +DROP PUBLICATION testpub_table_ins, testpub_fortable, testpub_fortable_insert, testpub_col_list, testpub_tbl9; +-- ====================================================== +-- Test combination of column list and row filter +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_both_filters; +RESET client_min_messages; +CREATE TABLE testpub_tbl_both_filters (a int, b int, c int, PRIMARY KEY (a,c)); +ALTER TABLE testpub_tbl_both_filters REPLICA IDENTITY USING INDEX testpub_tbl_both_filters_pkey; +ALTER PUBLICATION testpub_both_filters ADD TABLE testpub_tbl_both_filters (a,c) WHERE (c != 1); +\dRp+ testpub_both_filters + Publication testpub_both_filters + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "public.testpub_tbl_both_filters" (a, c) WHERE (c <> 1) + +\d+ testpub_tbl_both_filters + Table "public.testpub_tbl_both_filters" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | not null | | plain | | + b | integer | | | | plain | | + c | integer | | not null | | plain | | +Indexes: + "testpub_tbl_both_filters_pkey" PRIMARY KEY, btree (a, c) REPLICA IDENTITY +Publications: + "testpub_both_filters" (a, c) WHERE (c <> 1) + +DROP TABLE testpub_tbl_both_filters; +DROP PUBLICATION testpub_both_filters; +-- ====================================================== +-- More column list tests for validating column references +CREATE TABLE rf_tbl_abcd_nopk(a int, b int, c int, d int); +CREATE TABLE rf_tbl_abcd_pk(a int, b int, c int, d int, PRIMARY KEY(a,b)); +CREATE TABLE rf_tbl_abcd_part_pk (a int PRIMARY KEY, b int) PARTITION by RANGE (a); +CREATE TABLE rf_tbl_abcd_part_pk_1 (b int, a int PRIMARY KEY); +ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUES FROM (1) TO (10); +-- Case 1. REPLICA IDENTITY DEFAULT (means use primary key or nothing) +-- 1a. REPLICA IDENTITY is DEFAULT and table has a PK. +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b); +RESET client_min_messages; +-- ok - (a,b) coverts all PK cols +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c); +-- ok - (a,b,c) coverts all PK cols +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a); +-- fail - "b" is missing from the column list +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (b); +-- fail - "a" is missing from the column list +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column list used by the publication does not cover the replica identity. +-- 1b. 
REPLICA IDENTITY is DEFAULT and table has no PK +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a); +-- ok - there's no replica identity, so any column list works +-- note: it fails anyway, just a bit later because UPDATE requires RI +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- Case 2. REPLICA IDENTITY FULL +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY FULL; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY FULL; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c); +-- fail - with REPLICA IDENTITY FULL no column list is allowed +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a, b, c, d); +-- fail - with REPLICA IDENTITY FULL no column list is allowed +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column list used by the publication does not cover the replica identity. +-- Case 3. REPLICA IDENTITY NOTHING +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY NOTHING; +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY NOTHING; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a); +-- ok - REPLICA IDENTITY NOTHING means all column lists are valid +-- it still fails later because without RI we can't replicate updates +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c, d); +-- ok - REPLICA IDENTITY NOTHING means all column lists are valid +-- it still fails later because without RI we can't replicate updates +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (d); +-- ok - REPLICA IDENTITY NOTHING means all column lists are valid +-- it still fails later because without RI we can't replicate updates +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- Case 4. REPLICA IDENTITY INDEX +ALTER TABLE rf_tbl_abcd_pk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_pk_c ON rf_tbl_abcd_pk(c); +ALTER TABLE rf_tbl_abcd_pk REPLICA IDENTITY USING INDEX idx_abcd_pk_c; +ALTER TABLE rf_tbl_abcd_nopk ALTER COLUMN c SET NOT NULL; +CREATE UNIQUE INDEX idx_abcd_nopk_c ON rf_tbl_abcd_nopk(c); +ALTER TABLE rf_tbl_abcd_nopk REPLICA IDENTITY USING INDEX idx_abcd_nopk_c; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a); +-- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c" +UPDATE rf_tbl_abcd_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_pk" +DETAIL: Column list used by the publication does not cover the replica identity. 
+ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (c); +-- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c" +UPDATE rf_tbl_abcd_pk SET a = 1; +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (a); +-- fail - column list "a" does not cover the REPLICA IDENTITY INDEX on "c" +UPDATE rf_tbl_abcd_nopk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_nopk" +DETAIL: Column list used by the publication does not cover the replica identity. +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_nopk (c); +-- ok - column list "c" does cover the REPLICA IDENTITY INDEX on "c" +UPDATE rf_tbl_abcd_nopk SET a = 1; +-- Tests for partitioned table +-- set PUBLISH_VIA_PARTITION_ROOT to false and test column list for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - cannot use column list for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a); +ERROR: cannot use column list for relation "public.rf_tbl_abcd_part_pk" in publication "testpub6" +DETAIL: Column lists cannot be specified for partitioned tables when publish_via_partition_root is false. +-- ok - can use column list for partition +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (a); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- set PUBLISH_VIA_PARTITION_ROOT to true and test column list for partitioned +-- table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use column list for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (a); +-- ok - "a" is a PK col +UPDATE rf_tbl_abcd_part_pk SET a = 1; +-- fail - cannot set PUBLISH_VIA_PARTITION_ROOT to false if any column list is +-- used for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +ERROR: cannot set parameter "publish_via_partition_root" to false for publication "testpub6" +DETAIL: The publication contains a column list for partitioned table "rf_tbl_abcd_part_pk", which is not allowed when "publish_via_partition_root" is false. +-- remove partitioned table's column list +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk; +-- ok - we don't have column list for partitioned table. +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- Now change the root column list to use a column "b" +-- (which is not in the replica identity) +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk_1 (b); +-- ok - we don't have column list for partitioned table. +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=0); +-- fail - "b" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_part_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_part_pk_1" +DETAIL: Column list used by the publication does not cover the replica identity. +-- set PUBLISH_VIA_PARTITION_ROOT to true +-- can use column list for partitioned table +ALTER PUBLICATION testpub6 SET (PUBLISH_VIA_PARTITION_ROOT=1); +-- ok - can use column list for partitioned table +ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_part_pk (b); +-- fail - "b" is not in REPLICA IDENTITY INDEX +UPDATE rf_tbl_abcd_part_pk SET a = 1; +ERROR: cannot update table "rf_tbl_abcd_part_pk_1" +DETAIL: Column list used by the publication does not cover the replica identity. 
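The column-list cases above hinge on two catalog-visible settings: the per-table column list and the publication's publish_via_partition_root flag. The following is a brief sketch, an editorial aside rather than part of the upstream expected output, assuming the testpub6 publication created above still exists:
-- Sketch: show the publish_via_partition_root flag and the per-table column
-- lists that the preceding ALTER PUBLICATION commands produced.
SELECT pubname, pubviaroot
  FROM pg_publication
 WHERE pubname = 'testpub6';
SELECT pubname, schemaname, tablename, attnames
  FROM pg_publication_tables
 WHERE pubname = 'testpub6';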
+DROP PUBLICATION testpub6; +DROP TABLE rf_tbl_abcd_pk; +DROP TABLE rf_tbl_abcd_nopk; +DROP TABLE rf_tbl_abcd_part_pk; +-- ====================================================== +-- Test cache invalidation FOR ALL TABLES publication +SET client_min_messages = 'ERROR'; +CREATE TABLE testpub_tbl4(a int); +INSERT INTO testpub_tbl4 values(1); +UPDATE testpub_tbl4 set a = 2; +CREATE PUBLICATION testpub_foralltables FOR ALL TABLES; +RESET client_min_messages; +-- fail missing REPLICA IDENTITY +UPDATE testpub_tbl4 set a = 3; +ERROR: cannot update table "testpub_tbl4" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +DROP PUBLICATION testpub_foralltables; +-- should pass after dropping the publication +UPDATE testpub_tbl4 set a = 3; +DROP TABLE testpub_tbl4; +-- fail - view +CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_view; +ERROR: cannot add relation "testpub_view" to publication +DETAIL: This operation is not supported for views. +CREATE TEMPORARY TABLE testpub_temptbl(a int); +-- fail - temporary table +CREATE PUBLICATION testpub_fortemptbl FOR TABLE testpub_temptbl; +ERROR: cannot add relation "testpub_temptbl" to publication +DETAIL: This operation is not supported for temporary tables. +DROP TABLE testpub_temptbl; +CREATE UNLOGGED TABLE testpub_unloggedtbl(a int); +-- fail - unlogged table +CREATE PUBLICATION testpub_forunloggedtbl FOR TABLE testpub_unloggedtbl; +ERROR: cannot add relation "testpub_unloggedtbl" to publication +DETAIL: This operation is not supported for unlogged tables. +DROP TABLE testpub_unloggedtbl; +-- fail - system table +CREATE PUBLICATION testpub_forsystemtbl FOR TABLE pg_publication; +ERROR: cannot add relation "pg_publication" to publication +DETAIL: This operation is not supported for system tables. +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1, pub_test.testpub_nopk; +RESET client_min_messages; +-- fail - already added +ALTER PUBLICATION testpub_fortbl ADD TABLE testpub_tbl1; +ERROR: relation "testpub_tbl1" is already member of publication "testpub_fortbl" +-- fail - already added +CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1; +ERROR: publication "testpub_fortbl" already exists +\dRp+ testpub_fortbl + Publication testpub_fortbl + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "pub_test.testpub_nopk" + "public.testpub_tbl1" + +-- fail - view +ALTER PUBLICATION testpub_default ADD TABLE testpub_view; +ERROR: cannot add relation "testpub_view" to publication +DETAIL: This operation is not supported for views. 
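The "not supported" errors above are driven by the relation's kind and persistence, which are checked when a relation is added to a publication. A small sketch (an editorial aside, not part of the test; the table names are the ones used above) shows where those properties live:
-- Sketch: relkind ('r' = ordinary table, 'v' = view) and relpersistence
-- ('p' = permanent, 'u' = unlogged, 't' = temporary) determine whether a
-- relation can be published.
SELECT relname, relkind, relpersistence
  FROM pg_class
 WHERE relname IN ('testpub_tbl1', 'pg_publication');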
+ALTER PUBLICATION testpub_default ADD TABLE testpub_tbl1; +ALTER PUBLICATION testpub_default SET TABLE testpub_tbl1; +ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_nopk; +ALTER PUBLICATION testpib_ins_trunct ADD TABLE pub_test.testpub_nopk, testpub_tbl1; +\d+ pub_test.testpub_nopk + Table "pub_test.testpub_nopk" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + foo | integer | | | | plain | | + bar | integer | | | | plain | | +Publications: + "testpib_ins_trunct" + "testpub_default" + "testpub_fortbl" + +\d+ testpub_tbl1 + Table "public.testpub_tbl1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+------------------------------------------+----------+--------------+------------- + id | integer | | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain | | + data | text | | | | extended | | +Indexes: + "testpub_tbl1_pkey" PRIMARY KEY, btree (id) +Publications: + "testpib_ins_trunct" + "testpub_default" + "testpub_fortbl" + +\dRp+ testpub_default + Publication testpub_default + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | f | f +Tables: + "pub_test.testpub_nopk" + "public.testpub_tbl1" + +ALTER PUBLICATION testpub_default DROP TABLE testpub_tbl1, pub_test.testpub_nopk; +-- fail - nonexistent +ALTER PUBLICATION testpub_default DROP TABLE pub_test.testpub_nopk; +ERROR: relation "testpub_nopk" is not part of the publication +\d+ testpub_tbl1 + Table "public.testpub_tbl1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+------------------------------------------+----------+--------------+------------- + id | integer | | not null | nextval('testpub_tbl1_id_seq'::regclass) | plain | | + data | text | | | | extended | | +Indexes: + "testpub_tbl1_pkey" PRIMARY KEY, btree (id) +Publications: + "testpib_ins_trunct" + "testpub_fortbl" + +-- verify relation cache invalidation when a primary key is added using +-- an existing index +CREATE TABLE pub_test.testpub_addpk (id int not null, data int); +ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_addpk; +INSERT INTO pub_test.testpub_addpk VALUES(1, 11); +CREATE UNIQUE INDEX testpub_addpk_id_idx ON pub_test.testpub_addpk(id); +-- fail: +UPDATE pub_test.testpub_addpk SET id = 2; +ERROR: cannot update table "testpub_addpk" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. 
+ALTER TABLE pub_test.testpub_addpk ADD PRIMARY KEY USING INDEX testpub_addpk_id_idx; +-- now it should work: +UPDATE pub_test.testpub_addpk SET id = 2; +DROP TABLE pub_test.testpub_addpk; +-- permissions +SET ROLE regress_publication_user2; +CREATE PUBLICATION testpub2; -- fail +ERROR: permission denied for database regression +SET ROLE regress_publication_user; +GRANT CREATE ON DATABASE regression TO regress_publication_user2; +SET ROLE regress_publication_user2; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub2; -- ok +CREATE PUBLICATION testpub3 FOR TABLES IN SCHEMA pub_test; -- fail +ERROR: must be superuser to create FOR TABLES IN SCHEMA publication +CREATE PUBLICATION testpub3; -- ok +RESET client_min_messages; +ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- fail +ERROR: must be owner of table testpub_tbl1 +ALTER PUBLICATION testpub3 ADD TABLES IN SCHEMA pub_test; -- fail +ERROR: must be superuser to add or set schemas +SET ROLE regress_publication_user; +GRANT regress_publication_user TO regress_publication_user2; +SET ROLE regress_publication_user2; +ALTER PUBLICATION testpub2 ADD TABLE testpub_tbl1; -- ok +DROP PUBLICATION testpub2; +DROP PUBLICATION testpub3; +SET ROLE regress_publication_user; +CREATE ROLE regress_publication_user3; +GRANT regress_publication_user2 TO regress_publication_user3; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub4 FOR TABLES IN SCHEMA pub_test; +RESET client_min_messages; +ALTER PUBLICATION testpub4 OWNER TO regress_publication_user3; +SET ROLE regress_publication_user3; +-- fail - new owner must be superuser +ALTER PUBLICATION testpub4 owner to regress_publication_user2; -- fail +ERROR: permission denied to change owner of publication "testpub4" +HINT: The owner of a FOR TABLES IN SCHEMA publication must be a superuser. 
+ALTER PUBLICATION testpub4 owner to regress_publication_user; -- ok +SET ROLE regress_publication_user; +DROP PUBLICATION testpub4; +DROP ROLE regress_publication_user3; +REVOKE CREATE ON DATABASE regression FROM regress_publication_user2; +DROP TABLE testpub_parted; +DROP TABLE testpub_tbl1; +\dRp+ testpub_default + Publication testpub_default + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | f | f +(1 row) + +-- fail - must be owner of publication +SET ROLE regress_publication_user_dummy; +ALTER PUBLICATION testpub_default RENAME TO testpub_dummy; +ERROR: must be owner of publication testpub_default +RESET ROLE; +ALTER PUBLICATION testpub_default RENAME TO testpub_foo; +\dRp testpub_foo + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +-------------+--------------------------+------------+---------+---------+---------+-----------+---------- + testpub_foo | regress_publication_user | f | t | t | t | f | f +(1 row) + +-- rename back to keep the rest simple +ALTER PUBLICATION testpub_foo RENAME TO testpub_default; +ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2; +\dRp testpub_default + List of publications + Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +-----------------+---------------------------+------------+---------+---------+---------+-----------+---------- + testpub_default | regress_publication_user2 | f | t | t | t | f | f +(1 row) + +-- adding schemas and tables +CREATE SCHEMA pub_test1; +CREATE SCHEMA pub_test2; +CREATE SCHEMA pub_test3; +CREATE SCHEMA "CURRENT_SCHEMA"; +CREATE TABLE pub_test1.tbl (id int, data text); +CREATE TABLE pub_test1.tbl1 (id serial primary key, data text); +CREATE TABLE pub_test2.tbl1 (id serial primary key, data text); +CREATE TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int); +-- suppress warning that depends on wal_level +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub1_forschema FOR TABLES IN SCHEMA pub_test1; +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + +CREATE PUBLICATION testpub2_forschema FOR TABLES IN SCHEMA pub_test1, pub_test2, pub_test3; +\dRp+ testpub2_forschema + Publication testpub2_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + "pub_test3" + +-- check create publication on CURRENT_SCHEMA +CREATE PUBLICATION testpub3_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA; +CREATE PUBLICATION testpub4_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA"; +CREATE PUBLICATION testpub5_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA, "CURRENT_SCHEMA"; +CREATE PUBLICATION testpub6_forschema FOR TABLES IN SCHEMA "CURRENT_SCHEMA", CURRENT_SCHEMA; +CREATE PUBLICATION testpub_fortable FOR TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"; +RESET client_min_messages; +\dRp+ testpub3_forschema + Publication testpub3_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root 
+--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "public" + +\dRp+ testpub4_forschema + Publication testpub4_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "CURRENT_SCHEMA" + +\dRp+ testpub5_forschema + Publication testpub5_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "CURRENT_SCHEMA" + "public" + +\dRp+ testpub6_forschema + Publication testpub6_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "CURRENT_SCHEMA" + "public" + +\dRp+ testpub_fortable + Publication testpub_fortable + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "CURRENT_SCHEMA.CURRENT_SCHEMA" + +-- check create publication on CURRENT_SCHEMA where search_path is not set +SET SEARCH_PATH=''; +CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA; +ERROR: no schema has been selected for CURRENT_SCHEMA +RESET SEARCH_PATH; +-- check create publication on CURRENT_SCHEMA where TABLE/TABLES in SCHEMA +-- is not specified +CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA; +ERROR: invalid publication object list +LINE 1: CREATE PUBLICATION testpub_forschema1 FOR CURRENT_SCHEMA; + ^ +DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name. +-- check create publication on CURRENT_SCHEMA along with FOR TABLE +CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHEMA; +ERROR: syntax error at or near "CURRENT_SCHEMA" +LINE 1: CREATE PUBLICATION testpub_forschema1 FOR TABLE CURRENT_SCHE... + ^ +-- check create publication on a schema that does not exist +CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA non_existent_schema; +ERROR: schema "non_existent_schema" does not exist +-- check create publication on a system schema +CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pg_catalog; +ERROR: cannot add schema "pg_catalog" to publication +DETAIL: This operation is not supported for system schemas. 
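CURRENT_SCHEMA in the statements above is resolved through search_path, which is why the CREATE PUBLICATION fails once search_path is emptied. A minimal sketch of that resolution (an editorial aside, not part of the expected output):
-- Sketch: current_schema() is NULL when search_path selects no schema, so
-- there is nothing for FOR TABLES IN SCHEMA CURRENT_SCHEMA to resolve to.
SET search_path = '';
SELECT current_schema() IS NULL AS no_current_schema;
RESET search_path;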
+-- check create publication on an object which is not schema +CREATE PUBLICATION testpub1_forschema1 FOR TABLES IN SCHEMA testpub_view; +ERROR: schema "testpub_view" does not exist +-- dropping the schema should reflect the change in publication +DROP SCHEMA pub_test3; +\dRp+ testpub2_forschema + Publication testpub2_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- renaming the schema should reflect the change in publication +ALTER SCHEMA pub_test1 RENAME to pub_test1_renamed; +\dRp+ testpub2_forschema + Publication testpub2_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1_renamed" + "pub_test2" + +ALTER SCHEMA pub_test1_renamed RENAME to pub_test1; +\dRp+ testpub2_forschema + Publication testpub2_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- alter publication add schema +ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test2; +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- add non existent schema +ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA non_existent_schema; +ERROR: schema "non_existent_schema" does not exist +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- add a schema which is already added to the publication +ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test1; +ERROR: schema "pub_test1" is already member of publication "testpub1_forschema" +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- alter publication drop schema +ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + +-- drop schema that is not present in the publication +ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; +ERROR: tables from schema "pub_test2" are not part of the publication +\dRp+ testpub1_forschema + 
Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + +-- drop a schema that does not exist in the system +ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA non_existent_schema; +ERROR: schema "non_existent_schema" does not exist +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + +-- drop all schemas +ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1; +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +(1 row) + +-- alter publication set multiple schema +ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test2; +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- alter publication set non-existent schema +ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA non_existent_schema; +ERROR: schema "non_existent_schema" does not exist +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + "pub_test2" + +-- alter publication set it duplicate schemas should set the schemas after +-- removing the duplicate schemas +ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test1; +\dRp+ testpub1_forschema + Publication testpub1_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + +-- Verify that it fails to add a schema with a column specification +ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b); +ERROR: syntax error at or near "(" +LINE 1: ...LICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b); + ^ +ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b); +ERROR: column specification not allowed for schema +LINE 1: ...TION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b)... 
+ ^ +-- cleanup pub_test1 schema for invalidation tests +ALTER PUBLICATION testpub2_forschema DROP TABLES IN SCHEMA pub_test1; +DROP PUBLICATION testpub3_forschema, testpub4_forschema, testpub5_forschema, testpub6_forschema, testpub_fortable; +DROP SCHEMA "CURRENT_SCHEMA" CASCADE; +NOTICE: drop cascades to table "CURRENT_SCHEMA"."CURRENT_SCHEMA" +-- verify relation cache invalidations through update statement for the +-- default REPLICA IDENTITY on the relation, if schema is part of the +-- publication then update will fail because relation's relreplident +-- option will be set, if schema is not part of the publication then update +-- will be successful. +INSERT INTO pub_test1.tbl VALUES(1, 'test'); +-- fail +UPDATE pub_test1.tbl SET id = 2; +ERROR: cannot update table "tbl" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1; +-- success +UPDATE pub_test1.tbl SET id = 2; +ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1; +-- fail +UPDATE pub_test1.tbl SET id = 2; +ERROR: cannot update table "tbl" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- verify invalidation of partition table having parent and child tables in +-- different schema +CREATE SCHEMA pub_testpart1; +CREATE SCHEMA pub_testpart2; +CREATE TABLE pub_testpart1.parent1 (a int) partition by list (a); +CREATE TABLE pub_testpart2.child_parent1 partition of pub_testpart1.parent1 for values in (1); +INSERT INTO pub_testpart2.child_parent1 values(1); +UPDATE pub_testpart2.child_parent1 set a = 1; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart1; +RESET client_min_messages; +-- fail +UPDATE pub_testpart1.parent1 set a = 1; +ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +UPDATE pub_testpart2.child_parent1 set a = 1; +ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +DROP PUBLICATION testpubpart_forschema; +-- verify invalidation of partition tables for schema publication that has +-- parent and child tables of different partition hierarchies +CREATE TABLE pub_testpart2.parent2 (a int) partition by list (a); +CREATE TABLE pub_testpart1.child_parent2 partition of pub_testpart2.parent2 for values in (1); +INSERT INTO pub_testpart1.child_parent2 values(1); +UPDATE pub_testpart1.child_parent2 set a = 1; +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpubpart_forschema FOR TABLES IN SCHEMA pub_testpart2; +RESET client_min_messages; +-- fail +UPDATE pub_testpart2.child_parent1 set a = 1; +ERROR: cannot update table "child_parent1" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +UPDATE pub_testpart2.parent2 set a = 1; +ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. 
+UPDATE pub_testpart1.child_parent2 set a = 1; +ERROR: cannot update table "child_parent2" because it does not have a replica identity and publishes updates +HINT: To enable updating the table, set REPLICA IDENTITY using ALTER TABLE. +-- alter publication set 'TABLES IN SCHEMA' on an empty publication. +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub3_forschema; +RESET client_min_messages; +\dRp+ testpub3_forschema + Publication testpub3_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +(1 row) + +ALTER PUBLICATION testpub3_forschema SET TABLES IN SCHEMA pub_test1; +\dRp+ testpub3_forschema + Publication testpub3_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables from schemas: + "pub_test1" + +-- create publication including both 'FOR TABLE' and 'FOR TABLES IN SCHEMA' +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION testpub_forschema_fortable FOR TABLES IN SCHEMA pub_test1, TABLE pub_test2.tbl1; +CREATE PUBLICATION testpub_fortable_forschema FOR TABLE pub_test2.tbl1, TABLES IN SCHEMA pub_test1; +RESET client_min_messages; +\dRp+ testpub_forschema_fortable + Publication testpub_forschema_fortable + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "pub_test2.tbl1" +Tables from schemas: + "pub_test1" + +\dRp+ testpub_fortable_forschema + Publication testpub_fortable_forschema + Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root +--------------------------+------------+---------+---------+---------+-----------+---------- + regress_publication_user | f | t | t | t | t | f +Tables: + "pub_test2.tbl1" +Tables from schemas: + "pub_test1" + +-- fail specifying table without any of 'FOR TABLES IN SCHEMA' or +--'FOR TABLE' or 'FOR ALL TABLES' +CREATE PUBLICATION testpub_error FOR pub_test2.tbl1; +ERROR: invalid publication object list +LINE 1: CREATE PUBLICATION testpub_error FOR pub_test2.tbl1; + ^ +DETAIL: One of TABLE or TABLES IN SCHEMA must be specified before a standalone table or schema name. 
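Publications that combine FOR TABLE and FOR TABLES IN SCHEMA keep the two membership kinds separate in the catalogs, but pg_publication_tables flattens both into the concrete list of published tables. A short sketch (an editorial aside, not part of the expected output; it uses the testpub_forschema_fortable publication created above):
-- Sketch: both the explicitly listed table and every table in the added
-- schema appear in the flattened view.
SELECT pubname, schemaname, tablename
  FROM pg_publication_tables
 WHERE pubname = 'testpub_forschema_fortable'
 ORDER BY schemaname, tablename;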
+DROP VIEW testpub_view; +DROP PUBLICATION testpub_default; +DROP PUBLICATION testpib_ins_trunct; +DROP PUBLICATION testpub_fortbl; +DROP PUBLICATION testpub1_forschema; +DROP PUBLICATION testpub2_forschema; +DROP PUBLICATION testpub3_forschema; +DROP PUBLICATION testpub_forschema_fortable; +DROP PUBLICATION testpub_fortable_forschema; +DROP PUBLICATION testpubpart_forschema; +DROP SCHEMA pub_test CASCADE; +NOTICE: drop cascades to table pub_test.testpub_nopk +DROP SCHEMA pub_test1 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table pub_test1.tbl +drop cascades to table pub_test1.tbl1 +DROP SCHEMA pub_test2 CASCADE; +NOTICE: drop cascades to table pub_test2.tbl1 +DROP SCHEMA pub_testpart1 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table pub_testpart1.parent1 +drop cascades to table pub_testpart1.child_parent2 +DROP SCHEMA pub_testpart2 CASCADE; +NOTICE: drop cascades to table pub_testpart2.parent2 +-- Test the list of partitions published with or without +-- 'PUBLISH_VIA_PARTITION_ROOT' parameter +SET client_min_messages = 'ERROR'; +CREATE SCHEMA sch1; +CREATE SCHEMA sch2; +CREATE TABLE sch1.tbl1 (a int) PARTITION BY RANGE(a); +CREATE TABLE sch2.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10); +-- Schema publication that does not include the schema that has the parent table +CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=1); +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+------------+----------+----------- + pub | sch2 | tbl1_part1 | {a} | +(1 row) + +DROP PUBLICATION pub; +-- Table publication that does not include the parent table +CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=1); +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+------------+----------+----------- + pub | sch2 | tbl1_part1 | {a} | +(1 row) + +-- Table publication that includes both the parent table and the child table +ALTER PUBLICATION pub ADD TABLE sch1.tbl1; +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+-----------+----------+----------- + pub | sch1 | tbl1 | {a} | +(1 row) + +DROP PUBLICATION pub; +-- Schema publication that does not include the schema that has the parent table +CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch2 WITH (PUBLISH_VIA_PARTITION_ROOT=0); +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+------------+----------+----------- + pub | sch2 | tbl1_part1 | {a} | +(1 row) + +DROP PUBLICATION pub; +-- Table publication that does not include the parent table +CREATE PUBLICATION pub FOR TABLE sch2.tbl1_part1 WITH (PUBLISH_VIA_PARTITION_ROOT=0); +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+------------+----------+----------- + pub | sch2 | tbl1_part1 | {a} | +(1 row) + +-- Table publication that includes both the parent table and the child table +ALTER PUBLICATION pub ADD TABLE sch1.tbl1; +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+------------+----------+----------- + pub | sch2 | tbl1_part1 | {a} | +(1 row) + +DROP PUBLICATION pub; +DROP TABLE sch2.tbl1_part1; +DROP TABLE sch1.tbl1; +CREATE TABLE sch1.tbl1 (a int) PARTITION 
BY RANGE(a); +CREATE TABLE sch1.tbl1_part1 PARTITION OF sch1.tbl1 FOR VALUES FROM (1) to (10); +CREATE TABLE sch1.tbl1_part2 PARTITION OF sch1.tbl1 FOR VALUES FROM (10) to (20); +CREATE TABLE sch1.tbl1_part3 (a int) PARTITION BY RANGE(a); +ALTER TABLE sch1.tbl1 ATTACH PARTITION sch1.tbl1_part3 FOR VALUES FROM (20) to (30); +CREATE PUBLICATION pub FOR TABLES IN SCHEMA sch1 WITH (PUBLISH_VIA_PARTITION_ROOT=1); +SELECT * FROM pg_publication_tables; + pubname | schemaname | tablename | attnames | rowfilter +---------+------------+-----------+----------+----------- + pub | sch1 | tbl1 | {a} | +(1 row) + +RESET client_min_messages; +DROP PUBLICATION pub; +DROP TABLE sch1.tbl1; +DROP SCHEMA sch1 cascade; +DROP SCHEMA sch2 cascade; +RESET SESSION AUTHORIZATION; +DROP ROLE regress_publication_user, regress_publication_user2; +DROP ROLE regress_publication_user_dummy; diff --git a/src/test/regress/expected/random.out b/src/test/regress/expected/random.out new file mode 100644 index 0000000..2235907 --- /dev/null +++ b/src/test/regress/expected/random.out @@ -0,0 +1,178 @@ +-- +-- RANDOM +-- Test random() and allies +-- +-- Tests in this file may have a small probability of failure, +-- since we are dealing with randomness. Try to keep the failure +-- risk for any one test case under 1e-9. +-- +-- There should be no duplicates in 1000 random() values. +-- (Assuming 52 random bits in the float8 results, we could +-- take as many as 3000 values and still have less than 1e-9 chance +-- of failure, per https://en.wikipedia.org/wiki/Birthday_problem) +SELECT r, count(*) +FROM (SELECT random() r FROM generate_series(1, 1000)) ss +GROUP BY r HAVING count(*) > 1; + r | count +---+------- +(0 rows) + +-- The range should be [0, 1). We can expect that at least one out of 2000 +-- random values is in the lowest or highest 1% of the range with failure +-- probability less than about 1e-9. +SELECT count(*) FILTER (WHERE r < 0 OR r >= 1) AS out_of_range, + (count(*) FILTER (WHERE r < 0.01)) > 0 AS has_small, + (count(*) FILTER (WHERE r > 0.99)) > 0 AS has_large +FROM (SELECT random() r FROM generate_series(1, 2000)) ss; + out_of_range | has_small | has_large +--------------+-----------+----------- + 0 | t | t +(1 row) + +-- Check for uniform distribution using the Kolmogorov-Smirnov test. +CREATE FUNCTION ks_test_uniform_random() +RETURNS boolean AS +$$ +DECLARE + n int := 1000; -- Number of samples + c float8 := 1.94947; -- Critical value for 99.9% confidence + ok boolean; +BEGIN + ok := ( + WITH samples AS ( + SELECT random() r FROM generate_series(1, n) ORDER BY 1 + ), indexed_samples AS ( + SELECT (row_number() OVER())-1.0 i, r FROM samples + ) + SELECT max(abs(i/n-r)) < c / sqrt(n) FROM indexed_samples + ); + RETURN ok; +END +$$ +LANGUAGE plpgsql; +-- As written, ks_test_uniform_random() returns true about 99.9% +-- of the time. To get down to a roughly 1e-9 test failure rate, +-- just run it 3 times and accept if any one of them passes. +SELECT ks_test_uniform_random() OR + ks_test_uniform_random() OR + ks_test_uniform_random() AS uniform; + uniform +--------- + t +(1 row) + +-- now test random_normal() +-- As above, there should be no duplicates in 1000 random_normal() values. +SELECT r, count(*) +FROM (SELECT random_normal() r FROM generate_series(1, 1000)) ss +GROUP BY r HAVING count(*) > 1; + r | count +---+------- +(0 rows) + +-- ... unless we force the range (standard deviation) to zero. +-- This is a good place to check that the mean input does something, too. 
+SELECT r, count(*) +FROM (SELECT random_normal(10, 0) r FROM generate_series(1, 100)) ss +GROUP BY r; + r | count +----+------- + 10 | 100 +(1 row) + +SELECT r, count(*) +FROM (SELECT random_normal(-10, 0) r FROM generate_series(1, 100)) ss +GROUP BY r; + r | count +-----+------- + -10 | 100 +(1 row) + +-- Check standard normal distribution using the Kolmogorov-Smirnov test. +CREATE FUNCTION ks_test_normal_random() +RETURNS boolean AS +$$ +DECLARE + n int := 1000; -- Number of samples + c float8 := 1.94947; -- Critical value for 99.9% confidence + ok boolean; +BEGIN + ok := ( + WITH samples AS ( + SELECT random_normal() r FROM generate_series(1, n) ORDER BY 1 + ), indexed_samples AS ( + SELECT (row_number() OVER())-1.0 i, r FROM samples + ) + SELECT max(abs((1+erf(r/sqrt(2)))/2 - i/n)) < c / sqrt(n) + FROM indexed_samples + ); + RETURN ok; +END +$$ +LANGUAGE plpgsql; +-- As above, ks_test_normal_random() returns true about 99.9% +-- of the time, so try it 3 times and accept if any test passes. +SELECT ks_test_normal_random() OR + ks_test_normal_random() OR + ks_test_normal_random() AS standard_normal; + standard_normal +----------------- + t +(1 row) + +-- setseed() should produce a reproducible series of random() values. +SELECT setseed(0.5); + setseed +--------- + +(1 row) + +SELECT random() FROM generate_series(1, 10); + random +--------------------- + 0.9851677175347999 + 0.825301858027981 + 0.12974610012450416 + 0.16356291958601088 + 0.6476186144084 + 0.8822771983038762 + 0.1404566845227775 + 0.15619865764623442 + 0.5145227426983392 + 0.7712969548127826 +(10 rows) + +-- Likewise for random_normal(); however, since its implementation relies +-- on libm functions that have different roundoff behaviors on different +-- machines, we have to round off the results a bit to get consistent output. +SET extra_float_digits = -1; +SELECT random_normal() FROM generate_series(1, 10); + random_normal +------------------- + 0.20853464493838 + 0.26453024054096 + -0.60675246790043 + 0.82579942785265 + 1.7011161173536 + -0.22344546371619 + 0.249712419191 + -1.2494722990669 + 0.12562715204368 + 0.47539161454401 +(10 rows) + +SELECT random_normal(mean => 1, stddev => 0.1) r FROM generate_series(1, 10); + r +------------------ + 1.0060597281173 + 1.09685453015 + 1.0286920613201 + 0.90947567671234 + 0.98372476313426 + 0.93934454957762 + 1.1871350020636 + 0.96225768429293 + 0.91444120680041 + 0.96403105557543 +(10 rows) + diff --git a/src/test/regress/expected/rangefuncs.out b/src/test/regress/expected/rangefuncs.out new file mode 100644 index 0000000..fbb840e --- /dev/null +++ b/src/test/regress/expected/rangefuncs.out @@ -0,0 +1,2487 @@ +CREATE TABLE rngfunc2(rngfuncid int, f2 int); +INSERT INTO rngfunc2 VALUES(1, 11); +INSERT INTO rngfunc2 VALUES(2, 22); +INSERT INTO rngfunc2 VALUES(1, 111); +CREATE FUNCTION rngfunct(int) returns setof rngfunc2 as 'SELECT * FROM rngfunc2 WHERE rngfuncid = $1 ORDER BY f2;' LANGUAGE SQL; +-- function with ORDINALITY +select * from rngfunct(1) with ordinality as z(a,b,ord); + a | b | ord +---+-----+----- + 1 | 11 | 1 + 1 | 111 | 2 +(2 rows) + +select * from rngfunct(1) with ordinality as z(a,b,ord) where b > 100; -- ordinal 2, not 1 + a | b | ord +---+-----+----- + 1 | 111 | 2 +(1 row) + +-- ordinality vs. 
column names and types +select a,b,ord from rngfunct(1) with ordinality as z(a,b,ord); + a | b | ord +---+-----+----- + 1 | 11 | 1 + 1 | 111 | 2 +(2 rows) + +select a,ord from unnest(array['a','b']) with ordinality as z(a,ord); + a | ord +---+----- + a | 1 + b | 2 +(2 rows) + +select * from unnest(array['a','b']) with ordinality as z(a,ord); + a | ord +---+----- + a | 1 + b | 2 +(2 rows) + +select a,ord from unnest(array[1.0::float8]) with ordinality as z(a,ord); + a | ord +---+----- + 1 | 1 +(1 row) + +select * from unnest(array[1.0::float8]) with ordinality as z(a,ord); + a | ord +---+----- + 1 | 1 +(1 row) + +select row_to_json(s.*) from generate_series(11,14) with ordinality s; + row_to_json +------------------------- + {"s":11,"ordinality":1} + {"s":12,"ordinality":2} + {"s":13,"ordinality":3} + {"s":14,"ordinality":4} +(4 rows) + +-- ordinality vs. views +create temporary view vw_ord as select * from (values (1)) v(n) join rngfunct(1) with ordinality as z(a,b,ord) on (n=ord); +select * from vw_ord; + n | a | b | ord +---+---+----+----- + 1 | 1 | 11 | 1 +(1 row) + +select definition from pg_views where viewname='vw_ord'; + definition +------------------------------------------------------------------------- + SELECT v.n, + + z.a, + + z.b, + + z.ord + + FROM (( VALUES (1)) v(n) + + JOIN rngfunct(1) WITH ORDINALITY z(a, b, ord) ON ((v.n = z.ord))); +(1 row) + +drop view vw_ord; +-- multiple functions +select * from rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord); + a | b | c | d | ord +---+-----+---+----+----- + 1 | 11 | 2 | 22 | 1 + 1 | 111 | | | 2 +(2 rows) + +create temporary view vw_ord as select * from (values (1)) v(n) join rows from(rngfunct(1),rngfunct(2)) with ordinality as z(a,b,c,d,ord) on (n=ord); +select * from vw_ord; + n | a | b | c | d | ord +---+---+----+---+----+----- + 1 | 1 | 11 | 2 | 22 | 1 +(1 row) + +select definition from pg_views where viewname='vw_ord'; + definition +------------------------------------------------------------------------------------------------------- + SELECT v.n, + + z.a, + + z.b, + + z.c, + + z.d, + + z.ord + + FROM (( VALUES (1)) v(n) + + JOIN ROWS FROM(rngfunct(1), rngfunct(2)) WITH ORDINALITY z(a, b, c, d, ord) ON ((v.n = z.ord))); +(1 row) + +drop view vw_ord; +-- expansions of unnest() +select * from unnest(array[10,20],array['foo','bar'],array[1.0]); + unnest | unnest | unnest +--------+--------+-------- + 10 | foo | 1.0 + 20 | bar | +(2 rows) + +select * from unnest(array[10,20],array['foo','bar'],array[1.0]) with ordinality as z(a,b,c,ord); + a | b | c | ord +----+-----+-----+----- + 10 | foo | 1.0 | 1 + 20 | bar | | 2 +(2 rows) + +select * from rows from(unnest(array[10,20],array['foo','bar'],array[1.0])) with ordinality as z(a,b,c,ord); + a | b | c | ord +----+-----+-----+----- + 10 | foo | 1.0 | 1 + 20 | bar | | 2 +(2 rows) + +select * from rows from(unnest(array[10,20],array['foo','bar']), generate_series(101,102)) with ordinality as z(a,b,c,ord); + a | b | c | ord +----+-----+-----+----- + 10 | foo | 101 | 1 + 20 | bar | 102 | 2 +(2 rows) + +create temporary view vw_ord as select * from unnest(array[10,20],array['foo','bar'],array[1.0]) as z(a,b,c); +select * from vw_ord; + a | b | c +----+-----+----- + 10 | foo | 1.0 + 20 | bar | +(2 rows) + +select definition from pg_views where viewname='vw_ord'; + definition +---------------------------------------------------------------------------------------- + SELECT a, + + b, + + c + + FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, 
c); +(1 row) + +drop view vw_ord; +create temporary view vw_ord as select * from rows from(unnest(array[10,20],array['foo','bar'],array[1.0])) as z(a,b,c); +select * from vw_ord; + a | b | c +----+-----+----- + 10 | foo | 1.0 + 20 | bar | +(2 rows) + +select definition from pg_views where viewname='vw_ord'; + definition +---------------------------------------------------------------------------------------- + SELECT a, + + b, + + c + + FROM UNNEST(ARRAY[10, 20], ARRAY['foo'::text, 'bar'::text], ARRAY[1.0]) z(a, b, c); +(1 row) + +drop view vw_ord; +create temporary view vw_ord as select * from rows from(unnest(array[10,20],array['foo','bar']), generate_series(1,2)) as z(a,b,c); +select * from vw_ord; + a | b | c +----+-----+--- + 10 | foo | 1 + 20 | bar | 2 +(2 rows) + +select definition from pg_views where viewname='vw_ord'; + definition +---------------------------------------------------------------------------------------------------------------------- + SELECT a, + + b, + + c + + FROM ROWS FROM(unnest(ARRAY[10, 20]), unnest(ARRAY['foo'::text, 'bar'::text]), generate_series(1, 2)) z(a, b, c); +(1 row) + +drop view vw_ord; +-- ordinality and multiple functions vs. rewind and reverse scan +begin; +declare rf_cur scroll cursor for select * from rows from(generate_series(1,5),generate_series(1,2)) with ordinality as g(i,j,o); +fetch all from rf_cur; + i | j | o +---+---+--- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | | 3 + 4 | | 4 + 5 | | 5 +(5 rows) + +fetch backward all from rf_cur; + i | j | o +---+---+--- + 5 | | 5 + 4 | | 4 + 3 | | 3 + 2 | 2 | 2 + 1 | 1 | 1 +(5 rows) + +fetch all from rf_cur; + i | j | o +---+---+--- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | | 3 + 4 | | 4 + 5 | | 5 +(5 rows) + +fetch next from rf_cur; + i | j | o +---+---+--- +(0 rows) + +fetch next from rf_cur; + i | j | o +---+---+--- +(0 rows) + +fetch prior from rf_cur; + i | j | o +---+---+--- + 5 | | 5 +(1 row) + +fetch absolute 1 from rf_cur; + i | j | o +---+---+--- + 1 | 1 | 1 +(1 row) + +fetch next from rf_cur; + i | j | o +---+---+--- + 2 | 2 | 2 +(1 row) + +fetch next from rf_cur; + i | j | o +---+---+--- + 3 | | 3 +(1 row) + +fetch next from rf_cur; + i | j | o +---+---+--- + 4 | | 4 +(1 row) + +fetch prior from rf_cur; + i | j | o +---+---+--- + 3 | | 3 +(1 row) + +fetch prior from rf_cur; + i | j | o +---+---+--- + 2 | 2 | 2 +(1 row) + +fetch prior from rf_cur; + i | j | o +---+---+--- + 1 | 1 | 1 +(1 row) + +commit; +-- function with implicit LATERAL +select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) z where rngfunc2.f2 = z.f2; + rngfuncid | f2 | rngfuncid | f2 +-----------+-----+-----------+----- + 1 | 11 | 1 | 11 + 2 | 22 | 2 | 22 + 1 | 111 | 1 | 111 +(3 rows) + +-- function with implicit LATERAL and explicit ORDINALITY +select * from rngfunc2, rngfunct(rngfunc2.rngfuncid) with ordinality as z(rngfuncid,f2,ord) where rngfunc2.f2 = z.f2; + rngfuncid | f2 | rngfuncid | f2 | ord +-----------+-----+-----------+-----+----- + 1 | 11 | 1 | 11 | 1 + 2 | 22 | 2 | 22 | 1 + 1 | 111 | 1 | 111 | 2 +(3 rows) + +-- function in subselect +select * from rngfunc2 where f2 in (select f2 from rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 + 2 | 22 +(3 rows) + +-- function in subselect +select * from rngfunc2 where f2 in (select f2 from rngfunct(1) z where z.rngfuncid = rngfunc2.rngfuncid) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 +(2 rows) + +-- function in subselect +select * from rngfunc2 where f2 in (select f2 from 
rngfunct(rngfunc2.rngfuncid) z where z.rngfuncid = 1) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 +(2 rows) + +-- nested functions +select rngfunct.rngfuncid, rngfunct.f2 from rngfunct(sin(pi()/2)::int) ORDER BY 1,2; + rngfuncid | f2 +-----------+----- + 1 | 11 + 1 | 111 +(2 rows) + +CREATE TABLE rngfunc (rngfuncid int, rngfuncsubid int, rngfuncname text, primary key(rngfuncid,rngfuncsubid)); +INSERT INTO rngfunc VALUES(1,1,'Joe'); +INSERT INTO rngfunc VALUES(1,2,'Ed'); +INSERT INTO rngfunc VALUES(2,1,'Mary'); +-- sql, proretset = f, prorettype = b +CREATE FUNCTION getrngfunc1(int) RETURNS int AS 'SELECT $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc1(1) AS t1; + t1 +---- + 1 +(1 row) + +SELECT * FROM getrngfunc1(1) WITH ORDINALITY AS t1(v,o); + v | o +---+--- + 1 | 1 +(1 row) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1); +SELECT * FROM vw_getrngfunc; + getrngfunc1 +------------- + 1 +(1 row) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc1(1) WITH ORDINALITY as t1(v,o); +SELECT * FROM vw_getrngfunc; + v | o +---+--- + 1 | 1 +(1 row) + +DROP VIEW vw_getrngfunc; +-- sql, proretset = t, prorettype = b +CREATE FUNCTION getrngfunc2(int) RETURNS setof int AS 'SELECT rngfuncid FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc2(1) AS t1; + t1 +---- + 1 + 1 +(2 rows) + +SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); + v | o +---+--- + 1 | 1 + 1 | 2 +(2 rows) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1); +SELECT * FROM vw_getrngfunc; + getrngfunc2 +------------- + 1 + 1 +(2 rows) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc2(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; + v | o +---+--- + 1 | 1 + 1 | 2 +(2 rows) + +DROP VIEW vw_getrngfunc; +-- sql, proretset = t, prorettype = b +CREATE FUNCTION getrngfunc3(int) RETURNS setof text AS 'SELECT rngfuncname FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc3(1) AS t1; + t1 +----- + Joe + Ed +(2 rows) + +SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); + v | o +-----+--- + Joe | 1 + Ed | 2 +(2 rows) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1); +SELECT * FROM vw_getrngfunc; + getrngfunc3 +------------- + Joe + Ed +(2 rows) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc3(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; + v | o +-----+--- + Joe | 1 + Ed | 2 +(2 rows) + +DROP VIEW vw_getrngfunc; +-- sql, proretset = f, prorettype = c +CREATE FUNCTION getrngfunc4(int) RETURNS rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc4(1) AS t1; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe +(1 row) + +SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); + a | b | c | o +---+---+-----+--- + 1 | 1 | Joe | 1 +(1 row) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe +(1 row) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc4(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; + a | b | c | o +---+---+-----+--- + 1 | 1 | Joe | 1 +(1 row) + +DROP VIEW vw_getrngfunc; +-- sql, proretset = t, prorettype = c +CREATE FUNCTION getrngfunc5(int) RETURNS setof rngfunc AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE 
SQL; +SELECT * FROM getrngfunc5(1) AS t1; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed +(2 rows) + +SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); + a | b | c | o +---+---+-----+--- + 1 | 1 | Joe | 1 + 1 | 2 | Ed | 2 +(2 rows) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed +(2 rows) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc5(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; + a | b | c | o +---+---+-----+--- + 1 | 1 | Joe | 1 + 1 | 2 | Ed | 2 +(2 rows) + +DROP VIEW vw_getrngfunc; +-- sql, proretset = f, prorettype = record +CREATE FUNCTION getrngfunc6(int) RETURNS RECORD AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc6(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe +(1 row) + +SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 +(1 row) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc6(1) AS +(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe +(1 row) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS + SELECT * FROM ROWS FROM( getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) + WITH ORDINALITY; +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 +(1 row) + +DROP VIEW vw_getrngfunc; +-- sql, proretset = t, prorettype = record +CREATE FUNCTION getrngfunc7(int) RETURNS setof record AS 'SELECT * FROM rngfunc WHERE rngfuncid = $1;' LANGUAGE SQL; +SELECT * FROM getrngfunc7(1) AS t1(rngfuncid int, rngfuncsubid int, rngfuncname text); + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed +(2 rows) + +SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) WITH ORDINALITY; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 + 1 | 2 | Ed | 2 +(2 rows) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc7(1) AS +(rngfuncid int, rngfuncsubid int, rngfuncname text); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe + 1 | 2 | Ed +(2 rows) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS + SELECT * FROM ROWS FROM( getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text) ) + WITH ORDINALITY; +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname | ordinality +-----------+--------------+-------------+------------ + 1 | 1 | Joe | 1 + 1 | 2 | Ed | 2 +(2 rows) + +DROP VIEW vw_getrngfunc; +-- plpgsql, proretset = f, prorettype = b +CREATE FUNCTION getrngfunc8(int) RETURNS int AS 'DECLARE rngfuncint int; BEGIN SELECT rngfuncid into rngfuncint FROM rngfunc WHERE rngfuncid = $1; RETURN rngfuncint; END;' LANGUAGE plpgsql; +SELECT * FROM 
getrngfunc8(1) AS t1; + t1 +---- + 1 +(1 row) + +SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); + v | o +---+--- + 1 | 1 +(1 row) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1); +SELECT * FROM vw_getrngfunc; + getrngfunc8 +------------- + 1 +(1 row) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc8(1) WITH ORDINALITY AS t1(v,o); +SELECT * FROM vw_getrngfunc; + v | o +---+--- + 1 | 1 +(1 row) + +DROP VIEW vw_getrngfunc; +-- plpgsql, proretset = f, prorettype = c +CREATE FUNCTION getrngfunc9(int) RETURNS rngfunc AS 'DECLARE rngfunctup rngfunc%ROWTYPE; BEGIN SELECT * into rngfunctup FROM rngfunc WHERE rngfuncid = $1; RETURN rngfunctup; END;' LANGUAGE plpgsql; +SELECT * FROM getrngfunc9(1) AS t1; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe +(1 row) + +SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); + a | b | c | o +---+---+-----+--- + 1 | 1 | Joe | 1 +(1 row) + +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1); +SELECT * FROM vw_getrngfunc; + rngfuncid | rngfuncsubid | rngfuncname +-----------+--------------+------------- + 1 | 1 | Joe +(1 row) + +DROP VIEW vw_getrngfunc; +CREATE VIEW vw_getrngfunc AS SELECT * FROM getrngfunc9(1) WITH ORDINALITY AS t1(a,b,c,o); +SELECT * FROM vw_getrngfunc; + a | b | c | o +---+---+-----+--- + 1 | 1 | Joe | 1 +(1 row) + +DROP VIEW vw_getrngfunc; +-- mix 'n match kinds, to exercise expandRTE and related logic +select * from rows from(getrngfunc1(1),getrngfunc2(1),getrngfunc3(1),getrngfunc4(1),getrngfunc5(1), + getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc8(1),getrngfunc9(1)) + with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); + a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u +---+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+-----+---+---+---+-----+--- + 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | 1 | Joe | 1 + | 1 | Ed | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | | 2 +(2 rows) + +select * from rows from(getrngfunc9(1),getrngfunc8(1), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc6(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc5(1),getrngfunc4(1),getrngfunc3(1),getrngfunc2(1),getrngfunc1(1)) + with ordinality as t1(a,b,c,d,e,f,g,h,i,j,k,l,m,o,p,q,r,s,t,u); + a | b | c | d | e | f | g | h | i | j | k | l | m | o | p | q | r | s | t | u +---+---+-----+---+---+---+-----+---+---+-----+---+---+-----+---+---+-----+-----+---+---+--- + 1 | 1 | Joe | 1 | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 | Joe | Joe | 1 | 1 | 1 + | | | | 1 | 2 | Ed | | | | 1 | 2 | Ed | | | | Ed | 1 | | 2 +(2 rows) + +create temporary view vw_rngfunc as + select * from rows from(getrngfunc9(1), + getrngfunc7(1) AS (rngfuncid int, rngfuncsubid int, rngfuncname text), + getrngfunc1(1)) + with ordinality as t1(a,b,c,d,e,f,g,n); +select * from vw_rngfunc; + a | b | c | d | e | f | g | n +---+---+-----+---+---+-----+---+--- + 1 | 1 | Joe | 1 | 1 | Joe | 1 | 1 + | | | 1 | 2 | Ed | | 2 +(2 rows) + +select pg_get_viewdef('vw_rngfunc'); + pg_get_viewdef +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + SELECT a, + + b, + + c, + + d, + + e, + + f, + + g, + + n + + FROM ROWS 
FROM(getrngfunc9(1), getrngfunc7(1) AS (rngfuncid integer, rngfuncsubid integer, rngfuncname text), getrngfunc1(1)) WITH ORDINALITY t1(a, b, c, d, e, f, g, n); +(1 row) + +drop view vw_rngfunc; +DROP FUNCTION getrngfunc1(int); +DROP FUNCTION getrngfunc2(int); +DROP FUNCTION getrngfunc3(int); +DROP FUNCTION getrngfunc4(int); +DROP FUNCTION getrngfunc5(int); +DROP FUNCTION getrngfunc6(int); +DROP FUNCTION getrngfunc7(int); +DROP FUNCTION getrngfunc8(int); +DROP FUNCTION getrngfunc9(int); +DROP FUNCTION rngfunct(int); +DROP TABLE rngfunc2; +DROP TABLE rngfunc; +-- Rescan tests -- +CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq1; +CREATE TEMPORARY SEQUENCE rngfunc_rescan_seq2; +CREATE TYPE rngfunc_rescan_t AS (i integer, s bigint); +CREATE FUNCTION rngfunc_sql(int,int) RETURNS setof rngfunc_rescan_t AS 'SELECT i, nextval(''rngfunc_rescan_seq1'') FROM generate_series($1,$2) i;' LANGUAGE SQL; +-- plpgsql functions use materialize mode +CREATE FUNCTION rngfunc_mat(int,int) RETURNS setof rngfunc_rescan_t AS 'begin for i in $1..$2 loop return next (i, nextval(''rngfunc_rescan_seq2'')); end loop; end;' LANGUAGE plpgsql; +--invokes ExecReScanFunctionScan - all these cases should materialize the function only once +-- LEFT JOIN on a condition that the planner can't prove to be true is used to ensure the function +-- is on the inner path of a nestloop join +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) ON (r+i)<100; + r | i | s +---+----+--- + 1 | 11 | 1 + 1 | 12 | 2 + 1 | 13 | 3 + 2 | 11 | 1 + 2 | 12 | 2 + 2 | 13 | 3 + 3 | 11 | 1 + 3 | 12 | 2 + 3 | 13 | 3 +(9 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_sql(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; + r | i | s | o +---+----+---+--- + 1 | 11 | 1 | 1 + 1 | 12 | 2 | 2 + 1 | 13 | 3 | 3 + 2 | 11 | 1 | 1 + 2 | 12 | 2 | 2 + 2 | 13 | 3 | 3 + 3 | 11 | 1 | 1 + 3 | 12 | 2 | 2 + 3 | 13 | 3 | 3 +(9 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) ON (r+i)<100; + r | i | s +---+----+--- + 1 | 11 | 1 + 1 | 12 | 2 + 1 | 13 | 3 + 2 | 11 | 1 + 2 | 12 | 2 + 2 | 13 | 3 + 3 | 11 | 1 + 3 | 12 | 2 + 3 | 13 | 3 +(9 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN rngfunc_mat(11,13) WITH ORDINALITY AS f(i,s,o) ON (r+i)<100; + r | i | s | o +---+----+---+--- + 1 | 11 | 1 | 1 + 1 | 12 | 2 | 2 + 1 | 13 | 3 | 3 + 2 | 11 | 1 | 1 + 2 | 12 | 2 | 2 + 2 | 13 | 3 | 3 + 3 | 11 | 1 | 1 + 3 | 12 | 2 | 2 + 3 | 13 | 3 | 3 +(9 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN ROWS FROM( rngfunc_sql(11,13), rngfunc_mat(11,13) ) WITH ORDINALITY AS f(i1,s1,i2,s2,o) ON (r+i1+i2)<100; + r | i1 | s1 | i2 | s2 | o +---+----+----+----+----+--- + 1 | 11 | 1 | 11 | 1 | 1 + 1 | 12 | 2 | 12 | 2 | 2 + 1 | 13 | 3 | 13 | 3 | 3 + 2 | 11 | 1 | 11 | 1 | 1 + 2 | 12 | 2 | 12 | 2 | 2 + 2 | 13 | 3 | 13 | 3 | 3 + 3 | 11 | 1 | 
11 | 1 | 1 + 3 | 12 | 2 | 12 | 2 | 2 + 3 | 13 | 3 | 13 | 3 | 3 +(9 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) f(i) ON (r+i)<100; + r | i +---+---- + 1 | 11 + 1 | 12 + 1 | 13 + 2 | 11 + 2 | 12 + 2 | 13 + 3 | 11 + 3 | 12 + 3 | 13 +(9 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN generate_series(11,13) WITH ORDINALITY AS f(i,o) ON (r+i)<100; + r | i | o +---+----+--- + 1 | 11 | 1 + 1 | 12 | 2 + 1 | 13 | 3 + 2 | 11 | 1 + 2 | 12 | 2 + 2 | 13 | 3 + 3 | 11 | 1 + 3 | 12 | 2 + 3 | 13 | 3 +(9 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) f(i) ON (r+i)<100; + r | i +---+---- + 1 | 10 + 1 | 20 + 1 | 30 + 2 | 10 + 2 | 20 + 2 | 30 + 3 | 10 + 3 | 20 + 3 | 30 +(9 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r) LEFT JOIN unnest(array[10,20,30]) WITH ORDINALITY AS f(i,o) ON (r+i)<100; + r | i | o +---+----+--- + 1 | 10 | 1 + 1 | 20 | 2 + 1 | 30 | 3 + 2 | 10 | 1 + 2 | 20 | 2 + 2 | 30 | 3 + 3 | 10 | 1 + 3 | 20 | 2 + 3 | 30 | 3 +(9 rows) + +--invokes ExecReScanFunctionScan with chgParam != NULL (using implied LATERAL) +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13); + r | i | s +---+----+--- + 1 | 11 | 1 + 1 | 12 | 2 + 1 | 13 | 3 + 2 | 12 | 4 + 2 | 13 | 5 + 3 | 13 | 6 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(10+r,13) WITH ORDINALITY AS f(i,s,o); + r | i | s | o +---+----+---+--- + 1 | 11 | 1 | 1 + 1 | 12 | 2 | 2 + 1 | 13 | 3 | 3 + 2 | 12 | 4 | 1 + 2 | 13 | 5 | 2 + 3 | 13 | 6 | 1 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r); + r | i | s +---+----+--- + 1 | 11 | 1 + 2 | 11 | 2 + 2 | 12 | 3 + 3 | 11 | 4 + 3 | 12 | 5 + 3 | 13 | 6 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_sql(11,10+r) WITH ORDINALITY AS f(i,s,o); + r | i | s | o +---+----+---+--- + 1 | 11 | 1 | 1 + 2 | 11 | 2 | 1 + 2 | 12 | 3 | 2 + 3 | 11 | 4 | 1 + 3 | 12 | 5 | 2 + 3 | 13 | 6 | 3 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2); + r1 | r2 | i | s +----+----+----+---- + 11 | 12 | 11 | 1 + 11 | 12 | 12 | 2 + 13 | 15 | 13 | 3 + 13 | 15 | 14 | 4 + 13 | 15 | 15 | 5 + 16 | 20 | 16 | 6 + 16 | 20 | 17 | 7 + 16 | 20 | 18 | 8 + 16 | 20 | 19 | 9 + 16 | 20 | 20 | 10 +(10 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_sql(r1,r2) WITH ORDINALITY AS f(i,s,o); + r1 | r2 | i | s | o +----+----+----+----+--- + 11 | 12 | 11 | 1 | 1 + 11 | 12 | 12 | 2 | 2 + 13 | 15 | 13 | 3 | 1 + 13 | 15 | 14 | 4 | 2 + 13 | 15 | 15 | 5 | 3 + 16 | 20 | 16 | 6 | 1 + 16 | 20 | 17 | 7 | 2 + 16 | 20 | 18 | 8 | 3 + 16 | 20 | 19 | 9 | 4 + 16 | 20 | 20 | 10 | 5 +(10 rows) + +SELECT 
setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13); + r | i | s +---+----+--- + 1 | 11 | 1 + 1 | 12 | 2 + 1 | 13 | 3 + 2 | 12 | 4 + 2 | 13 | 5 + 3 | 13 | 6 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(10+r,13) WITH ORDINALITY AS f(i,s,o); + r | i | s | o +---+----+---+--- + 1 | 11 | 1 | 1 + 1 | 12 | 2 | 2 + 1 | 13 | 3 | 3 + 2 | 12 | 4 | 1 + 2 | 13 | 5 | 2 + 3 | 13 | 6 | 1 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r); + r | i | s +---+----+--- + 1 | 11 | 1 + 2 | 11 | 2 + 2 | 12 | 3 + 3 | 11 | 4 + 3 | 12 | 5 + 3 | 13 | 6 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), rngfunc_mat(11,10+r) WITH ORDINALITY AS f(i,s,o); + r | i | s | o +---+----+---+--- + 1 | 11 | 1 | 1 + 2 | 11 | 2 | 1 + 2 | 12 | 3 | 2 + 3 | 11 | 4 | 1 + 3 | 12 | 5 | 2 + 3 | 13 | 6 | 3 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2); + r1 | r2 | i | s +----+----+----+---- + 11 | 12 | 11 | 1 + 11 | 12 | 12 | 2 + 13 | 15 | 13 | 3 + 13 | 15 | 14 | 4 + 13 | 15 | 15 | 5 + 16 | 20 | 16 | 6 + 16 | 20 | 17 | 7 + 16 | 20 | 18 | 8 + 16 | 20 | 19 | 9 + 16 | 20 | 20 | 10 +(10 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (11,12),(13,15),(16,20)) v(r1,r2), rngfunc_mat(r1,r2) WITH ORDINALITY AS f(i,s,o); + r1 | r2 | i | s | o +----+----+----+----+--- + 11 | 12 | 11 | 1 | 1 + 11 | 12 | 12 | 2 | 2 + 13 | 15 | 13 | 3 | 1 + 13 | 15 | 14 | 4 | 2 + 13 | 15 | 15 | 5 | 3 + 16 | 20 | 16 | 6 | 1 + 16 | 20 | 17 | 7 | 2 + 16 | 20 | 18 | 8 | 3 + 16 | 20 | 19 | 9 | 4 + 16 | 20 | 20 | 10 | 5 +(10 rows) + +-- selective rescan of multiple functions: +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(11,11), rngfunc_mat(10+r,13) ); + r | i | s | i | s +---+----+---+----+--- + 1 | 11 | 1 | 11 | 1 + 1 | | | 12 | 2 + 1 | | | 13 | 3 + 2 | 11 | 1 | 12 | 4 + 2 | | | 13 | 5 + 3 | 11 | 1 | 13 | 6 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(11,11) ); + r | i | s | i | s +---+----+---+----+--- + 1 | 11 | 1 | 11 | 1 + 1 | 12 | 2 | | + 1 | 13 | 3 | | + 2 | 12 | 4 | 11 | 1 + 2 | 13 | 5 | | + 3 | 13 | 6 | 11 | 1 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), ROWS FROM( rngfunc_sql(10+r,13), rngfunc_mat(10+r,13) ); + r | i | s | i | s +---+----+---+----+--- + 1 | 11 | 
1 | 11 | 1 + 1 | 12 | 2 | 12 | 2 + 1 | 13 | 3 | 13 | 3 + 2 | 12 | 4 | 12 | 4 + 2 | 13 | 5 | 13 | 5 + 3 | 13 | 6 | 13 | 6 +(6 rows) + +SELECT setval('rngfunc_rescan_seq1',1,false),setval('rngfunc_rescan_seq2',1,false); + setval | setval +--------+-------- + 1 | 1 +(1 row) + +SELECT * FROM generate_series(1,2) r1, generate_series(r1,3) r2, ROWS FROM( rngfunc_sql(10+r1,13), rngfunc_mat(10+r2,13) ); + r1 | r2 | i | s | i | s +----+----+----+----+----+--- + 1 | 1 | 11 | 1 | 11 | 1 + 1 | 1 | 12 | 2 | 12 | 2 + 1 | 1 | 13 | 3 | 13 | 3 + 1 | 2 | 11 | 4 | 12 | 4 + 1 | 2 | 12 | 5 | 13 | 5 + 1 | 2 | 13 | 6 | | + 1 | 3 | 11 | 7 | 13 | 6 + 1 | 3 | 12 | 8 | | + 1 | 3 | 13 | 9 | | + 2 | 2 | 12 | 10 | 12 | 7 + 2 | 2 | 13 | 11 | 13 | 8 + 2 | 3 | 12 | 12 | 13 | 9 + 2 | 3 | 13 | 13 | | +(13 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) f(i); + r | i +---+---- + 1 | 11 + 1 | 12 + 1 | 13 + 1 | 14 + 1 | 15 + 1 | 16 + 1 | 17 + 1 | 18 + 1 | 19 + 2 | 12 + 2 | 13 + 2 | 14 + 2 | 15 + 2 | 16 + 2 | 17 + 2 | 18 + 3 | 13 + 3 | 14 + 3 | 15 + 3 | 16 + 3 | 17 +(21 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), generate_series(10+r,20-r) WITH ORDINALITY AS f(i,o); + r | i | o +---+----+--- + 1 | 11 | 1 + 1 | 12 | 2 + 1 | 13 | 3 + 1 | 14 | 4 + 1 | 15 | 5 + 1 | 16 | 6 + 1 | 17 | 7 + 1 | 18 | 8 + 1 | 19 | 9 + 2 | 12 | 1 + 2 | 13 | 2 + 2 | 14 | 3 + 2 | 15 | 4 + 2 | 16 | 5 + 2 | 17 | 6 + 2 | 18 | 7 + 3 | 13 | 1 + 3 | 14 | 2 + 3 | 15 | 3 + 3 | 16 | 4 + 3 | 17 | 5 +(21 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), unnest(array[r*10,r*20,r*30]) f(i); + r | i +---+---- + 1 | 10 + 1 | 20 + 1 | 30 + 2 | 20 + 2 | 40 + 2 | 60 + 3 | 30 + 3 | 60 + 3 | 90 +(9 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v(r), unnest(array[r*10,r*20,r*30]) WITH ORDINALITY AS f(i,o); + r | i | o +---+----+--- + 1 | 10 | 1 + 1 | 20 | 2 + 1 | 30 | 3 + 2 | 20 | 1 + 2 | 40 | 2 + 2 | 60 | 3 + 3 | 30 | 1 + 3 | 60 | 2 + 3 | 90 | 3 +(9 rows) + +-- deep nesting +SELECT * FROM (VALUES (1),(2),(3)) v1(r1), + LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) + LEFT JOIN generate_series(21,23) f(i) ON ((r2+i)<100) OFFSET 0) s1; + r1 | r1 | r2 | i +----+----+----+---- + 1 | 1 | 10 | 21 + 1 | 1 | 10 | 22 + 1 | 1 | 10 | 23 + 1 | 1 | 20 | 21 + 1 | 1 | 20 | 22 + 1 | 1 | 20 | 23 + 1 | 1 | 30 | 21 + 1 | 1 | 30 | 22 + 1 | 1 | 30 | 23 + 2 | 2 | 10 | 21 + 2 | 2 | 10 | 22 + 2 | 2 | 10 | 23 + 2 | 2 | 20 | 21 + 2 | 2 | 20 | 22 + 2 | 2 | 20 | 23 + 2 | 2 | 30 | 21 + 2 | 2 | 30 | 22 + 2 | 2 | 30 | 23 + 3 | 3 | 10 | 21 + 3 | 3 | 10 | 22 + 3 | 3 | 10 | 23 + 3 | 3 | 20 | 21 + 3 | 3 | 20 | 22 + 3 | 3 | 20 | 23 + 3 | 3 | 30 | 21 + 3 | 3 | 30 | 22 + 3 | 3 | 30 | 23 +(27 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v1(r1), + LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) + LEFT JOIN generate_series(20+r1,23) f(i) ON ((r2+i)<100) OFFSET 0) s1; + r1 | r1 | r2 | i +----+----+----+---- + 1 | 1 | 10 | 21 + 1 | 1 | 10 | 22 + 1 | 1 | 10 | 23 + 1 | 1 | 20 | 21 + 1 | 1 | 20 | 22 + 1 | 1 | 20 | 23 + 1 | 1 | 30 | 21 + 1 | 1 | 30 | 22 + 1 | 1 | 30 | 23 + 2 | 2 | 10 | 22 + 2 | 2 | 10 | 23 + 2 | 2 | 20 | 22 + 2 | 2 | 20 | 23 + 2 | 2 | 30 | 22 + 2 | 2 | 30 | 23 + 3 | 3 | 10 | 23 + 3 | 3 | 20 | 23 + 3 | 3 | 30 | 23 +(18 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v1(r1), + LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) + LEFT JOIN generate_series(r2,r2+3) f(i) ON ((r2+i)<100) OFFSET 0) s1; + r1 | r1 | r2 | i +----+----+----+---- + 1 | 1 | 10 | 10 + 1 | 1 | 10 | 11 + 1 | 1 | 10 | 12 + 1 | 1 | 10 | 13 + 1 | 1 | 20 | 20 + 1 | 1 | 20 | 21 + 1 | 1 | 
20 | 22 + 1 | 1 | 20 | 23 + 1 | 1 | 30 | 30 + 1 | 1 | 30 | 31 + 1 | 1 | 30 | 32 + 1 | 1 | 30 | 33 + 2 | 2 | 10 | 10 + 2 | 2 | 10 | 11 + 2 | 2 | 10 | 12 + 2 | 2 | 10 | 13 + 2 | 2 | 20 | 20 + 2 | 2 | 20 | 21 + 2 | 2 | 20 | 22 + 2 | 2 | 20 | 23 + 2 | 2 | 30 | 30 + 2 | 2 | 30 | 31 + 2 | 2 | 30 | 32 + 2 | 2 | 30 | 33 + 3 | 3 | 10 | 10 + 3 | 3 | 10 | 11 + 3 | 3 | 10 | 12 + 3 | 3 | 10 | 13 + 3 | 3 | 20 | 20 + 3 | 3 | 20 | 21 + 3 | 3 | 20 | 22 + 3 | 3 | 20 | 23 + 3 | 3 | 30 | 30 + 3 | 3 | 30 | 31 + 3 | 3 | 30 | 32 + 3 | 3 | 30 | 33 +(36 rows) + +SELECT * FROM (VALUES (1),(2),(3)) v1(r1), + LATERAL (SELECT r1, * FROM (VALUES (10),(20),(30)) v2(r2) + LEFT JOIN generate_series(r1,2+r2/5) f(i) ON ((r2+i)<100) OFFSET 0) s1; + r1 | r1 | r2 | i +----+----+----+--- + 1 | 1 | 10 | 1 + 1 | 1 | 10 | 2 + 1 | 1 | 10 | 3 + 1 | 1 | 10 | 4 + 1 | 1 | 20 | 1 + 1 | 1 | 20 | 2 + 1 | 1 | 20 | 3 + 1 | 1 | 20 | 4 + 1 | 1 | 20 | 5 + 1 | 1 | 20 | 6 + 1 | 1 | 30 | 1 + 1 | 1 | 30 | 2 + 1 | 1 | 30 | 3 + 1 | 1 | 30 | 4 + 1 | 1 | 30 | 5 + 1 | 1 | 30 | 6 + 1 | 1 | 30 | 7 + 1 | 1 | 30 | 8 + 2 | 2 | 10 | 2 + 2 | 2 | 10 | 3 + 2 | 2 | 10 | 4 + 2 | 2 | 20 | 2 + 2 | 2 | 20 | 3 + 2 | 2 | 20 | 4 + 2 | 2 | 20 | 5 + 2 | 2 | 20 | 6 + 2 | 2 | 30 | 2 + 2 | 2 | 30 | 3 + 2 | 2 | 30 | 4 + 2 | 2 | 30 | 5 + 2 | 2 | 30 | 6 + 2 | 2 | 30 | 7 + 2 | 2 | 30 | 8 + 3 | 3 | 10 | 3 + 3 | 3 | 10 | 4 + 3 | 3 | 20 | 3 + 3 | 3 | 20 | 4 + 3 | 3 | 20 | 5 + 3 | 3 | 20 | 6 + 3 | 3 | 30 | 3 + 3 | 3 | 30 | 4 + 3 | 3 | 30 | 5 + 3 | 3 | 30 | 6 + 3 | 3 | 30 | 7 + 3 | 3 | 30 | 8 +(45 rows) + +-- check handling of FULL JOIN with multiple lateral references (bug #15741) +SELECT * +FROM (VALUES (1),(2)) v1(r1) + LEFT JOIN LATERAL ( + SELECT * + FROM generate_series(1, v1.r1) AS gs1 + LEFT JOIN LATERAL ( + SELECT * + FROM generate_series(1, gs1) AS gs2 + LEFT JOIN generate_series(1, gs2) AS gs3 ON TRUE + ) AS ss1 ON TRUE + FULL JOIN generate_series(1, v1.r1) AS gs4 ON FALSE + ) AS ss0 ON TRUE; + r1 | gs1 | gs2 | gs3 | gs4 +----+-----+-----+-----+----- + 1 | | | | 1 + 1 | 1 | 1 | 1 | + 2 | | | | 1 + 2 | | | | 2 + 2 | 1 | 1 | 1 | + 2 | 2 | 1 | 1 | + 2 | 2 | 2 | 1 | + 2 | 2 | 2 | 2 | +(8 rows) + +DROP FUNCTION rngfunc_sql(int,int); +DROP FUNCTION rngfunc_mat(int,int); +DROP SEQUENCE rngfunc_rescan_seq1; +DROP SEQUENCE rngfunc_rescan_seq2; +-- +-- Test cases involving OUT parameters +-- +CREATE FUNCTION rngfunc(in f1 int, out f2 int) +AS 'select $1+1' LANGUAGE sql; +SELECT rngfunc(42); + rngfunc +--------- + 43 +(1 row) + +SELECT * FROM rngfunc(42); + f2 +---- + 43 +(1 row) + +SELECT * FROM rngfunc(42) AS p(x); + x +---- + 43 +(1 row) + +-- explicit spec of return type is OK +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS int +AS 'select $1+1' LANGUAGE sql; +-- error, wrong result type +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int) RETURNS float +AS 'select $1+1' LANGUAGE sql; +ERROR: function result type must be integer because of OUT parameters +-- with multiple OUT params you must get a RECORD result +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) RETURNS int +AS 'select $1+1' LANGUAGE sql; +ERROR: function result type must be record because of OUT parameters +CREATE OR REPLACE FUNCTION rngfunc(in f1 int, out f2 int, out f3 text) +RETURNS record +AS 'select $1+1' LANGUAGE sql; +ERROR: cannot change return type of existing function +HINT: Use DROP FUNCTION rngfunc(integer) first. 
+CREATE OR REPLACE FUNCTION rngfuncr(in f1 int, out f2 int, out text) +AS $$select $1-1, $1::text || 'z'$$ LANGUAGE sql; +SELECT f1, rngfuncr(f1) FROM int4_tbl; + f1 | rngfuncr +-------------+---------------------------- + 0 | (-1,0z) + 123456 | (123455,123456z) + -123456 | (-123457,-123456z) + 2147483647 | (2147483646,2147483647z) + -2147483647 | (-2147483648,-2147483647z) +(5 rows) + +SELECT * FROM rngfuncr(42); + f2 | column2 +----+--------- + 41 | 42z +(1 row) + +SELECT * FROM rngfuncr(42) AS p(a,b); + a | b +----+----- + 41 | 42z +(1 row) + +CREATE OR REPLACE FUNCTION rngfuncb(in f1 int, inout f2 int, out text) +AS $$select $2-1, $1::text || 'z'$$ LANGUAGE sql; +SELECT f1, rngfuncb(f1, f1/2) FROM int4_tbl; + f1 | rngfuncb +-------------+---------------------------- + 0 | (-1,0z) + 123456 | (61727,123456z) + -123456 | (-61729,-123456z) + 2147483647 | (1073741822,2147483647z) + -2147483647 | (-1073741824,-2147483647z) +(5 rows) + +SELECT * FROM rngfuncb(42, 99); + f2 | column2 +----+--------- + 98 | 42z +(1 row) + +SELECT * FROM rngfuncb(42, 99) AS p(a,b); + a | b +----+----- + 98 | 42z +(1 row) + +-- Can reference function with or without OUT params for DROP, etc +DROP FUNCTION rngfunc(int); +DROP FUNCTION rngfuncr(in f2 int, out f1 int, out text); +DROP FUNCTION rngfuncb(in f1 int, inout f2 int); +-- +-- For my next trick, polymorphic OUT parameters +-- +CREATE FUNCTION dup (f1 anyelement, f2 out anyelement, f3 out anyarray) +AS 'select $1, array[$1,$1]' LANGUAGE sql; +SELECT dup(22); + dup +---------------- + (22,"{22,22}") +(1 row) + +SELECT dup('xyz'); -- fails +ERROR: could not determine polymorphic type because input has type unknown +SELECT dup('xyz'::text); + dup +------------------- + (xyz,"{xyz,xyz}") +(1 row) + +SELECT * FROM dup('xyz'::text); + f2 | f3 +-----+----------- + xyz | {xyz,xyz} +(1 row) + +-- fails, as we are attempting to rename first argument +CREATE OR REPLACE FUNCTION dup (inout f2 anyelement, out f3 anyarray) +AS 'select $1, array[$1,$1]' LANGUAGE sql; +ERROR: cannot change name of input parameter "f1" +HINT: Use DROP FUNCTION dup(anyelement) first. +DROP FUNCTION dup(anyelement); +-- equivalent behavior, though different name exposed for input arg +CREATE OR REPLACE FUNCTION dup (inout f2 anyelement, out f3 anyarray) +AS 'select $1, array[$1,$1]' LANGUAGE sql; +SELECT dup(22); + dup +---------------- + (22,"{22,22}") +(1 row) + +DROP FUNCTION dup(anyelement); +-- fails, no way to deduce outputs +CREATE FUNCTION bad (f1 int, out f2 anyelement, out f3 anyarray) +AS 'select $1, array[$1,$1]' LANGUAGE sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyelement requires at least one input of type anyelement, anyarray, anynonarray, anyenum, anyrange, or anymultirange. 
+CREATE FUNCTION dup (f1 anycompatible, f2 anycompatiblearray, f3 out anycompatible, f4 out anycompatiblearray) +AS 'select $1, $2' LANGUAGE sql; +SELECT dup(22, array[44]); + dup +----------- + (22,{44}) +(1 row) + +SELECT dup(4.5, array[44]); + dup +------------ + (4.5,{44}) +(1 row) + +SELECT dup(22, array[44::bigint]); + dup +----------- + (22,{44}) +(1 row) + +SELECT *, pg_typeof(f3), pg_typeof(f4) FROM dup(22, array[44::bigint]); + f3 | f4 | pg_typeof | pg_typeof +----+------+-----------+----------- + 22 | {44} | bigint | bigint[] +(1 row) + +DROP FUNCTION dup(f1 anycompatible, f2 anycompatiblearray); +CREATE FUNCTION dup (f1 anycompatiblerange, f2 out anycompatible, f3 out anycompatiblearray, f4 out anycompatiblerange) +AS 'select lower($1), array[lower($1), upper($1)], $1' LANGUAGE sql; +SELECT dup(int4range(4,7)); + dup +--------------------- + (4,"{4,7}","[4,7)") +(1 row) + +SELECT dup(numrange(4,7)); + dup +--------------------- + (4,"{4,7}","[4,7)") +(1 row) + +SELECT dup(textrange('aaa', 'bbb')); + dup +------------------------------- + (aaa,"{aaa,bbb}","[aaa,bbb)") +(1 row) + +DROP FUNCTION dup(f1 anycompatiblerange); +-- fails, no way to deduce outputs +CREATE FUNCTION bad (f1 anyarray, out f2 anycompatible, out f3 anycompatiblearray) +AS 'select $1, array[$1,$1]' LANGUAGE sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatible requires at least one input of type anycompatible, anycompatiblearray, anycompatiblenonarray, anycompatiblerange, or anycompatiblemultirange. +-- +-- table functions +-- +CREATE OR REPLACE FUNCTION rngfunc() +RETURNS TABLE(a int) +AS $$ SELECT a FROM generate_series(1,5) a(a) $$ LANGUAGE sql; +SELECT * FROM rngfunc(); + a +--- + 1 + 2 + 3 + 4 + 5 +(5 rows) + +DROP FUNCTION rngfunc(); +CREATE OR REPLACE FUNCTION rngfunc(int) +RETURNS TABLE(a int, b int) +AS $$ SELECT a, b + FROM generate_series(1,$1) a(a), + generate_series(1,$1) b(b) $$ LANGUAGE sql; +SELECT * FROM rngfunc(3); + a | b +---+--- + 1 | 1 + 1 | 2 + 1 | 3 + 2 | 1 + 2 | 2 + 2 | 3 + 3 | 1 + 3 | 2 + 3 | 3 +(9 rows) + +DROP FUNCTION rngfunc(int); +-- case that causes change of typmod knowledge during inlining +CREATE OR REPLACE FUNCTION rngfunc() +RETURNS TABLE(a varchar(5)) +AS $$ SELECT 'hello'::varchar(5) $$ LANGUAGE sql STABLE; +SELECT * FROM rngfunc() GROUP BY 1; + a +------- + hello +(1 row) + +DROP FUNCTION rngfunc(); +-- +-- some tests on SQL functions with RETURNING +-- +create temp table tt(f1 serial, data text); +create function insert_tt(text) returns int as +$$ insert into tt(data) values($1) returning f1 $$ +language sql; +select insert_tt('foo'); + insert_tt +----------- + 1 +(1 row) + +select insert_tt('bar'); + insert_tt +----------- + 2 +(1 row) + +select * from tt; + f1 | data +----+------ + 1 | foo + 2 | bar +(2 rows) + +-- insert will execute to completion even if function needs just 1 row +create or replace function insert_tt(text) returns int as +$$ insert into tt(data) values($1),($1||$1) returning f1 $$ +language sql; +select insert_tt('fool'); + insert_tt +----------- + 3 +(1 row) + +select * from tt; + f1 | data +----+---------- + 1 | foo + 2 | bar + 3 | fool + 4 | foolfool +(4 rows) + +-- setof does what's expected +create or replace function insert_tt2(text,text) returns setof int as +$$ insert into tt(data) values($1),($2) returning f1 $$ +language sql; +select insert_tt2('foolish','barrish'); + insert_tt2 +------------ + 5 + 6 +(2 rows) + +select * from insert_tt2('baz','quux'); + insert_tt2 +------------ + 7 + 8 +(2 rows) + +select 
* from tt; + f1 | data +----+---------- + 1 | foo + 2 | bar + 3 | fool + 4 | foolfool + 5 | foolish + 6 | barrish + 7 | baz + 8 | quux +(8 rows) + +-- limit doesn't prevent execution to completion +select insert_tt2('foolish','barrish') limit 1; + insert_tt2 +------------ + 9 +(1 row) + +select * from tt; + f1 | data +----+---------- + 1 | foo + 2 | bar + 3 | fool + 4 | foolfool + 5 | foolish + 6 | barrish + 7 | baz + 8 | quux + 9 | foolish + 10 | barrish +(10 rows) + +-- triggers will fire, too +create function noticetrigger() returns trigger as $$ +begin + raise notice 'noticetrigger % %', new.f1, new.data; + return null; +end $$ language plpgsql; +create trigger tnoticetrigger after insert on tt for each row +execute procedure noticetrigger(); +select insert_tt2('foolme','barme') limit 1; +NOTICE: noticetrigger 11 foolme +NOTICE: noticetrigger 12 barme + insert_tt2 +------------ + 11 +(1 row) + +select * from tt; + f1 | data +----+---------- + 1 | foo + 2 | bar + 3 | fool + 4 | foolfool + 5 | foolish + 6 | barrish + 7 | baz + 8 | quux + 9 | foolish + 10 | barrish + 11 | foolme + 12 | barme +(12 rows) + +-- and rules work +create temp table tt_log(f1 int, data text); +create rule insert_tt_rule as on insert to tt do also + insert into tt_log values(new.*); +select insert_tt2('foollog','barlog') limit 1; +NOTICE: noticetrigger 13 foollog +NOTICE: noticetrigger 14 barlog + insert_tt2 +------------ + 13 +(1 row) + +select * from tt; + f1 | data +----+---------- + 1 | foo + 2 | bar + 3 | fool + 4 | foolfool + 5 | foolish + 6 | barrish + 7 | baz + 8 | quux + 9 | foolish + 10 | barrish + 11 | foolme + 12 | barme + 13 | foollog + 14 | barlog +(14 rows) + +-- note that nextval() gets executed a second time in the rule expansion, +-- which is expected. +select * from tt_log; + f1 | data +----+--------- + 15 | foollog + 16 | barlog +(2 rows) + +-- test case for a whole-row-variable bug +create function rngfunc1(n integer, out a text, out b text) + returns setof record + language sql + as $$ select 'foo ' || i, 'bar ' || i from generate_series(1,$1) i $$; +set work_mem='64kB'; +select t.a, t, t.a from rngfunc1(10000) t limit 1; + a | t | a +-------+-------------------+------- + foo 1 | ("foo 1","bar 1") | foo 1 +(1 row) + +reset work_mem; +select t.a, t, t.a from rngfunc1(10000) t limit 1; + a | t | a +-------+-------------------+------- + foo 1 | ("foo 1","bar 1") | foo 1 +(1 row) + +drop function rngfunc1(n integer); +-- test use of SQL functions returning record +-- this is supported in some cases where the query doesn't specify +-- the actual record type ... 
+create function array_to_set(anyarray) returns setof record as $$ + select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i +$$ language sql strict immutable; +select array_to_set(array['one', 'two']); + array_to_set +-------------- + (1,one) + (2,two) +(2 rows) + +select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text); + f1 | f2 +----+----- + 1 | one + 2 | two +(2 rows) + +select * from array_to_set(array['one', 'two']); -- fail +ERROR: a column definition list is required for functions returning "record" +LINE 1: select * from array_to_set(array['one', 'two']); + ^ +-- after-the-fact coercion of the columns is now possible, too +select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); + f1 | f2 +------+----- + 1.00 | one + 2.00 | two +(2 rows) + +-- and if it doesn't work, you get a compile-time not run-time error +select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text); +ERROR: return type mismatch in function declared to return record +DETAIL: Final statement returns integer instead of point at column 1. +CONTEXT: SQL function "array_to_set" during startup +-- with "strict", this function can't be inlined in FROM +explain (verbose, costs off) + select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); + QUERY PLAN +---------------------------------------------------- + Function Scan on public.array_to_set t + Output: f1, f2 + Function Call: array_to_set('{one,two}'::text[]) +(3 rows) + +-- but without, it can be: +create or replace function array_to_set(anyarray) returns setof record as $$ + select i AS "index", $1[i] AS "value" from generate_subscripts($1, 1) i +$$ language sql immutable; +select array_to_set(array['one', 'two']); + array_to_set +-------------- + (1,one) + (2,two) +(2 rows) + +select * from array_to_set(array['one', 'two']) as t(f1 int,f2 text); + f1 | f2 +----+----- + 1 | one + 2 | two +(2 rows) + +select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); + f1 | f2 +------+----- + 1.00 | one + 2.00 | two +(2 rows) + +select * from array_to_set(array['one', 'two']) as t(f1 point,f2 text); +ERROR: return type mismatch in function declared to return record +DETAIL: Final statement returns integer instead of point at column 1. 
+CONTEXT: SQL function "array_to_set" during inlining +explain (verbose, costs off) + select * from array_to_set(array['one', 'two']) as t(f1 numeric(4,2),f2 text); + QUERY PLAN +-------------------------------------------------------------- + Function Scan on pg_catalog.generate_subscripts i + Output: i.i, ('{one,two}'::text[])[i.i] + Function Call: generate_subscripts('{one,two}'::text[], 1) +(3 rows) + +create temp table rngfunc(f1 int8, f2 int8); +create function testrngfunc() returns record as $$ + insert into rngfunc values (1,2) returning *; +$$ language sql; +select testrngfunc(); + testrngfunc +------------- + (1,2) +(1 row) + +select * from testrngfunc() as t(f1 int8,f2 int8); + f1 | f2 +----+---- + 1 | 2 +(1 row) + +select * from testrngfunc(); -- fail +ERROR: a column definition list is required for functions returning "record" +LINE 1: select * from testrngfunc(); + ^ +drop function testrngfunc(); +create function testrngfunc() returns setof record as $$ + insert into rngfunc values (1,2), (3,4) returning *; +$$ language sql; +select testrngfunc(); + testrngfunc +------------- + (1,2) + (3,4) +(2 rows) + +select * from testrngfunc() as t(f1 int8,f2 int8); + f1 | f2 +----+---- + 1 | 2 + 3 | 4 +(2 rows) + +select * from testrngfunc(); -- fail +ERROR: a column definition list is required for functions returning "record" +LINE 1: select * from testrngfunc(); + ^ +drop function testrngfunc(); +-- Check that typmod imposed by a composite type is honored +create type rngfunc_type as (f1 numeric(35,6), f2 numeric(35,2)); +create function testrngfunc() returns rngfunc_type as $$ + select 7.136178319899999964, 7.136178319899999964; +$$ language sql immutable; +explain (verbose, costs off) +select testrngfunc(); + QUERY PLAN +------------------------------------------- + Result + Output: '(7.136178,7.14)'::rngfunc_type +(2 rows) + +select testrngfunc(); + testrngfunc +----------------- + (7.136178,7.14) +(1 row) + +explain (verbose, costs off) +select * from testrngfunc(); + QUERY PLAN +-------------------------------------------------- + Function Scan on testrngfunc + Output: f1, f2 + Function Call: '(7.136178,7.14)'::rngfunc_type +(3 rows) + +select * from testrngfunc(); + f1 | f2 +----------+------ + 7.136178 | 7.14 +(1 row) + +create or replace function testrngfunc() returns rngfunc_type as $$ + select 7.136178319899999964, 7.136178319899999964; +$$ language sql volatile; +explain (verbose, costs off) +select testrngfunc(); + QUERY PLAN +------------------------- + Result + Output: testrngfunc() +(2 rows) + +select testrngfunc(); + testrngfunc +----------------- + (7.136178,7.14) +(1 row) + +explain (verbose, costs off) +select * from testrngfunc(); + QUERY PLAN +------------------------------------- + Function Scan on public.testrngfunc + Output: f1, f2 + Function Call: testrngfunc() +(3 rows) + +select * from testrngfunc(); + f1 | f2 +----------+------ + 7.136178 | 7.14 +(1 row) + +drop function testrngfunc(); +create function testrngfunc() returns setof rngfunc_type as $$ + select 7.136178319899999964, 7.136178319899999964; +$$ language sql immutable; +explain (verbose, costs off) +select testrngfunc(); + QUERY PLAN +------------------------- + ProjectSet + Output: testrngfunc() + -> Result +(3 rows) + +select testrngfunc(); + testrngfunc +----------------- + (7.136178,7.14) +(1 row) + +explain (verbose, costs off) +select * from testrngfunc(); + QUERY PLAN +-------------------------------------------------------- + Result + Output: 7.136178::numeric(35,6), 7.14::numeric(35,2) +(2 
rows) + +select * from testrngfunc(); + f1 | f2 +----------+------ + 7.136178 | 7.14 +(1 row) + +create or replace function testrngfunc() returns setof rngfunc_type as $$ + select 7.136178319899999964, 7.136178319899999964; +$$ language sql volatile; +explain (verbose, costs off) +select testrngfunc(); + QUERY PLAN +------------------------- + ProjectSet + Output: testrngfunc() + -> Result +(3 rows) + +select testrngfunc(); + testrngfunc +----------------- + (7.136178,7.14) +(1 row) + +explain (verbose, costs off) +select * from testrngfunc(); + QUERY PLAN +------------------------------------- + Function Scan on public.testrngfunc + Output: f1, f2 + Function Call: testrngfunc() +(3 rows) + +select * from testrngfunc(); + f1 | f2 +----------+------ + 7.136178 | 7.14 +(1 row) + +create or replace function testrngfunc() returns setof rngfunc_type as $$ + select 1, 2 union select 3, 4 order by 1; +$$ language sql immutable; +explain (verbose, costs off) +select testrngfunc(); + QUERY PLAN +------------------------- + ProjectSet + Output: testrngfunc() + -> Result +(3 rows) + +select testrngfunc(); + testrngfunc +----------------- + (1.000000,2.00) + (3.000000,4.00) +(2 rows) + +explain (verbose, costs off) +select * from testrngfunc(); + QUERY PLAN +---------------------------------------------------------- + Subquery Scan on "*SELECT*" + Output: "*SELECT*"."?column?", "*SELECT*"."?column?_1" + -> Unique + Output: (1), (2) + -> Sort + Output: (1), (2) + Sort Key: (1), (2) + -> Append + -> Result + Output: 1, 2 + -> Result + Output: 3, 4 +(12 rows) + +select * from testrngfunc(); + f1 | f2 +----------+------ + 1.000000 | 2.00 + 3.000000 | 4.00 +(2 rows) + +-- Check a couple of error cases while we're here +select * from testrngfunc() as t(f1 int8,f2 int8); -- fail, composite result +ERROR: a column definition list is redundant for a function returning a named composite type +LINE 1: select * from testrngfunc() as t(f1 int8,f2 int8); + ^ +select * from pg_get_keywords() as t(f1 int8,f2 int8); -- fail, OUT params +ERROR: a column definition list is redundant for a function with OUT parameters +LINE 1: select * from pg_get_keywords() as t(f1 int8,f2 int8); + ^ +select * from sin(3) as t(f1 int8,f2 int8); -- fail, scalar result type +ERROR: a column definition list is only allowed for functions returning "record" +LINE 1: select * from sin(3) as t(f1 int8,f2 int8); + ^ +drop type rngfunc_type cascade; +NOTICE: drop cascades to function testrngfunc() +-- +-- Check some cases involving added/dropped columns in a rowtype result +-- +create temp table users (userid text, seq int, email text, todrop bool, moredrop int, enabled bool); +insert into users values ('id',1,'email',true,11,true); +insert into users values ('id2',2,'email2',true,12,true); +alter table users drop column todrop; +create or replace function get_first_user() returns users as +$$ SELECT * FROM users ORDER BY userid LIMIT 1; $$ +language sql stable; +SELECT get_first_user(); + get_first_user +------------------- + (id,1,email,11,t) +(1 row) + +SELECT * FROM get_first_user(); + userid | seq | email | moredrop | enabled +--------+-----+-------+----------+--------- + id | 1 | email | 11 | t +(1 row) + +create or replace function get_users() returns setof users as +$$ SELECT * FROM users ORDER BY userid; $$ +language sql stable; +SELECT get_users(); + get_users +--------------------- + (id,1,email,11,t) + (id2,2,email2,12,t) +(2 rows) + +SELECT * FROM get_users(); + userid | seq | email | moredrop | enabled 
+--------+-----+--------+----------+--------- + id | 1 | email | 11 | t + id2 | 2 | email2 | 12 | t +(2 rows) + +SELECT * FROM get_users() WITH ORDINALITY; -- make sure ordinality copes + userid | seq | email | moredrop | enabled | ordinality +--------+-----+--------+----------+---------+------------ + id | 1 | email | 11 | t | 1 + id2 | 2 | email2 | 12 | t | 2 +(2 rows) + +-- multiple functions vs. dropped columns +SELECT * FROM ROWS FROM(generate_series(10,11), get_users()) WITH ORDINALITY; + generate_series | userid | seq | email | moredrop | enabled | ordinality +-----------------+--------+-----+--------+----------+---------+------------ + 10 | id | 1 | email | 11 | t | 1 + 11 | id2 | 2 | email2 | 12 | t | 2 +(2 rows) + +SELECT * FROM ROWS FROM(get_users(), generate_series(10,11)) WITH ORDINALITY; + userid | seq | email | moredrop | enabled | generate_series | ordinality +--------+-----+--------+----------+---------+-----------------+------------ + id | 1 | email | 11 | t | 10 | 1 + id2 | 2 | email2 | 12 | t | 11 | 2 +(2 rows) + +-- check that we can cope with post-parsing changes in rowtypes +create temp view usersview as +SELECT * FROM ROWS FROM(get_users(), generate_series(10,11)) WITH ORDINALITY; +select * from usersview; + userid | seq | email | moredrop | enabled | generate_series | ordinality +--------+-----+--------+----------+---------+-----------------+------------ + id | 1 | email | 11 | t | 10 | 1 + id2 | 2 | email2 | 12 | t | 11 | 2 +(2 rows) + +alter table users add column junk text; +select * from usersview; + userid | seq | email | moredrop | enabled | generate_series | ordinality +--------+-----+--------+----------+---------+-----------------+------------ + id | 1 | email | 11 | t | 10 | 1 + id2 | 2 | email2 | 12 | t | 11 | 2 +(2 rows) + +alter table users drop column moredrop; -- fail, view has reference +ERROR: cannot drop column moredrop of table users because other objects depend on it +DETAIL: view usersview depends on column moredrop of table users +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- We used to have a bug that would allow the above to succeed, posing +-- hazards for later execution of the view. Check that the internal +-- defenses for those hazards haven't bit-rotted, in case some other +-- bug with similar symptoms emerges. 
+begin; +-- destroy the dependency entry that prevents the DROP: +delete from pg_depend where + objid = (select oid from pg_rewrite + where ev_class = 'usersview'::regclass and rulename = '_RETURN') + and refobjsubid = 5 +returning pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as ref, + deptype; + obj | ref | deptype +--------------------------------+--------------------------------+--------- + rule _RETURN on view usersview | column moredrop of table users | n +(1 row) + +alter table users drop column moredrop; +select * from usersview; -- expect clean failure +ERROR: attribute 5 of type record has been dropped +rollback; +alter table users alter column seq type numeric; -- fail, view has reference +ERROR: cannot alter type of a column used by a view or rule +DETAIL: rule _RETURN on view usersview depends on column "seq" +-- likewise, check we don't crash if the dependency goes wrong +begin; +-- destroy the dependency entry that prevents the ALTER: +delete from pg_depend where + objid = (select oid from pg_rewrite + where ev_class = 'usersview'::regclass and rulename = '_RETURN') + and refobjsubid = 2 +returning pg_describe_object(classid, objid, objsubid) as obj, + pg_describe_object(refclassid, refobjid, refobjsubid) as ref, + deptype; + obj | ref | deptype +--------------------------------+---------------------------+--------- + rule _RETURN on view usersview | column seq of table users | n +(1 row) + +alter table users alter column seq type numeric; +select * from usersview; -- expect clean failure +ERROR: attribute 2 of type record has wrong type +DETAIL: Table has type numeric, but query expects integer. +rollback; +drop view usersview; +drop function get_first_user(); +drop function get_users(); +drop table users; +-- check behavior with type coercion required for a set-op +create or replace function rngfuncbar() returns setof text as +$$ select 'foo'::varchar union all select 'bar'::varchar ; $$ +language sql stable; +select rngfuncbar(); + rngfuncbar +------------ + foo + bar +(2 rows) + +select * from rngfuncbar(); + rngfuncbar +------------ + foo + bar +(2 rows) + +-- this function is now inlinable, too: +explain (verbose, costs off) select * from rngfuncbar(); + QUERY PLAN +------------------------------------------------ + Result + Output: ('foo'::character varying) + -> Append + -> Result + Output: 'foo'::character varying + -> Result + Output: 'bar'::character varying +(7 rows) + +drop function rngfuncbar(); +-- check handling of a SQL function with multiple OUT params (bug #5777) +create or replace function rngfuncbar(out integer, out numeric) as +$$ select (1, 2.1) $$ language sql; +select * from rngfuncbar(); + column1 | column2 +---------+--------- + 1 | 2.1 +(1 row) + +create or replace function rngfuncbar(out integer, out numeric) as +$$ select (1, 2) $$ language sql; +select * from rngfuncbar(); -- fail +ERROR: function return row and query-specified return row do not match +DETAIL: Returned type integer at ordinal position 2, but query expects numeric. +create or replace function rngfuncbar(out integer, out numeric) as +$$ select (1, 2.1, 3) $$ language sql; +select * from rngfuncbar(); -- fail +ERROR: function return row and query-specified return row do not match +DETAIL: Returned row contains 3 attributes, but query expects 2. 
+drop function rngfuncbar(); +-- check whole-row-Var handling in nested lateral functions (bug #11703) +create function extractq2(t int8_tbl) returns int8 as $$ + select t.q2 +$$ language sql immutable; +explain (verbose, costs off) +select x from int8_tbl, extractq2(int8_tbl) f(x); + QUERY PLAN +------------------------------------------ + Nested Loop + Output: f.x + -> Seq Scan on public.int8_tbl + Output: int8_tbl.q1, int8_tbl.q2 + -> Function Scan on f + Output: f.x + Function Call: int8_tbl.q2 +(7 rows) + +select x from int8_tbl, extractq2(int8_tbl) f(x); + x +------------------- + 456 + 4567890123456789 + 123 + 4567890123456789 + -4567890123456789 +(5 rows) + +create function extractq2_2(t int8_tbl) returns table(ret1 int8) as $$ + select extractq2(t) offset 0 +$$ language sql immutable; +explain (verbose, costs off) +select x from int8_tbl, extractq2_2(int8_tbl) f(x); + QUERY PLAN +----------------------------------- + Nested Loop + Output: ((int8_tbl.*).q2) + -> Seq Scan on public.int8_tbl + Output: int8_tbl.* + -> Result + Output: (int8_tbl.*).q2 +(6 rows) + +select x from int8_tbl, extractq2_2(int8_tbl) f(x); + x +------------------- + 456 + 4567890123456789 + 123 + 4567890123456789 + -4567890123456789 +(5 rows) + +-- without the "offset 0", this function gets optimized quite differently +create function extractq2_2_opt(t int8_tbl) returns table(ret1 int8) as $$ + select extractq2(t) +$$ language sql immutable; +explain (verbose, costs off) +select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x); + QUERY PLAN +----------------------------- + Seq Scan on public.int8_tbl + Output: int8_tbl.q2 +(2 rows) + +select x from int8_tbl, extractq2_2_opt(int8_tbl) f(x); + x +------------------- + 456 + 4567890123456789 + 123 + 4567890123456789 + -4567890123456789 +(5 rows) + +-- check handling of nulls in SRF results (bug #7808) +create type rngfunc2 as (a integer, b text); +select *, row_to_json(u) from unnest(array[(1,'foo')::rngfunc2, null::rngfunc2]) u; + a | b | row_to_json +---+-----+--------------------- + 1 | foo | {"a":1,"b":"foo"} + | | {"a":null,"b":null} +(2 rows) + +select *, row_to_json(u) from unnest(array[null::rngfunc2, null::rngfunc2]) u; + a | b | row_to_json +---+---+--------------------- + | | {"a":null,"b":null} + | | {"a":null,"b":null} +(2 rows) + +select *, row_to_json(u) from unnest(array[null::rngfunc2, (1,'foo')::rngfunc2, null::rngfunc2]) u; + a | b | row_to_json +---+-----+--------------------- + | | {"a":null,"b":null} + 1 | foo | {"a":1,"b":"foo"} + | | {"a":null,"b":null} +(3 rows) + +select *, row_to_json(u) from unnest(array[]::rngfunc2[]) u; + a | b | row_to_json +---+---+------------- +(0 rows) + +drop type rngfunc2; +-- check handling of functions pulled up into function RTEs (bug #17227) +explain (verbose, costs off) +select * from + (select jsonb_path_query_array(module->'lectures', '$[*]') as lecture + from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb]) + as unnested_modules(module)) as ss, + jsonb_to_recordset(ss.lecture) as j (id text); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: jsonb_path_query_array((unnested_modules.module -> 'lectures'::text), '$[*]'::jsonpath, '{}'::jsonb, false), j.id + -> Function Scan on pg_catalog.unnest unnested_modules + Output: unnested_modules.module + Function Call: unnest('{"{\"lectures\": [{\"id\": \"1\"}]}"}'::jsonb[]) + -> Function Scan on 
pg_catalog.jsonb_to_recordset j + Output: j.id + Function Call: jsonb_to_recordset(jsonb_path_query_array((unnested_modules.module -> 'lectures'::text), '$[*]'::jsonpath, '{}'::jsonb, false)) +(8 rows) + +select * from + (select jsonb_path_query_array(module->'lectures', '$[*]') as lecture + from unnest(array['{"lectures": [{"id": "1"}]}'::jsonb]) + as unnested_modules(module)) as ss, + jsonb_to_recordset(ss.lecture) as j (id text); + lecture | id +---------------+---- + [{"id": "1"}] | 1 +(1 row) + diff --git a/src/test/regress/expected/rangetypes.out b/src/test/regress/expected/rangetypes.out new file mode 100644 index 0000000..ee02ff0 --- /dev/null +++ b/src/test/regress/expected/rangetypes.out @@ -0,0 +1,1836 @@ +-- Tests for range data types. +-- +-- test input parser +-- (type textrange was already made in test_setup.sql) +-- +-- negative tests; should fail +select ''::textrange; +ERROR: malformed range literal: "" +LINE 1: select ''::textrange; + ^ +DETAIL: Missing left parenthesis or bracket. +select '-[a,z)'::textrange; +ERROR: malformed range literal: "-[a,z)" +LINE 1: select '-[a,z)'::textrange; + ^ +DETAIL: Missing left parenthesis or bracket. +select '[a,z) - '::textrange; +ERROR: malformed range literal: "[a,z) - " +LINE 1: select '[a,z) - '::textrange; + ^ +DETAIL: Junk after right parenthesis or bracket. +select '(",a)'::textrange; +ERROR: malformed range literal: "(",a)" +LINE 1: select '(",a)'::textrange; + ^ +DETAIL: Unexpected end of input. +select '(,,a)'::textrange; +ERROR: malformed range literal: "(,,a)" +LINE 1: select '(,,a)'::textrange; + ^ +DETAIL: Too many commas. +select '(),a)'::textrange; +ERROR: malformed range literal: "(),a)" +LINE 1: select '(),a)'::textrange; + ^ +DETAIL: Missing comma after lower bound. +select '(a,))'::textrange; +ERROR: malformed range literal: "(a,))" +LINE 1: select '(a,))'::textrange; + ^ +DETAIL: Junk after right parenthesis or bracket. +select '(],a)'::textrange; +ERROR: malformed range literal: "(],a)" +LINE 1: select '(],a)'::textrange; + ^ +DETAIL: Missing comma after lower bound. +select '(a,])'::textrange; +ERROR: malformed range literal: "(a,])" +LINE 1: select '(a,])'::textrange; + ^ +DETAIL: Junk after right parenthesis or bracket. 
+select '[z,a]'::textrange; +ERROR: range lower bound must be less than or equal to range upper bound +LINE 1: select '[z,a]'::textrange; + ^ +-- should succeed +select ' empty '::textrange; + textrange +----------- + empty +(1 row) + +select ' ( empty, empty ) '::textrange; + textrange +---------------------- + (" empty"," empty ") +(1 row) + +select ' ( " a " " a ", " z " " z " ) '::textrange; + textrange +-------------------------- + (" a a "," z z ") +(1 row) + +select '(a,)'::textrange; + textrange +----------- + (a,) +(1 row) + +select '[,z]'::textrange; + textrange +----------- + (,z] +(1 row) + +select '[a,]'::textrange; + textrange +----------- + [a,) +(1 row) + +select '(,)'::textrange; + textrange +----------- + (,) +(1 row) + +select '[ , ]'::textrange; + textrange +----------- + [" "," "] +(1 row) + +select '["",""]'::textrange; + textrange +----------- + ["",""] +(1 row) + +select '[",",","]'::textrange; + textrange +----------- + [",",","] +(1 row) + +select '["\\","\\"]'::textrange; + textrange +------------- + ["\\","\\"] +(1 row) + +select '(\\,a)'::textrange; + textrange +----------- + ("\\",a) +(1 row) + +select '((,z)'::textrange; + textrange +----------- + ("(",z) +(1 row) + +select '([,z)'::textrange; + textrange +----------- + ("[",z) +(1 row) + +select '(!,()'::textrange; + textrange +----------- + (!,"(") +(1 row) + +select '(!,[)'::textrange; + textrange +----------- + (!,"[") +(1 row) + +select '[a,a]'::textrange; + textrange +----------- + [a,a] +(1 row) + +-- these are allowed but normalize to empty: +select '[a,a)'::textrange; + textrange +----------- + empty +(1 row) + +select '(a,a]'::textrange; + textrange +----------- + empty +(1 row) + +select '(a,a)'::textrange; + textrange +----------- + empty +(1 row) + +-- Also try it with non-error-throwing API +select pg_input_is_valid('(1,4)', 'int4range'); + pg_input_is_valid +------------------- + t +(1 row) + +select pg_input_is_valid('(1,4', 'int4range'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('(1,4', 'int4range'); + message | detail | hint | sql_error_code +---------------------------------+--------------------------+------+---------------- + malformed range literal: "(1,4" | Unexpected end of input. 
| | 22P02 +(1 row) + +select pg_input_is_valid('(4,1)', 'int4range'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('(4,1)', 'int4range'); + message | detail | hint | sql_error_code +-------------------------------------------------------------------+--------+------+---------------- + range lower bound must be less than or equal to range upper bound | | | 22000 +(1 row) + +select pg_input_is_valid('(4,zed)', 'int4range'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('(4,zed)', 'int4range'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + invalid input syntax for type integer: "zed" | | | 22P02 +(1 row) + +select pg_input_is_valid('[1,2147483647]', 'int4range'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('[1,2147483647]', 'int4range'); + message | detail | hint | sql_error_code +----------------------+--------+------+---------------- + integer out of range | | | 22003 +(1 row) + +select pg_input_is_valid('[2000-01-01,5874897-12-31]', 'daterange'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('[2000-01-01,5874897-12-31]', 'daterange'); + message | detail | hint | sql_error_code +-------------------+--------+------+---------------- + date out of range | | | 22008 +(1 row) + +-- +-- create some test data and test the operators +-- +CREATE TABLE numrange_test (nr NUMRANGE); +create index numrange_test_btree on numrange_test(nr); +INSERT INTO numrange_test VALUES('[,)'); +INSERT INTO numrange_test VALUES('[3,]'); +INSERT INTO numrange_test VALUES('[, 5)'); +INSERT INTO numrange_test VALUES(numrange(1.1, 2.2)); +INSERT INTO numrange_test VALUES('empty'); +INSERT INTO numrange_test VALUES(numrange(1.7, 1.7, '[]')); +SELECT nr, isempty(nr), lower(nr), upper(nr) FROM numrange_test; + nr | isempty | lower | upper +-----------+---------+-------+------- + (,) | f | | + [3,) | f | 3 | + (,5) | f | | 5 + [1.1,2.2) | f | 1.1 | 2.2 + empty | t | | + [1.7,1.7] | f | 1.7 | 1.7 +(6 rows) + +SELECT nr, lower_inc(nr), lower_inf(nr), upper_inc(nr), upper_inf(nr) FROM numrange_test; + nr | lower_inc | lower_inf | upper_inc | upper_inf +-----------+-----------+-----------+-----------+----------- + (,) | f | t | f | t + [3,) | t | f | f | t + (,5) | f | t | f | f + [1.1,2.2) | t | f | f | f + empty | f | f | f | f + [1.7,1.7] | t | f | t | f +(6 rows) + +SELECT * FROM numrange_test WHERE range_contains(nr, numrange(1.9,1.91)); + nr +----------- + (,) + (,5) + [1.1,2.2) +(3 rows) + +SELECT * FROM numrange_test WHERE nr @> numrange(1.0,10000.1); + nr +----- + (,) +(1 row) + +SELECT * FROM numrange_test WHERE range_contained_by(numrange(-1e7,-10000.1), nr); + nr +------ + (,) + (,5) +(2 rows) + +SELECT * FROM numrange_test WHERE 1.9 <@ nr; + nr +----------- + (,) + (,5) + [1.1,2.2) +(3 rows) + +select * from numrange_test where nr = 'empty'; + nr +------- + empty +(1 row) + +select * from numrange_test where nr = '(1.1, 2.2)'; + nr +---- +(0 rows) + +select * from numrange_test where nr = '[1.1, 2.2)'; + nr +----------- + [1.1,2.2) +(1 row) + +select * from numrange_test where nr < 'empty'; + nr +---- +(0 rows) + +select * from numrange_test where nr < numrange(-1000.0, -1000.0,'[]'); + nr +------- + (,) + (,5) + empty +(3 rows) + +select * from numrange_test where nr < numrange(0.0, 1.0,'[]'); + nr +------- + (,) + (,5) + empty +(3 rows) + +select * from 
numrange_test where nr < numrange(1000.0, 1001.0,'[]'); + nr +----------- + (,) + [3,) + (,5) + [1.1,2.2) + empty + [1.7,1.7] +(6 rows) + +select * from numrange_test where nr <= 'empty'; + nr +------- + empty +(1 row) + +select * from numrange_test where nr >= 'empty'; + nr +----------- + (,) + [3,) + (,5) + [1.1,2.2) + empty + [1.7,1.7] +(6 rows) + +select * from numrange_test where nr > 'empty'; + nr +----------- + (,) + [3,) + (,5) + [1.1,2.2) + [1.7,1.7] +(5 rows) + +select * from numrange_test where nr > numrange(-1001.0, -1000.0,'[]'); + nr +----------- + [3,) + [1.1,2.2) + [1.7,1.7] +(3 rows) + +select * from numrange_test where nr > numrange(0.0, 1.0,'[]'); + nr +----------- + [3,) + [1.1,2.2) + [1.7,1.7] +(3 rows) + +select * from numrange_test where nr > numrange(1000.0, 1000.0,'[]'); + nr +---- +(0 rows) + +select numrange(2.0, 1.0); +ERROR: range lower bound must be less than or equal to range upper bound +select numrange(2.0, 3.0) -|- numrange(3.0, 4.0); + ?column? +---------- + t +(1 row) + +select range_adjacent(numrange(2.0, 3.0), numrange(3.1, 4.0)); + range_adjacent +---------------- + f +(1 row) + +select range_adjacent(numrange(2.0, 3.0), numrange(3.1, null)); + range_adjacent +---------------- + f +(1 row) + +select numrange(2.0, 3.0, '[]') -|- numrange(3.0, 4.0, '()'); + ?column? +---------- + t +(1 row) + +select numrange(1.0, 2.0) -|- numrange(2.0, 3.0,'[]'); + ?column? +---------- + t +(1 row) + +select range_adjacent(numrange(2.0, 3.0, '(]'), numrange(1.0, 2.0, '(]')); + range_adjacent +---------------- + t +(1 row) + +select numrange(1.1, 3.3) <@ numrange(0.1,10.1); + ?column? +---------- + t +(1 row) + +select numrange(0.1, 10.1) <@ numrange(1.1,3.3); + ?column? +---------- + f +(1 row) + +select numrange(1.1, 2.2) - numrange(2.0, 3.0); + ?column? +----------- + [1.1,2.0) +(1 row) + +select numrange(1.1, 2.2) - numrange(2.2, 3.0); + ?column? +----------- + [1.1,2.2) +(1 row) + +select numrange(1.1, 2.2,'[]') - numrange(2.0, 3.0); + ?column? +----------- + [1.1,2.0) +(1 row) + +select range_minus(numrange(10.1,12.2,'[]'), numrange(110.0,120.2,'(]')); + range_minus +------------- + [10.1,12.2] +(1 row) + +select range_minus(numrange(10.1,12.2,'[]'), numrange(0.0,120.2,'(]')); + range_minus +------------- + empty +(1 row) + +select numrange(4.5, 5.5, '[]') && numrange(5.5, 6.5); + ?column? +---------- + t +(1 row) + +select numrange(1.0, 2.0) << numrange(3.0, 4.0); + ?column? +---------- + t +(1 row) + +select numrange(1.0, 3.0,'[]') << numrange(3.0, 4.0,'[]'); + ?column? +---------- + f +(1 row) + +select numrange(1.0, 3.0,'()') << numrange(3.0, 4.0,'()'); + ?column? +---------- + t +(1 row) + +select numrange(1.0, 2.0) >> numrange(3.0, 4.0); + ?column? +---------- + f +(1 row) + +select numrange(3.0, 70.0) &< numrange(6.6, 100.0); + ?column? +---------- + t +(1 row) + +select numrange(1.1, 2.2) < numrange(1.0, 200.2); + ?column? +---------- + f +(1 row) + +select numrange(1.1, 2.2) < numrange(1.1, 1.2); + ?column? +---------- + f +(1 row) + +select numrange(1.0, 2.0) + numrange(2.0, 3.0); + ?column? +----------- + [1.0,3.0) +(1 row) + +select numrange(1.0, 2.0) + numrange(1.5, 3.0); + ?column? 
+----------- + [1.0,3.0) +(1 row) + +select numrange(1.0, 2.0) + numrange(2.5, 3.0); -- should fail +ERROR: result of range union would not be contiguous +select range_merge(numrange(1.0, 2.0), numrange(2.0, 3.0)); + range_merge +------------- + [1.0,3.0) +(1 row) + +select range_merge(numrange(1.0, 2.0), numrange(1.5, 3.0)); + range_merge +------------- + [1.0,3.0) +(1 row) + +select range_merge(numrange(1.0, 2.0), numrange(2.5, 3.0)); -- shouldn't fail + range_merge +------------- + [1.0,3.0) +(1 row) + +select numrange(1.0, 2.0) * numrange(2.0, 3.0); + ?column? +---------- + empty +(1 row) + +select numrange(1.0, 2.0) * numrange(1.5, 3.0); + ?column? +----------- + [1.5,2.0) +(1 row) + +select numrange(1.0, 2.0) * numrange(2.5, 3.0); + ?column? +---------- + empty +(1 row) + +select range_intersect_agg(nr) from numrange_test; + range_intersect_agg +--------------------- + empty +(1 row) + +select range_intersect_agg(nr) from numrange_test where false; + range_intersect_agg +--------------------- + +(1 row) + +select range_intersect_agg(nr) from numrange_test where nr @> 4.0; + range_intersect_agg +--------------------- + [3,5) +(1 row) + +analyze numrange_test; +create table numrange_test2(nr numrange); +create index numrange_test2_hash_idx on numrange_test2 using hash (nr); +INSERT INTO numrange_test2 VALUES('[, 5)'); +INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2)); +INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2)); +INSERT INTO numrange_test2 VALUES(numrange(1.1, 2.2,'()')); +INSERT INTO numrange_test2 VALUES('empty'); +select * from numrange_test2 where nr = 'empty'::numrange; + nr +------- + empty +(1 row) + +select * from numrange_test2 where nr = numrange(1.1, 2.2); + nr +----------- + [1.1,2.2) + [1.1,2.2) +(2 rows) + +select * from numrange_test2 where nr = numrange(1.1, 2.3); + nr +---- +(0 rows) + +set enable_nestloop=t; +set enable_hashjoin=f; +set enable_mergejoin=f; +select * from numrange_test natural join numrange_test2 order by nr; + nr +----------- + empty + (,5) + [1.1,2.2) + [1.1,2.2) +(4 rows) + +set enable_nestloop=f; +set enable_hashjoin=t; +set enable_mergejoin=f; +select * from numrange_test natural join numrange_test2 order by nr; + nr +----------- + empty + (,5) + [1.1,2.2) + [1.1,2.2) +(4 rows) + +set enable_nestloop=f; +set enable_hashjoin=f; +set enable_mergejoin=t; +select * from numrange_test natural join numrange_test2 order by nr; + nr +----------- + empty + (,5) + [1.1,2.2) + [1.1,2.2) +(4 rows) + +set enable_nestloop to default; +set enable_hashjoin to default; +set enable_mergejoin to default; +-- keep numrange_test around to help exercise dump/reload +DROP TABLE numrange_test2; +-- +-- Apply a subset of the above tests on a collatable type, too +-- +CREATE TABLE textrange_test (tr textrange); +create index textrange_test_btree on textrange_test(tr); +INSERT INTO textrange_test VALUES('[,)'); +INSERT INTO textrange_test VALUES('["a",]'); +INSERT INTO textrange_test VALUES('[,"q")'); +INSERT INTO textrange_test VALUES(textrange('b', 'g')); +INSERT INTO textrange_test VALUES('empty'); +INSERT INTO textrange_test VALUES(textrange('d', 'd', '[]')); +SELECT tr, isempty(tr), lower(tr), upper(tr) FROM textrange_test; + tr | isempty | lower | upper +-------+---------+-------+------- + (,) | f | | + [a,) | f | a | + (,q) | f | | q + [b,g) | f | b | g + empty | t | | + [d,d] | f | d | d +(6 rows) + +SELECT tr, lower_inc(tr), lower_inf(tr), upper_inc(tr), upper_inf(tr) FROM textrange_test; + tr | lower_inc | lower_inf | upper_inc | upper_inf 
+-------+-----------+-----------+-----------+----------- + (,) | f | t | f | t + [a,) | t | f | f | t + (,q) | f | t | f | f + [b,g) | t | f | f | f + empty | f | f | f | f + [d,d] | t | f | t | f +(6 rows) + +SELECT * FROM textrange_test WHERE range_contains(tr, textrange('f', 'fx')); + tr +------- + (,) + [a,) + (,q) + [b,g) +(4 rows) + +SELECT * FROM textrange_test WHERE tr @> textrange('a', 'z'); + tr +------ + (,) + [a,) +(2 rows) + +SELECT * FROM textrange_test WHERE range_contained_by(textrange('0','9'), tr); + tr +------ + (,) + (,q) +(2 rows) + +SELECT * FROM textrange_test WHERE 'e'::text <@ tr; + tr +------- + (,) + [a,) + (,q) + [b,g) +(4 rows) + +select * from textrange_test where tr = 'empty'; + tr +------- + empty +(1 row) + +select * from textrange_test where tr = '("b","g")'; + tr +---- +(0 rows) + +select * from textrange_test where tr = '["b","g")'; + tr +------- + [b,g) +(1 row) + +select * from textrange_test where tr < 'empty'; + tr +---- +(0 rows) + +-- test canonical form for int4range +select int4range(1, 10, '[]'); + int4range +----------- + [1,11) +(1 row) + +select int4range(1, 10, '[)'); + int4range +----------- + [1,10) +(1 row) + +select int4range(1, 10, '(]'); + int4range +----------- + [2,11) +(1 row) + +select int4range(1, 10, '()'); + int4range +----------- + [2,10) +(1 row) + +select int4range(1, 2, '()'); + int4range +----------- + empty +(1 row) + +-- test canonical form for daterange +select daterange('2000-01-10'::date, '2000-01-20'::date, '[]'); + daterange +------------------------- + [01-10-2000,01-21-2000) +(1 row) + +select daterange('2000-01-10'::date, '2000-01-20'::date, '[)'); + daterange +------------------------- + [01-10-2000,01-20-2000) +(1 row) + +select daterange('2000-01-10'::date, '2000-01-20'::date, '(]'); + daterange +------------------------- + [01-11-2000,01-21-2000) +(1 row) + +select daterange('2000-01-10'::date, '2000-01-20'::date, '()'); + daterange +------------------------- + [01-11-2000,01-20-2000) +(1 row) + +select daterange('2000-01-10'::date, '2000-01-11'::date, '()'); + daterange +----------- + empty +(1 row) + +select daterange('2000-01-10'::date, '2000-01-11'::date, '(]'); + daterange +------------------------- + [01-11-2000,01-12-2000) +(1 row) + +select daterange('-infinity'::date, '2000-01-01'::date, '()'); + daterange +------------------------ + (-infinity,01-01-2000) +(1 row) + +select daterange('-infinity'::date, '2000-01-01'::date, '[)'); + daterange +------------------------ + [-infinity,01-01-2000) +(1 row) + +select daterange('2000-01-01'::date, 'infinity'::date, '[)'); + daterange +----------------------- + [01-01-2000,infinity) +(1 row) + +select daterange('2000-01-01'::date, 'infinity'::date, '[]'); + daterange +----------------------- + [01-01-2000,infinity] +(1 row) + +-- test GiST index that's been built incrementally +create table test_range_gist(ir int4range); +create index test_range_gist_idx on test_range_gist using gist (ir); +insert into test_range_gist select int4range(g, g+10) from generate_series(1,2000) g; +insert into test_range_gist select 'empty'::int4range from generate_series(1,500) g; +insert into test_range_gist select int4range(g, g+10000) from generate_series(1,1000) g; +insert into test_range_gist select 'empty'::int4range from generate_series(1,500) g; +insert into test_range_gist select int4range(NULL,g*10,'(]') from generate_series(1,100) g; +insert into test_range_gist select int4range(g*10,NULL,'(]') from generate_series(1,100) g; +insert into test_range_gist select 
int4range(g, g+10) from generate_series(1,2000) g; +-- test statistics and selectivity estimation as well +-- +-- We don't check the accuracy of selectivity estimation, but at least check +-- it doesn't fall. +analyze test_range_gist; +-- first, verify non-indexed results +SET enable_seqscan = t; +SET enable_indexscan = f; +SET enable_bitmapscan = f; +select count(*) from test_range_gist where ir @> 'empty'::int4range; + count +------- + 6200 +(1 row) + +select count(*) from test_range_gist where ir = int4range(10,20); + count +------- + 2 +(1 row) + +select count(*) from test_range_gist where ir @> 10; + count +------- + 130 +(1 row) + +select count(*) from test_range_gist where ir @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_range_gist where ir && int4range(10,20); + count +------- + 158 +(1 row) + +select count(*) from test_range_gist where ir <@ int4range(10,50); + count +------- + 1062 +(1 row) + +select count(*) from test_range_gist where ir << int4range(100,500); + count +------- + 189 +(1 row) + +select count(*) from test_range_gist where ir >> int4range(100,500); + count +------- + 3554 +(1 row) + +select count(*) from test_range_gist where ir &< int4range(100,500); + count +------- + 1029 +(1 row) + +select count(*) from test_range_gist where ir &> int4range(100,500); + count +------- + 4794 +(1 row) + +select count(*) from test_range_gist where ir -|- int4range(100,500); + count +------- + 5 +(1 row) + +select count(*) from test_range_gist where ir @> '{}'::int4multirange; + count +------- + 6200 +(1 row) + +select count(*) from test_range_gist where ir @> int4multirange(int4range(10,20), int4range(30,40)); + count +------- + 107 +(1 row) + +select count(*) from test_range_gist where ir && '{(10,20),(30,40),(50,60)}'::int4multirange; + count +------- + 271 +(1 row) + +select count(*) from test_range_gist where ir <@ '{(10,30),(40,60),(70,90)}'::int4multirange; + count +------- + 1060 +(1 row) + +select count(*) from test_range_gist where ir << int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 189 +(1 row) + +select count(*) from test_range_gist where ir >> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 3554 +(1 row) + +select count(*) from test_range_gist where ir &< int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 1029 +(1 row) + +select count(*) from test_range_gist where ir &> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 4794 +(1 row) + +select count(*) from test_range_gist where ir -|- int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 5 +(1 row) + +-- now check same queries using index +SET enable_seqscan = f; +SET enable_indexscan = t; +SET enable_bitmapscan = f; +select count(*) from test_range_gist where ir @> 'empty'::int4range; + count +------- + 6200 +(1 row) + +select count(*) from test_range_gist where ir = int4range(10,20); + count +------- + 2 +(1 row) + +select count(*) from test_range_gist where ir @> 10; + count +------- + 130 +(1 row) + +select count(*) from test_range_gist where ir @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_range_gist where ir && int4range(10,20); + count +------- + 158 +(1 row) + +select count(*) from test_range_gist where ir <@ int4range(10,50); + count +------- + 1062 +(1 row) + +select count(*) from test_range_gist where ir << int4range(100,500); + count +------- + 189 +(1 row) + +select count(*) from test_range_gist where ir >> 
int4range(100,500); + count +------- + 3554 +(1 row) + +select count(*) from test_range_gist where ir &< int4range(100,500); + count +------- + 1029 +(1 row) + +select count(*) from test_range_gist where ir &> int4range(100,500); + count +------- + 4794 +(1 row) + +select count(*) from test_range_gist where ir -|- int4range(100,500); + count +------- + 5 +(1 row) + +select count(*) from test_range_gist where ir @> '{}'::int4multirange; + count +------- + 6200 +(1 row) + +select count(*) from test_range_gist where ir @> int4multirange(int4range(10,20), int4range(30,40)); + count +------- + 107 +(1 row) + +select count(*) from test_range_gist where ir && '{(10,20),(30,40),(50,60)}'::int4multirange; + count +------- + 271 +(1 row) + +select count(*) from test_range_gist where ir <@ '{(10,30),(40,60),(70,90)}'::int4multirange; + count +------- + 1060 +(1 row) + +select count(*) from test_range_gist where ir << int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 189 +(1 row) + +select count(*) from test_range_gist where ir >> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 3554 +(1 row) + +select count(*) from test_range_gist where ir &< int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 1029 +(1 row) + +select count(*) from test_range_gist where ir &> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 4794 +(1 row) + +select count(*) from test_range_gist where ir -|- int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 5 +(1 row) + +-- now check same queries using a bulk-loaded index +drop index test_range_gist_idx; +create index test_range_gist_idx on test_range_gist using gist (ir); +select count(*) from test_range_gist where ir @> 'empty'::int4range; + count +------- + 6200 +(1 row) + +select count(*) from test_range_gist where ir = int4range(10,20); + count +------- + 2 +(1 row) + +select count(*) from test_range_gist where ir @> 10; + count +------- + 130 +(1 row) + +select count(*) from test_range_gist where ir @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_range_gist where ir && int4range(10,20); + count +------- + 158 +(1 row) + +select count(*) from test_range_gist where ir <@ int4range(10,50); + count +------- + 1062 +(1 row) + +select count(*) from test_range_gist where ir << int4range(100,500); + count +------- + 189 +(1 row) + +select count(*) from test_range_gist where ir >> int4range(100,500); + count +------- + 3554 +(1 row) + +select count(*) from test_range_gist where ir &< int4range(100,500); + count +------- + 1029 +(1 row) + +select count(*) from test_range_gist where ir &> int4range(100,500); + count +------- + 4794 +(1 row) + +select count(*) from test_range_gist where ir -|- int4range(100,500); + count +------- + 5 +(1 row) + +select count(*) from test_range_gist where ir @> '{}'::int4multirange; + count +------- + 6200 +(1 row) + +select count(*) from test_range_gist where ir @> int4multirange(int4range(10,20), int4range(30,40)); + count +------- + 107 +(1 row) + +select count(*) from test_range_gist where ir && '{(10,20),(30,40),(50,60)}'::int4multirange; + count +------- + 271 +(1 row) + +select count(*) from test_range_gist where ir <@ '{(10,30),(40,60),(70,90)}'::int4multirange; + count +------- + 1060 +(1 row) + +select count(*) from test_range_gist where ir << int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 189 +(1 row) + +select count(*) from test_range_gist where ir >> 
int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 3554 +(1 row) + +select count(*) from test_range_gist where ir &< int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 1029 +(1 row) + +select count(*) from test_range_gist where ir &> int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 4794 +(1 row) + +select count(*) from test_range_gist where ir -|- int4multirange(int4range(100,200), int4range(400,500)); + count +------- + 5 +(1 row) + +-- test SP-GiST index that's been built incrementally +create table test_range_spgist(ir int4range); +create index test_range_spgist_idx on test_range_spgist using spgist (ir); +insert into test_range_spgist select int4range(g, g+10) from generate_series(1,2000) g; +insert into test_range_spgist select 'empty'::int4range from generate_series(1,500) g; +insert into test_range_spgist select int4range(g, g+10000) from generate_series(1,1000) g; +insert into test_range_spgist select 'empty'::int4range from generate_series(1,500) g; +insert into test_range_spgist select int4range(NULL,g*10,'(]') from generate_series(1,100) g; +insert into test_range_spgist select int4range(g*10,NULL,'(]') from generate_series(1,100) g; +insert into test_range_spgist select int4range(g, g+10) from generate_series(1,2000) g; +-- first, verify non-indexed results +SET enable_seqscan = t; +SET enable_indexscan = f; +SET enable_bitmapscan = f; +select count(*) from test_range_spgist where ir @> 'empty'::int4range; + count +------- + 6200 +(1 row) + +select count(*) from test_range_spgist where ir = int4range(10,20); + count +------- + 2 +(1 row) + +select count(*) from test_range_spgist where ir @> 10; + count +------- + 130 +(1 row) + +select count(*) from test_range_spgist where ir @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_range_spgist where ir && int4range(10,20); + count +------- + 158 +(1 row) + +select count(*) from test_range_spgist where ir <@ int4range(10,50); + count +------- + 1062 +(1 row) + +select count(*) from test_range_spgist where ir << int4range(100,500); + count +------- + 189 +(1 row) + +select count(*) from test_range_spgist where ir >> int4range(100,500); + count +------- + 3554 +(1 row) + +select count(*) from test_range_spgist where ir &< int4range(100,500); + count +------- + 1029 +(1 row) + +select count(*) from test_range_spgist where ir &> int4range(100,500); + count +------- + 4794 +(1 row) + +select count(*) from test_range_spgist where ir -|- int4range(100,500); + count +------- + 5 +(1 row) + +-- now check same queries using index +SET enable_seqscan = f; +SET enable_indexscan = t; +SET enable_bitmapscan = f; +select count(*) from test_range_spgist where ir @> 'empty'::int4range; + count +------- + 6200 +(1 row) + +select count(*) from test_range_spgist where ir = int4range(10,20); + count +------- + 2 +(1 row) + +select count(*) from test_range_spgist where ir @> 10; + count +------- + 130 +(1 row) + +select count(*) from test_range_spgist where ir @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_range_spgist where ir && int4range(10,20); + count +------- + 158 +(1 row) + +select count(*) from test_range_spgist where ir <@ int4range(10,50); + count +------- + 1062 +(1 row) + +select count(*) from test_range_spgist where ir << int4range(100,500); + count +------- + 189 +(1 row) + +select count(*) from test_range_spgist where ir >> int4range(100,500); + count +------- + 3554 +(1 row) + +select count(*) from 
test_range_spgist where ir &< int4range(100,500); + count +------- + 1029 +(1 row) + +select count(*) from test_range_spgist where ir &> int4range(100,500); + count +------- + 4794 +(1 row) + +select count(*) from test_range_spgist where ir -|- int4range(100,500); + count +------- + 5 +(1 row) + +-- now check same queries using a bulk-loaded index +drop index test_range_spgist_idx; +create index test_range_spgist_idx on test_range_spgist using spgist (ir); +select count(*) from test_range_spgist where ir @> 'empty'::int4range; + count +------- + 6200 +(1 row) + +select count(*) from test_range_spgist where ir = int4range(10,20); + count +------- + 2 +(1 row) + +select count(*) from test_range_spgist where ir @> 10; + count +------- + 130 +(1 row) + +select count(*) from test_range_spgist where ir @> int4range(10,20); + count +------- + 111 +(1 row) + +select count(*) from test_range_spgist where ir && int4range(10,20); + count +------- + 158 +(1 row) + +select count(*) from test_range_spgist where ir <@ int4range(10,50); + count +------- + 1062 +(1 row) + +select count(*) from test_range_spgist where ir << int4range(100,500); + count +------- + 189 +(1 row) + +select count(*) from test_range_spgist where ir >> int4range(100,500); + count +------- + 3554 +(1 row) + +select count(*) from test_range_spgist where ir &< int4range(100,500); + count +------- + 1029 +(1 row) + +select count(*) from test_range_spgist where ir &> int4range(100,500); + count +------- + 4794 +(1 row) + +select count(*) from test_range_spgist where ir -|- int4range(100,500); + count +------- + 5 +(1 row) + +-- test index-only scans +explain (costs off) +select ir from test_range_spgist where ir -|- int4range(10,20) order by ir; + QUERY PLAN +------------------------------------------------------------------------ + Sort + Sort Key: ir + -> Index Only Scan using test_range_spgist_idx on test_range_spgist + Index Cond: (ir -|- '[10,20)'::int4range) +(4 rows) + +select ir from test_range_spgist where ir -|- int4range(10,20) order by ir; + ir +------------ + [20,30) + [20,30) + [20,10020) +(3 rows) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +-- test elem <@ range operator +create table test_range_elem(i int4); +create index test_range_elem_idx on test_range_elem (i); +insert into test_range_elem select i from generate_series(1,100) i; +SET enable_seqscan = f; +select count(*) from test_range_elem where i <@ int4range(10,50); + count +------- + 40 +(1 row) + +-- also test spgist index on anyrange expression +create index on test_range_elem using spgist(int4range(i,i+10)); +explain (costs off) +select count(*) from test_range_elem where int4range(i,i+10) <@ int4range(10,30); + QUERY PLAN +------------------------------------------------------------------------- + Aggregate + -> Index Scan using test_range_elem_int4range_idx on test_range_elem + Index Cond: (int4range(i, (i + 10)) <@ '[10,30)'::int4range) +(3 rows) + +select count(*) from test_range_elem where int4range(i,i+10) <@ int4range(10,30); + count +------- + 11 +(1 row) + +RESET enable_seqscan; +drop table test_range_elem; +-- +-- Btree_gist is not included by default, so to test exclusion +-- constraints with range types, use singleton int ranges for the "=" +-- portion of the constraint. 
+-- +create table test_range_excl( + room int4range, + speaker int4range, + during tsrange, + exclude using gist (room with =, during with &&), + exclude using gist (speaker with =, during with &&) +); +insert into test_range_excl + values(int4range(123, 123, '[]'), int4range(1, 1, '[]'), '[2010-01-02 10:00, 2010-01-02 11:00)'); +insert into test_range_excl + values(int4range(123, 123, '[]'), int4range(2, 2, '[]'), '[2010-01-02 11:00, 2010-01-02 12:00)'); +insert into test_range_excl + values(int4range(123, 123, '[]'), int4range(3, 3, '[]'), '[2010-01-02 10:10, 2010-01-02 11:00)'); +ERROR: conflicting key value violates exclusion constraint "test_range_excl_room_during_excl" +DETAIL: Key (room, during)=([123,124), ["Sat Jan 02 10:10:00 2010","Sat Jan 02 11:00:00 2010")) conflicts with existing key (room, during)=([123,124), ["Sat Jan 02 10:00:00 2010","Sat Jan 02 11:00:00 2010")). +insert into test_range_excl + values(int4range(124, 124, '[]'), int4range(3, 3, '[]'), '[2010-01-02 10:10, 2010-01-02 11:10)'); +insert into test_range_excl + values(int4range(125, 125, '[]'), int4range(1, 1, '[]'), '[2010-01-02 10:10, 2010-01-02 11:00)'); +ERROR: conflicting key value violates exclusion constraint "test_range_excl_speaker_during_excl" +DETAIL: Key (speaker, during)=([1,2), ["Sat Jan 02 10:10:00 2010","Sat Jan 02 11:00:00 2010")) conflicts with existing key (speaker, during)=([1,2), ["Sat Jan 02 10:00:00 2010","Sat Jan 02 11:00:00 2010")). +-- test bigint ranges +select int8range(10000000000::int8, 20000000000::int8,'(]'); + int8range +--------------------------- + [10000000001,20000000001) +(1 row) + +-- test tstz ranges +set timezone to '-08'; +select '[2010-01-01 01:00:00 -05, 2010-01-01 02:00:00 -08)'::tstzrange; + tstzrange +----------------------------------------------------------------- + ["Thu Dec 31 22:00:00 2009 -08","Fri Jan 01 02:00:00 2010 -08") +(1 row) + +-- should fail +select '[2010-01-01 01:00:00 -08, 2010-01-01 02:00:00 -05)'::tstzrange; +ERROR: range lower bound must be less than or equal to range upper bound +LINE 1: select '[2010-01-01 01:00:00 -08, 2010-01-01 02:00:00 -05)':... + ^ +set timezone to default; +-- +-- Test user-defined range of floats +-- (type float8range was already made in test_setup.sql) +-- +--should fail +create type bogus_float8range as range (subtype=float8, subtype_diff=float4mi); +ERROR: function float4mi(double precision, double precision) does not exist +select '[123.001, 5.e9)'::float8range @> 888.882::float8; + ?column? +---------- + t +(1 row) + +create table float8range_test(f8r float8range, i int); +insert into float8range_test values(float8range(-100.00007, '1.111113e9'), 42); +select * from float8range_test; + f8r | i +-------------------------+---- + [-100.00007,1111113000) | 42 +(1 row) + +drop table float8range_test; +-- +-- Test range types over domains +-- +create domain mydomain as int4; +create type mydomainrange as range(subtype=mydomain); +select '[4,50)'::mydomainrange @> 7::mydomain; + ?column? +---------- + t +(1 row) + +drop domain mydomain; -- fail +ERROR: cannot drop type mydomain because other objects depend on it +DETAIL: type mydomainrange depends on type mydomain +HINT: Use DROP ... CASCADE to drop the dependent objects too. +drop domain mydomain cascade; +NOTICE: drop cascades to type mydomainrange +-- +-- Test domains over range types +-- +create domain restrictedrange as int4range check (upper(value) < 10); +select '[4,5)'::restrictedrange @> 7; + ?column? 
+---------- + f +(1 row) + +select '[4,50)'::restrictedrange @> 7; -- should fail +ERROR: value for domain restrictedrange violates check constraint "restrictedrange_check" +drop domain restrictedrange; +-- +-- Test multiple range types over the same subtype +-- +create type textrange1 as range(subtype=text, collation="C"); +create type textrange2 as range(subtype=text, collation="C"); +select textrange1('a','Z') @> 'b'::text; +ERROR: range lower bound must be less than or equal to range upper bound +select textrange2('a','z') @> 'b'::text; + ?column? +---------- + t +(1 row) + +drop type textrange1; +drop type textrange2; +-- +-- Test polymorphic type system +-- +create function anyarray_anyrange_func(a anyarray, r anyrange) + returns anyelement as 'select $1[1] + lower($2);' language sql; +select anyarray_anyrange_func(ARRAY[1,2], int4range(10,20)); + anyarray_anyrange_func +------------------------ + 11 +(1 row) + +-- should fail +select anyarray_anyrange_func(ARRAY[1,2], numrange(10,20)); +ERROR: function anyarray_anyrange_func(integer[], numrange) does not exist +LINE 1: select anyarray_anyrange_func(ARRAY[1,2], numrange(10,20)); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anyarray_anyrange_func(anyarray, anyrange); +-- should fail +create function bogus_func(anyelement) + returns anyrange as 'select int4range(1,10)' language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. +-- should fail +create function bogus_func(int) + returns anyrange as 'select int4range(1,10)' language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. +create function range_add_bounds(anyrange) + returns anyelement as 'select lower($1) + upper($1)' language sql; +select range_add_bounds(int4range(1, 17)); + range_add_bounds +------------------ + 18 +(1 row) + +select range_add_bounds(numrange(1.0001, 123.123)); + range_add_bounds +------------------ + 124.1231 +(1 row) + +create function rangetypes_sql(q anyrange, b anyarray, out c anyelement) + as $$ select upper($1) + $2[1] $$ + language sql; +select rangetypes_sql(int4range(1,10), ARRAY[2,20]); + rangetypes_sql +---------------- + 12 +(1 row) + +select rangetypes_sql(numrange(1,10), ARRAY[2,20]); -- match failure +ERROR: function rangetypes_sql(numrange, integer[]) does not exist +LINE 1: select rangetypes_sql(numrange(1,10), ARRAY[2,20]); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +create function anycompatiblearray_anycompatiblerange_func(a anycompatiblearray, r anycompatiblerange) + returns anycompatible as 'select $1[1] + lower($2);' language sql; +select anycompatiblearray_anycompatiblerange_func(ARRAY[1,2], int4range(10,20)); + anycompatiblearray_anycompatiblerange_func +-------------------------------------------- + 11 +(1 row) + +select anycompatiblearray_anycompatiblerange_func(ARRAY[1,2], numrange(10,20)); + anycompatiblearray_anycompatiblerange_func +-------------------------------------------- + 11 +(1 row) + +-- should fail +select anycompatiblearray_anycompatiblerange_func(ARRAY[1.1,2], int4range(10,20)); +ERROR: function anycompatiblearray_anycompatiblerange_func(numeric[], int4range) does not exist +LINE 1: select anycompatiblearray_anycompatiblerange_func(ARRAY[1.1,... 
+ ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +drop function anycompatiblearray_anycompatiblerange_func(anycompatiblearray, anycompatiblerange); +-- should fail +create function bogus_func(anycompatible) + returns anycompatiblerange as 'select int4range(1,10)' language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anycompatiblerange requires at least one input of type anycompatiblerange or anycompatiblemultirange. +-- +-- Arrays of ranges +-- +select ARRAY[numrange(1.1, 1.2), numrange(12.3, 155.5)]; + array +------------------------------ + {"[1.1,1.2)","[12.3,155.5)"} +(1 row) + +create table i8r_array (f1 int, f2 int8range[]); +insert into i8r_array values (42, array[int8range(1,10), int8range(2,20)]); +select * from i8r_array; + f1 | f2 +----+--------------------- + 42 | {"[1,10)","[2,20)"} +(1 row) + +drop table i8r_array; +-- +-- Ranges of arrays +-- +create type arrayrange as range (subtype=int4[]); +select arrayrange(ARRAY[1,2], ARRAY[2,1]); + arrayrange +------------------- + ["{1,2}","{2,1}") +(1 row) + +select arrayrange(ARRAY[2,1], ARRAY[1,2]); -- fail +ERROR: range lower bound must be less than or equal to range upper bound +select array[1,1] <@ arrayrange(array[1,2], array[2,1]); + ?column? +---------- + f +(1 row) + +select array[1,3] <@ arrayrange(array[1,2], array[2,1]); + ?column? +---------- + t +(1 row) + +-- +-- Ranges of composites +-- +create type two_ints as (a int, b int); +create type two_ints_range as range (subtype = two_ints); +-- with debug_parallel_query on, this exercises tqueue.c's range remapping +select *, row_to_json(upper(t)) as u from + (values (two_ints_range(row(1,2), row(3,4))), + (two_ints_range(row(5,6), row(7,8)))) v(t); + t | u +-------------------+--------------- + ["(1,2)","(3,4)") | {"a":3,"b":4} + ["(5,6)","(7,8)") | {"a":7,"b":8} +(2 rows) + +-- this must be rejected to avoid self-inclusion issues: +alter type two_ints add attribute c two_ints_range; +ERROR: composite type two_ints cannot be made a member of itself +drop type two_ints cascade; +NOTICE: drop cascades to type two_ints_range +-- +-- Check behavior when subtype lacks a hash function +-- +create type cashrange as range (subtype = money); +set enable_sort = off; -- try to make it pick a hash setop implementation +select '(2,5)'::cashrange except select '(5,6)'::cashrange; + cashrange +--------------- + ($2.00,$5.00) +(1 row) + +reset enable_sort; +-- +-- OUT/INOUT/TABLE functions +-- +-- infer anyrange from anyrange +create function outparam_succeed(i anyrange, out r anyrange, out t text) + as $$ select $1, 'foo'::text $$ language sql; +select * from outparam_succeed(int4range(1,2)); + r | t +-------+----- + [1,2) | foo +(1 row) + +create function outparam2_succeed(r anyrange, out lu anyarray, out ul anyarray) + as $$ select array[lower($1), upper($1)], array[upper($1), lower($1)] $$ + language sql; +select * from outparam2_succeed(int4range(1,11)); + lu | ul +--------+-------- + {1,11} | {11,1} +(1 row) + +-- infer anyarray from anyrange +create function outparam_succeed2(i anyrange, out r anyarray, out t text) + as $$ select ARRAY[upper($1)], 'foo'::text $$ language sql; +select * from outparam_succeed2(int4range(int4range(1,2))); + r | t +-----+----- + {2} | foo +(1 row) + +-- infer anyelement from anyrange +create function inoutparam_succeed(out i anyelement, inout r anyrange) + as $$ select upper($1), $1 $$ language sql; +select * from inoutparam_succeed(int4range(1,2)); + i | r 
+---+------- + 2 | [1,2) +(1 row) + +create function table_succeed(r anyrange) + returns table(l anyelement, u anyelement) + as $$ select lower($1), upper($1) $$ + language sql; +select * from table_succeed(int4range(1,11)); + l | u +---+---- + 1 | 11 +(1 row) + +-- should fail +create function outparam_fail(i anyelement, out r anyrange, out t text) + as $$ select '[1,10]', 'foo' $$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. +--should fail +create function inoutparam_fail(inout i anyelement, out r anyrange) + as $$ select $1, '[1,10]' $$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. +--should fail +create function table_fail(i anyelement) returns table(i anyelement, r anyrange) + as $$ select $1, '[1,10]' $$ language sql; +ERROR: cannot determine result data type +DETAIL: A result of type anyrange requires at least one input of type anyrange or anymultirange. diff --git a/src/test/regress/expected/regex.out b/src/test/regress/expected/regex.out new file mode 100644 index 0000000..ae0de73 --- /dev/null +++ b/src/test/regress/expected/regex.out @@ -0,0 +1,645 @@ +-- +-- Regular expression tests +-- +-- Don't want to have to double backslashes in regexes +set standard_conforming_strings = on; +-- Test simple quantified backrefs +select 'bbbbb' ~ '^([bc])\1*$' as t; + t +--- + t +(1 row) + +select 'ccc' ~ '^([bc])\1*$' as t; + t +--- + t +(1 row) + +select 'xxx' ~ '^([bc])\1*$' as f; + f +--- + f +(1 row) + +select 'bbc' ~ '^([bc])\1*$' as f; + f +--- + f +(1 row) + +select 'b' ~ '^([bc])\1*$' as t; + t +--- + t +(1 row) + +-- Test quantified backref within a larger expression +select 'abc abc abc' ~ '^(\w+)( \1)+$' as t; + t +--- + t +(1 row) + +select 'abc abd abc' ~ '^(\w+)( \1)+$' as f; + f +--- + f +(1 row) + +select 'abc abc abd' ~ '^(\w+)( \1)+$' as f; + f +--- + f +(1 row) + +select 'abc abc abc' ~ '^(.+)( \1)+$' as t; + t +--- + t +(1 row) + +select 'abc abd abc' ~ '^(.+)( \1)+$' as f; + f +--- + f +(1 row) + +select 'abc abc abd' ~ '^(.+)( \1)+$' as f; + f +--- + f +(1 row) + +-- Test some cases that crashed in 9.2beta1 due to pmatch[] array overrun +select substring('asd TO foo' from ' TO (([a-z0-9._]+|"([^"]+|"")+")+)'); + substring +----------- + foo +(1 row) + +select substring('a' from '((a))+'); + substring +----------- + a +(1 row) + +select substring('a' from '((a)+)'); + substring +----------- + a +(1 row) + +-- Test regexp_match() +select regexp_match('abc', ''); + regexp_match +-------------- + {""} +(1 row) + +select regexp_match('abc', 'bc'); + regexp_match +-------------- + {bc} +(1 row) + +select regexp_match('abc', 'd') is null; + ?column? +---------- + t +(1 row) + +select regexp_match('abc', '(B)(c)', 'i'); + regexp_match +-------------- + {b,c} +(1 row) + +select regexp_match('abc', 'Bd', 'ig'); -- error +ERROR: regexp_match() does not support the "global" option +HINT: Use the regexp_matches function instead. 
+-- Test lookahead constraints +select regexp_matches('ab', 'a(?=b)b*'); + regexp_matches +---------------- + {ab} +(1 row) + +select regexp_matches('a', 'a(?=b)b*'); + regexp_matches +---------------- +(0 rows) + +select regexp_matches('abc', 'a(?=b)b*(?=c)c*'); + regexp_matches +---------------- + {abc} +(1 row) + +select regexp_matches('ab', 'a(?=b)b*(?=c)c*'); + regexp_matches +---------------- +(0 rows) + +select regexp_matches('ab', 'a(?!b)b*'); + regexp_matches +---------------- +(0 rows) + +select regexp_matches('a', 'a(?!b)b*'); + regexp_matches +---------------- + {a} +(1 row) + +select regexp_matches('b', '(?=b)b'); + regexp_matches +---------------- + {b} +(1 row) + +select regexp_matches('a', '(?=b)b'); + regexp_matches +---------------- +(0 rows) + +-- Test lookbehind constraints +select regexp_matches('abb', '(?<=a)b*'); + regexp_matches +---------------- + {bb} +(1 row) + +select regexp_matches('a', 'a(?<=a)b*'); + regexp_matches +---------------- + {a} +(1 row) + +select regexp_matches('abc', 'a(?<=a)b*(?<=b)c*'); + regexp_matches +---------------- + {abc} +(1 row) + +select regexp_matches('ab', 'a(?<=a)b*(?<=b)c*'); + regexp_matches +---------------- + {ab} +(1 row) + +select regexp_matches('ab', 'a*(?= 'abc'::text) AND (proname < 'abd'::text)) + Filter: (proname ~ '^abc'::text) +(3 rows) + +explain (costs off) select * from pg_proc where proname ~ '^abc$'; + QUERY PLAN +------------------------------------------------------------ + Index Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: (proname = 'abc'::text) + Filter: (proname ~ '^abc$'::text) +(3 rows) + +explain (costs off) select * from pg_proc where proname ~ '^abcd*e'; + QUERY PLAN +---------------------------------------------------------------------- + Index Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: ((proname >= 'abc'::text) AND (proname < 'abd'::text)) + Filter: (proname ~ '^abcd*e'::text) +(3 rows) + +explain (costs off) select * from pg_proc where proname ~ '^abc+d'; + QUERY PLAN +---------------------------------------------------------------------- + Index Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: ((proname >= 'abc'::text) AND (proname < 'abd'::text)) + Filter: (proname ~ '^abc+d'::text) +(3 rows) + +explain (costs off) select * from pg_proc where proname ~ '^(abc)(def)'; + QUERY PLAN +---------------------------------------------------------------------------- + Index Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: ((proname >= 'abcdef'::text) AND (proname < 'abcdeg'::text)) + Filter: (proname ~ '^(abc)(def)'::text) +(3 rows) + +explain (costs off) select * from pg_proc where proname ~ '^(abc)$'; + QUERY PLAN +------------------------------------------------------------ + Index Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: (proname = 'abc'::text) + Filter: (proname ~ '^(abc)$'::text) +(3 rows) + +explain (costs off) select * from pg_proc where proname ~ '^(abc)?d'; + QUERY PLAN +---------------------------------------- + Seq Scan on pg_proc + Filter: (proname ~ '^(abc)?d'::text) +(2 rows) + +explain (costs off) select * from pg_proc where proname ~ '^abcd(x|(?=\w\w)q)'; + QUERY PLAN +------------------------------------------------------------------------ + Index Scan using pg_proc_proname_args_nsp_index on pg_proc + Index Cond: ((proname >= 'abcd'::text) AND (proname < 'abce'::text)) + Filter: (proname ~ '^abcd(x|(?=\w\w)q)'::text) +(3 rows) + +-- Test for infinite loop in pullback() (CVE-2007-4772) 
+select 'a' ~ '($|^)*'; + ?column? +---------- + t +(1 row) + +-- These cases expose a bug in the original fix for CVE-2007-4772 +select 'a' ~ '(^)+^'; + ?column? +---------- + t +(1 row) + +select 'a' ~ '$($$)+'; + ?column? +---------- + t +(1 row) + +-- More cases of infinite loop in pullback(), not fixed by CVE-2007-4772 fix +select 'a' ~ '($^)+'; + ?column? +---------- + f +(1 row) + +select 'a' ~ '(^$)*'; + ?column? +---------- + t +(1 row) + +select 'aa bb cc' ~ '(^(?!aa))+'; + ?column? +---------- + f +(1 row) + +select 'aa x' ~ '(^(?!aa)(?!bb)(?!cc))+'; + ?column? +---------- + f +(1 row) + +select 'bb x' ~ '(^(?!aa)(?!bb)(?!cc))+'; + ?column? +---------- + f +(1 row) + +select 'cc x' ~ '(^(?!aa)(?!bb)(?!cc))+'; + ?column? +---------- + f +(1 row) + +select 'dd x' ~ '(^(?!aa)(?!bb)(?!cc))+'; + ?column? +---------- + t +(1 row) + +-- Test for infinite loop in fixempties() (Tcl bugs 3604074, 3606683) +select 'a' ~ '((((((a)*)*)*)*)*)*'; + ?column? +---------- + t +(1 row) + +select 'a' ~ '((((((a+|)+|)+|)+|)+|)+|)'; + ?column? +---------- + t +(1 row) + +-- These cases used to give too-many-states failures +select 'x' ~ 'abcd(\m)+xyz'; + ?column? +---------- + f +(1 row) + +select 'a' ~ '^abcd*(((((^(a c(e?d)a+|)+|)+|)+|)+|a)+|)'; + ?column? +---------- + f +(1 row) + +select 'x' ~ 'a^(^)bcd*xy(((((($a+|)+|)+|)+$|)+|)+|)^$'; + ?column? +---------- + f +(1 row) + +select 'x' ~ 'xyz(\Y\Y)+'; + ?column? +---------- + f +(1 row) + +select 'x' ~ 'x|(?:\M)+'; + ?column? +---------- + t +(1 row) + +-- This generates O(N) states but O(N^2) arcs, so it causes problems +-- if arc count is not constrained +select 'x' ~ repeat('x*y*z*', 1000); +ERROR: invalid regular expression: regular expression is too complex +-- Test backref in combination with non-greedy quantifier +-- https://core.tcl.tk/tcl/tktview/6585b21ca8fa6f3678d442b97241fdd43dba2ec0 +select 'Programmer' ~ '(\w).*?\1' as t; + t +--- + t +(1 row) + +select regexp_matches('Programmer', '(\w)(.*?\1)', 'g'); + regexp_matches +---------------- + {r,ogr} + {m,m} +(2 rows) + +-- Test for proper matching of non-greedy iteration (bug #11478) +select regexp_matches('foo/bar/baz', + '^([^/]+?)(?:/([^/]+?))(?:/([^/]+?))?$', ''); + regexp_matches +---------------- + {foo,bar,baz} +(1 row) + +-- Test that greediness can be overridden by outer quantifier +select regexp_matches('llmmmfff', '^(l*)(.*)(f*)$'); + regexp_matches +---------------- + {ll,mmmfff,""} +(1 row) + +select regexp_matches('llmmmfff', '^(l*){1,1}(.*)(f*)$'); + regexp_matches +---------------- + {ll,mmmfff,""} +(1 row) + +select regexp_matches('llmmmfff', '^(l*){1,1}?(.*)(f*)$'); + regexp_matches +------------------ + {"",llmmmfff,""} +(1 row) + +select regexp_matches('llmmmfff', '^(l*){1,1}?(.*){1,1}?(f*)$'); + regexp_matches +---------------- + {"",llmmm,fff} +(1 row) + +select regexp_matches('llmmmfff', '^(l*?)(.*)(f*)$'); + regexp_matches +------------------ + {"",llmmmfff,""} +(1 row) + +select regexp_matches('llmmmfff', '^(l*?){1,1}(.*)(f*)$'); + regexp_matches +---------------- + {ll,mmmfff,""} +(1 row) + +select regexp_matches('llmmmfff', '^(l*?){1,1}?(.*)(f*)$'); + regexp_matches +------------------ + {"",llmmmfff,""} +(1 row) + +select regexp_matches('llmmmfff', '^(l*?){1,1}?(.*){1,1}?(f*)$'); + regexp_matches +---------------- + {"",llmmm,fff} +(1 row) + +-- Test for infinite loop in cfindloop with zero-length possible match +-- but no actual match (can only happen in the presence of backrefs) +select 'a' ~ '$()|^\1'; + ?column? 
+---------- + f +(1 row) + +select 'a' ~ '.. ()|\1'; + ?column? +---------- + f +(1 row) + +select 'a' ~ '()*\1'; + ?column? +---------- + t +(1 row) + +select 'a' ~ '()+\1'; + ?column? +---------- + t +(1 row) + +-- Test incorrect removal of capture groups within {0} +select 'xxx' ~ '(.){0}(\1)' as f; + f +--- + f +(1 row) + +select 'xxx' ~ '((.)){0}(\2)' as f; + f +--- + f +(1 row) + +select 'xyz' ~ '((.)){0}(\2){0}' as t; + t +--- + t +(1 row) + +-- Test ancient oversight in when to apply zaptreesubs +select 'abcdef' ~ '^(.)\1|\1.' as f; + f +--- + f +(1 row) + +select 'abadef' ~ '^((.)\2|..)\2' as f; + f +--- + f +(1 row) + +-- Add coverage for some cases in checkmatchall +select regexp_match('xy', '.|...'); + regexp_match +-------------- + {x} +(1 row) + +select regexp_match('xyz', '.|...'); + regexp_match +-------------- + {xyz} +(1 row) + +select regexp_match('xy', '.*'); + regexp_match +-------------- + {xy} +(1 row) + +select regexp_match('fooba', '(?:..)*'); + regexp_match +-------------- + {foob} +(1 row) + +select regexp_match('xyz', repeat('.', 260)); + regexp_match +-------------- + +(1 row) + +select regexp_match('foo', '(?:.|){99}'); + regexp_match +-------------- + {foo} +(1 row) + +-- Error conditions +select 'xyz' ~ 'x(\w)(?=\1)'; -- no backrefs in LACONs +ERROR: invalid regular expression: invalid backreference number +select 'xyz' ~ 'x(\w)(?=(\1))'; +ERROR: invalid regular expression: invalid backreference number +select 'a' ~ '\x7fffffff'; -- invalid chr code +ERROR: invalid regular expression: invalid escape \ sequence diff --git a/src/test/regress/expected/regproc.out b/src/test/regress/expected/regproc.out new file mode 100644 index 0000000..a942085 --- /dev/null +++ b/src/test/regress/expected/regproc.out @@ -0,0 +1,546 @@ +-- +-- regproc +-- +/* If objects exist, return oids */ +CREATE ROLE regress_regrole_test; +-- without schemaname +SELECT regoper('||/'); + regoper +--------- + ||/ +(1 row) + +SELECT regoperator('+(int4,int4)'); + regoperator +-------------------- + +(integer,integer) +(1 row) + +SELECT regproc('now'); + regproc +--------- + now +(1 row) + +SELECT regprocedure('abs(numeric)'); + regprocedure +-------------- + abs(numeric) +(1 row) + +SELECT regclass('pg_class'); + regclass +---------- + pg_class +(1 row) + +SELECT regtype('int4'); + regtype +--------- + integer +(1 row) + +SELECT regcollation('"POSIX"'); + regcollation +-------------- + "POSIX" +(1 row) + +SELECT to_regoper('||/'); + to_regoper +------------ + ||/ +(1 row) + +SELECT to_regoperator('+(int4,int4)'); + to_regoperator +-------------------- + +(integer,integer) +(1 row) + +SELECT to_regproc('now'); + to_regproc +------------ + now +(1 row) + +SELECT to_regprocedure('abs(numeric)'); + to_regprocedure +----------------- + abs(numeric) +(1 row) + +SELECT to_regclass('pg_class'); + to_regclass +------------- + pg_class +(1 row) + +SELECT to_regtype('int4'); + to_regtype +------------ + integer +(1 row) + +SELECT to_regcollation('"POSIX"'); + to_regcollation +----------------- + "POSIX" +(1 row) + +-- with schemaname +SELECT regoper('pg_catalog.||/'); + regoper +--------- + ||/ +(1 row) + +SELECT regoperator('pg_catalog.+(int4,int4)'); + regoperator +-------------------- + +(integer,integer) +(1 row) + +SELECT regproc('pg_catalog.now'); + regproc +--------- + now +(1 row) + +SELECT regprocedure('pg_catalog.abs(numeric)'); + regprocedure +-------------- + abs(numeric) +(1 row) + +SELECT regclass('pg_catalog.pg_class'); + regclass +---------- + pg_class +(1 row) + +SELECT 
regtype('pg_catalog.int4'); + regtype +--------- + integer +(1 row) + +SELECT regcollation('pg_catalog."POSIX"'); + regcollation +-------------- + "POSIX" +(1 row) + +SELECT to_regoper('pg_catalog.||/'); + to_regoper +------------ + ||/ +(1 row) + +SELECT to_regproc('pg_catalog.now'); + to_regproc +------------ + now +(1 row) + +SELECT to_regprocedure('pg_catalog.abs(numeric)'); + to_regprocedure +----------------- + abs(numeric) +(1 row) + +SELECT to_regclass('pg_catalog.pg_class'); + to_regclass +------------- + pg_class +(1 row) + +SELECT to_regtype('pg_catalog.int4'); + to_regtype +------------ + integer +(1 row) + +SELECT to_regcollation('pg_catalog."POSIX"'); + to_regcollation +----------------- + "POSIX" +(1 row) + +-- schemaname not applicable +SELECT regrole('regress_regrole_test'); + regrole +---------------------- + regress_regrole_test +(1 row) + +SELECT regrole('"regress_regrole_test"'); + regrole +---------------------- + regress_regrole_test +(1 row) + +SELECT regnamespace('pg_catalog'); + regnamespace +-------------- + pg_catalog +(1 row) + +SELECT regnamespace('"pg_catalog"'); + regnamespace +-------------- + pg_catalog +(1 row) + +SELECT to_regrole('regress_regrole_test'); + to_regrole +---------------------- + regress_regrole_test +(1 row) + +SELECT to_regrole('"regress_regrole_test"'); + to_regrole +---------------------- + regress_regrole_test +(1 row) + +SELECT to_regnamespace('pg_catalog'); + to_regnamespace +----------------- + pg_catalog +(1 row) + +SELECT to_regnamespace('"pg_catalog"'); + to_regnamespace +----------------- + pg_catalog +(1 row) + +/* If objects don't exist, raise errors. */ +DROP ROLE regress_regrole_test; +-- without schemaname +SELECT regoper('||//'); +ERROR: operator does not exist: ||// +LINE 1: SELECT regoper('||//'); + ^ +SELECT regoperator('++(int4,int4)'); +ERROR: operator does not exist: ++(int4,int4) +LINE 1: SELECT regoperator('++(int4,int4)'); + ^ +SELECT regproc('know'); +ERROR: function "know" does not exist +LINE 1: SELECT regproc('know'); + ^ +SELECT regprocedure('absinthe(numeric)'); +ERROR: function "absinthe(numeric)" does not exist +LINE 1: SELECT regprocedure('absinthe(numeric)'); + ^ +SELECT regclass('pg_classes'); +ERROR: relation "pg_classes" does not exist +LINE 1: SELECT regclass('pg_classes'); + ^ +SELECT regtype('int3'); +ERROR: type "int3" does not exist +LINE 1: SELECT regtype('int3'); + ^ +-- with schemaname +SELECT regoper('ng_catalog.||/'); +ERROR: operator does not exist: ng_catalog.||/ +LINE 1: SELECT regoper('ng_catalog.||/'); + ^ +SELECT regoperator('ng_catalog.+(int4,int4)'); +ERROR: operator does not exist: ng_catalog.+(int4,int4) +LINE 1: SELECT regoperator('ng_catalog.+(int4,int4)'); + ^ +SELECT regproc('ng_catalog.now'); +ERROR: function "ng_catalog.now" does not exist +LINE 1: SELECT regproc('ng_catalog.now'); + ^ +SELECT regprocedure('ng_catalog.abs(numeric)'); +ERROR: function "ng_catalog.abs(numeric)" does not exist +LINE 1: SELECT regprocedure('ng_catalog.abs(numeric)'); + ^ +SELECT regclass('ng_catalog.pg_class'); +ERROR: relation "ng_catalog.pg_class" does not exist +LINE 1: SELECT regclass('ng_catalog.pg_class'); + ^ +SELECT regtype('ng_catalog.int4'); +ERROR: schema "ng_catalog" does not exist +LINE 1: SELECT regtype('ng_catalog.int4'); + ^ +\set VERBOSITY sqlstate \\ -- error message is encoding-dependent +SELECT regcollation('ng_catalog."POSIX"'); +ERROR: 42704 +\set VERBOSITY default +-- schemaname not applicable +SELECT regrole('regress_regrole_test'); +ERROR: role "regress_regrole_test" does 
not exist +LINE 1: SELECT regrole('regress_regrole_test'); + ^ +SELECT regrole('"regress_regrole_test"'); +ERROR: role "regress_regrole_test" does not exist +LINE 1: SELECT regrole('"regress_regrole_test"'); + ^ +SELECT regrole('Nonexistent'); +ERROR: role "nonexistent" does not exist +LINE 1: SELECT regrole('Nonexistent'); + ^ +SELECT regrole('"Nonexistent"'); +ERROR: role "Nonexistent" does not exist +LINE 1: SELECT regrole('"Nonexistent"'); + ^ +SELECT regrole('foo.bar'); +ERROR: invalid name syntax +LINE 1: SELECT regrole('foo.bar'); + ^ +SELECT regnamespace('Nonexistent'); +ERROR: schema "nonexistent" does not exist +LINE 1: SELECT regnamespace('Nonexistent'); + ^ +SELECT regnamespace('"Nonexistent"'); +ERROR: schema "Nonexistent" does not exist +LINE 1: SELECT regnamespace('"Nonexistent"'); + ^ +SELECT regnamespace('foo.bar'); +ERROR: invalid name syntax +LINE 1: SELECT regnamespace('foo.bar'); + ^ +/* If objects don't exist, return NULL with no error. */ +-- without schemaname +SELECT to_regoper('||//'); + to_regoper +------------ + +(1 row) + +SELECT to_regoperator('++(int4,int4)'); + to_regoperator +---------------- + +(1 row) + +SELECT to_regproc('know'); + to_regproc +------------ + +(1 row) + +SELECT to_regprocedure('absinthe(numeric)'); + to_regprocedure +----------------- + +(1 row) + +SELECT to_regclass('pg_classes'); + to_regclass +------------- + +(1 row) + +SELECT to_regtype('int3'); + to_regtype +------------ + +(1 row) + +SELECT to_regcollation('notacollation'); + to_regcollation +----------------- + +(1 row) + +-- with schemaname +SELECT to_regoper('ng_catalog.||/'); + to_regoper +------------ + +(1 row) + +SELECT to_regoperator('ng_catalog.+(int4,int4)'); + to_regoperator +---------------- + +(1 row) + +SELECT to_regproc('ng_catalog.now'); + to_regproc +------------ + +(1 row) + +SELECT to_regprocedure('ng_catalog.abs(numeric)'); + to_regprocedure +----------------- + +(1 row) + +SELECT to_regclass('ng_catalog.pg_class'); + to_regclass +------------- + +(1 row) + +SELECT to_regtype('ng_catalog.int4'); + to_regtype +------------ + +(1 row) + +SELECT to_regcollation('ng_catalog."POSIX"'); + to_regcollation +----------------- + +(1 row) + +-- schemaname not applicable +SELECT to_regrole('regress_regrole_test'); + to_regrole +------------ + +(1 row) + +SELECT to_regrole('"regress_regrole_test"'); + to_regrole +------------ + +(1 row) + +SELECT to_regrole('foo.bar'); + to_regrole +------------ + +(1 row) + +SELECT to_regrole('Nonexistent'); + to_regrole +------------ + +(1 row) + +SELECT to_regrole('"Nonexistent"'); + to_regrole +------------ + +(1 row) + +SELECT to_regrole('foo.bar'); + to_regrole +------------ + +(1 row) + +SELECT to_regnamespace('Nonexistent'); + to_regnamespace +----------------- + +(1 row) + +SELECT to_regnamespace('"Nonexistent"'); + to_regnamespace +----------------- + +(1 row) + +SELECT to_regnamespace('foo.bar'); + to_regnamespace +----------------- + +(1 row) + +-- Test soft-error API +SELECT * FROM pg_input_error_info('ng_catalog.pg_class', 'regclass'); + message | detail | hint | sql_error_code +-----------------------------------------------+--------+------+---------------- + relation "ng_catalog.pg_class" does not exist | | | 42P01 +(1 row) + +SELECT pg_input_is_valid('ng_catalog."POSIX"', 'regcollation'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('no_such_config', 'regconfig'); + message | detail | hint | sql_error_code 
+-----------------------------------------------------------+--------+------+---------------- + text search configuration "no_such_config" does not exist | | | 42704 +(1 row) + +SELECT * FROM pg_input_error_info('no_such_dictionary', 'regdictionary'); + message | detail | hint | sql_error_code +------------------------------------------------------------+--------+------+---------------- + text search dictionary "no_such_dictionary" does not exist | | | 42704 +(1 row) + +SELECT * FROM pg_input_error_info('Nonexistent', 'regnamespace'); + message | detail | hint | sql_error_code +-------------------------------------+--------+------+---------------- + schema "nonexistent" does not exist | | | 3F000 +(1 row) + +SELECT * FROM pg_input_error_info('ng_catalog.||/', 'regoper'); + message | detail | hint | sql_error_code +-----------------------------------------+--------+------+---------------- + operator does not exist: ng_catalog.||/ | | | 42883 +(1 row) + +SELECT * FROM pg_input_error_info('-', 'regoper'); + message | detail | hint | sql_error_code +--------------------------------+--------+------+---------------- + more than one operator named - | | | 42725 +(1 row) + +SELECT * FROM pg_input_error_info('ng_catalog.+(int4,int4)', 'regoperator'); + message | detail | hint | sql_error_code +--------------------------------------------------+--------+------+---------------- + operator does not exist: ng_catalog.+(int4,int4) | | | 42883 +(1 row) + +SELECT * FROM pg_input_error_info('-', 'regoperator'); + message | detail | hint | sql_error_code +-----------------------------+--------+------+---------------- + expected a left parenthesis | | | 22P02 +(1 row) + +SELECT * FROM pg_input_error_info('ng_catalog.now', 'regproc'); + message | detail | hint | sql_error_code +------------------------------------------+--------+------+---------------- + function "ng_catalog.now" does not exist | | | 42883 +(1 row) + +SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric)', 'regprocedure'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + function "ng_catalog.abs(numeric)" does not exist | | | 42883 +(1 row) + +SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric', 'regprocedure'); + message | detail | hint | sql_error_code +------------------------------+--------+------+---------------- + expected a right parenthesis | | | 22P02 +(1 row) + +SELECT * FROM pg_input_error_info('regress_regrole_test', 'regrole'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + role "regress_regrole_test" does not exist | | | 42704 +(1 row) + +SELECT * FROM pg_input_error_info('no_such_type', 'regtype'); + message | detail | hint | sql_error_code +------------------------------------+--------+------+---------------- + type "no_such_type" does not exist | | | 42704 +(1 row) + +-- Some cases that should be soft errors, but are not yet +SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype'); +ERROR: syntax error at or near "type" +LINE 1: SELECT * FROM pg_input_error_info('incorrect type name synta... 
+ ^ +CONTEXT: invalid type name "incorrect type name syntax" +SELECT * FROM pg_input_error_info('numeric(1,2,3)', 'regtype'); -- bogus typmod +ERROR: invalid NUMERIC type modifier +SELECT * FROM pg_input_error_info('way.too.many.names', 'regtype'); +ERROR: improper qualified name (too many dotted names): way.too.many.names +SELECT * FROM pg_input_error_info('no_such_catalog.schema.name', 'regtype'); +ERROR: cross-database references are not implemented: no_such_catalog.schema.name diff --git a/src/test/regress/expected/reindex_catalog.out b/src/test/regress/expected/reindex_catalog.out new file mode 100644 index 0000000..204f056 --- /dev/null +++ b/src/test/regress/expected/reindex_catalog.out @@ -0,0 +1,48 @@ +-- +-- Check that system tables can be reindexed. +-- +-- Note that this test currently is not included in the default +-- schedules, as currently reindexing catalog tables can cause +-- deadlocks: +-- +-- * The lock upgrade between the ShareLock acquired for the reindex +-- and RowExclusiveLock needed for pg_class/pg_index locks can +-- trigger deadlocks. +-- +-- * The uniqueness checks performed when reindexing a unique/primary +-- key index possibly need to wait for the transaction of a +-- about-to-deleted row in pg_class to commit. That can cause +-- deadlocks because, in contrast to user tables, locks on catalog +-- tables are routinely released before commit - therefore the lock +-- held for reindexing doesn't guarantee that no running transaction +-- performed modifications in the table underlying the index. +-- +-- This is particularly problematic as such conflicts can be +-- triggered even when run in isolation, as a previous session's +-- temporary table cleanup might still be running (even when the +-- session ended from a client perspective). +-- Check reindexing of whole tables +REINDEX TABLE pg_class; -- mapped, non-shared, critical +REINDEX TABLE pg_index; -- non-mapped, non-shared, critical +REINDEX TABLE pg_operator; -- non-mapped, non-shared, critical +REINDEX TABLE pg_database; -- mapped, shared, critical +REINDEX TABLE pg_shdescription; -- mapped, shared non-critical +-- Check that individual system indexes can be reindexed. That's a bit +-- different from the entire-table case because reindex_relation +-- treats e.g. pg_class special. +REINDEX INDEX pg_class_oid_index; -- mapped, non-shared, critical +REINDEX INDEX pg_class_relname_nsp_index; -- mapped, non-shared, non-critical +REINDEX INDEX pg_index_indexrelid_index; -- non-mapped, non-shared, critical +REINDEX INDEX pg_index_indrelid_index; -- non-mapped, non-shared, non-critical +REINDEX INDEX pg_database_oid_index; -- mapped, shared, critical +REINDEX INDEX pg_shdescription_o_c_index; -- mapped, shared, non-critical +-- Check the same REINDEX INDEX statements under parallelism. 
+BEGIN; +SET min_parallel_table_scan_size = 0; +REINDEX INDEX pg_class_oid_index; -- mapped, non-shared, critical +REINDEX INDEX pg_class_relname_nsp_index; -- mapped, non-shared, non-critical +REINDEX INDEX pg_index_indexrelid_index; -- non-mapped, non-shared, critical +REINDEX INDEX pg_index_indrelid_index; -- non-mapped, non-shared, non-critical +REINDEX INDEX pg_database_oid_index; -- mapped, shared, critical +REINDEX INDEX pg_shdescription_o_c_index; -- mapped, shared, non-critical +ROLLBACK; diff --git a/src/test/regress/expected/reloptions.out b/src/test/regress/expected/reloptions.out new file mode 100644 index 0000000..b6aef6f --- /dev/null +++ b/src/test/regress/expected/reloptions.out @@ -0,0 +1,226 @@ +-- Simple create +CREATE TABLE reloptions_test(i INT) WITH (FiLLFaCToR=30, + autovacuum_enabled = false, autovacuum_analyze_scale_factor = 0.2); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +------------------------------------------------------------------------------ + {fillfactor=30,autovacuum_enabled=false,autovacuum_analyze_scale_factor=0.2} +(1 row) + +-- Fail min/max values check +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=2); +ERROR: value 2 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=110); +ERROR: value 110 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = -10.0); +ERROR: value -10.0 out of bounds for option "autovacuum_analyze_scale_factor" +DETAIL: Valid values are between "0.000000" and "100.000000". +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor = 110.0); +ERROR: value 110.0 out of bounds for option "autovacuum_analyze_scale_factor" +DETAIL: Valid values are between "0.000000" and "100.000000". +-- Fail when option and namespace do not exist +CREATE TABLE reloptions_test2(i INT) WITH (not_existing_option=2); +ERROR: unrecognized parameter "not_existing_option" +CREATE TABLE reloptions_test2(i INT) WITH (not_existing_namespace.fillfactor=2); +ERROR: unrecognized parameter namespace "not_existing_namespace" +-- Fail while setting improper values +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=-30.1); +ERROR: value -30.1 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". 
+CREATE TABLE reloptions_test2(i INT) WITH (fillfactor='string'); +ERROR: invalid value for integer option "fillfactor": string +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=true); +ERROR: invalid value for integer option "fillfactor": true +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=12); +ERROR: invalid value for boolean option "autovacuum_enabled": 12 +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled=30.5); +ERROR: invalid value for boolean option "autovacuum_enabled": 30.5 +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_enabled='string'); +ERROR: invalid value for boolean option "autovacuum_enabled": string +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor='string'); +ERROR: invalid value for floating point option "autovacuum_analyze_scale_factor": string +CREATE TABLE reloptions_test2(i INT) WITH (autovacuum_analyze_scale_factor=true); +ERROR: invalid value for floating point option "autovacuum_analyze_scale_factor": true +-- Fail if option is specified twice +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor=30, fillfactor=40); +ERROR: parameter "fillfactor" specified more than once +-- Specifying name only for a non-Boolean option should fail +CREATE TABLE reloptions_test2(i INT) WITH (fillfactor); +ERROR: invalid value for integer option "fillfactor": true +-- Simple ALTER TABLE +ALTER TABLE reloptions_test SET (fillfactor=31, + autovacuum_analyze_scale_factor = 0.3); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +------------------------------------------------------------------------------ + {autovacuum_enabled=false,fillfactor=31,autovacuum_analyze_scale_factor=0.3} +(1 row) + +-- Set boolean option to true without specifying value +ALTER TABLE reloptions_test SET (autovacuum_enabled, fillfactor=32); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +----------------------------------------------------------------------------- + {autovacuum_analyze_scale_factor=0.3,autovacuum_enabled=true,fillfactor=32} +(1 row) + +-- Check that RESET works well +ALTER TABLE reloptions_test RESET (fillfactor); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +--------------------------------------------------------------- + {autovacuum_analyze_scale_factor=0.3,autovacuum_enabled=true} +(1 row) + +-- Resetting all values causes the column to become null +ALTER TABLE reloptions_test RESET (autovacuum_enabled, + autovacuum_analyze_scale_factor); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass AND + reloptions IS NULL; + reloptions +------------ + +(1 row) + +-- RESET fails if a value is specified +ALTER TABLE reloptions_test RESET (fillfactor=12); +ERROR: RESET must not include values for parameters +-- Test vacuum_truncate option +DROP TABLE reloptions_test; +CREATE TEMP TABLE reloptions_test(i INT NOT NULL, j text) + WITH (vacuum_truncate=false, + toast.vacuum_truncate=false, + autovacuum_enabled=false); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +-------------------------------------------------- + {vacuum_truncate=false,autovacuum_enabled=false} +(1 row) + +INSERT INTO reloptions_test VALUES (1, NULL), (NULL, NULL); +ERROR: null value in column "i" of relation "reloptions_test" violates not-null constraint +DETAIL: Failing row contains (null, null). +-- Do an aggressive vacuum to prevent page-skipping. 
+VACUUM (FREEZE, DISABLE_PAGE_SKIPPING) reloptions_test; +SELECT pg_relation_size('reloptions_test') > 0; + ?column? +---------- + t +(1 row) + +SELECT reloptions FROM pg_class WHERE oid = + (SELECT reltoastrelid FROM pg_class + WHERE oid = 'reloptions_test'::regclass); + reloptions +------------------------- + {vacuum_truncate=false} +(1 row) + +ALTER TABLE reloptions_test RESET (vacuum_truncate); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +---------------------------- + {autovacuum_enabled=false} +(1 row) + +INSERT INTO reloptions_test VALUES (1, NULL), (NULL, NULL); +ERROR: null value in column "i" of relation "reloptions_test" violates not-null constraint +DETAIL: Failing row contains (null, null). +-- Do an aggressive vacuum to prevent page-skipping. +VACUUM (FREEZE, DISABLE_PAGE_SKIPPING) reloptions_test; +SELECT pg_relation_size('reloptions_test') = 0; + ?column? +---------- + t +(1 row) + +-- Test toast.* options +DROP TABLE reloptions_test; +CREATE TABLE reloptions_test (s VARCHAR) + WITH (toast.autovacuum_vacuum_cost_delay = 23); +SELECT reltoastrelid as toast_oid + FROM pg_class WHERE oid = 'reloptions_test'::regclass \gset +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + reloptions +----------------------------------- + {autovacuum_vacuum_cost_delay=23} +(1 row) + +ALTER TABLE reloptions_test SET (toast.autovacuum_vacuum_cost_delay = 24); +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + reloptions +----------------------------------- + {autovacuum_vacuum_cost_delay=24} +(1 row) + +ALTER TABLE reloptions_test RESET (toast.autovacuum_vacuum_cost_delay); +SELECT reloptions FROM pg_class WHERE oid = :toast_oid; + reloptions +------------ + +(1 row) + +-- Fail on non-existent options in toast namespace +CREATE TABLE reloptions_test2 (i int) WITH (toast.not_existing_option = 42); +ERROR: unrecognized parameter "not_existing_option" +-- Mix TOAST & heap +DROP TABLE reloptions_test; +CREATE TABLE reloptions_test (s VARCHAR) WITH + (toast.autovacuum_vacuum_cost_delay = 23, + autovacuum_vacuum_cost_delay = 24, fillfactor = 40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test'::regclass; + reloptions +------------------------------------------------- + {autovacuum_vacuum_cost_delay=24,fillfactor=40} +(1 row) + +SELECT reloptions FROM pg_class WHERE oid = ( + SELECT reltoastrelid FROM pg_class WHERE oid = 'reloptions_test'::regclass); + reloptions +----------------------------------- + {autovacuum_vacuum_cost_delay=23} +(1 row) + +-- +-- CREATE INDEX, ALTER INDEX for btrees +-- +CREATE INDEX reloptions_test_idx ON reloptions_test (s) WITH (fillfactor=30); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; + reloptions +----------------- + {fillfactor=30} +(1 row) + +-- Fail when option and namespace do not exist +CREATE INDEX reloptions_test_idx ON reloptions_test (s) + WITH (not_existing_option=2); +ERROR: unrecognized parameter "not_existing_option" +CREATE INDEX reloptions_test_idx ON reloptions_test (s) + WITH (not_existing_ns.fillfactor=2); +ERROR: unrecognized parameter namespace "not_existing_ns" +-- Check allowed ranges +CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=1); +ERROR: value 1 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". 
+CREATE INDEX reloptions_test_idx2 ON reloptions_test (s) WITH (fillfactor=130); +ERROR: value 130 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +-- Check ALTER +ALTER INDEX reloptions_test_idx SET (fillfactor=40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx'::regclass; + reloptions +----------------- + {fillfactor=40} +(1 row) + +-- Check ALTER on empty reloption list +CREATE INDEX reloptions_test_idx3 ON reloptions_test (s); +ALTER INDEX reloptions_test_idx3 SET (fillfactor=40); +SELECT reloptions FROM pg_class WHERE oid = 'reloptions_test_idx3'::regclass; + reloptions +----------------- + {fillfactor=40} +(1 row) + diff --git a/src/test/regress/expected/replica_identity.out b/src/test/regress/expected/replica_identity.out new file mode 100644 index 0000000..7d798ef --- /dev/null +++ b/src/test/regress/expected/replica_identity.out @@ -0,0 +1,270 @@ +CREATE TABLE test_replica_identity ( + id serial primary key, + keya text not null, + keyb text not null, + nonkey text, + CONSTRAINT test_replica_identity_unique_defer UNIQUE (keya, keyb) DEFERRABLE, + CONSTRAINT test_replica_identity_unique_nondefer UNIQUE (keya, keyb) +) ; +CREATE TABLE test_replica_identity_othertable (id serial primary key); +CREATE INDEX test_replica_identity_keyab ON test_replica_identity (keya, keyb); +CREATE UNIQUE INDEX test_replica_identity_keyab_key ON test_replica_identity (keya, keyb); +CREATE UNIQUE INDEX test_replica_identity_nonkey ON test_replica_identity (keya, nonkey); +CREATE INDEX test_replica_identity_hash ON test_replica_identity USING hash (nonkey); +CREATE UNIQUE INDEX test_replica_identity_expr ON test_replica_identity (keya, keyb, (3)); +CREATE UNIQUE INDEX test_replica_identity_partial ON test_replica_identity (keya, keyb) WHERE keyb != '3'; +-- default is 'd'/DEFAULT for user created tables +SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; + relreplident +-------------- + d +(1 row) + +-- but 'none' for system tables +SELECT relreplident FROM pg_class WHERE oid = 'pg_class'::regclass; + relreplident +-------------- + n +(1 row) + +SELECT relreplident FROM pg_class WHERE oid = 'pg_constraint'::regclass; + relreplident +-------------- + n +(1 row) + +---- +-- Make sure we detect ineligible indexes +---- +-- fail, not unique +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab; +ERROR: cannot use non-unique index "test_replica_identity_keyab" as replica identity +-- fail, not a candidate key, nullable column +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_nonkey; +ERROR: index "test_replica_identity_nonkey" cannot be used as replica identity because column "nonkey" is nullable +-- fail, hash indexes cannot do uniqueness +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_hash; +ERROR: cannot use non-unique index "test_replica_identity_hash" as replica identity +-- fail, expression index +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_expr; +ERROR: cannot use expression index "test_replica_identity_expr" as replica identity +-- fail, partial index +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_partial; +ERROR: cannot use partial index "test_replica_identity_partial" as replica identity +-- fail, not our index +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_othertable_pkey; +ERROR: 
"test_replica_identity_othertable_pkey" is not an index for table "test_replica_identity" +-- fail, deferrable +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_defer; +ERROR: cannot use non-immediate index "test_replica_identity_unique_defer" as replica identity +SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; + relreplident +-------------- + d +(1 row) + +---- +-- Make sure index cases succeed +---- +-- succeed, primary key +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_pkey; +SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; + relreplident +-------------- + i +(1 row) + +\d test_replica_identity + Table "public.test_replica_identity" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------------------------------------------------- + id | integer | | not null | nextval('test_replica_identity_id_seq'::regclass) + keya | text | | not null | + keyb | text | | not null | + nonkey | text | | | +Indexes: + "test_replica_identity_pkey" PRIMARY KEY, btree (id) REPLICA IDENTITY + "test_replica_identity_expr" UNIQUE, btree (keya, keyb, (3)) + "test_replica_identity_hash" hash (nonkey) + "test_replica_identity_keyab" btree (keya, keyb) + "test_replica_identity_keyab_key" UNIQUE, btree (keya, keyb) + "test_replica_identity_nonkey" UNIQUE, btree (keya, nonkey) + "test_replica_identity_partial" UNIQUE, btree (keya, keyb) WHERE keyb <> '3'::text + "test_replica_identity_unique_defer" UNIQUE CONSTRAINT, btree (keya, keyb) DEFERRABLE + "test_replica_identity_unique_nondefer" UNIQUE CONSTRAINT, btree (keya, keyb) + +-- succeed, nondeferrable unique constraint over nonnullable cols +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer; +-- succeed unique index over nonnullable cols +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab_key; +ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab_key; +SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; + relreplident +-------------- + i +(1 row) + +\d test_replica_identity + Table "public.test_replica_identity" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------------------------------------------------- + id | integer | | not null | nextval('test_replica_identity_id_seq'::regclass) + keya | text | | not null | + keyb | text | | not null | + nonkey | text | | | +Indexes: + "test_replica_identity_pkey" PRIMARY KEY, btree (id) + "test_replica_identity_expr" UNIQUE, btree (keya, keyb, (3)) + "test_replica_identity_hash" hash (nonkey) + "test_replica_identity_keyab" btree (keya, keyb) + "test_replica_identity_keyab_key" UNIQUE, btree (keya, keyb) REPLICA IDENTITY + "test_replica_identity_nonkey" UNIQUE, btree (keya, nonkey) + "test_replica_identity_partial" UNIQUE, btree (keya, keyb) WHERE keyb <> '3'::text + "test_replica_identity_unique_defer" UNIQUE CONSTRAINT, btree (keya, keyb) DEFERRABLE + "test_replica_identity_unique_nondefer" UNIQUE CONSTRAINT, btree (keya, keyb) + +SELECT count(*) FROM pg_index WHERE indrelid = 'test_replica_identity'::regclass AND indisreplident; + count +------- + 1 +(1 row) + +---- +-- Make sure non index cases work +---- +ALTER TABLE test_replica_identity REPLICA IDENTITY DEFAULT; +SELECT relreplident FROM pg_class WHERE oid = 
'test_replica_identity'::regclass; + relreplident +-------------- + d +(1 row) + +SELECT count(*) FROM pg_index WHERE indrelid = 'test_replica_identity'::regclass AND indisreplident; + count +------- + 0 +(1 row) + +ALTER TABLE test_replica_identity REPLICA IDENTITY FULL; +SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; + relreplident +-------------- + f +(1 row) + +\d+ test_replica_identity + Table "public.test_replica_identity" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------------------------------------------------+----------+--------------+------------- + id | integer | | not null | nextval('test_replica_identity_id_seq'::regclass) | plain | | + keya | text | | not null | | extended | | + keyb | text | | not null | | extended | | + nonkey | text | | | | extended | | +Indexes: + "test_replica_identity_pkey" PRIMARY KEY, btree (id) + "test_replica_identity_expr" UNIQUE, btree (keya, keyb, (3)) + "test_replica_identity_hash" hash (nonkey) + "test_replica_identity_keyab" btree (keya, keyb) + "test_replica_identity_keyab_key" UNIQUE, btree (keya, keyb) + "test_replica_identity_nonkey" UNIQUE, btree (keya, nonkey) + "test_replica_identity_partial" UNIQUE, btree (keya, keyb) WHERE keyb <> '3'::text + "test_replica_identity_unique_defer" UNIQUE CONSTRAINT, btree (keya, keyb) DEFERRABLE + "test_replica_identity_unique_nondefer" UNIQUE CONSTRAINT, btree (keya, keyb) +Replica Identity: FULL + +ALTER TABLE test_replica_identity REPLICA IDENTITY NOTHING; +SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; + relreplident +-------------- + n +(1 row) + +--- +-- Test that ALTER TABLE rewrite preserves nondefault replica identity +--- +-- constraint variant +CREATE TABLE test_replica_identity2 (id int UNIQUE NOT NULL); +ALTER TABLE test_replica_identity2 REPLICA IDENTITY USING INDEX test_replica_identity2_id_key; +\d test_replica_identity2 + Table "public.test_replica_identity2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | not null | +Indexes: + "test_replica_identity2_id_key" UNIQUE CONSTRAINT, btree (id) REPLICA IDENTITY + +ALTER TABLE test_replica_identity2 ALTER COLUMN id TYPE bigint; +\d test_replica_identity2 + Table "public.test_replica_identity2" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+--------- + id | bigint | | not null | +Indexes: + "test_replica_identity2_id_key" UNIQUE CONSTRAINT, btree (id) REPLICA IDENTITY + +-- straight index variant +CREATE TABLE test_replica_identity3 (id int NOT NULL); +CREATE UNIQUE INDEX test_replica_identity3_id_key ON test_replica_identity3 (id); +ALTER TABLE test_replica_identity3 REPLICA IDENTITY USING INDEX test_replica_identity3_id_key; +\d test_replica_identity3 + Table "public.test_replica_identity3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | not null | +Indexes: + "test_replica_identity3_id_key" UNIQUE, btree (id) REPLICA IDENTITY + +ALTER TABLE test_replica_identity3 ALTER COLUMN id TYPE bigint; +\d test_replica_identity3 + Table "public.test_replica_identity3" + Column | Type | Collation | Nullable | Default +--------+--------+-----------+----------+--------- + id | bigint | | not null | +Indexes: + "test_replica_identity3_id_key" UNIQUE, btree (id) REPLICA IDENTITY + +-- ALTER TABLE DROP NOT NULL is 
not allowed for columns part of an index +-- used as replica identity. +ALTER TABLE test_replica_identity3 ALTER COLUMN id DROP NOT NULL; +ERROR: column "id" is in index used as replica identity +-- +-- Test that replica identity can be set on an index that's not yet valid. +-- (This matches the way pg_dump will try to dump a partitioned table.) +-- +CREATE TABLE test_replica_identity4(id integer NOT NULL) PARTITION BY LIST (id); +CREATE TABLE test_replica_identity4_1(id integer NOT NULL); +ALTER TABLE ONLY test_replica_identity4 + ATTACH PARTITION test_replica_identity4_1 FOR VALUES IN (1); +ALTER TABLE ONLY test_replica_identity4 + ADD CONSTRAINT test_replica_identity4_pkey PRIMARY KEY (id); +ALTER TABLE ONLY test_replica_identity4 + REPLICA IDENTITY USING INDEX test_replica_identity4_pkey; +ALTER TABLE ONLY test_replica_identity4_1 + ADD CONSTRAINT test_replica_identity4_1_pkey PRIMARY KEY (id); +\d+ test_replica_identity4 + Partitioned table "public.test_replica_identity4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | +Partition key: LIST (id) +Indexes: + "test_replica_identity4_pkey" PRIMARY KEY, btree (id) INVALID REPLICA IDENTITY +Partitions: test_replica_identity4_1 FOR VALUES IN (1) + +ALTER INDEX test_replica_identity4_pkey + ATTACH PARTITION test_replica_identity4_1_pkey; +\d+ test_replica_identity4 + Partitioned table "public.test_replica_identity4" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + id | integer | | not null | | plain | | +Partition key: LIST (id) +Indexes: + "test_replica_identity4_pkey" PRIMARY KEY, btree (id) REPLICA IDENTITY +Partitions: test_replica_identity4_1 FOR VALUES IN (1) + +DROP TABLE test_replica_identity; +DROP TABLE test_replica_identity2; +DROP TABLE test_replica_identity3; +DROP TABLE test_replica_identity4; +DROP TABLE test_replica_identity_othertable; diff --git a/src/test/regress/expected/returning.out b/src/test/regress/expected/returning.out new file mode 100644 index 0000000..cb51bb8 --- /dev/null +++ b/src/test/regress/expected/returning.out @@ -0,0 +1,357 @@ +-- +-- Test INSERT/UPDATE/DELETE RETURNING +-- +-- Simple cases +CREATE TEMP TABLE foo (f1 serial, f2 text, f3 int default 42); +INSERT INTO foo (f2,f3) + VALUES ('test', DEFAULT), ('More', 11), (upper('more'), 7+9) + RETURNING *, f1+f3 AS sum; + f1 | f2 | f3 | sum +----+------+----+----- + 1 | test | 42 | 43 + 2 | More | 11 | 13 + 3 | MORE | 16 | 19 +(3 rows) + +SELECT * FROM foo; + f1 | f2 | f3 +----+------+---- + 1 | test | 42 + 2 | More | 11 + 3 | MORE | 16 +(3 rows) + +UPDATE foo SET f2 = lower(f2), f3 = DEFAULT RETURNING foo.*, f1+f3 AS sum13; + f1 | f2 | f3 | sum13 +----+------+----+------- + 1 | test | 42 | 43 + 2 | more | 42 | 44 + 3 | more | 42 | 45 +(3 rows) + +SELECT * FROM foo; + f1 | f2 | f3 +----+------+---- + 1 | test | 42 + 2 | more | 42 + 3 | more | 42 +(3 rows) + +DELETE FROM foo WHERE f1 > 2 RETURNING f3, f2, f1, least(f1,f3); + f3 | f2 | f1 | least +----+------+----+------- + 42 | more | 3 | 3 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 +----+------+---- + 1 | test | 42 + 2 | more | 42 +(2 rows) + +-- Subplans and initplans in the RETURNING list +INSERT INTO foo SELECT f1+10, f2, f3+99 FROM foo + RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan, 
+ EXISTS(SELECT * FROM int4_tbl) AS initplan; + f1 | f2 | f3 | subplan | initplan +----+------+-----+---------+---------- + 11 | test | 141 | t | t + 12 | more | 141 | f | t +(2 rows) + +UPDATE foo SET f3 = f3 * 2 + WHERE f1 > 10 + RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan, + EXISTS(SELECT * FROM int4_tbl) AS initplan; + f1 | f2 | f3 | subplan | initplan +----+------+-----+---------+---------- + 11 | test | 282 | t | t + 12 | more | 282 | f | t +(2 rows) + +DELETE FROM foo + WHERE f1 > 10 + RETURNING *, f1+112 IN (SELECT q1 FROM int8_tbl) AS subplan, + EXISTS(SELECT * FROM int4_tbl) AS initplan; + f1 | f2 | f3 | subplan | initplan +----+------+-----+---------+---------- + 11 | test | 282 | t | t + 12 | more | 282 | f | t +(2 rows) + +-- Joins +UPDATE foo SET f3 = f3*2 + FROM int4_tbl i + WHERE foo.f1 + 123455 = i.f1 + RETURNING foo.*, i.f1 as "i.f1"; + f1 | f2 | f3 | i.f1 +----+------+----+-------- + 1 | test | 84 | 123456 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 +----+------+---- + 2 | more | 42 + 1 | test | 84 +(2 rows) + +DELETE FROM foo + USING int4_tbl i + WHERE foo.f1 + 123455 = i.f1 + RETURNING foo.*, i.f1 as "i.f1"; + f1 | f2 | f3 | i.f1 +----+------+----+-------- + 1 | test | 84 | 123456 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 +----+------+---- + 2 | more | 42 +(1 row) + +-- Check inheritance cases +CREATE TEMP TABLE foochild (fc int) INHERITS (foo); +INSERT INTO foochild VALUES(123,'child',999,-123); +ALTER TABLE foo ADD COLUMN f4 int8 DEFAULT 99; +SELECT * FROM foo; + f1 | f2 | f3 | f4 +-----+-------+-----+---- + 2 | more | 42 | 99 + 123 | child | 999 | 99 +(2 rows) + +SELECT * FROM foochild; + f1 | f2 | f3 | fc | f4 +-----+-------+-----+------+---- + 123 | child | 999 | -123 | 99 +(1 row) + +UPDATE foo SET f4 = f4 + f3 WHERE f4 = 99 RETURNING *; + f1 | f2 | f3 | f4 +-----+-------+-----+------ + 2 | more | 42 | 141 + 123 | child | 999 | 1098 +(2 rows) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +-----+-------+-----+------ + 2 | more | 42 | 141 + 123 | child | 999 | 1098 +(2 rows) + +SELECT * FROM foochild; + f1 | f2 | f3 | fc | f4 +-----+-------+-----+------+------ + 123 | child | 999 | -123 | 1098 +(1 row) + +UPDATE foo SET f3 = f3*2 + FROM int8_tbl i + WHERE foo.f1 = i.q2 + RETURNING *; + f1 | f2 | f3 | f4 | q1 | q2 +-----+-------+------+------+------------------+----- + 123 | child | 1998 | 1098 | 4567890123456789 | 123 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +-----+-------+------+------ + 2 | more | 42 | 141 + 123 | child | 1998 | 1098 +(2 rows) + +SELECT * FROM foochild; + f1 | f2 | f3 | fc | f4 +-----+-------+------+------+------ + 123 | child | 1998 | -123 | 1098 +(1 row) + +DELETE FROM foo + USING int8_tbl i + WHERE foo.f1 = i.q2 + RETURNING *; + f1 | f2 | f3 | f4 | q1 | q2 +-----+-------+------+------+------------------+----- + 123 | child | 1998 | 1098 | 4567890123456789 | 123 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +----+------+----+----- + 2 | more | 42 | 141 +(1 row) + +SELECT * FROM foochild; + f1 | f2 | f3 | fc | f4 +----+----+----+----+---- +(0 rows) + +DROP TABLE foochild; +-- Rules and views +CREATE TEMP VIEW voo AS SELECT f1, f2 FROM foo; +CREATE RULE voo_i AS ON INSERT TO voo DO INSTEAD + INSERT INTO foo VALUES(new.*, 57); +INSERT INTO voo VALUES(11,'zit'); +-- fails: +INSERT INTO voo VALUES(12,'zoo') RETURNING *, f1*2; +ERROR: cannot perform INSERT RETURNING on relation "voo" +HINT: You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause. 
+-- fails, incompatible list: +CREATE OR REPLACE RULE voo_i AS ON INSERT TO voo DO INSTEAD + INSERT INTO foo VALUES(new.*, 57) RETURNING *; +ERROR: RETURNING list has too many entries +CREATE OR REPLACE RULE voo_i AS ON INSERT TO voo DO INSTEAD + INSERT INTO foo VALUES(new.*, 57) RETURNING f1, f2; +-- should still work +INSERT INTO voo VALUES(13,'zit2'); +-- works now +INSERT INTO voo VALUES(14,'zoo2') RETURNING *; + f1 | f2 +----+------ + 14 | zoo2 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +----+------+----+----- + 2 | more | 42 | 141 + 11 | zit | 57 | 99 + 13 | zit2 | 57 | 99 + 14 | zoo2 | 57 | 99 +(4 rows) + +SELECT * FROM voo; + f1 | f2 +----+------ + 2 | more + 11 | zit + 13 | zit2 + 14 | zoo2 +(4 rows) + +CREATE OR REPLACE RULE voo_u AS ON UPDATE TO voo DO INSTEAD + UPDATE foo SET f1 = new.f1, f2 = new.f2 WHERE f1 = old.f1 + RETURNING f1, f2; +update voo set f1 = f1 + 1 where f2 = 'zoo2'; +update voo set f1 = f1 + 1 where f2 = 'zoo2' RETURNING *, f1*2; + f1 | f2 | ?column? +----+------+---------- + 16 | zoo2 | 32 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +----+------+----+----- + 2 | more | 42 | 141 + 11 | zit | 57 | 99 + 13 | zit2 | 57 | 99 + 16 | zoo2 | 57 | 99 +(4 rows) + +SELECT * FROM voo; + f1 | f2 +----+------ + 2 | more + 11 | zit + 13 | zit2 + 16 | zoo2 +(4 rows) + +CREATE OR REPLACE RULE voo_d AS ON DELETE TO voo DO INSTEAD + DELETE FROM foo WHERE f1 = old.f1 + RETURNING f1, f2; +DELETE FROM foo WHERE f1 = 13; +DELETE FROM foo WHERE f2 = 'zit' RETURNING *; + f1 | f2 | f3 | f4 +----+-----+----+---- + 11 | zit | 57 | 99 +(1 row) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +----+------+----+----- + 2 | more | 42 | 141 + 16 | zoo2 | 57 | 99 +(2 rows) + +SELECT * FROM voo; + f1 | f2 +----+------ + 2 | more + 16 | zoo2 +(2 rows) + +-- Try a join case +CREATE TEMP TABLE joinme (f2j text, other int); +INSERT INTO joinme VALUES('more', 12345); +INSERT INTO joinme VALUES('zoo2', 54321); +INSERT INTO joinme VALUES('other', 0); +CREATE TEMP VIEW joinview AS + SELECT foo.*, other FROM foo JOIN joinme ON (f2 = f2j); +SELECT * FROM joinview; + f1 | f2 | f3 | f4 | other +----+------+----+-----+------- + 2 | more | 42 | 141 | 12345 + 16 | zoo2 | 57 | 99 | 54321 +(2 rows) + +CREATE RULE joinview_u AS ON UPDATE TO joinview DO INSTEAD + UPDATE foo SET f1 = new.f1, f3 = new.f3 + FROM joinme WHERE f2 = f2j AND f2 = old.f2 + RETURNING foo.*, other; +UPDATE joinview SET f1 = f1 + 1 WHERE f3 = 57 RETURNING *, other + 1; + f1 | f2 | f3 | f4 | other | ?column? +----+------+----+----+-------+---------- + 17 | zoo2 | 57 | 99 | 54321 | 54322 +(1 row) + +SELECT * FROM joinview; + f1 | f2 | f3 | f4 | other +----+------+----+-----+------- + 2 | more | 42 | 141 | 12345 + 17 | zoo2 | 57 | 99 | 54321 +(2 rows) + +SELECT * FROM foo; + f1 | f2 | f3 | f4 +----+------+----+----- + 2 | more | 42 | 141 + 17 | zoo2 | 57 | 99 +(2 rows) + +SELECT * FROM voo; + f1 | f2 +----+------ + 2 | more + 17 | zoo2 +(2 rows) + +-- Check aliased target relation +INSERT INTO foo AS bar DEFAULT VALUES RETURNING *; -- ok + f1 | f2 | f3 | f4 +----+----+----+---- + 4 | | 42 | 99 +(1 row) + +INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; -- fails, wrong name +ERROR: invalid reference to FROM-clause entry for table "foo" +LINE 1: INSERT INTO foo AS bar DEFAULT VALUES RETURNING foo.*; + ^ +HINT: Perhaps you meant to reference the table alias "bar". 
+INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.*; -- ok + f1 | f2 | f3 | f4 +----+----+----+---- + 5 | | 42 | 99 +(1 row) + +INSERT INTO foo AS bar DEFAULT VALUES RETURNING bar.f3; -- ok + f3 +---- + 42 +(1 row) + diff --git a/src/test/regress/expected/roleattributes.out b/src/test/regress/expected/roleattributes.out new file mode 100644 index 0000000..5e6969b --- /dev/null +++ b/src/test/regress/expected/roleattributes.out @@ -0,0 +1,249 @@ +-- default for superuser is false +CREATE ROLE regress_test_def_superuser; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_superuser'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_superuser | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_superuser WITH SUPERUSER; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_superuser | t | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_superuser WITH NOSUPERUSER; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_superuser | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_superuser WITH SUPERUSER; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_superuser'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_superuser | t | t | f | f | f | f | f | -1 | | +(1 row) + +-- default for inherit is true +CREATE ROLE regress_test_def_inherit; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_inherit'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | 
rolvaliduntil +--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_inherit | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_inherit WITH NOINHERIT; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_inherit | f | f | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_inherit WITH INHERIT; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_inherit | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_inherit WITH NOINHERIT; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_inherit'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_inherit | f | f | f | f | f | f | f | -1 | | +(1 row) + +-- default for create role is false +CREATE ROLE regress_test_def_createrole; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createrole'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_createrole | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_createrole WITH CREATEROLE; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_createrole | f | t | t | f | f | f | 
f | -1 | | +(1 row) + +ALTER ROLE regress_test_createrole WITH NOCREATEROLE; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_createrole | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_createrole WITH CREATEROLE; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createrole'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_createrole | f | t | t | f | f | f | f | -1 | | +(1 row) + +-- default for create database is false +CREATE ROLE regress_test_def_createdb; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_createdb'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +---------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_createdb | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_createdb WITH CREATEDB; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_createdb | f | t | f | t | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_createdb WITH NOCREATEDB; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_createdb | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_createdb WITH CREATEDB; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, 
rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_createdb'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +-----------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_createdb | f | t | f | t | f | f | f | -1 | | +(1 row) + +-- default for can login is false for role +CREATE ROLE regress_test_def_role_canlogin; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_role_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_role_canlogin | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_role_canlogin WITH LOGIN; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_role_canlogin WITH NOLOGIN; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_role_canlogin | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_role_canlogin WITH LOGIN; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_role_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_role_canlogin | f | t | f | f | t | f | f | -1 | | +(1 row) + +-- default for can login is true for user +CREATE USER regress_test_def_user_canlogin; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_user_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole 
| rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_user_canlogin | f | t | f | f | t | f | f | -1 | | +(1 row) + +CREATE USER regress_test_user_canlogin WITH NOLOGIN; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER USER regress_test_user_canlogin WITH LOGIN; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_user_canlogin | f | t | f | f | t | f | f | -1 | | +(1 row) + +ALTER USER regress_test_user_canlogin WITH NOLOGIN; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_user_canlogin'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_user_canlogin | f | t | f | f | f | f | f | -1 | | +(1 row) + +-- default for replication is false +CREATE ROLE regress_test_def_replication; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_replication'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_replication | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_replication WITH REPLICATION; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil 
+--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_replication | f | t | f | f | f | t | f | -1 | | +(1 row) + +ALTER ROLE regress_test_replication WITH NOREPLICATION; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_replication | f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_replication WITH REPLICATION; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_replication'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +--------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_replication | f | t | f | f | f | t | f | -1 | | +(1 row) + +-- default for bypassrls is false +CREATE ROLE regress_test_def_bypassrls; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_def_bypassrls'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +----------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_def_bypassrls | f | t | f | f | f | f | f | -1 | | +(1 row) + +CREATE ROLE regress_test_bypassrls WITH BYPASSRLS; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_bypassrls | f | t | f | f | f | f | t | -1 | | +(1 row) + +ALTER ROLE regress_test_bypassrls WITH NOBYPASSRLS; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_bypassrls | 
f | t | f | f | f | f | f | -1 | | +(1 row) + +ALTER ROLE regress_test_bypassrls WITH BYPASSRLS; +SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, rolpassword, rolvaliduntil FROM pg_authid WHERE rolname = 'regress_test_bypassrls'; + rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | rolpassword | rolvaliduntil +------------------------+----------+------------+---------------+-------------+-------------+----------------+--------------+--------------+-------------+--------------- + regress_test_bypassrls | f | t | f | f | f | f | t | -1 | | +(1 row) + +-- clean up roles +DROP ROLE regress_test_def_superuser; +DROP ROLE regress_test_superuser; +DROP ROLE regress_test_def_inherit; +DROP ROLE regress_test_inherit; +DROP ROLE regress_test_def_createrole; +DROP ROLE regress_test_createrole; +DROP ROLE regress_test_def_createdb; +DROP ROLE regress_test_createdb; +DROP ROLE regress_test_def_role_canlogin; +DROP ROLE regress_test_role_canlogin; +DROP USER regress_test_def_user_canlogin; +DROP USER regress_test_user_canlogin; +DROP ROLE regress_test_def_replication; +DROP ROLE regress_test_replication; +DROP ROLE regress_test_def_bypassrls; +DROP ROLE regress_test_bypassrls; diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out new file mode 100644 index 0000000..97ca9bf --- /dev/null +++ b/src/test/regress/expected/rowsecurity.out @@ -0,0 +1,4547 @@ +-- +-- Test of Row-level security feature +-- +-- Clean up in case a prior regression run failed +-- Suppress NOTICE messages when users/groups don't exist +SET client_min_messages TO 'warning'; +DROP USER IF EXISTS regress_rls_alice; +DROP USER IF EXISTS regress_rls_bob; +DROP USER IF EXISTS regress_rls_carol; +DROP USER IF EXISTS regress_rls_dave; +DROP USER IF EXISTS regress_rls_exempt_user; +DROP ROLE IF EXISTS regress_rls_group1; +DROP ROLE IF EXISTS regress_rls_group2; +DROP SCHEMA IF EXISTS regress_rls_schema CASCADE; +RESET client_min_messages; +-- initial setup +CREATE USER regress_rls_alice NOLOGIN; +CREATE USER regress_rls_bob NOLOGIN; +CREATE USER regress_rls_carol NOLOGIN; +CREATE USER regress_rls_dave NOLOGIN; +CREATE USER regress_rls_exempt_user BYPASSRLS NOLOGIN; +CREATE ROLE regress_rls_group1 NOLOGIN; +CREATE ROLE regress_rls_group2 NOLOGIN; +GRANT regress_rls_group1 TO regress_rls_bob; +GRANT regress_rls_group2 TO regress_rls_carol; +CREATE SCHEMA regress_rls_schema; +GRANT ALL ON SCHEMA regress_rls_schema to public; +SET search_path = regress_rls_schema; +-- setup of malicious function +CREATE OR REPLACE FUNCTION f_leak(text) RETURNS bool + COST 0.0000001 LANGUAGE plpgsql + AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END'; +GRANT EXECUTE ON FUNCTION f_leak(text) TO public; +-- BASIC Row-Level Security Scenario +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE uaccount ( + pguser name primary key, + seclv int +); +GRANT SELECT ON uaccount TO public; +INSERT INTO uaccount VALUES + ('regress_rls_alice', 99), + ('regress_rls_bob', 1), + ('regress_rls_carol', 2), + ('regress_rls_dave', 3); +CREATE TABLE category ( + cid int primary key, + cname text +); +GRANT ALL ON category TO public; +INSERT INTO category VALUES + (11, 'novel'), + (22, 'science fiction'), + (33, 'technology'), + (44, 'manga'); +CREATE TABLE document ( + did int primary key, + cid int references category(cid), + dlevel int not null, + dauthor name, + dtitle 
text +); +GRANT ALL ON document TO public; +INSERT INTO document VALUES + ( 1, 11, 1, 'regress_rls_bob', 'my first novel'), + ( 2, 11, 2, 'regress_rls_bob', 'my second novel'), + ( 3, 22, 2, 'regress_rls_bob', 'my science fiction'), + ( 4, 44, 1, 'regress_rls_bob', 'my first manga'), + ( 5, 44, 2, 'regress_rls_bob', 'my second manga'), + ( 6, 22, 1, 'regress_rls_carol', 'great science fiction'), + ( 7, 33, 2, 'regress_rls_carol', 'great technology book'), + ( 8, 44, 1, 'regress_rls_carol', 'great manga'), + ( 9, 22, 1, 'regress_rls_dave', 'awesome science fiction'), + (10, 33, 2, 'regress_rls_dave', 'awesome technology book'); +ALTER TABLE document ENABLE ROW LEVEL SECURITY; +-- user's security level must be higher than or equal to document's +CREATE POLICY p1 ON document AS PERMISSIVE + USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user)); +-- try to create a policy of bogus type +CREATE POLICY p1 ON document AS UGLY + USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user)); +ERROR: unrecognized row security option "ugly" +LINE 1: CREATE POLICY p1 ON document AS UGLY + ^ +HINT: Only PERMISSIVE or RESTRICTIVE policies are supported currently. +-- but Dave isn't allowed to anything at cid 50 or above +-- this is to make sure that we sort the policies by name first +-- when applying WITH CHECK, a later INSERT by Dave should fail due +-- to p1r first +CREATE POLICY p2r ON document AS RESTRICTIVE TO regress_rls_dave + USING (cid <> 44 AND cid < 50); +-- and Dave isn't allowed to see manga documents +CREATE POLICY p1r ON document AS RESTRICTIVE TO regress_rls_dave + USING (cid <> 44); +\dp + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------------------+----------+-------+---------------------------------------------+-------------------+-------------------------------------------- + regress_rls_schema | category | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | + | | | =arwdDxt/regress_rls_alice | | + regress_rls_schema | document | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | p1: + + | | | =arwdDxt/regress_rls_alice | | (u): (dlevel <= ( SELECT uaccount.seclv + + | | | | | FROM uaccount + + | | | | | WHERE (uaccount.pguser = CURRENT_USER)))+ + | | | | | p2r (RESTRICTIVE): + + | | | | | (u): ((cid <> 44) AND (cid < 50)) + + | | | | | to: regress_rls_dave + + | | | | | p1r (RESTRICTIVE): + + | | | | | (u): (cid <> 44) + + | | | | | to: regress_rls_dave + regress_rls_schema | uaccount | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | + | | | =r/regress_rls_alice | | +(3 rows) + +\d document + Table "regress_rls_schema.document" + Column | Type | Collation | Nullable | Default +---------+---------+-----------+----------+--------- + did | integer | | not null | + cid | integer | | | + dlevel | integer | | not null | + dauthor | name | | | + dtitle | text | | | +Indexes: + "document_pkey" PRIMARY KEY, btree (did) +Foreign-key constraints: + "document_cid_fkey" FOREIGN KEY (cid) REFERENCES category(cid) +Policies: + POLICY "p1" + USING ((dlevel <= ( SELECT uaccount.seclv + FROM uaccount + WHERE (uaccount.pguser = CURRENT_USER)))) + POLICY "p1r" AS RESTRICTIVE + TO regress_rls_dave + USING ((cid <> 44)) + POLICY "p2r" AS RESTRICTIVE + TO regress_rls_dave + USING (((cid <> 44) AND (cid < 50))) + +SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename = 'document' ORDER BY policyname; + schemaname | tablename | policyname | permissive | roles | cmd | qual | 
with_check +--------------------+-----------+------------+-------------+--------------------+-----+--------------------------------------------+------------ + regress_rls_schema | document | p1 | PERMISSIVE | {public} | ALL | (dlevel <= ( SELECT uaccount.seclv +| + | | | | | | FROM uaccount +| + | | | | | | WHERE (uaccount.pguser = CURRENT_USER))) | + regress_rls_schema | document | p1r | RESTRICTIVE | {regress_rls_dave} | ALL | (cid <> 44) | + regress_rls_schema | document | p2r | RESTRICTIVE | {regress_rls_dave} | ALL | ((cid <> 44) AND (cid < 50)) | +(3 rows) + +-- viewpoint from regress_rls_bob +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my first manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 4 | 44 | 1 | regress_rls_bob | my first manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction +(5 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my first manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-------------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 44 | 4 | 1 | regress_rls_bob | my first manga | manga + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 44 | 8 | 1 | regress_rls_carol | great manga | manga + 22 | 9 | 1 | regress_rls_dave | awesome science fiction | science fiction +(5 rows) + +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 4 | 44 | 1 | regress_rls_bob | my first manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +-- viewpoint from regress_rls_carol +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science 
fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book +(10 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-------------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 11 | 2 | 2 | regress_rls_bob | my second novel | novel + 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction + 44 | 4 | 1 | regress_rls_bob | my first manga | manga + 44 | 5 | 2 | regress_rls_bob | my second manga | manga + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 33 | 7 | 2 | regress_rls_carol | great technology book | technology + 44 | 8 | 1 | regress_rls_carol | great manga | manga + 22 | 9 | 1 | regress_rls_dave | awesome science fiction | science fiction + 33 | 10 | 2 | regress_rls_dave | awesome technology book | technology +(10 rows) + +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); + QUERY PLAN +---------------------------------------------------- + Seq Scan on document + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); + QUERY PLAN +----------------------------------------------------------- + Hash Join + Hash Cond: (category.cid = document.cid) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on category + -> Hash + -> Seq Scan on document + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(9 rows) + +-- viewpoint from regress_rls_dave +SET SESSION AUTHORIZATION regress_rls_dave; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | 
regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book +(7 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => awesome technology book + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-------------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 11 | 2 | 2 | regress_rls_bob | my second novel | novel + 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 33 | 7 | 2 | regress_rls_carol | great technology book | technology + 22 | 9 | 1 | regress_rls_dave | awesome science fiction | science fiction + 33 | 10 | 2 | regress_rls_dave | awesome technology book | technology +(7 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); + QUERY PLAN +---------------------------------------------------------------------------------------------- + Seq Scan on document + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Hash Join + Hash Cond: (category.cid = document.cid) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on category + -> Hash + -> Seq Scan on document + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) +(9 rows) + +-- 44 would technically fail for both p2r and p1r, but we should get an error +-- back from p1r for this because it sorts first +INSERT INTO document VALUES (100, 44, 1, 'regress_rls_dave', 'testing sorting of policies'); -- fail +ERROR: new row violates row-level security policy "p1r" for table "document" +-- Just to see a p2r error +INSERT INTO document VALUES (100, 55, 1, 'regress_rls_dave', 'testing sorting of policies'); -- fail +ERROR: new row violates row-level security policy "p2r" for table "document" +-- only owner can change policies +ALTER POLICY p1 ON document USING (true); --fail +ERROR: must be owner of table document +DROP POLICY p1 ON document; --fail +ERROR: must be owner of relation document +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY p1 ON document USING (dauthor = current_user); +-- viewpoint from regress_rls_bob again +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+-------------------- + 1 | 11 | 1 | regress_rls_bob | my 
first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga +(5 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my science fiction +NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-----------------+--------------------+----------------- + 11 | 1 | 1 | regress_rls_bob | my first novel | novel + 11 | 2 | 2 | regress_rls_bob | my second novel | novel + 22 | 3 | 2 | regress_rls_bob | my science fiction | science fiction + 44 | 4 | 1 | regress_rls_bob | my first manga | manga + 44 | 5 | 2 | regress_rls_bob | my second manga | manga +(5 rows) + +-- viewpoint from rls_regres_carol again +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------- + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga +(3 rows) + +SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER by did; +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga + cid | did | dlevel | dauthor | dtitle | cname +-----+-----+--------+-------------------+-----------------------+----------------- + 22 | 6 | 1 | regress_rls_carol | great science fiction | science fiction + 33 | 7 | 2 | regress_rls_carol | great technology book | technology + 44 | 8 | 1 | regress_rls_carol | great manga | manga +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); + QUERY PLAN +--------------------------------------------------------- + Seq Scan on document + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); + QUERY PLAN +--------------------------------------------------------------- + Nested Loop + -> Seq Scan on document + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Index Scan using category_pkey on category + Index Cond: (cid = document.cid) +(5 rows) + +-- interaction of FK/PK constraints +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY p2 ON category + USING (CASE WHEN current_user = 'regress_rls_bob' THEN cid IN (11, 33) + WHEN current_user = 'regress_rls_carol' THEN cid IN (22, 44) + ELSE false END); +ALTER TABLE category ENABLE ROW LEVEL SECURITY; +-- cannot delete PK referenced by invisible FK +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; + did | cid | dlevel | dauthor | dtitle | cid | cname +-----+-----+--------+-----------------+--------------------+-----+------------ + 1 | 11 | 1 | regress_rls_bob | my first novel | 11 | novel + 2 | 11 | 2 | regress_rls_bob | my second novel | 11 | novel + 3 | 22 | 2 | regress_rls_bob | my science fiction | | + 4 | 44 | 1 | regress_rls_bob | my first manga | | + 5 | 44 | 2 | regress_rls_bob | my second manga | | + | | | | | 33 | technology +(6 rows) + +DELETE FROM 
category WHERE cid = 33; -- fails with FK violation +ERROR: update or delete on table "category" violates foreign key constraint "document_cid_fkey" on table "document" +DETAIL: Key is still referenced from table "document". +-- can insert FK referencing invisible PK +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; + did | cid | dlevel | dauthor | dtitle | cid | cname +-----+-----+--------+-------------------+-----------------------+-----+----------------- + 6 | 22 | 1 | regress_rls_carol | great science fiction | 22 | science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book | | + 8 | 44 | 1 | regress_rls_carol | great manga | 44 | manga +(3 rows) + +INSERT INTO document VALUES (11, 33, 1, current_user, 'hoge'); +-- UNIQUE or PRIMARY KEY constraint violation DOES reveal presence of row +SET SESSION AUTHORIZATION regress_rls_bob; +INSERT INTO document VALUES (8, 44, 1, 'regress_rls_bob', 'my third manga'); -- Must fail with unique violation, revealing presence of did we can't see +ERROR: duplicate key value violates unique constraint "document_pkey" +SELECT * FROM document WHERE did = 8; -- and confirm we can't see it + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+---------+-------- +(0 rows) + +-- RLS policies are checked before constraints +INSERT INTO document VALUES (8, 44, 1, 'regress_rls_carol', 'my third manga'); -- Should fail with RLS check violation, not duplicate key violation +ERROR: new row violates row-level security policy for table "document" +UPDATE document SET did = 8, dauthor = 'regress_rls_carol' WHERE did = 5; -- Should fail with RLS check violation, not duplicate key violation +ERROR: new row violates row-level security policy for table "document" +-- database superuser does bypass RLS policy when enabled +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- database superuser does bypass RLS policy when disabled +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology 
book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- database non-superuser with bypass privilege can bypass RLS policy when disabled +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- RLS policy does not apply to table owner when RLS enabled. +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- RLS policy does not apply to table owner when RLS disabled. 
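+-- Aside (illustrative only, not executed in this test): the owner bypasses
+-- the policies in both of these cases because row level security is only
+-- ENABLEd, never FORCEd, on these tables.  A minimal sketch of the opposite
+-- behaviour, assuming the same document table:
+--     ALTER TABLE document FORCE ROW LEVEL SECURITY;    -- policies now apply to the owner as well
+--     ALTER TABLE document NO FORCE ROW LEVEL SECURITY; -- restore the default
+-- Superusers and roles with BYPASSRLS continue to bypass policies either way.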
+SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO OFF; +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 22 | 2 | regress_rls_bob | my science fiction + 4 | 44 | 1 | regress_rls_bob | my first manga + 5 | 44 | 2 | regress_rls_bob | my second manga + 6 | 22 | 1 | regress_rls_carol | great science fiction + 7 | 33 | 2 | regress_rls_carol | great technology book + 8 | 44 | 1 | regress_rls_carol | great manga + 9 | 22 | 1 | regress_rls_dave | awesome science fiction + 10 | 33 | 2 | regress_rls_dave | awesome technology book + 11 | 33 | 1 | regress_rls_carol | hoge +(11 rows) + +SELECT * FROM category; + cid | cname +-----+----------------- + 11 | novel + 22 | science fiction + 33 | technology + 44 | manga +(4 rows) + +-- +-- Table inheritance and RLS policy +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +CREATE TABLE t1 (id int not null primary key, a int, junk1 text, b text); +ALTER TABLE t1 DROP COLUMN junk1; -- just a disturbing factor +GRANT ALL ON t1 TO public; +COPY t1 FROM stdin WITH ; +CREATE TABLE t2 (c float) INHERITS (t1); +GRANT ALL ON t2 TO public; +COPY t2 FROM stdin; +CREATE TABLE t3 (id int not null primary key, c text, b text, a int); +ALTER TABLE t3 INHERIT t1; +GRANT ALL ON t3 TO public; +COPY t3(id, a,b,c) FROM stdin; +CREATE POLICY p1 ON t1 FOR ALL TO PUBLIC USING (a % 2 = 0); -- be even number +CREATE POLICY p2 ON t2 FOR ALL TO PUBLIC USING (a % 2 = 1); -- be odd number +ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE t2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM t1; + id | a | b +-----+---+----- + 102 | 2 | bbb + 104 | 4 | dad + 202 | 2 | bcd + 204 | 4 | def + 302 | 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(7 rows) + +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => yyy + id | a | b +-----+---+----- + 102 | 2 | bbb + 104 | 4 | dad + 202 | 2 | bcd + 204 | 4 | def + 302 | 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(7 rows) + +-- reference to system column +SELECT tableoid::regclass, * FROM t1; + tableoid | id | a | b +----------+-----+---+----- + t1 | 102 | 2 | bbb + t1 | 104 | 4 | dad + t2 | 202 | 2 | bcd + t2 | 204 | 4 | def + t3 | 302 | 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT *, t1 FROM t1; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(7 rows) + +-- reference to whole-row reference +SELECT *, t1 FROM t1; + id | a | b | t1 +-----+---+-----+------------- + 102 | 2 | bbb | (102,2,bbb) + 104 | 4 | dad | (104,4,dad) + 202 | 2 | bcd | (202,2,bcd) + 204 | 4 | def | (204,4,def) + 302 | 2 | yyy | (302,2,yyy) +(5 rows) + +EXPLAIN (COSTS 
OFF) SELECT *, t1 FROM t1; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(7 rows) + +-- for share/update lock +SELECT * FROM t1 FOR SHARE; + id | a | b +-----+---+----- + 102 | 2 | bbb + 104 | 4 | dad + 202 | 2 | bcd + 204 | 4 | def + 302 | 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 FOR SHARE; + QUERY PLAN +------------------------------------- + LockRows + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a % 2) = 0) + -> Seq Scan on t2 t1_2 + Filter: ((a % 2) = 0) + -> Seq Scan on t3 t1_3 + Filter: ((a % 2) = 0) +(8 rows) + +SELECT * FROM t1 WHERE f_leak(b) FOR SHARE; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => yyy + id | a | b +-----+---+----- + 102 | 2 | bbb + 104 | 4 | dad + 202 | 2 | bcd + 204 | 4 | def + 302 | 2 | yyy +(5 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b) FOR SHARE; + QUERY PLAN +----------------------------------------------------- + LockRows + -> Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(8 rows) + +-- union all query +SELECT a, b, tableoid::regclass FROM t2 UNION ALL SELECT a, b, tableoid::regclass FROM t3; + a | b | tableoid +---+-----+---------- + 1 | abc | t2 + 3 | cde | t2 + 1 | xxx | t3 + 2 | yyy | t3 + 3 | zzz | t3 +(5 rows) + +EXPLAIN (COSTS OFF) SELECT a, b, tableoid::regclass FROM t2 UNION ALL SELECT a, b, tableoid::regclass FROM t3; + QUERY PLAN +------------------------------- + Append + -> Seq Scan on t2 + Filter: ((a % 2) = 1) + -> Seq Scan on t3 +(4 rows) + +-- superuser is allowed to bypass RLS checks +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => cde +NOTICE: f_leak => def +NOTICE: f_leak => xxx +NOTICE: f_leak => yyy +NOTICE: f_leak => zzz + id | a | b +-----+---+----- + 101 | 1 | aba + 102 | 2 | bbb + 103 | 3 | ccc + 104 | 4 | dad + 201 | 1 | abc + 202 | 2 | bcd + 203 | 3 | cde + 204 | 4 | def + 301 | 1 | xxx + 302 | 2 | yyy + 303 | 3 | zzz +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: f_leak(b) + -> Seq Scan on t2 t1_2 + Filter: f_leak(b) + -> Seq Scan on t3 t1_3 + Filter: f_leak(b) +(7 rows) + +-- non-superuser with bypass privilege can bypass RLS policy when disabled +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => cde +NOTICE: f_leak => def +NOTICE: f_leak => xxx +NOTICE: f_leak => yyy +NOTICE: f_leak => zzz + id | a | b +-----+---+----- + 101 | 1 | aba + 102 | 2 | bbb + 103 | 3 | ccc + 104 | 4 | dad + 201 | 1 | abc + 202 | 2 | bcd + 203 | 3 | cde + 204 | 4 | def + 301 | 1 | xxx + 302 | 2 | yyy + 303 | 3 | zzz +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: f_leak(b) + -> Seq Scan on t2 t1_2 + Filter: f_leak(b) + -> 
Seq Scan on t3 t1_3 + Filter: f_leak(b) +(7 rows) + +-- +-- Partitioned Tables +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE part_document ( + did int, + cid int, + dlevel int not null, + dauthor name, + dtitle text +) PARTITION BY RANGE (cid); +GRANT ALL ON part_document TO public; +-- Create partitions for document categories +CREATE TABLE part_document_fiction PARTITION OF part_document FOR VALUES FROM (11) to (12); +CREATE TABLE part_document_satire PARTITION OF part_document FOR VALUES FROM (55) to (56); +CREATE TABLE part_document_nonfiction PARTITION OF part_document FOR VALUES FROM (99) to (100); +GRANT ALL ON part_document_fiction TO public; +GRANT ALL ON part_document_satire TO public; +GRANT ALL ON part_document_nonfiction TO public; +INSERT INTO part_document VALUES + ( 1, 11, 1, 'regress_rls_bob', 'my first novel'), + ( 2, 11, 2, 'regress_rls_bob', 'my second novel'), + ( 3, 99, 2, 'regress_rls_bob', 'my science textbook'), + ( 4, 55, 1, 'regress_rls_bob', 'my first satire'), + ( 5, 99, 2, 'regress_rls_bob', 'my history book'), + ( 6, 11, 1, 'regress_rls_carol', 'great science fiction'), + ( 7, 99, 2, 'regress_rls_carol', 'great technology book'), + ( 8, 55, 2, 'regress_rls_carol', 'great satire'), + ( 9, 11, 1, 'regress_rls_dave', 'awesome science fiction'), + (10, 99, 2, 'regress_rls_dave', 'awesome technology book'); +ALTER TABLE part_document ENABLE ROW LEVEL SECURITY; +-- Create policy on parent +-- user's security level must be higher than or equal to document's +CREATE POLICY pp1 ON part_document AS PERMISSIVE + USING (dlevel <= (SELECT seclv FROM uaccount WHERE pguser = current_user)); +-- Dave is only allowed to see cid < 55 +CREATE POLICY pp1r ON part_document AS RESTRICTIVE TO regress_rls_dave + USING (cid < 55); +\d+ part_document + Partitioned table "regress_rls_schema.part_document" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +---------+---------+-----------+----------+---------+----------+--------------+------------- + did | integer | | | | plain | | + cid | integer | | | | plain | | + dlevel | integer | | not null | | plain | | + dauthor | name | | | | plain | | + dtitle | text | | | | extended | | +Partition key: RANGE (cid) +Policies: + POLICY "pp1" + USING ((dlevel <= ( SELECT uaccount.seclv + FROM uaccount + WHERE (uaccount.pguser = CURRENT_USER)))) + POLICY "pp1r" AS RESTRICTIVE + TO regress_rls_dave + USING ((cid < 55)) +Partitions: part_document_fiction FOR VALUES FROM (11) TO (12), + part_document_nonfiction FOR VALUES FROM (99) TO (100), + part_document_satire FOR VALUES FROM (55) TO (56) + +SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename like '%part_document%' ORDER BY policyname; + schemaname | tablename | policyname | permissive | roles | cmd | qual | with_check +--------------------+---------------+------------+-------------+--------------------+-----+--------------------------------------------+------------ + regress_rls_schema | part_document | pp1 | PERMISSIVE | {public} | ALL | (dlevel <= ( SELECT uaccount.seclv +| + | | | | | | FROM uaccount +| + | | | | | | WHERE (uaccount.pguser = CURRENT_USER))) | + regress_rls_schema | part_document | pp1r | RESTRICTIVE | {regress_rls_dave} | ALL | (cid < 55) | +(2 rows) + +-- viewpoint from regress_rls_bob +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => great science fiction 
+NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => my first satire + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 4 | 55 | 1 | regress_rls_bob | my first satire + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); + QUERY PLAN +------------------------------------------------------------ + Append + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on part_document_fiction part_document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on part_document_satire part_document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on part_document_nonfiction part_document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(10 rows) + +-- viewpoint from regress_rls_carol +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => my first satire +NOTICE: f_leak => great satire +NOTICE: f_leak => my science textbook +NOTICE: f_leak => my history book +NOTICE: f_leak => great technology book +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book +(10 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); + QUERY PLAN +------------------------------------------------------------ + Append + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on part_document_fiction part_document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on part_document_satire part_document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on part_document_nonfiction part_document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(10 rows) + +-- viewpoint from regress_rls_dave +SET SESSION AUTHORIZATION regress_rls_dave; +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); + QUERY PLAN +-------------------------------------------------------------- + Seq Scan on 
part_document_fiction part_document + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) +(5 rows) + +-- pp1 ERROR +INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_dave', 'testing pp1'); -- fail +ERROR: new row violates row-level security policy for table "part_document" +-- pp1r ERROR +INSERT INTO part_document VALUES (100, 99, 1, 'regress_rls_dave', 'testing pp1r'); -- fail +ERROR: new row violates row-level security policy "pp1r" for table "part_document" +-- Show that RLS policy does not apply for direct inserts to children +-- This should fail with RLS POLICY pp1r violation. +INSERT INTO part_document VALUES (100, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail +ERROR: new row violates row-level security policy "pp1r" for table "part_document" +-- But this should succeed. +INSERT INTO part_document_satire VALUES (100, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- success +-- We still cannot see the row using the parent +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +-- But we can if we look directly +SELECT * FROM part_document_satire WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first satire +NOTICE: f_leak => great satire +NOTICE: f_leak => testing RLS with partitions + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 4 | 55 | 1 | regress_rls_bob | my first satire + 8 | 55 | 2 | regress_rls_carol | great satire + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(3 rows) + +-- Turn on RLS and create policy on child to show RLS is checked before constraints +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER TABLE part_document_satire ENABLE ROW LEVEL SECURITY; +CREATE POLICY pp3 ON part_document_satire AS RESTRICTIVE + USING (cid < 55); +-- This should fail with RLS violation now. 
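+-- Aside (illustrative only, not executed in this test): the partition now has
+-- RLS enabled but only a RESTRICTIVE policy.  With no PERMISSIVE policy
+-- granting access, the built-in default-deny policy rejects every row, which
+-- is why the error below names no policy and the later direct SELECT on the
+-- partition returns nothing.  A hypothetical way to re-open access, leaving
+-- rows subject to pp3 alone:
+--     CREATE POLICY pp_satire_all ON part_document_satire AS PERMISSIVE
+--         USING (true) WITH CHECK (true);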
+SET SESSION AUTHORIZATION regress_rls_dave; +INSERT INTO part_document_satire VALUES (101, 55, 1, 'regress_rls_dave', 'testing RLS with partitions'); -- fail +ERROR: new row violates row-level security policy for table "part_document_satire" +-- And now we cannot see directly into the partition either, due to RLS +SELECT * FROM part_document_satire WHERE f_leak(dtitle) ORDER BY did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+---------+-------- +(0 rows) + +-- The parent looks same as before +-- viewpoint from regress_rls_dave +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 6 | 11 | 1 | regress_rls_carol | great science fiction + 9 | 11 | 1 | regress_rls_dave | awesome science fiction +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); + QUERY PLAN +-------------------------------------------------------------- + Seq Scan on part_document_fiction part_document + Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) +(5 rows) + +-- viewpoint from regress_rls_carol +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => great science fiction +NOTICE: f_leak => awesome science fiction +NOTICE: f_leak => my first satire +NOTICE: f_leak => great satire +NOTICE: f_leak => testing RLS with partitions +NOTICE: f_leak => my science textbook +NOTICE: f_leak => my history book +NOTICE: f_leak => great technology book +NOTICE: f_leak => awesome technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); + QUERY PLAN +------------------------------------------------------------ + Append + InitPlan 1 (returns $0) + -> Index Scan using uaccount_pkey on uaccount + Index Cond: (pguser = CURRENT_USER) + -> Seq Scan on part_document_fiction part_document_1 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on part_document_satire part_document_2 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) + -> Seq Scan on part_document_nonfiction part_document_3 + Filter: ((dlevel <= $0) AND f_leak(dtitle)) +(10 rows) + +-- only owner can change policies +ALTER POLICY pp1 ON part_document USING (true); --fail +ERROR: must be owner of table part_document +DROP POLICY pp1 ON part_document; --fail +ERROR: must be 
owner of relation part_document +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY pp1 ON part_document USING (dauthor = current_user); +-- viewpoint from regress_rls_bob again +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => my first novel +NOTICE: f_leak => my second novel +NOTICE: f_leak => my first satire +NOTICE: f_leak => my science textbook +NOTICE: f_leak => my history book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+--------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book +(5 rows) + +-- viewpoint from rls_regres_carol again +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +NOTICE: f_leak => great science fiction +NOTICE: f_leak => great satire +NOTICE: f_leak => great technology book + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------- + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); + QUERY PLAN +--------------------------------------------------------------- + Append + -> Seq Scan on part_document_fiction part_document_1 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on part_document_satire part_document_2 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) + -> Seq Scan on part_document_nonfiction part_document_3 + Filter: ((dauthor = CURRENT_USER) AND f_leak(dtitle)) +(7 rows) + +-- database superuser does bypass RLS policy when enabled +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +SELECT * FROM part_document ORDER BY did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(11 rows) + +SELECT * FROM part_document_satire ORDER by did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 4 | 55 | 1 | regress_rls_bob | my first satire + 8 | 55 | 2 | regress_rls_carol | great satire + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(3 rows) + +-- database non-superuser with bypass privilege can bypass RLS policy when disabled +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +SELECT * FROM part_document ORDER BY did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science 
textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(11 rows) + +SELECT * FROM part_document_satire ORDER by did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 4 | 55 | 1 | regress_rls_bob | my first satire + 8 | 55 | 2 | regress_rls_carol | great satire + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(3 rows) + +-- RLS policy does not apply to table owner when RLS enabled. +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +SELECT * FROM part_document ORDER by did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 1 | 11 | 1 | regress_rls_bob | my first novel + 2 | 11 | 2 | regress_rls_bob | my second novel + 3 | 99 | 2 | regress_rls_bob | my science textbook + 4 | 55 | 1 | regress_rls_bob | my first satire + 5 | 99 | 2 | regress_rls_bob | my history book + 6 | 11 | 1 | regress_rls_carol | great science fiction + 7 | 99 | 2 | regress_rls_carol | great technology book + 8 | 55 | 2 | regress_rls_carol | great satire + 9 | 11 | 1 | regress_rls_dave | awesome science fiction + 10 | 99 | 2 | regress_rls_dave | awesome technology book + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(11 rows) + +SELECT * FROM part_document_satire ORDER by did; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-------------------+----------------------------- + 4 | 55 | 1 | regress_rls_bob | my first satire + 8 | 55 | 2 | regress_rls_carol | great satire + 100 | 55 | 1 | regress_rls_dave | testing RLS with partitions +(3 rows) + +-- When RLS disabled, other users get ERROR. +SET SESSION AUTHORIZATION regress_rls_dave; +SET row_security TO OFF; +SELECT * FROM part_document ORDER by did; +ERROR: query would be affected by row-level security policy for table "part_document" +SELECT * FROM part_document_satire ORDER by did; +ERROR: query would be affected by row-level security policy for table "part_document_satire" +-- Check behavior with a policy that uses a SubPlan not an InitPlan. +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +CREATE POLICY pp3 ON part_document AS RESTRICTIVE + USING ((SELECT dlevel <= seclv FROM uaccount WHERE pguser = current_user)); +SET SESSION AUTHORIZATION regress_rls_carol; +INSERT INTO part_document VALUES (100, 11, 5, 'regress_rls_carol', 'testing pp3'); -- fail +ERROR: new row violates row-level security policy "pp3" for table "part_document" +----- Dependencies ----- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security TO ON; +CREATE TABLE dependee (x integer, y integer); +CREATE TABLE dependent (x integer, y integer); +CREATE POLICY d1 ON dependent FOR ALL + TO PUBLIC + USING (x = (SELECT d.x FROM dependee d WHERE d.y = y)); +DROP TABLE dependee; -- Should fail without CASCADE due to dependency on row security qual? +ERROR: cannot drop table dependee because other objects depend on it +DETAIL: policy d1 on table dependent depends on table dependee +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
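+-- Aside (illustrative only, not executed in this test): the dependency that
+-- blocks the plain DROP is an ordinary pg_depend entry recorded for the
+-- policy's USING expression.  A hypothetical way to inspect it:
+--     SELECT classid::regclass AS dependent_catalog, objid,
+--            refobjid::regclass AS referenced_table, deptype
+--     FROM pg_depend
+--     WHERE refobjid = 'dependee'::regclass AND classid = 'pg_policy'::regclass;
+-- The CASCADE below removes policy d1 together with the table.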
+DROP TABLE dependee CASCADE; +NOTICE: drop cascades to policy d1 on table dependent +EXPLAIN (COSTS OFF) SELECT * FROM dependent; -- After drop, should be unqualified + QUERY PLAN +----------------------- + Seq Scan on dependent +(1 row) + +----- RECURSION ---- +-- +-- Simple recursion +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE rec1 (x integer, y integer); +CREATE POLICY r1 ON rec1 USING (x = (SELECT r.x FROM rec1 r WHERE y = r.y)); +ALTER TABLE rec1 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, direct recursion +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- Mutual recursion +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE rec2 (a integer, b integer); +ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2 WHERE b = y)); +CREATE POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1 WHERE y = b)); +ALTER TABLE rec2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, mutual recursion +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- Mutual recursion via views +-- +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rec1v AS SELECT * FROM rec1; +CREATE VIEW rec2v AS SELECT * FROM rec2; +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y)); +ALTER POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b)); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, mutual recursion via views +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- Mutual recursion via .s.b views +-- +SET SESSION AUTHORIZATION regress_rls_bob; +DROP VIEW rec1v, rec2v CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to policy r1 on table rec1 +drop cascades to policy r2 on table rec2 +CREATE VIEW rec1v WITH (security_barrier) AS SELECT * FROM rec1; +CREATE VIEW rec2v WITH (security_barrier) AS SELECT * FROM rec2; +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY r1 ON rec1 USING (x = (SELECT a FROM rec2v WHERE b = y)); +CREATE POLICY r2 ON rec2 USING (a = (SELECT x FROM rec1v WHERE y = b)); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rec1; -- fail, mutual recursion via s.b. 
views +ERROR: infinite recursion detected in policy for relation "rec1" +-- +-- recursive RLS and VIEWs in policy +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE s1 (a int, b text); +INSERT INTO s1 (SELECT x, public.fipshash(x::text) FROM generate_series(-10,10) x); +CREATE TABLE s2 (x int, y text); +INSERT INTO s2 (SELECT x, public.fipshash(x::text) FROM generate_series(-6,6) x); +GRANT SELECT ON s1, s2 TO regress_rls_bob; +CREATE POLICY p1 ON s1 USING (a in (select x from s2 where y like '%2f%')); +CREATE POLICY p2 ON s2 USING (x in (select a from s1 where b like '%22%')); +CREATE POLICY p3 ON s1 FOR INSERT WITH CHECK (a = (SELECT a FROM s1)); +ALTER TABLE s1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE s2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW v2 AS SELECT * FROM s2 WHERE y like '%af%'; +SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion) +ERROR: infinite recursion detected in policy for relation "s1" +INSERT INTO s1 VALUES (1, 'foo'); -- fail (infinite recursion) +ERROR: infinite recursion detected in policy for relation "s1" +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p3 on s1; +ALTER POLICY p2 ON s2 USING (x % 2 = 0); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM s1 WHERE f_leak(b); -- OK +NOTICE: f_leak => 03b26944890929ff751653acb2f2af79 + a | b +----+---------------------------------- + -6 | 03b26944890929ff751653acb2f2af79 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM only s1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------- + Seq Scan on s1 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + SubPlan 1 + -> Seq Scan on s2 + Filter: (((x % 2) = 0) AND (y ~~ '%2f%'::text)) +(5 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY p1 ON s1 USING (a in (select x from v2)); -- using VIEW in RLS policy +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM s1 WHERE f_leak(b); -- OK +NOTICE: f_leak => 03b26944890929ff751653acb2f2af79 + a | b +----+---------------------------------- + -6 | 03b26944890929ff751653acb2f2af79 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------- + Seq Scan on s1 + Filter: ((hashed SubPlan 1) AND f_leak(b)) + SubPlan 1 + -> Seq Scan on s2 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) +(5 rows) + +SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%'; + xx | x | y +----+----+---------------------------------- + -4 | -4 | e5e0093f285a4fb94c3fcc2ad7fd04ed +(1 row) + +EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%'; + QUERY PLAN +------------------------------------------------------------------------- + Seq Scan on s2 + Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text)) + SubPlan 2 + -> Limit + -> Seq Scan on s1 + Filter: (hashed SubPlan 1) + SubPlan 1 + -> Seq Scan on s2 s2_1 + Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) +(9 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +ALTER POLICY p2 ON s2 USING (x in (select a from s1 where b like '%d2%')); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM s1 WHERE f_leak(b); -- fail (infinite recursion via view) +ERROR: infinite recursion detected in policy for relation "s1" +-- prepared statement with regress_rls_alice privilege +PREPARE p1(int) AS SELECT * FROM t1 WHERE a <= $1; +EXECUTE p1(2); + id | a | b +-----+---+----- + 102 | 2 | bbb + 202 | 2 | bcd + 302 | 2 | yyy +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE 
p1(2); + QUERY PLAN +---------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a <= 2) AND ((a % 2) = 0)) + -> Seq Scan on t2 t1_2 + Filter: ((a <= 2) AND ((a % 2) = 0)) + -> Seq Scan on t3 t1_3 + Filter: ((a <= 2) AND ((a % 2) = 0)) +(7 rows) + +-- superuser is allowed to bypass RLS checks +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM t1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => cde +NOTICE: f_leak => def +NOTICE: f_leak => xxx +NOTICE: f_leak => yyy +NOTICE: f_leak => zzz + id | a | b +-----+---+----- + 101 | 1 | aba + 102 | 2 | bbb + 103 | 3 | ccc + 104 | 4 | dad + 201 | 1 | abc + 202 | 2 | bcd + 203 | 3 | cde + 204 | 4 | def + 301 | 1 | xxx + 302 | 2 | yyy + 303 | 3 | zzz +(11 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1 WHERE f_leak(b); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: f_leak(b) + -> Seq Scan on t2 t1_2 + Filter: f_leak(b) + -> Seq Scan on t3 t1_3 + Filter: f_leak(b) +(7 rows) + +-- plan cache should be invalidated +EXECUTE p1(2); + id | a | b +-----+---+----- + 101 | 1 | aba + 102 | 2 | bbb + 201 | 1 | abc + 202 | 2 | bcd + 301 | 1 | xxx + 302 | 2 | yyy +(6 rows) + +EXPLAIN (COSTS OFF) EXECUTE p1(2); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: (a <= 2) + -> Seq Scan on t2 t1_2 + Filter: (a <= 2) + -> Seq Scan on t3 t1_3 + Filter: (a <= 2) +(7 rows) + +PREPARE p2(int) AS SELECT * FROM t1 WHERE a = $1; +EXECUTE p2(2); + id | a | b +-----+---+----- + 102 | 2 | bbb + 202 | 2 | bcd + 302 | 2 | yyy +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE p2(2); + QUERY PLAN +--------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: (a = 2) + -> Seq Scan on t2 t1_2 + Filter: (a = 2) + -> Seq Scan on t3 t1_3 + Filter: (a = 2) +(7 rows) + +-- also, case when privilege switch from superuser +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +EXECUTE p2(2); + id | a | b +-----+---+----- + 102 | 2 | bbb + 202 | 2 | bcd + 302 | 2 | yyy +(3 rows) + +EXPLAIN (COSTS OFF) EXECUTE p2(2); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on t1 t1_1 + Filter: ((a = 2) AND ((a % 2) = 0)) + -> Seq Scan on t2 t1_2 + Filter: ((a = 2) AND ((a % 2) = 0)) + -> Seq Scan on t3 t1_3 + Filter: ((a = 2) AND ((a % 2) = 0)) +(7 rows) + +-- +-- UPDATE / DELETE and Row-level security +-- +SET SESSION AUTHORIZATION regress_rls_bob; +EXPLAIN (COSTS OFF) UPDATE t1 SET b = b || b WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------- + Update on t1 + Update on t1 t1_1 + Update on t2 t1_2 + Update on t3 t1_3 + -> Result + -> Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(12 rows) + +UPDATE t1 SET b = b || b WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => yyy +EXPLAIN (COSTS OFF) UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Update on t1 + -> Seq Scan on t1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(3 rows) + +UPDATE only t1 SET b = b || '_updt' WHERE f_leak(b); +NOTICE: f_leak => bbbbbb +NOTICE: f_leak => daddad +-- returning clause with system column 
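+-- Aside (illustrative only, not executed in this test): tableoid is a system
+-- column; cast to regclass it reports which relation in the t1 inheritance
+-- tree each affected row physically lives in, and the bare "t1" in the
+-- RETURNING list is a whole-row reference.  A hypothetical stand-alone query
+-- using the same system columns:
+--     SELECT tableoid::regclass, ctid, t1.* FROM t1;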
+UPDATE only t1 SET b = b WHERE f_leak(b) RETURNING tableoid::regclass, *, t1; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt + tableoid | id | a | b | t1 +----------+-----+---+-------------+--------------------- + t1 | 102 | 2 | bbbbbb_updt | (102,2,bbbbbb_updt) + t1 | 104 | 4 | daddad_updt | (104,4,daddad_updt) +(2 rows) + +UPDATE t1 SET b = b WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => bcdbcd +NOTICE: f_leak => defdef +NOTICE: f_leak => yyyyyy + id | a | b +-----+---+------------- + 102 | 2 | bbbbbb_updt + 104 | 4 | daddad_updt + 202 | 2 | bcdbcd + 204 | 4 | defdef + 302 | 2 | yyyyyy +(5 rows) + +UPDATE t1 SET b = b WHERE f_leak(b) RETURNING tableoid::regclass, *, t1; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => bcdbcd +NOTICE: f_leak => defdef +NOTICE: f_leak => yyyyyy + tableoid | id | a | b | t1 +----------+-----+---+-------------+--------------------- + t1 | 102 | 2 | bbbbbb_updt | (102,2,bbbbbb_updt) + t1 | 104 | 4 | daddad_updt | (104,4,daddad_updt) + t2 | 202 | 2 | bcdbcd | (202,2,bcdbcd) + t2 | 204 | 4 | defdef | (204,4,defdef) + t3 | 302 | 2 | yyyyyy | (302,2,yyyyyy) +(5 rows) + +-- updates with from clause +EXPLAIN (COSTS OFF) UPDATE t2 SET b=t2.b FROM t3 +WHERE t2.a = 3 and t3.a = 2 AND f_leak(t2.b) AND f_leak(t3.b); + QUERY PLAN +----------------------------------------------------------------- + Update on t2 + -> Nested Loop + -> Seq Scan on t2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on t3 + Filter: ((a = 2) AND f_leak(b)) +(6 rows) + +UPDATE t2 SET b=t2.b FROM t3 +WHERE t2.a = 3 and t3.a = 2 AND f_leak(t2.b) AND f_leak(t3.b); +NOTICE: f_leak => cde +NOTICE: f_leak => yyyyyy +EXPLAIN (COSTS OFF) UPDATE t1 SET b=t1.b FROM t2 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); + QUERY PLAN +----------------------------------------------------------------------- + Update on t1 + Update on t1 t1_1 + Update on t2 t1_2 + Update on t3 t1_3 + -> Nested Loop + -> Seq Scan on t2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) +(14 rows) + +UPDATE t1 SET b=t1.b FROM t2 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); +NOTICE: f_leak => cde +EXPLAIN (COSTS OFF) UPDATE t2 SET b=t2.b FROM t1 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); + QUERY PLAN +----------------------------------------------------------------------- + Update on t2 + -> Nested Loop + -> Seq Scan on t2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Append + -> Seq Scan on t1 t1_1 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: ((a = 3) AND ((a % 2) = 0) AND f_leak(b)) +(11 rows) + +UPDATE t2 SET b=t2.b FROM t1 +WHERE t1.a = 3 and t2.a = 3 AND f_leak(t1.b) AND f_leak(t2.b); +NOTICE: f_leak => cde +-- updates with from clause self join +EXPLAIN (COSTS OFF) UPDATE t2 t2_1 SET b = t2_2.b FROM t2 t2_2 +WHERE t2_1.a = 3 AND t2_2.a = t2_1.a AND t2_2.b = t2_1.b +AND f_leak(t2_1.b) AND f_leak(t2_2.b) RETURNING *, t2_1, t2_2; + QUERY PLAN +----------------------------------------------------------------- + Update on t2 t2_1 + -> Nested Loop + Join Filter: (t2_1.b = t2_2.b) + -> 
Seq Scan on t2 t2_1 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) + -> Seq Scan on t2 t2_2 + Filter: ((a = 3) AND ((a % 2) = 1) AND f_leak(b)) +(7 rows) + +UPDATE t2 t2_1 SET b = t2_2.b FROM t2 t2_2 +WHERE t2_1.a = 3 AND t2_2.a = t2_1.a AND t2_2.b = t2_1.b +AND f_leak(t2_1.b) AND f_leak(t2_2.b) RETURNING *, t2_1, t2_2; +NOTICE: f_leak => cde +NOTICE: f_leak => cde + id | a | b | c | id | a | b | c | t2_1 | t2_2 +-----+---+-----+-----+-----+---+-----+-----+-----------------+----------------- + 203 | 3 | cde | 3.3 | 203 | 3 | cde | 3.3 | (203,3,cde,3.3) | (203,3,cde,3.3) +(1 row) + +EXPLAIN (COSTS OFF) UPDATE t1 t1_1 SET b = t1_2.b FROM t1 t1_2 +WHERE t1_1.a = 4 AND t1_2.a = t1_1.a AND t1_2.b = t1_1.b +AND f_leak(t1_1.b) AND f_leak(t1_2.b) RETURNING *, t1_1, t1_2; + QUERY PLAN +----------------------------------------------------------------------------- + Update on t1 t1_1 + Update on t1 t1_1_1 + Update on t2 t1_1_2 + Update on t3 t1_1_3 + -> Nested Loop + Join Filter: (t1_1.b = t1_2.b) + -> Append + -> Seq Scan on t1 t1_1_1 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_1_2 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_1_3 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Materialize + -> Append + -> Seq Scan on t1 t1_2_1 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2_2 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_2_3 + Filter: ((a = 4) AND ((a % 2) = 0) AND f_leak(b)) +(21 rows) + +UPDATE t1 t1_1 SET b = t1_2.b FROM t1 t1_2 +WHERE t1_1.a = 4 AND t1_2.a = t1_1.a AND t1_2.b = t1_1.b +AND f_leak(t1_1.b) AND f_leak(t1_2.b) RETURNING *, t1_1, t1_2; +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => daddad_updt +NOTICE: f_leak => defdef +NOTICE: f_leak => defdef + id | a | b | id | a | b | t1_1 | t1_2 +-----+---+-------------+-----+---+-------------+---------------------+--------------------- + 104 | 4 | daddad_updt | 104 | 4 | daddad_updt | (104,4,daddad_updt) | (104,4,daddad_updt) + 204 | 4 | defdef | 204 | 4 | defdef | (204,4,defdef) | (204,4,defdef) +(2 rows) + +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +SELECT * FROM t1 ORDER BY a,b; + id | a | b +-----+---+------------- + 101 | 1 | aba + 201 | 1 | abc + 301 | 1 | xxx + 102 | 2 | bbbbbb_updt + 202 | 2 | bcdbcd + 302 | 2 | yyyyyy + 103 | 3 | ccc + 203 | 3 | cde + 303 | 3 | zzz + 104 | 4 | daddad_updt + 204 | 4 | defdef +(11 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +EXPLAIN (COSTS OFF) DELETE FROM only t1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------- + Delete on t1 + -> Seq Scan on t1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(3 rows) + +EXPLAIN (COSTS OFF) DELETE FROM t1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------- + Delete on t1 + Delete on t1 t1_1 + Delete on t2 t1_2 + Delete on t3 t1_3 + -> Append + -> Seq Scan on t1 t1_1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t2 t1_2 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> Seq Scan on t3 t1_3 + Filter: (((a % 2) = 0) AND f_leak(b)) +(11 rows) + +DELETE FROM only t1 WHERE f_leak(b) RETURNING tableoid::regclass, *, t1; +NOTICE: f_leak => bbbbbb_updt +NOTICE: f_leak => daddad_updt + tableoid | id | a | b | t1 +----------+-----+---+-------------+--------------------- + t1 | 102 | 2 | bbbbbb_updt | (102,2,bbbbbb_updt) + t1 | 104 | 4 | daddad_updt | (104,4,daddad_updt) +(2 rows) + +DELETE FROM t1 WHERE f_leak(b) RETURNING tableoid::regclass, 
*, t1; +NOTICE: f_leak => bcdbcd +NOTICE: f_leak => defdef +NOTICE: f_leak => yyyyyy + tableoid | id | a | b | t1 +----------+-----+---+--------+---------------- + t2 | 202 | 2 | bcdbcd | (202,2,bcdbcd) + t2 | 204 | 4 | defdef | (204,4,defdef) + t3 | 302 | 2 | yyyyyy | (302,2,yyyyyy) +(3 rows) + +-- +-- S.b. view on top of Row-level security +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE b1 (a int, b text); +INSERT INTO b1 (SELECT x, public.fipshash(x::text) FROM generate_series(-10,10) x); +CREATE POLICY p1 ON b1 USING (a % 2 = 0); +ALTER TABLE b1 ENABLE ROW LEVEL SECURITY; +GRANT ALL ON b1 TO regress_rls_bob; +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW bv1 WITH (security_barrier) AS SELECT * FROM b1 WHERE a > 0 WITH CHECK OPTION; +GRANT ALL ON bv1 TO regress_rls_carol; +SET SESSION AUTHORIZATION regress_rls_carol; +EXPLAIN (COSTS OFF) SELECT * FROM bv1 WHERE f_leak(b); + QUERY PLAN +--------------------------------------------- + Subquery Scan on bv1 + Filter: f_leak(bv1.b) + -> Seq Scan on b1 + Filter: ((a > 0) AND ((a % 2) = 0)) +(4 rows) + +SELECT * FROM bv1 WHERE f_leak(b); +NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 +NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 +NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f +NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 +NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 + a | b +----+---------------------------------- + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 +(5 rows) + +INSERT INTO bv1 VALUES (-1, 'xxx'); -- should fail view WCO +ERROR: new row violates row-level security policy for table "b1" +INSERT INTO bv1 VALUES (11, 'xxx'); -- should fail RLS check +ERROR: new row violates row-level security policy for table "b1" +INSERT INTO bv1 VALUES (12, 'xxx'); -- ok +EXPLAIN (COSTS OFF) UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b); + QUERY PLAN +----------------------------------------------------------------------- + Update on b1 + -> Seq Scan on b1 + Filter: ((a > 0) AND (a = 4) AND ((a % 2) = 0) AND f_leak(b)) +(3 rows) + +UPDATE bv1 SET b = 'yyy' WHERE a = 4 AND f_leak(b); +NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 +EXPLAIN (COSTS OFF) DELETE FROM bv1 WHERE a = 6 AND f_leak(b); + QUERY PLAN +----------------------------------------------------------------------- + Delete on b1 + -> Seq Scan on b1 + Filter: ((a > 0) AND (a = 6) AND ((a % 2) = 0) AND f_leak(b)) +(3 rows) + +DELETE FROM bv1 WHERE a = 6 AND f_leak(b); +NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM b1; + a | b +-----+---------------------------------- + -10 | c171d4ec282b23db89a99880cd624e9b + -9 | d5c534fde62beb89c745a59952c8efed + -8 | e91592205d3881e3ea35d66973bb4898 + -7 | a770d3270c9dcdedf12ed9fd70444f7c + -6 | 03b26944890929ff751653acb2f2af79 + -5 | 37aa1ccf80e481832b2db282d4d4f895 + -4 | e5e0093f285a4fb94c3fcc2ad7fd04ed + -3 | 615bdd17c2556f82f384392ea8557f8c + -2 | cf3bae39dd692048a8bf961182e6a34d + -1 | 1bad6b8cf97131fceab8543e81f77571 + 0 | 5feceb66ffc86f38d952786c6d696c79 + 1 | 6b86b273ff34fce19d6b804eff5a3f57 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 3 | 4e07408562bedb8b60ce05c1decfe3ad + 5 | ef2d127de37b942baad06145e54b0c61 + 7 | 7902699be42c8a8e46fbbb4501726517 + 8 | 2c624232cdd221771294dfbb310aca00 + 9 | 19581e27de7ced00ff1ce50b2047e7a5 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | xxx + 
4 | yyy +(21 rows) + +-- +-- INSERT ... ON CONFLICT DO UPDATE and Row-level security +-- +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p1 ON document; +DROP POLICY p1r ON document; +CREATE POLICY p1 ON document FOR SELECT USING (true); +CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user); +CREATE POLICY p3 ON document FOR UPDATE + USING (cid = (SELECT cid from category WHERE cname = 'novel')) + WITH CHECK (dauthor = current_user); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Exists... +SELECT * FROM document WHERE did = 2; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+----------------- + 2 | 11 | 2 | regress_rls_bob | my second novel +(1 row) + +-- ...so violates actual WITH CHECK OPTION within UPDATE (not INSERT, since +-- alternative UPDATE path happens to be taken): +INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, dauthor = EXCLUDED.dauthor; +ERROR: new row violates row-level security policy for table "document" +-- Violates USING qual for UPDATE policy p3. +-- +-- UPDATE path is taken, but UPDATE fails purely because *existing* row to be +-- updated is not a "novel"/cid 11 (row is not leaked, even though we have +-- SELECT privileges sufficient to see the row in this instance): +INSERT INTO document VALUES (33, 22, 1, 'regress_rls_bob', 'okay science fiction'); -- preparation for next statement +INSERT INTO document VALUES (33, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'Some novel, replaces sci-fi') -- takes UPDATE path + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Fine (we UPDATE, since INSERT WCOs and UPDATE security barrier quals + WCOs +-- not violated): +INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+---------------- + 2 | 11 | 2 | regress_rls_bob | my first novel +(1 row) + +-- Fine (we INSERT, so "cid = 33" ("technology") isn't evaluated): +INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+----------------------- + 78 | 11 | 1 | regress_rls_bob | some technology novel +(1 row) + +-- Fine (same query, but we UPDATE, so "cid = 33", ("technology") is not the +-- case in respect of *existing* tuple): +INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+----------------------- + 78 | 33 | 1 | regress_rls_bob | some technology novel +(1 row) + +-- Same query a third time, but now fails due to existing tuple finally not +-- passing quals: +INSERT INTO document VALUES (78, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'some technology novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33 RETURNING *; 
+ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Don't fail just because INSERT doesn't satisfy WITH CHECK option that +-- originated as a barrier/USING() qual from the UPDATE. Note that the UPDATE +-- path *isn't* taken, and so UPDATE-related policy does not apply: +INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; + did | cid | dlevel | dauthor | dtitle +-----+-----+--------+-----------------+---------------------------------- + 79 | 33 | 1 | regress_rls_bob | technology book, can only insert +(1 row) + +-- But this time, the same statement fails, because the UPDATE path is taken, +-- and updating the row just inserted falls afoul of security barrier qual +-- (enforced as WCO) -- what we might have updated target tuple to is +-- irrelevant, in fact. +INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Test default USING qual enforced as WCO +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p1 ON document; +DROP POLICY p2 ON document; +DROP POLICY p3 ON document; +CREATE POLICY p3_with_default ON document FOR UPDATE + USING (cid = (SELECT cid from category WHERE cname = 'novel')); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Just because WCO-style enforcement of USING quals occurs with +-- existing/target tuple does not mean that the implementation can be allowed +-- to fail to also enforce this qual against the final tuple appended to +-- relation (since in the absence of an explicit WCO, this is also interpreted +-- as an UPDATE/ALL WCO in general). +-- +-- UPDATE path is taken here (fails due to existing tuple). Note that this is +-- not reported as a "USING expression", because it's an RLS UPDATE check that originated as +-- a USING qual for the purposes of RLS in general, as opposed to an explicit +-- USING qual that is ordinarily a security barrier. We leave it up to the +-- UPDATE to make this fail: +INSERT INTO document VALUES (79, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'technology book, can only insert') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle RETURNING *; +ERROR: new row violates row-level security policy for table "document" +-- UPDATE path is taken here. 
Existing tuple passes, since its cid +-- corresponds to "novel", but default USING qual is enforced against +-- post-UPDATE tuple too (as always when updating with a policy that lacks an +-- explicit WCO), and so this fails: +INSERT INTO document VALUES (2, (SELECT cid from category WHERE cname = 'technology'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET cid = EXCLUDED.cid, dtitle = EXCLUDED.dtitle RETURNING *; +ERROR: new row violates row-level security policy for table "document" +SET SESSION AUTHORIZATION regress_rls_alice; +DROP POLICY p3_with_default ON document; +-- +-- Test ALL policies with ON CONFLICT DO UPDATE (much the same as existing UPDATE +-- tests) +-- +CREATE POLICY p3_with_all ON document FOR ALL + USING (cid = (SELECT cid from category WHERE cname = 'novel')) + WITH CHECK (dauthor = current_user); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Fails, since ALL WCO is enforced in insert path: +INSERT INTO document VALUES (80, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_carol', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle, cid = 33; +ERROR: new row violates row-level security policy for table "document" +-- Fails, since ALL policy USING qual is enforced (existing, target tuple is in +-- violation, since it has the "manga" cid): +INSERT INTO document VALUES (4, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dtitle = EXCLUDED.dtitle; +ERROR: new row violates row-level security policy (USING expression) for table "document" +-- Fails, since ALL WCO are enforced: +INSERT INTO document VALUES (1, (SELECT cid from category WHERE cname = 'novel'), 1, 'regress_rls_bob', 'my first novel') + ON CONFLICT (did) DO UPDATE SET dauthor = 'regress_rls_carol'; +ERROR: new row violates row-level security policy for table "document" +-- +-- MERGE +-- +RESET SESSION AUTHORIZATION; +DROP POLICY p3_with_all ON document; +ALTER TABLE document ADD COLUMN dnotes text DEFAULT ''; +-- all documents are readable +CREATE POLICY p1 ON document FOR SELECT USING (true); +-- one may insert documents only authored by them +CREATE POLICY p2 ON document FOR INSERT WITH CHECK (dauthor = current_user); +-- one may only update documents in 'novel' category and new dlevel must be > 0 +CREATE POLICY p3 ON document FOR UPDATE + USING (cid = (SELECT cid from category WHERE cname = 'novel')) + WITH CHECK (dlevel > 0); +-- one may only delete documents in 'manga' category +CREATE POLICY p4 ON document FOR DELETE + USING (cid = (SELECT cid from category WHERE cname = 'manga')); +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle | dnotes +-----+-----+--------+-------------------+----------------------------------+-------- + 1 | 11 | 1 | regress_rls_bob | my first novel | + 3 | 22 | 2 | regress_rls_bob | my science fiction | + 4 | 44 | 1 | regress_rls_bob | my first manga | + 5 | 44 | 2 | regress_rls_bob | my second manga | + 6 | 22 | 1 | regress_rls_carol | great science fiction | + 7 | 33 | 2 | regress_rls_carol | great technology book | + 8 | 44 | 1 | regress_rls_carol | great manga | + 9 | 22 | 1 | regress_rls_dave | awesome science fiction | + 10 | 33 | 2 | regress_rls_dave | awesome technology book | + 11 | 33 | 1 | regress_rls_carol | hoge | + 33 | 22 | 1 | regress_rls_bob | okay science fiction | + 2 | 11 | 2 | regress_rls_bob | my first novel | + 78 | 33 | 1 | regress_rls_bob | some technology novel | + 79 | 33 | 1 | regress_rls_bob | 
technology book, can only insert | +(14 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +-- Fails, since update violates WITH CHECK qual on dlevel +MERGE INTO document d +USING (SELECT 1 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge1 ', dlevel = 0; +ERROR: new row violates row-level security policy for table "document" +-- Should be OK since USING and WITH CHECK quals pass +MERGE INTO document d +USING (SELECT 1 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge2 '; +-- Even when dlevel is updated explicitly, but to the existing value +MERGE INTO document d +USING (SELECT 1 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge3 ', dlevel = 1; +-- There is a MATCH for did = 3, but UPDATE's USING qual does not allow +-- updating an item in category 'science fiction' +MERGE INTO document d +USING (SELECT 3 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge '; +ERROR: target row violates row-level security policy (USING expression) for table "document" +-- The same thing with DELETE action, but fails again because no permissions +-- to delete items in 'science fiction' category that did 3 belongs to. +MERGE INTO document d +USING (SELECT 3 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + DELETE; +ERROR: target row violates row-level security policy (USING expression) for table "document" +-- Document with did 4 belongs to 'manga' category which is allowed for +-- deletion. But this fails because the UPDATE action is matched first and +-- UPDATE policy does not allow updation in the category. +MERGE INTO document d +USING (SELECT 4 as sdid) s +ON did = s.sdid +WHEN MATCHED AND dnotes = '' THEN + UPDATE SET dnotes = dnotes || ' notes added by merge ' +WHEN MATCHED THEN + DELETE; +ERROR: target row violates row-level security policy (USING expression) for table "document" +-- UPDATE action is not matched this time because of the WHEN qual. +-- DELETE still fails because role regress_rls_bob does not have SELECT +-- privileges on 'manga' category row in the category table. +MERGE INTO document d +USING (SELECT 4 as sdid) s +ON did = s.sdid +WHEN MATCHED AND dnotes <> '' THEN + UPDATE SET dnotes = dnotes || ' notes added by merge ' +WHEN MATCHED THEN + DELETE; +ERROR: target row violates row-level security policy (USING expression) for table "document" +-- OK if DELETE is replaced with DO NOTHING +MERGE INTO document d +USING (SELECT 4 as sdid) s +ON did = s.sdid +WHEN MATCHED AND dnotes <> '' THEN + UPDATE SET dnotes = dnotes || ' notes added by merge ' +WHEN MATCHED THEN + DO NOTHING; +SELECT * FROM document WHERE did = 4; + did | cid | dlevel | dauthor | dtitle | dnotes +-----+-----+--------+-----------------+----------------+-------- + 4 | 44 | 1 | regress_rls_bob | my first manga | +(1 row) + +-- Switch to regress_rls_carol role and try the DELETE again. It should succeed +-- this time +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_rls_carol; +MERGE INTO document d +USING (SELECT 4 as sdid) s +ON did = s.sdid +WHEN MATCHED AND dnotes <> '' THEN + UPDATE SET dnotes = dnotes || ' notes added by merge ' +WHEN MATCHED THEN + DELETE; +-- Switch back to regress_rls_bob role +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_rls_bob; +-- Try INSERT action. 
This fails because we are trying to insert +-- dauthor = regress_rls_dave and INSERT's WITH CHECK does not allow +-- that +MERGE INTO document d +USING (SELECT 12 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (12, 11, 1, 'regress_rls_dave', 'another novel'); +ERROR: new row violates row-level security policy for table "document" +-- This should be fine +MERGE INTO document d +USING (SELECT 12 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + DELETE +WHEN NOT MATCHED THEN + INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel'); +-- ok +MERGE INTO document d +USING (SELECT 1 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge4 ' +WHEN NOT MATCHED THEN + INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel'); +-- drop and create a new SELECT policy which prevents us from reading +-- any document except with category 'novel' +RESET SESSION AUTHORIZATION; +DROP POLICY p1 ON document; +CREATE POLICY p1 ON document FOR SELECT + USING (cid = (SELECT cid from category WHERE cname = 'novel')); +SET SESSION AUTHORIZATION regress_rls_bob; +-- MERGE can no longer see the matching row and hence attempts the +-- NOT MATCHED action, which results in unique key violation +MERGE INTO document d +USING (SELECT 7 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge5 ' +WHEN NOT MATCHED THEN + INSERT VALUES (12, 11, 1, 'regress_rls_bob', 'another novel'); +ERROR: duplicate key value violates unique constraint "document_pkey" +-- UPDATE action fails if new row is not visible +MERGE INTO document d +USING (SELECT 1 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge6 ', + cid = (SELECT cid from category WHERE cname = 'technology'); +ERROR: new row violates row-level security policy for table "document" +-- but OK if new row is visible +MERGE INTO document d +USING (SELECT 1 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge7 ', + cid = (SELECT cid from category WHERE cname = 'novel'); +-- OK to insert a new row that is not visible +MERGE INTO document d +USING (SELECT 13 as sdid) s +ON did = s.sdid +WHEN MATCHED THEN + UPDATE SET dnotes = dnotes || ' notes added by merge8 ' +WHEN NOT MATCHED THEN + INSERT VALUES (13, 44, 1, 'regress_rls_bob', 'new manga'); +RESET SESSION AUTHORIZATION; +-- drop the restrictive SELECT policy so that we can look at the +-- final state of the table +DROP POLICY p1 ON document; +-- Just check everything went per plan +SELECT * FROM document; + did | cid | dlevel | dauthor | dtitle | dnotes +-----+-----+--------+-------------------+----------------------------------+---------------------------------------------------------------------------------------------- + 3 | 22 | 2 | regress_rls_bob | my science fiction | + 5 | 44 | 2 | regress_rls_bob | my second manga | + 6 | 22 | 1 | regress_rls_carol | great science fiction | + 7 | 33 | 2 | regress_rls_carol | great technology book | + 8 | 44 | 1 | regress_rls_carol | great manga | + 9 | 22 | 1 | regress_rls_dave | awesome science fiction | + 10 | 33 | 2 | regress_rls_dave | awesome technology book | + 11 | 33 | 1 | regress_rls_carol | hoge | + 33 | 22 | 1 | regress_rls_bob | okay science fiction | + 2 | 11 | 2 | regress_rls_bob | my first novel | + 78 | 33 | 1 | regress_rls_bob | some technology novel | + 79 | 33 | 1 | regress_rls_bob | technology book, can only insert | + 12 | 11 
| 1 | regress_rls_bob | another novel | + 1 | 11 | 1 | regress_rls_bob | my first novel | notes added by merge2 notes added by merge3 notes added by merge4 notes added by merge7 + 13 | 44 | 1 | regress_rls_bob | new manga | +(15 rows) + +-- +-- ROLE/GROUP +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE z1 (a int, b text); +CREATE TABLE z2 (a int, b text); +GRANT SELECT ON z1,z2 TO regress_rls_group1, regress_rls_group2, + regress_rls_bob, regress_rls_carol; +INSERT INTO z1 VALUES + (1, 'aba'), + (2, 'bbb'), + (3, 'ccc'), + (4, 'dad'); +CREATE POLICY p1 ON z1 TO regress_rls_group1 USING (a % 2 = 0); +CREATE POLICY p2 ON z1 TO regress_rls_group2 USING (a % 2 = 1); +ALTER TABLE z1 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +PREPARE plancache_test AS SELECT * FROM z1 WHERE f_leak(b); +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +PREPARE plancache_test2 AS WITH q AS MATERIALIZED (SELECT * FROM z1 WHERE f_leak(b)) SELECT * FROM q,z2; +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(7 rows) + +PREPARE plancache_test3 AS WITH q AS MATERIALIZED (SELECT * FROM z2) SELECT * FROM q,z1 WHERE f_leak(z1.b); +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(7 rows) + +SET ROLE regress_rls_group1; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(7 rows) + +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => ccc + a | b +---+----- + 1 | aba + 3 | ccc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------- + 
Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(7 rows) + +SET ROLE regress_rls_group2; +SELECT * FROM z1 WHERE f_leak(b); +NOTICE: f_leak => aba +NOTICE: f_leak => ccc + a | b +---+----- + 1 | aba + 3 | ccc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM z1 WHERE f_leak(b); + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(2 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test2; + QUERY PLAN +------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) + -> CTE Scan on q + -> Materialize + -> Seq Scan on z2 +(7 rows) + +EXPLAIN (COSTS OFF) EXECUTE plancache_test3; + QUERY PLAN +----------------------------------------------------- + Nested Loop + CTE q + -> Seq Scan on z2 + -> CTE Scan on q + -> Materialize + -> Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(7 rows) + +-- +-- Views should follow policy for view owner. +-- +-- View and Table owner are the same. +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); +GRANT SELECT ON rls_view TO regress_rls_bob; +-- Query as role that is not owner of view or table. Should return all records. +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +--------------------- + Seq Scan on z1 + Filter: f_leak(b) +(2 rows) + +-- Query as view/table owner. Should return all records. +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +--------------------- + Seq Scan on z1 + Filter: f_leak(b) +(2 rows) + +DROP VIEW rls_view; +-- View and Table owners are different. +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rls_view AS SELECT * FROM z1 WHERE f_leak(b); +GRANT SELECT ON rls_view TO regress_rls_alice; +-- Query as role that is not owner of view but is owner of table. +-- Should return records based on view owner policies. +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +-- Query as role that is not owner of table but is owner of view. +-- Should return records based on view owner policies. 
+SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +-- Query as role that is not the owner of the table or view without permissions. +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for view rls_view +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for view rls_view +-- Query as role that is not the owner of the table or view with permissions. +SET SESSION AUTHORIZATION regress_rls_bob; +GRANT SELECT ON rls_view TO regress_rls_carol; +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +-- Policy requiring access to another table. +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE z1_blacklist (a int); +INSERT INTO z1_blacklist VALUES (3), (4); +CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist)); +-- Query as role that is not owner of table but is owner of view without permissions. +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +-- Query as role that is not the owner of the table or view without permissions. +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +-- Query as role that is not owner of table but is owner of view with permissions. +SET SESSION AUTHORIZATION regress_rls_alice; +GRANT SELECT ON z1_blacklist TO regress_rls_bob; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb + a | b +---+----- + 2 | bbb +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +---------------------------------------------------------------------- + Seq Scan on z1 + Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b)) + SubPlan 1 + -> Seq Scan on z1_blacklist +(4 rows) + +-- Query as role that is not the owner of the table or view with permissions. +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb + a | b +---+----- + 2 | bbb +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +---------------------------------------------------------------------- + Seq Scan on z1 + Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b)) + SubPlan 1 + -> Seq Scan on z1_blacklist +(4 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +REVOKE SELECT ON z1_blacklist FROM regress_rls_bob; +DROP POLICY p3 ON z1; +SET SESSION AUTHORIZATION regress_rls_bob; +DROP VIEW rls_view; +-- +-- Security invoker views should follow policy for current user. +-- +-- View and table owner are the same. 
+SET SESSION AUTHORIZATION regress_rls_alice; +CREATE VIEW rls_view WITH (security_invoker) AS + SELECT * FROM z1 WHERE f_leak(b); +GRANT SELECT ON rls_view TO regress_rls_bob; +GRANT SELECT ON rls_view TO regress_rls_carol; +-- Query as table owner. Should return all records. +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +--------------------- + Seq Scan on z1 + Filter: f_leak(b) +(2 rows) + +-- Queries as other users. +-- Should return records based on current user's policies. +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => ccc + a | b +---+----- + 1 | aba + 3 | ccc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(2 rows) + +-- View and table owners are different. +SET SESSION AUTHORIZATION regress_rls_alice; +DROP VIEW rls_view; +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rls_view WITH (security_invoker) AS + SELECT * FROM z1 WHERE f_leak(b); +GRANT SELECT ON rls_view TO regress_rls_alice; +GRANT SELECT ON rls_view TO regress_rls_carol; +-- Query as table owner. Should return all records. +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => bbb +NOTICE: f_leak => ccc +NOTICE: f_leak => dad + a | b +---+----- + 1 | aba + 2 | bbb + 3 | ccc + 4 | dad +(4 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +--------------------- + Seq Scan on z1 + Filter: f_leak(b) +(2 rows) + +-- Queries as other users. +-- Should return records based on current user's policies. +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb +NOTICE: f_leak => dad + a | b +---+----- + 2 | bbb + 4 | dad +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; +NOTICE: f_leak => aba +NOTICE: f_leak => ccc + a | b +---+----- + 1 | aba + 3 | ccc +(2 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +----------------------------------------- + Seq Scan on z1 + Filter: (((a % 2) = 1) AND f_leak(b)) +(2 rows) + +-- Policy requiring access to another table. +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY p3 ON z1 AS RESTRICTIVE USING (a NOT IN (SELECT a FROM z1_blacklist)); +-- Query as role that is not owner of table but is owner of view without permissions. +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +-- Query as role that is not the owner of the table or view without permissions. 
+SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +-- Query as role that is not owner of table but is owner of view with permissions. +SET SESSION AUTHORIZATION regress_rls_alice; +GRANT SELECT ON z1_blacklist TO regress_rls_bob; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM rls_view; +NOTICE: f_leak => bbb + a | b +---+----- + 2 | bbb +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +---------------------------------------------------------------------- + Seq Scan on z1 + Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 0) AND f_leak(b)) + SubPlan 1 + -> Seq Scan on z1_blacklist +(4 rows) + +-- Query as role that is not the owner of the table or view without permissions. +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; --fail - permission denied. +ERROR: permission denied for table z1_blacklist +-- Query as role that is not the owner of the table or view with permissions. +SET SESSION AUTHORIZATION regress_rls_alice; +GRANT SELECT ON z1_blacklist TO regress_rls_carol; +SET SESSION AUTHORIZATION regress_rls_carol; +SELECT * FROM rls_view; +NOTICE: f_leak => aba + a | b +---+----- + 1 | aba +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM rls_view; + QUERY PLAN +---------------------------------------------------------------------- + Seq Scan on z1 + Filter: ((NOT (hashed SubPlan 1)) AND ((a % 2) = 1) AND f_leak(b)) + SubPlan 1 + -> Seq Scan on z1_blacklist +(4 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +DROP VIEW rls_view; +-- +-- Command specific +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE x1 (a int, b text, c text); +GRANT ALL ON x1 TO PUBLIC; +INSERT INTO x1 VALUES + (1, 'abc', 'regress_rls_bob'), + (2, 'bcd', 'regress_rls_bob'), + (3, 'cde', 'regress_rls_carol'), + (4, 'def', 'regress_rls_carol'), + (5, 'efg', 'regress_rls_bob'), + (6, 'fgh', 'regress_rls_bob'), + (7, 'fgh', 'regress_rls_carol'), + (8, 'fgh', 'regress_rls_carol'); +CREATE POLICY p0 ON x1 FOR ALL USING (c = current_user); +CREATE POLICY p1 ON x1 FOR SELECT USING (a % 2 = 0); +CREATE POLICY p2 ON x1 FOR INSERT WITH CHECK (a % 2 = 1); +CREATE POLICY p3 ON x1 FOR UPDATE USING (a % 2 = 0); +CREATE POLICY p4 ON x1 FOR DELETE USING (a < 8); +ALTER TABLE x1 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => efg +NOTICE: f_leak => fgh +NOTICE: f_leak => fgh + a | b | c +---+-----+------------------- + 1 | abc | regress_rls_bob + 2 | bcd | regress_rls_bob + 4 | def | regress_rls_carol + 5 | efg | regress_rls_bob + 6 | fgh | regress_rls_bob + 8 | fgh | regress_rls_carol +(6 rows) + +UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => abc +NOTICE: f_leak => bcd +NOTICE: f_leak => def +NOTICE: f_leak => efg +NOTICE: f_leak => fgh +NOTICE: f_leak => fgh + a | b | c +---+----------+------------------- + 1 | abc_updt | regress_rls_bob + 2 | bcd_updt | regress_rls_bob + 4 | def_updt | regress_rls_carol + 5 | efg_updt | regress_rls_bob + 6 | fgh_updt | regress_rls_bob + 8 | fgh_updt | regress_rls_carol +(6 rows) + +SET SESSION AUTHORIZATION 
regress_rls_carol; +SELECT * FROM x1 WHERE f_leak(b) ORDER BY a ASC; +NOTICE: f_leak => cde +NOTICE: f_leak => fgh +NOTICE: f_leak => bcd_updt +NOTICE: f_leak => def_updt +NOTICE: f_leak => fgh_updt +NOTICE: f_leak => fgh_updt + a | b | c +---+----------+------------------- + 2 | bcd_updt | regress_rls_bob + 3 | cde | regress_rls_carol + 4 | def_updt | regress_rls_carol + 6 | fgh_updt | regress_rls_bob + 7 | fgh | regress_rls_carol + 8 | fgh_updt | regress_rls_carol +(6 rows) + +UPDATE x1 SET b = b || '_updt' WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => cde +NOTICE: f_leak => fgh +NOTICE: f_leak => bcd_updt +NOTICE: f_leak => def_updt +NOTICE: f_leak => fgh_updt +NOTICE: f_leak => fgh_updt + a | b | c +---+---------------+------------------- + 3 | cde_updt | regress_rls_carol + 7 | fgh_updt | regress_rls_carol + 2 | bcd_updt_updt | regress_rls_bob + 4 | def_updt_updt | regress_rls_carol + 6 | fgh_updt_updt | regress_rls_bob + 8 | fgh_updt_updt | regress_rls_carol +(6 rows) + +DELETE FROM x1 WHERE f_leak(b) RETURNING *; +NOTICE: f_leak => cde_updt +NOTICE: f_leak => fgh_updt +NOTICE: f_leak => bcd_updt_updt +NOTICE: f_leak => def_updt_updt +NOTICE: f_leak => fgh_updt_updt +NOTICE: f_leak => fgh_updt_updt + a | b | c +---+---------------+------------------- + 3 | cde_updt | regress_rls_carol + 7 | fgh_updt | regress_rls_carol + 2 | bcd_updt_updt | regress_rls_bob + 4 | def_updt_updt | regress_rls_carol + 6 | fgh_updt_updt | regress_rls_bob + 8 | fgh_updt_updt | regress_rls_carol +(6 rows) + +-- +-- Duplicate Policy Names +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE y1 (a int, b text); +CREATE TABLE y2 (a int, b text); +GRANT ALL ON y1, y2 TO regress_rls_bob; +CREATE POLICY p1 ON y1 FOR ALL USING (a % 2 = 0); +CREATE POLICY p2 ON y1 FOR SELECT USING (a > 2); +CREATE POLICY p1 ON y1 FOR SELECT USING (a % 2 = 1); --fail +ERROR: policy "p1" for table "y1" already exists +CREATE POLICY p1 ON y2 FOR ALL USING (a % 2 = 0); --OK +ALTER TABLE y1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE y2 ENABLE ROW LEVEL SECURITY; +-- +-- Expression structure with SBV +-- +-- Create view as table owner. RLS should NOT be applied. +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE VIEW rls_sbv WITH (security_barrier) AS + SELECT * FROM y1 WHERE f_leak(b); +EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1); + QUERY PLAN +----------------------------------- + Seq Scan on y1 + Filter: (f_leak(b) AND (a = 1)) +(2 rows) + +DROP VIEW rls_sbv; +-- Create view as role that does not own table. RLS should be applied. 
+SET SESSION AUTHORIZATION regress_rls_bob; +CREATE VIEW rls_sbv WITH (security_barrier) AS + SELECT * FROM y1 WHERE f_leak(b); +EXPLAIN (COSTS OFF) SELECT * FROM rls_sbv WHERE (a = 1); + QUERY PLAN +------------------------------------------------------------------ + Seq Scan on y1 + Filter: ((a = 1) AND ((a > 2) OR ((a % 2) = 0)) AND f_leak(b)) +(2 rows) + +DROP VIEW rls_sbv; +-- +-- Expression structure +-- +SET SESSION AUTHORIZATION regress_rls_alice; +INSERT INTO y2 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x); +CREATE POLICY p2 ON y2 USING (a % 3 = 0); +CREATE POLICY p3 ON y2 USING (a % 4 = 0); +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM y2 WHERE f_leak(b); +NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 +NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 +NOTICE: f_leak => 4e07408562bedb8b60ce05c1decfe3ad +NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 +NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f +NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 +NOTICE: f_leak => 19581e27de7ced00ff1ce50b2047e7a5 +NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 +NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d +NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc +NOTICE: f_leak => e629fa6598d732768f7c726b4b621285 +NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc +NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19 +NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3 + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 3 | 4e07408562bedb8b60ce05c1decfe3ad + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 9 | 19581e27de7ced00ff1ce50b2047e7a5 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 15 | e629fa6598d732768f7c726b4b621285 + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------- + Seq Scan on y2 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) +(2 rows) + +-- +-- Qual push-down of leaky functions, when not referring to table +-- +SELECT * FROM y2 WHERE f_leak('abc'); +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc +NOTICE: f_leak => abc + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 3 | 4e07408562bedb8b60ce05c1decfe3ad + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 9 | 19581e27de7ced00ff1ce50b2047e7a5 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 15 | e629fa6598d732768f7c726b4b621285 + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 +(14 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 
WHERE f_leak('abc'); + QUERY PLAN +--------------------------------------------------------------------------------------- + Seq Scan on y2 + Filter: (f_leak('abc'::text) AND (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0))) +(2 rows) + +CREATE TABLE test_qual_pushdown ( + abc text +); +INSERT INTO test_qual_pushdown VALUES ('abc'),('def'); +SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); +NOTICE: f_leak => abc +NOTICE: f_leak => def + a | b | abc +---+---+----- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); + QUERY PLAN +------------------------------------------------------------------------- + Hash Join + Hash Cond: (test_qual_pushdown.abc = y2.b) + -> Seq Scan on test_qual_pushdown + Filter: f_leak(abc) + -> Hash + -> Seq Scan on y2 + Filter: (((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) +(7 rows) + +SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b); +NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 +NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 +NOTICE: f_leak => 4e07408562bedb8b60ce05c1decfe3ad +NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 +NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f +NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 +NOTICE: f_leak => 19581e27de7ced00ff1ce50b2047e7a5 +NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 +NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d +NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc +NOTICE: f_leak => e629fa6598d732768f7c726b4b621285 +NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc +NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19 +NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3 + a | b | abc +---+---+----- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(b); + QUERY PLAN +----------------------------------------------------------------------------------------- + Hash Join + Hash Cond: (test_qual_pushdown.abc = y2.b) + -> Seq Scan on test_qual_pushdown + -> Hash + -> Seq Scan on y2 + Filter: ((((a % 4) = 0) OR ((a % 3) = 0) OR ((a % 2) = 0)) AND f_leak(b)) +(6 rows) + +DROP TABLE test_qual_pushdown; +-- +-- Plancache invalidate on user change. 
+-- +RESET SESSION AUTHORIZATION; +DROP TABLE t1 CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table t2 +drop cascades to table t3 +CREATE TABLE t1 (a integer); +GRANT SELECT ON t1 TO regress_rls_bob, regress_rls_carol; +CREATE POLICY p1 ON t1 TO regress_rls_bob USING ((a % 2) = 0); +CREATE POLICY p2 ON t1 TO regress_rls_carol USING ((a % 4) = 0); +ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; +-- Prepare as regress_rls_bob +SET ROLE regress_rls_bob; +PREPARE role_inval AS SELECT * FROM t1; +-- Check plan +EXPLAIN (COSTS OFF) EXECUTE role_inval; + QUERY PLAN +------------------------- + Seq Scan on t1 + Filter: ((a % 2) = 0) +(2 rows) + +-- Change to regress_rls_carol +SET ROLE regress_rls_carol; +-- Check plan- should be different +EXPLAIN (COSTS OFF) EXECUTE role_inval; + QUERY PLAN +------------------------- + Seq Scan on t1 + Filter: ((a % 4) = 0) +(2 rows) + +-- Change back to regress_rls_bob +SET ROLE regress_rls_bob; +-- Check plan- should be back to original +EXPLAIN (COSTS OFF) EXECUTE role_inval; + QUERY PLAN +------------------------- + Seq Scan on t1 + Filter: ((a % 2) = 0) +(2 rows) + +-- +-- CTE and RLS +-- +RESET SESSION AUTHORIZATION; +DROP TABLE t1 CASCADE; +CREATE TABLE t1 (a integer, b text); +CREATE POLICY p1 ON t1 USING (a % 2 = 0); +ALTER TABLE t1 ENABLE ROW LEVEL SECURITY; +GRANT ALL ON t1 TO regress_rls_bob; +INSERT INTO t1 (SELECT x, public.fipshash(x::text) FROM generate_series(0,20) x); +SET SESSION AUTHORIZATION regress_rls_bob; +WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; +NOTICE: f_leak => 5feceb66ffc86f38d952786c6d696c79 +NOTICE: f_leak => d4735e3a265e16eee03f59718b9b5d03 +NOTICE: f_leak => 4b227777d4dd1fc61c6f884f48641d02 +NOTICE: f_leak => e7f6c011776e8db7cd330b54174fd76f +NOTICE: f_leak => 2c624232cdd221771294dfbb310aca00 +NOTICE: f_leak => 4a44dc15364204a80fe80e9039455cc1 +NOTICE: f_leak => 6b51d431df5d7f141cbececcf79edf3d +NOTICE: f_leak => 8527a891e224136950ff32ca212b45bc +NOTICE: f_leak => b17ef6d19c7a5b1ee83b907c595526dc +NOTICE: f_leak => 4ec9599fc203d176a301536c2e091a19 +NOTICE: f_leak => f5ca38f748a1d6eaf726b8a42fb575c3 + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 +(11 rows) + +EXPLAIN (COSTS OFF) +WITH cte1 AS MATERIALIZED (SELECT * FROM t1 WHERE f_leak(b)) SELECT * FROM cte1; + QUERY PLAN +------------------------------------------------- + CTE Scan on cte1 + CTE cte1 + -> Seq Scan on t1 + Filter: (((a % 2) = 0) AND f_leak(b)) +(4 rows) + +WITH cte1 AS (UPDATE t1 SET a = a + 1 RETURNING *) SELECT * FROM cte1; --fail +ERROR: new row violates row-level security policy for table "t1" +WITH cte1 AS (UPDATE t1 SET a = a RETURNING *) SELECT * FROM cte1; --ok + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 
| 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 +(11 rows) + +WITH cte1 AS (INSERT INTO t1 VALUES (21, 'Fail') RETURNING *) SELECT * FROM cte1; --fail +ERROR: new row violates row-level security policy for table "t1" +WITH cte1 AS (INSERT INTO t1 VALUES (20, 'Success') RETURNING *) SELECT * FROM cte1; --ok + a | b +----+--------- + 20 | Success +(1 row) + +-- +-- Rename Policy +-- +RESET SESSION AUTHORIZATION; +ALTER POLICY p1 ON t1 RENAME TO p1; --fail +ERROR: policy "p1" for table "t1" already exists +SELECT polname, relname + FROM pg_policy pol + JOIN pg_class pc ON (pc.oid = pol.polrelid) + WHERE relname = 't1'; + polname | relname +---------+--------- + p1 | t1 +(1 row) + +ALTER POLICY p1 ON t1 RENAME TO p2; --ok +SELECT polname, relname + FROM pg_policy pol + JOIN pg_class pc ON (pc.oid = pol.polrelid) + WHERE relname = 't1'; + polname | relname +---------+--------- + p2 | t1 +(1 row) + +-- +-- Check INSERT SELECT +-- +SET SESSION AUTHORIZATION regress_rls_bob; +CREATE TABLE t2 (a integer, b text); +INSERT INTO t2 (SELECT * FROM t1); +EXPLAIN (COSTS OFF) INSERT INTO t2 (SELECT * FROM t1); + QUERY PLAN +------------------------------- + Insert on t2 + -> Seq Scan on t1 + Filter: ((a % 2) = 0) +(3 rows) + +SELECT * FROM t2; + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 + 20 | Success +(12 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t2; + QUERY PLAN +---------------- + Seq Scan on t2 +(1 row) + +CREATE TABLE t3 AS SELECT * FROM t1; +SELECT * FROM t3; + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 + 20 | Success +(12 rows) + +SELECT * INTO t4 FROM t1; +SELECT * FROM t4; + a | b +----+---------------------------------- + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 + 20 | Success +(12 rows) + +-- +-- RLS with JOIN +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE blog (id integer, author text, post text); +CREATE TABLE comment (blog_id integer, message text); +GRANT ALL ON blog, comment TO regress_rls_bob; +CREATE POLICY blog_1 ON blog USING (id % 2 = 0); +ALTER TABLE blog ENABLE ROW LEVEL SECURITY; +INSERT INTO blog VALUES + (1, 'alice', 'blog #1'), + (2, 'bob', 'blog #1'), + (3, 'alice', 'blog #2'), + (4, 'alice', 'blog #3'), + (5, 'john', 'blog #1'); +INSERT INTO comment VALUES + (1, 'cool blog'), + (1, 'fun 
blog'), + (3, 'crazy blog'), + (5, 'what?'), + (4, 'insane!'), + (2, 'who did it?'); +SET SESSION AUTHORIZATION regress_rls_bob; +-- Check RLS JOIN with Non-RLS. +SELECT id, author, message FROM blog JOIN comment ON id = blog_id; + id | author | message +----+--------+------------- + 4 | alice | insane! + 2 | bob | who did it? +(2 rows) + +-- Check Non-RLS JOIN with RLS. +SELECT id, author, message FROM comment JOIN blog ON id = blog_id; + id | author | message +----+--------+------------- + 4 | alice | insane! + 2 | bob | who did it? +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE POLICY comment_1 ON comment USING (blog_id < 4); +ALTER TABLE comment ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +-- Check RLS JOIN RLS +SELECT id, author, message FROM blog JOIN comment ON id = blog_id; + id | author | message +----+--------+------------- + 2 | bob | who did it? +(1 row) + +SELECT id, author, message FROM comment JOIN blog ON id = blog_id; + id | author | message +----+--------+------------- + 2 | bob | who did it? +(1 row) + +SET SESSION AUTHORIZATION regress_rls_alice; +DROP TABLE blog, comment; +-- +-- Default Deny Policy +-- +RESET SESSION AUTHORIZATION; +DROP POLICY p2 ON t1; +ALTER TABLE t1 OWNER TO regress_rls_alice; +-- Check that default deny does not apply to superuser. +RESET SESSION AUTHORIZATION; +SELECT * FROM t1; + a | b +----+---------------------------------- + 1 | 6b86b273ff34fce19d6b804eff5a3f57 + 3 | 4e07408562bedb8b60ce05c1decfe3ad + 5 | ef2d127de37b942baad06145e54b0c61 + 7 | 7902699be42c8a8e46fbbb4501726517 + 9 | 19581e27de7ced00ff1ce50b2047e7a5 + 11 | 4fc82b26aecb47d2868c4efbe3581732 + 13 | 3fdba35f04dc8c462986c992bcf87554 + 15 | e629fa6598d732768f7c726b4b621285 + 17 | 4523540f1504cd17100c4835e85b7eef + 19 | 9400f1b21cb527d7fa3d3eabba93557a + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 + 20 | Success +(22 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +---------------- + Seq Scan on t1 +(1 row) + +-- Check that default deny does not apply to table owner. 
+SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM t1; + a | b +----+---------------------------------- + 1 | 6b86b273ff34fce19d6b804eff5a3f57 + 3 | 4e07408562bedb8b60ce05c1decfe3ad + 5 | ef2d127de37b942baad06145e54b0c61 + 7 | 7902699be42c8a8e46fbbb4501726517 + 9 | 19581e27de7ced00ff1ce50b2047e7a5 + 11 | 4fc82b26aecb47d2868c4efbe3581732 + 13 | 3fdba35f04dc8c462986c992bcf87554 + 15 | e629fa6598d732768f7c726b4b621285 + 17 | 4523540f1504cd17100c4835e85b7eef + 19 | 9400f1b21cb527d7fa3d3eabba93557a + 0 | 5feceb66ffc86f38d952786c6d696c79 + 2 | d4735e3a265e16eee03f59718b9b5d03 + 4 | 4b227777d4dd1fc61c6f884f48641d02 + 6 | e7f6c011776e8db7cd330b54174fd76f + 8 | 2c624232cdd221771294dfbb310aca00 + 10 | 4a44dc15364204a80fe80e9039455cc1 + 12 | 6b51d431df5d7f141cbececcf79edf3d + 14 | 8527a891e224136950ff32ca212b45bc + 16 | b17ef6d19c7a5b1ee83b907c595526dc + 18 | 4ec9599fc203d176a301536c2e091a19 + 20 | f5ca38f748a1d6eaf726b8a42fb575c3 + 20 | Success +(22 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +---------------- + Seq Scan on t1 +(1 row) + +-- Check that default deny applies to non-owner/non-superuser when RLS on. +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO ON; +SELECT * FROM t1; + a | b +---+--- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM t1; + a | b +---+--- +(0 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM t1; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +-- +-- COPY TO/FROM +-- +RESET SESSION AUTHORIZATION; +DROP TABLE copy_t CASCADE; +ERROR: table "copy_t" does not exist +CREATE TABLE copy_t (a integer, b text); +CREATE POLICY p1 ON copy_t USING (a % 2 = 0); +ALTER TABLE copy_t ENABLE ROW LEVEL SECURITY; +GRANT ALL ON copy_t TO regress_rls_bob, regress_rls_exempt_user; +INSERT INTO copy_t (SELECT x, public.fipshash(x::text) FROM generate_series(0,10) x); +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; +0,5feceb66ffc86f38d952786c6d696c79 +1,6b86b273ff34fce19d6b804eff5a3f57 +2,d4735e3a265e16eee03f59718b9b5d03 +3,4e07408562bedb8b60ce05c1decfe3ad +4,4b227777d4dd1fc61c6f884f48641d02 +5,ef2d127de37b942baad06145e54b0c61 +6,e7f6c011776e8db7cd330b54174fd76f +7,7902699be42c8a8e46fbbb4501726517 +8,2c624232cdd221771294dfbb310aca00 +9,19581e27de7ced00ff1ce50b2047e7a5 +10,4a44dc15364204a80fe80e9039455cc1 +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; +0,5feceb66ffc86f38d952786c6d696c79 +1,6b86b273ff34fce19d6b804eff5a3f57 +2,d4735e3a265e16eee03f59718b9b5d03 +3,4e07408562bedb8b60ce05c1decfe3ad +4,4b227777d4dd1fc61c6f884f48641d02 +5,ef2d127de37b942baad06145e54b0c61 +6,e7f6c011776e8db7cd330b54174fd76f +7,7902699be42c8a8e46fbbb4501726517 +8,2c624232cdd221771294dfbb310aca00 +9,19581e27de7ced00ff1ce50b2047e7a5 +10,4a44dc15364204a80fe80e9039455cc1 +-- Check COPY TO as user with permissions. 
+SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_t" +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok +0,5feceb66ffc86f38d952786c6d696c79 +2,d4735e3a265e16eee03f59718b9b5d03 +4,4b227777d4dd1fc61c6f884f48641d02 +6,e7f6c011776e8db7cd330b54174fd76f +8,2c624232cdd221771294dfbb310aca00 +10,4a44dc15364204a80fe80e9039455cc1 +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok +0,5feceb66ffc86f38d952786c6d696c79 +1,6b86b273ff34fce19d6b804eff5a3f57 +2,d4735e3a265e16eee03f59718b9b5d03 +3,4e07408562bedb8b60ce05c1decfe3ad +4,4b227777d4dd1fc61c6f884f48641d02 +5,ef2d127de37b942baad06145e54b0c61 +6,e7f6c011776e8db7cd330b54174fd76f +7,7902699be42c8a8e46fbbb4501726517 +8,2c624232cdd221771294dfbb310aca00 +9,19581e27de7ced00ff1ce50b2047e7a5 +10,4a44dc15364204a80fe80e9039455cc1 +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok +0,5feceb66ffc86f38d952786c6d696c79 +1,6b86b273ff34fce19d6b804eff5a3f57 +2,d4735e3a265e16eee03f59718b9b5d03 +3,4e07408562bedb8b60ce05c1decfe3ad +4,4b227777d4dd1fc61c6f884f48641d02 +5,ef2d127de37b942baad06145e54b0c61 +6,e7f6c011776e8db7cd330b54174fd76f +7,7902699be42c8a8e46fbbb4501726517 +8,2c624232cdd221771294dfbb310aca00 +9,19581e27de7ced00ff1ce50b2047e7a5 +10,4a44dc15364204a80fe80e9039455cc1 +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_t" +SET row_security TO ON; +COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_t +-- Check COPY relation TO; keep it just one row to avoid reordering issues +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +CREATE TABLE copy_rel_to (a integer, b text); +CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0); +ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY; +GRANT ALL ON copy_rel_to TO regress_rls_bob, regress_rls_exempt_user; +INSERT INTO copy_rel_to VALUES (1, public.fipshash('1')); +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +1,6b86b273ff34fce19d6b804eff5a3f57 +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +1,6b86b273ff34fce19d6b804eff5a3f57 +-- Check COPY TO as user with permissions. 
+SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_rel_to" +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +1,6b86b273ff34fce19d6b804eff5a3f57 +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +1,6b86b273ff34fce19d6b804eff5a3f57 +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_rel_to +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_rel_to +-- Check behavior with a child table. +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +CREATE TABLE copy_rel_to_child () INHERITS (copy_rel_to); +INSERT INTO copy_rel_to_child VALUES (1, 'one'), (2, 'two'); +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +1,6b86b273ff34fce19d6b804eff5a3f57 +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +1,6b86b273ff34fce19d6b804eff5a3f57 +-- Check COPY TO as user with permissions. +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - would be affected by RLS +ERROR: query would be affected by row-level security policy for table "copy_rel_to" +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +1,6b86b273ff34fce19d6b804eff5a3f57 +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +1,6b86b273ff34fce19d6b804eff5a3f57 +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_rel_to +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for table copy_rel_to +-- Check COPY FROM as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY copy_t FROM STDIN; --ok +SET row_security TO ON; +COPY copy_t FROM STDIN; --ok +-- Check COPY FROM as user with permissions. +SET SESSION AUTHORIZATION regress_rls_bob; +SET row_security TO OFF; +COPY copy_t FROM STDIN; --fail - would be affected by RLS. +ERROR: query would be affected by row-level security policy for table "copy_t" +SET row_security TO ON; +COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS. +ERROR: COPY FROM not supported with row-level security +HINT: Use INSERT statements instead. +-- Check COPY FROM as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION regress_rls_exempt_user; +SET row_security TO ON; +COPY copy_t FROM STDIN; --ok +-- Check COPY FROM as user without permissions. 
+SET SESSION AUTHORIZATION regress_rls_carol; +SET row_security TO OFF; +COPY copy_t FROM STDIN; --fail - permission denied. +ERROR: permission denied for table copy_t +SET row_security TO ON; +COPY copy_t FROM STDIN; --fail - permission denied. +ERROR: permission denied for table copy_t +RESET SESSION AUTHORIZATION; +DROP TABLE copy_t; +DROP TABLE copy_rel_to CASCADE; +NOTICE: drop cascades to table copy_rel_to_child +-- Check WHERE CURRENT OF +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE current_check (currentid int, payload text, rlsuser text); +GRANT ALL ON current_check TO PUBLIC; +INSERT INTO current_check VALUES + (1, 'abc', 'regress_rls_bob'), + (2, 'bcd', 'regress_rls_bob'), + (3, 'cde', 'regress_rls_bob'), + (4, 'def', 'regress_rls_bob'); +CREATE POLICY p1 ON current_check FOR SELECT USING (currentid % 2 = 0); +CREATE POLICY p2 ON current_check FOR DELETE USING (currentid = 4 AND rlsuser = current_user); +CREATE POLICY p3 ON current_check FOR UPDATE USING (currentid = 4) WITH CHECK (rlsuser = current_user); +ALTER TABLE current_check ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +-- Can SELECT even rows +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob + 4 | def | regress_rls_bob +(2 rows) + +-- Cannot UPDATE row 2 +UPDATE current_check SET payload = payload || '_new' WHERE currentid = 2 RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +BEGIN; +DECLARE current_check_cursor SCROLL CURSOR FOR SELECT * FROM current_check; +-- Returns rows that can be seen according to SELECT policy, like plain SELECT +-- above (even rows) +FETCH ABSOLUTE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob +(1 row) + +-- Still cannot UPDATE row 2 through cursor +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +-- Can update row 4 through cursor, which is the next visible row +FETCH RELATIVE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 4 | def | regress_rls_bob +(1 row) + +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+----------------- + 4 | def_new | regress_rls_bob +(1 row) + +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob + 4 | def_new | regress_rls_bob +(2 rows) + +-- Plan should be a subquery TID scan +EXPLAIN (COSTS OFF) UPDATE current_check SET payload = payload WHERE CURRENT OF current_check_cursor; + QUERY PLAN +------------------------------------------------------------- + Update on current_check + -> Tid Scan on current_check + TID Cond: CURRENT OF current_check_cursor + Filter: ((currentid = 4) AND ((currentid % 2) = 0)) +(4 rows) + +-- Similarly can only delete row 4 +FETCH ABSOLUTE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob +(1 row) + +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +FETCH RELATIVE 1 FROM current_check_cursor; + currentid | payload | rlsuser 
+-----------+---------+----------------- + 4 | def | regress_rls_bob +(1 row) + +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+----------------- + 4 | def_new | regress_rls_bob +(1 row) + +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+----------------- + 2 | bcd | regress_rls_bob +(1 row) + +COMMIT; +-- +-- check pg_stats view filtering +-- +SET row_security TO ON; +SET SESSION AUTHORIZATION regress_rls_alice; +ANALYZE current_check; +-- Stats visible +SELECT row_security_active('current_check'); + row_security_active +--------------------- + f +(1 row) + +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; + attname | most_common_vals +-----------+------------------- + currentid | + payload | + rlsuser | {regress_rls_bob} +(3 rows) + +SET SESSION AUTHORIZATION regress_rls_bob; +-- Stats not visible +SELECT row_security_active('current_check'); + row_security_active +--------------------- + t +(1 row) + +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; + attname | most_common_vals +---------+------------------ +(0 rows) + +-- +-- Collation support +-- +BEGIN; +CREATE TABLE coll_t (c) AS VALUES ('bar'::text); +CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); +ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON coll_t TO regress_rls_alice; +SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass; + inputcollid +------------------ + inputcollid 950 +(1 row) + +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM coll_t; + c +----- + bar +(1 row) + +ROLLBACK; +-- +-- Shared Object Dependencies +-- +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE ROLE regress_rls_eve; +CREATE ROLE regress_rls_frank; +CREATE TABLE tbl1 (c) AS VALUES ('bar'::text); +GRANT SELECT ON TABLE tbl1 TO regress_rls_eve; +CREATE POLICY P ON tbl1 TO regress_rls_eve, regress_rls_frank USING (true); +SELECT refclassid::regclass, deptype + FROM pg_depend + WHERE classid = 'pg_policy'::regclass + AND refobjid = 'tbl1'::regclass; + refclassid | deptype +------------+--------- + pg_class | a +(1 row) + +SELECT refclassid::regclass, deptype + FROM pg_shdepend + WHERE classid = 'pg_policy'::regclass + AND refobjid IN ('regress_rls_eve'::regrole, 'regress_rls_frank'::regrole); + refclassid | deptype +------------+--------- + pg_authid | r + pg_authid | r +(2 rows) + +SAVEPOINT q; +DROP ROLE regress_rls_eve; --fails due to dependency on POLICY p +ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it +DETAIL: privileges for table tbl1 +target of policy p on table tbl1 +ROLLBACK TO q; +ALTER POLICY p ON tbl1 TO regress_rls_frank USING (true); +SAVEPOINT q; +DROP ROLE regress_rls_eve; --fails due to dependency on GRANT SELECT +ERROR: role "regress_rls_eve" cannot be dropped because some objects depend on it +DETAIL: privileges for table tbl1 +ROLLBACK TO q; +REVOKE ALL ON TABLE tbl1 FROM regress_rls_eve; +SAVEPOINT q; +DROP ROLE regress_rls_eve; --succeeds +ROLLBACK TO q; +SAVEPOINT q; +DROP ROLE regress_rls_frank; --fails due to dependency on POLICY p +ERROR: role "regress_rls_frank" cannot be dropped because some objects depend on it +DETAIL: target of policy p on table tbl1 +ROLLBACK TO q; +DROP POLICY p ON tbl1; +SAVEPOINT q; +DROP ROLE regress_rls_frank; -- succeeds +ROLLBACK TO q; +ROLLBACK; -- cleanup 
+-- +-- Policy expression handling +-- +BEGIN; +CREATE TABLE t (c) AS VALUES ('bar'::text); +CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions +ERROR: aggregate functions are not allowed in policy expressions +ROLLBACK; +-- +-- Non-target relations are only subject to SELECT policies +-- +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE r1 (a int); +CREATE TABLE r2 (a int); +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +GRANT ALL ON r1, r2 TO regress_rls_bob; +CREATE POLICY p1 ON r1 USING (true); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +CREATE POLICY p1 ON r2 FOR SELECT USING (true); +CREATE POLICY p2 ON r2 FOR INSERT WITH CHECK (false); +CREATE POLICY p3 ON r2 FOR UPDATE USING (false); +CREATE POLICY p4 ON r2 FOR DELETE USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION regress_rls_bob; +SELECT * FROM r1; + a +---- + 10 + 20 +(2 rows) + +SELECT * FROM r2; + a +---- + 10 + 20 +(2 rows) + +-- r2 is read-only +INSERT INTO r2 VALUES (2); -- Not allowed +ERROR: new row violates row-level security policy for table "r2" +UPDATE r2 SET a = 2 RETURNING *; -- Updates nothing + a +--- +(0 rows) + +DELETE FROM r2 RETURNING *; -- Deletes nothing + a +--- +(0 rows) + +-- r2 can be used as a non-target relation in DML +INSERT INTO r1 SELECT a + 1 FROM r2 RETURNING *; -- OK + a +---- + 11 + 21 +(2 rows) + +UPDATE r1 SET a = r2.a + 2 FROM r2 WHERE r1.a = r2.a RETURNING *; -- OK + a | a +----+---- + 12 | 10 + 22 | 20 +(2 rows) + +DELETE FROM r1 USING r2 WHERE r1.a = r2.a + 2 RETURNING *; -- OK + a | a +----+---- + 12 | 10 + 22 | 20 +(2 rows) + +SELECT * FROM r1; + a +---- + 11 + 21 +(2 rows) + +SELECT * FROM r2; + a +---- + 10 + 20 +(2 rows) + +SET SESSION AUTHORIZATION regress_rls_alice; +DROP TABLE r1; +DROP TABLE r2; +-- +-- FORCE ROW LEVEL SECURITY applies RLS to owners too +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int); +INSERT INTO r1 VALUES (10), (20); +CREATE POLICY p1 ON r1 USING (false); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- No error, but no rows +TABLE r1; + a +--- +(0 rows) + +-- RLS error +INSERT INTO r1 VALUES (1); +ERROR: new row violates row-level security policy for table "r1" +-- No error (unable to see any rows to update) +UPDATE r1 SET a = 1; +TABLE r1; + a +--- +(0 rows) + +-- No error (unable to see any rows to delete) +DELETE FROM r1; +TABLE r1; + a +--- +(0 rows) + +SET row_security = off; +-- these all fail, would be affected by RLS +TABLE r1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. +UPDATE r1 SET a = 1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. +DELETE FROM r1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. 
+DROP TABLE r1; +-- +-- FORCE ROW LEVEL SECURITY does not break RI +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int PRIMARY KEY); +CREATE TABLE r2 (a int REFERENCES r1); +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +-- Create policies on r2 which prevent the +-- owner from seeing any rows, but RI should +-- still see them. +CREATE POLICY p1 ON r2 USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r2 FORCE ROW LEVEL SECURITY; +-- Errors due to rows in r2 +DELETE FROM r1; +ERROR: update or delete on table "r1" violates foreign key constraint "r2_a_fkey" on table "r2" +DETAIL: Key (a)=(10) is still referenced from table "r2". +-- Reset r2 to no-RLS +DROP POLICY p1 ON r2; +ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; +ALTER TABLE r2 DISABLE ROW LEVEL SECURITY; +-- clean out r2 for INSERT test below +DELETE FROM r2; +-- Change r1 to not allow rows to be seen +CREATE POLICY p1 ON r1 USING (false); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- No rows seen +TABLE r1; + a +--- +(0 rows) + +-- No error, RI still sees that row exists in r1 +INSERT INTO r2 VALUES (10); +DROP TABLE r2; +DROP TABLE r1; +-- Ensure cascaded DELETE works +CREATE TABLE r1 (a int PRIMARY KEY); +CREATE TABLE r2 (a int REFERENCES r1 ON DELETE CASCADE); +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +-- Create policies on r2 which prevent the +-- owner from seeing any rows, but RI should +-- still see them. +CREATE POLICY p1 ON r2 USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r2 FORCE ROW LEVEL SECURITY; +-- Deletes all records from both +DELETE FROM r1; +-- Remove FORCE from r2 +ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; +-- As owner, we now bypass RLS +-- verify no rows in r2 now +TABLE r2; + a +--- +(0 rows) + +DROP TABLE r2; +DROP TABLE r1; +-- Ensure cascaded UPDATE works +CREATE TABLE r1 (a int PRIMARY KEY); +CREATE TABLE r2 (a int REFERENCES r1 ON UPDATE CASCADE); +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +-- Create policies on r2 which prevent the +-- owner from seeing any rows, but RI should +-- still see them. +CREATE POLICY p1 ON r2 USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r2 FORCE ROW LEVEL SECURITY; +-- Updates records in both +UPDATE r1 SET a = a+5; +-- Remove FORCE from r2 +ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; +-- As owner, we now bypass RLS +-- verify records in r2 updated +TABLE r2; + a +---- + 15 + 25 +(2 rows) + +DROP TABLE r2; +DROP TABLE r1; +-- +-- Test INSERT+RETURNING applies SELECT policies as +-- WithCheckOptions (meaning an error is thrown) +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int); +CREATE POLICY p1 ON r1 FOR SELECT USING (false); +CREATE POLICY p2 ON r1 FOR INSERT WITH CHECK (true); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- Works fine +INSERT INTO r1 VALUES (10), (20); +-- No error, but no rows +TABLE r1; + a +--- +(0 rows) + +SET row_security = off; +-- fail, would be affected by RLS +TABLE r1; +ERROR: query would be affected by row-level security policy for table "r1" +HINT: To disable the policy for the table's owner, use ALTER TABLE NO FORCE ROW LEVEL SECURITY. 
+SET row_security = on; +-- Error +INSERT INTO r1 VALUES (10), (20) RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +DROP TABLE r1; +-- +-- Test UPDATE+RETURNING applies SELECT policies as +-- WithCheckOptions (meaning an error is thrown) +-- +SET SESSION AUTHORIZATION regress_rls_alice; +SET row_security = on; +CREATE TABLE r1 (a int PRIMARY KEY); +CREATE POLICY p1 ON r1 FOR SELECT USING (a < 20); +CREATE POLICY p2 ON r1 FOR UPDATE USING (a < 20) WITH CHECK (true); +CREATE POLICY p3 ON r1 FOR INSERT WITH CHECK (true); +INSERT INTO r1 VALUES (10); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- Works fine +UPDATE r1 SET a = 30; +-- Show updated rows +ALTER TABLE r1 NO FORCE ROW LEVEL SECURITY; +TABLE r1; + a +---- + 30 +(1 row) + +-- reset value in r1 for test with RETURNING +UPDATE r1 SET a = 10; +-- Verify row reset +TABLE r1; + a +---- + 10 +(1 row) + +ALTER TABLE r1 FORCE ROW LEVEL SECURITY; +-- Error +UPDATE r1 SET a = 30 RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +-- UPDATE path of INSERT ... ON CONFLICT DO UPDATE should also error out +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30 RETURNING *; +ERROR: new row violates row-level security policy for table "r1" +-- Should still error out without RETURNING (use of arbiter always requires +-- SELECT permissions) +INSERT INTO r1 VALUES (10) + ON CONFLICT (a) DO UPDATE SET a = 30; +ERROR: new row violates row-level security policy for table "r1" +INSERT INTO r1 VALUES (10) + ON CONFLICT ON CONSTRAINT r1_pkey DO UPDATE SET a = 30; +ERROR: new row violates row-level security policy for table "r1" +DROP TABLE r1; +-- Check dependency handling +RESET SESSION AUTHORIZATION; +CREATE TABLE dep1 (c1 int); +CREATE TABLE dep2 (c1 int); +CREATE POLICY dep_p1 ON dep1 TO regress_rls_bob USING (c1 > (select max(dep2.c1) from dep2)); +ALTER POLICY dep_p1 ON dep1 TO regress_rls_bob,regress_rls_carol; +-- Should return one +SELECT count(*) = 1 FROM pg_depend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2'); + ?column? +---------- + t +(1 row) + +ALTER POLICY dep_p1 ON dep1 USING (true); +-- Should return one +SELECT count(*) = 1 FROM pg_shdepend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_bob'); + ?column? +---------- + t +(1 row) + +-- Should return one +SELECT count(*) = 1 FROM pg_shdepend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_authid WHERE rolname = 'regress_rls_carol'); + ?column? +---------- + t +(1 row) + +-- Should return zero +SELECT count(*) = 0 FROM pg_depend + WHERE objid = (SELECT oid FROM pg_policy WHERE polname = 'dep_p1') + AND refobjid = (SELECT oid FROM pg_class WHERE relname = 'dep2'); + ?column? 
+---------- + t +(1 row) + +-- DROP OWNED BY testing +RESET SESSION AUTHORIZATION; +CREATE ROLE regress_rls_dob_role1; +CREATE ROLE regress_rls_dob_role2; +CREATE TABLE dob_t1 (c1 int); +CREATE TABLE dob_t2 (c1 int) PARTITION BY RANGE (c1); +CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t1; -- should fail, already gone +ERROR: policy "p1" for table "dob_t1" does not exist +CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t1; -- should succeed +-- same cases with duplicate polroles entries +CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role1 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t1; -- should fail, already gone +ERROR: policy "p1" for table "dob_t1" does not exist +CREATE POLICY p1 ON dob_t1 TO regress_rls_dob_role1,regress_rls_dob_role1,regress_rls_dob_role2 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t1; -- should succeed +-- partitioned target +CREATE POLICY p1 ON dob_t2 TO regress_rls_dob_role1,regress_rls_dob_role2 USING (true); +DROP OWNED BY regress_rls_dob_role1; +DROP POLICY p1 ON dob_t2; -- should succeed +DROP USER regress_rls_dob_role1; +DROP USER regress_rls_dob_role2; +-- Bug #15708: view + table with RLS should check policies as view owner +CREATE TABLE ref_tbl (a int); +INSERT INTO ref_tbl VALUES (1); +CREATE TABLE rls_tbl (a int); +INSERT INTO rls_tbl VALUES (10); +ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY p1 ON rls_tbl USING (EXISTS (SELECT 1 FROM ref_tbl)); +GRANT SELECT ON ref_tbl TO regress_rls_bob; +GRANT SELECT ON rls_tbl TO regress_rls_bob; +CREATE VIEW rls_view AS SELECT * FROM rls_tbl; +ALTER VIEW rls_view OWNER TO regress_rls_bob; +GRANT SELECT ON rls_view TO regress_rls_alice; +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM ref_tbl; -- Permission denied +ERROR: permission denied for table ref_tbl +SELECT * FROM rls_tbl; -- Permission denied +ERROR: permission denied for table rls_tbl +SELECT * FROM rls_view; -- OK + a +---- + 10 +(1 row) + +RESET SESSION AUTHORIZATION; +DROP VIEW rls_view; +DROP TABLE rls_tbl; +DROP TABLE ref_tbl; +-- Leaky operator test +CREATE TABLE rls_tbl (a int); +INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_tbl; +ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_tbl TO regress_rls_alice; +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE FUNCTION op_leak(int, int) RETURNS bool + AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' + LANGUAGE plpgsql; +CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, + restrict = scalarltsel); +SELECT * FROM rls_tbl WHERE a <<< 1000; + a +--- +(0 rows) + +DROP OPERATOR <<< (int, int); +DROP FUNCTION op_leak(int, int); +RESET SESSION AUTHORIZATION; +DROP TABLE rls_tbl; +-- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects +SET SESSION AUTHORIZATION regress_rls_alice; +CREATE TABLE rls_tbl (a int, b int, c int); +CREATE POLICY p1 ON rls_tbl USING (rls_tbl >= ROW(1,1,1)); +ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_tbl FORCE ROW LEVEL SECURITY; +INSERT INTO rls_tbl SELECT 10, 20, 30; +EXPLAIN (VERBOSE, COSTS OFF) +INSERT INTO rls_tbl + SELECT * FROM (SELECT b, c FROM rls_tbl ORDER BY a) ss; + QUERY PLAN +-------------------------------------------------------------------- + 
Insert on regress_rls_schema.rls_tbl + -> Subquery Scan on ss + Output: ss.b, ss.c, NULL::integer + -> Sort + Output: rls_tbl_1.b, rls_tbl_1.c, rls_tbl_1.a + Sort Key: rls_tbl_1.a + -> Seq Scan on regress_rls_schema.rls_tbl rls_tbl_1 + Output: rls_tbl_1.b, rls_tbl_1.c, rls_tbl_1.a + Filter: (rls_tbl_1.* >= '(1,1,1)'::record) +(9 rows) + +INSERT INTO rls_tbl + SELECT * FROM (SELECT b, c FROM rls_tbl ORDER BY a) ss; +SELECT * FROM rls_tbl; + a | b | c +----+----+---- + 10 | 20 | 30 + 20 | 30 | +(2 rows) + +DROP TABLE rls_tbl; +RESET SESSION AUTHORIZATION; +-- CVE-2023-2455: inlining an SRF may introduce an RLS dependency +create table rls_t (c text); +insert into rls_t values ('invisible to bob'); +alter table rls_t enable row level security; +grant select on rls_t to regress_rls_alice, regress_rls_bob; +create policy p1 on rls_t for select to regress_rls_alice using (true); +create policy p2 on rls_t for select to regress_rls_bob using (false); +create function rls_f () returns setof rls_t + stable language sql + as $$ select * from rls_t $$; +prepare q as select current_user, * from rls_f(); +set role regress_rls_alice; +execute q; + current_user | c +-------------------+------------------ + regress_rls_alice | invisible to bob +(1 row) + +set role regress_rls_bob; +execute q; + current_user | c +--------------+--- +(0 rows) + +RESET ROLE; +DROP FUNCTION rls_f(); +DROP TABLE rls_t; +-- +-- Clean up objects +-- +RESET SESSION AUTHORIZATION; +DROP SCHEMA regress_rls_schema CASCADE; +NOTICE: drop cascades to 30 other objects +DETAIL: drop cascades to function f_leak(text) +drop cascades to table uaccount +drop cascades to table category +drop cascades to table document +drop cascades to table part_document +drop cascades to table dependent +drop cascades to table rec1 +drop cascades to table rec2 +drop cascades to view rec1v +drop cascades to view rec2v +drop cascades to table s1 +drop cascades to table s2 +drop cascades to view v2 +drop cascades to table b1 +drop cascades to view bv1 +drop cascades to table z1 +drop cascades to table z2 +drop cascades to table z1_blacklist +drop cascades to table x1 +drop cascades to table y1 +drop cascades to table y2 +drop cascades to table t1 +drop cascades to table t2 +drop cascades to table t3 +drop cascades to table t4 +drop cascades to table current_check +drop cascades to table dep1 +drop cascades to table dep2 +drop cascades to table dob_t1 +drop cascades to table dob_t2 +DROP USER regress_rls_alice; +DROP USER regress_rls_bob; +DROP USER regress_rls_carol; +DROP USER regress_rls_dave; +DROP USER regress_rls_exempt_user; +DROP ROLE regress_rls_group1; +DROP ROLE regress_rls_group2; +-- Arrange to have a few policies left over, for testing +-- pg_dump/pg_restore +CREATE SCHEMA regress_rls_schema; +CREATE TABLE rls_tbl (c1 int); +ALTER TABLE rls_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY p1 ON rls_tbl USING (c1 > 5); +CREATE POLICY p2 ON rls_tbl FOR SELECT USING (c1 <= 3); +CREATE POLICY p3 ON rls_tbl FOR UPDATE USING (c1 <= 3) WITH CHECK (c1 > 5); +CREATE POLICY p4 ON rls_tbl FOR DELETE USING (c1 <= 3); +CREATE TABLE rls_tbl_force (c1 int); +ALTER TABLE rls_tbl_force ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_tbl_force FORCE ROW LEVEL SECURITY; +CREATE POLICY p1 ON rls_tbl_force USING (c1 = 5) WITH CHECK (c1 < 5); +CREATE POLICY p2 ON rls_tbl_force FOR SELECT USING (c1 = 8); +CREATE POLICY p3 ON rls_tbl_force FOR UPDATE USING (c1 = 8) WITH CHECK (c1 >= 5); +CREATE POLICY p4 ON rls_tbl_force FOR DELETE USING (c1 = 8); diff --git 
a/src/test/regress/expected/rowtypes.out b/src/test/regress/expected/rowtypes.out new file mode 100644 index 0000000..8f3c153 --- /dev/null +++ b/src/test/regress/expected/rowtypes.out @@ -0,0 +1,1342 @@ +-- +-- ROWTYPES +-- +-- Make both a standalone composite type and a table rowtype +create type complex as (r float8, i float8); +create temp table fullname (first text, last text); +-- Nested composite +create type quad as (c1 complex, c2 complex); +-- Some simple tests of I/O conversions and row construction +select (1.1,2.2)::complex, row((3.3,4.4),(5.5,null))::quad; + row | row +-----------+------------------------ + (1.1,2.2) | ("(3.3,4.4)","(5.5,)") +(1 row) + +select row('Joe', 'Blow')::fullname, '(Joe,Blow)'::fullname; + row | fullname +------------+------------ + (Joe,Blow) | (Joe,Blow) +(1 row) + +select '(Joe,von Blow)'::fullname, '(Joe,d''Blow)'::fullname; + fullname | fullname +------------------+-------------- + (Joe,"von Blow") | (Joe,d'Blow) +(1 row) + +select '(Joe,"von""Blow")'::fullname, E'(Joe,d\\\\Blow)'::fullname; + fullname | fullname +-------------------+----------------- + (Joe,"von""Blow") | (Joe,"d\\Blow") +(1 row) + +select '(Joe,"Blow,Jr")'::fullname; + fullname +----------------- + (Joe,"Blow,Jr") +(1 row) + +select '(Joe,)'::fullname; -- ok, null 2nd column + fullname +---------- + (Joe,) +(1 row) + +select '(Joe)'::fullname; -- bad +ERROR: malformed record literal: "(Joe)" +LINE 1: select '(Joe)'::fullname; + ^ +DETAIL: Too few columns. +select '(Joe,,)'::fullname; -- bad +ERROR: malformed record literal: "(Joe,,)" +LINE 1: select '(Joe,,)'::fullname; + ^ +DETAIL: Too many columns. +select '[]'::fullname; -- bad +ERROR: malformed record literal: "[]" +LINE 1: select '[]'::fullname; + ^ +DETAIL: Missing left parenthesis. +select ' (Joe,Blow) '::fullname; -- ok, extra whitespace + fullname +------------ + (Joe,Blow) +(1 row) + +select '(Joe,Blow) /'::fullname; -- bad +ERROR: malformed record literal: "(Joe,Blow) /" +LINE 1: select '(Joe,Blow) /'::fullname; + ^ +DETAIL: Junk after right parenthesis. 
+-- test non-error-throwing API +SELECT pg_input_is_valid('(1,2)', 'complex'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('(1,2', 'complex'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('(1,zed)', 'complex'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(1,zed)', 'complex'); + message | detail | hint | sql_error_code +-------------------------------------------------------+--------+------+---------------- + invalid input syntax for type double precision: "zed" | | | 22P02 +(1 row) + +SELECT * FROM pg_input_error_info('(1,1e400)', 'complex'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + "1e400" is out of range for type double precision | | | 22003 +(1 row) + +create temp table quadtable(f1 int, q quad); +insert into quadtable values (1, ((3.3,4.4),(5.5,6.6))); +insert into quadtable values (2, ((null,4.4),(5.5,6.6))); +select * from quadtable; + f1 | q +----+--------------------------- + 1 | ("(3.3,4.4)","(5.5,6.6)") + 2 | ("(,4.4)","(5.5,6.6)") +(2 rows) + +select f1, q.c1 from quadtable; -- fails, q is a table reference +ERROR: missing FROM-clause entry for table "q" +LINE 1: select f1, q.c1 from quadtable; + ^ +select f1, (q).c1, (qq.q).c1.i from quadtable qq; + f1 | c1 | i +----+-----------+----- + 1 | (3.3,4.4) | 4.4 + 2 | (,4.4) | 4.4 +(2 rows) + +create temp table people (fn fullname, bd date); +insert into people values ('(Joe,Blow)', '1984-01-10'); +select * from people; + fn | bd +------------+------------ + (Joe,Blow) | 01-10-1984 +(1 row) + +-- at the moment this will not work due to ALTER TABLE inadequacy: +alter table fullname add column suffix text default ''; +ERROR: cannot alter table "fullname" because column "people.fn" uses its row type +-- but this should work: +alter table fullname add column suffix text default null; +select * from people; + fn | bd +-------------+------------ + (Joe,Blow,) | 01-10-1984 +(1 row) + +-- test insertion/updating of subfields +update people set fn.suffix = 'Jr'; +select * from people; + fn | bd +---------------+------------ + (Joe,Blow,Jr) | 01-10-1984 +(1 row) + +insert into quadtable (f1, q.c1.r, q.c2.i) values(44,55,66); +update quadtable set q.c1.r = 12 where f1 = 2; +update quadtable set q.c1 = 12; -- error, type mismatch +ERROR: subfield "c1" is of type complex but expression is of type integer +LINE 1: update quadtable set q.c1 = 12; + ^ +HINT: You will need to rewrite or cast the expression. +select * from quadtable; + f1 | q +----+--------------------------- + 1 | ("(3.3,4.4)","(5.5,6.6)") + 44 | ("(55,)","(,66)") + 2 | ("(12,4.4)","(5.5,6.6)") +(3 rows) + +-- The object here is to ensure that toasted references inside +-- composite values don't cause problems. The large f1 value will +-- be toasted inside pp, it must still work after being copied to people. 
+create temp table pp (f1 text); +insert into pp values (repeat('abcdefghijkl', 100000)); +insert into people select ('Jim', f1, null)::fullname, current_date from pp; +select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people; + first | substr | length +-------+----------------------+--------- + Joe | Blow | 4 + Jim | abcdefghijklabcdefgh | 1200000 +(2 rows) + +-- try an update on a toasted composite value, too +update people set fn.first = 'Jack'; +select (fn).first, substr((fn).last, 1, 20), length((fn).last) from people; + first | substr | length +-------+----------------------+--------- + Jack | Blow | 4 + Jack | abcdefghijklabcdefgh | 1200000 +(2 rows) + +-- Test row comparison semantics. Prior to PG 8.2 we did this in a totally +-- non-spec-compliant way. +select ROW(1,2) < ROW(1,3) as true; + true +------ + t +(1 row) + +select ROW(1,2) < ROW(1,1) as false; + false +------- + f +(1 row) + +select ROW(1,2) < ROW(1,NULL) as null; + null +------ + +(1 row) + +select ROW(1,2,3) < ROW(1,3,NULL) as true; -- the NULL is not examined + true +------ + t +(1 row) + +select ROW(11,'ABC') < ROW(11,'DEF') as true; + true +------ + t +(1 row) + +select ROW(11,'ABC') > ROW(11,'DEF') as false; + false +------- + f +(1 row) + +select ROW(12,'ABC') > ROW(11,'DEF') as true; + true +------ + t +(1 row) + +-- = and <> have different NULL-behavior than < etc +select ROW(1,2,3) < ROW(1,NULL,4) as null; + null +------ + +(1 row) + +select ROW(1,2,3) = ROW(1,NULL,4) as false; + false +------- + f +(1 row) + +select ROW(1,2,3) <> ROW(1,NULL,4) as true; + true +------ + t +(1 row) + +-- We allow operators beyond the six standard ones, if they have btree +-- operator classes. +select ROW('ABC','DEF') ~<=~ ROW('DEF','ABC') as true; + true +------ + t +(1 row) + +select ROW('ABC','DEF') ~>=~ ROW('DEF','ABC') as false; + false +------- + f +(1 row) + +select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail; +ERROR: could not determine interpretation of row comparison operator ~~ +LINE 1: select ROW('ABC','DEF') ~~ ROW('DEF','ABC') as fail; + ^ +HINT: Row comparison operators must be associated with btree operator families. +-- Comparisons of ROW() expressions can cope with some type mismatches +select ROW(1,2) = ROW(1,2::int8); + ?column? +---------- + t +(1 row) + +select ROW(1,2) in (ROW(3,4), ROW(1,2)); + ?column? +---------- + t +(1 row) + +select ROW(1,2) in (ROW(3,4), ROW(1,2::int8)); + ?column? 
+---------- + t +(1 row) + +-- Check row comparison with a subselect +select unique1, unique2 from tenk1 +where (unique1, unique2) < any (select ten, ten from tenk1 where hundred < 3) + and unique1 <= 20 +order by 1; + unique1 | unique2 +---------+--------- + 0 | 9998 + 1 | 2838 +(2 rows) + +-- Also check row comparison with an indexable condition +explain (costs off) +select thousand, tenthous from tenk1 +where (thousand, tenthous) >= (997, 5000) +order by thousand, tenthous; + QUERY PLAN +----------------------------------------------------------- + Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: (ROW(thousand, tenthous) >= ROW(997, 5000)) +(2 rows) + +select thousand, tenthous from tenk1 +where (thousand, tenthous) >= (997, 5000) +order by thousand, tenthous; + thousand | tenthous +----------+---------- + 997 | 5997 + 997 | 6997 + 997 | 7997 + 997 | 8997 + 997 | 9997 + 998 | 998 + 998 | 1998 + 998 | 2998 + 998 | 3998 + 998 | 4998 + 998 | 5998 + 998 | 6998 + 998 | 7998 + 998 | 8998 + 998 | 9998 + 999 | 999 + 999 | 1999 + 999 | 2999 + 999 | 3999 + 999 | 4999 + 999 | 5999 + 999 | 6999 + 999 | 7999 + 999 | 8999 + 999 | 9999 +(25 rows) + +explain (costs off) +select thousand, tenthous, four from tenk1 +where (thousand, tenthous, four) > (998, 5000, 3) +order by thousand, tenthous; + QUERY PLAN +----------------------------------------------------------------------- + Sort + Sort Key: thousand, tenthous + -> Bitmap Heap Scan on tenk1 + Filter: (ROW(thousand, tenthous, four) > ROW(998, 5000, 3)) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (ROW(thousand, tenthous) >= ROW(998, 5000)) +(6 rows) + +select thousand, tenthous, four from tenk1 +where (thousand, tenthous, four) > (998, 5000, 3) +order by thousand, tenthous; + thousand | tenthous | four +----------+----------+------ + 998 | 5998 | 2 + 998 | 6998 | 2 + 998 | 7998 | 2 + 998 | 8998 | 2 + 998 | 9998 | 2 + 999 | 999 | 3 + 999 | 1999 | 3 + 999 | 2999 | 3 + 999 | 3999 | 3 + 999 | 4999 | 3 + 999 | 5999 | 3 + 999 | 6999 | 3 + 999 | 7999 | 3 + 999 | 8999 | 3 + 999 | 9999 | 3 +(15 rows) + +explain (costs off) +select thousand, tenthous from tenk1 +where (998, 5000) < (thousand, tenthous) +order by thousand, tenthous; + QUERY PLAN +---------------------------------------------------------- + Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: (ROW(thousand, tenthous) > ROW(998, 5000)) +(2 rows) + +select thousand, tenthous from tenk1 +where (998, 5000) < (thousand, tenthous) +order by thousand, tenthous; + thousand | tenthous +----------+---------- + 998 | 5998 + 998 | 6998 + 998 | 7998 + 998 | 8998 + 998 | 9998 + 999 | 999 + 999 | 1999 + 999 | 2999 + 999 | 3999 + 999 | 4999 + 999 | 5999 + 999 | 6999 + 999 | 7999 + 999 | 8999 + 999 | 9999 +(15 rows) + +explain (costs off) +select thousand, hundred from tenk1 +where (998, 5000) < (thousand, hundred) +order by thousand, hundred; + QUERY PLAN +----------------------------------------------------------- + Sort + Sort Key: thousand, hundred + -> Bitmap Heap Scan on tenk1 + Filter: (ROW(998, 5000) < ROW(thousand, hundred)) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand >= 998) +(6 rows) + +select thousand, hundred from tenk1 +where (998, 5000) < (thousand, hundred) +order by thousand, hundred; + thousand | hundred +----------+--------- + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 + 999 | 99 +(10 rows) + +-- Test case for bug #14010: indexed row comparisons fail with nulls 
+create temp table test_table (a text, b text); +insert into test_table values ('a', 'b'); +insert into test_table select 'a', null from generate_series(1,1000); +insert into test_table values ('b', 'a'); +create index on test_table (a,b); +set enable_sort = off; +explain (costs off) +select a,b from test_table where (a,b) > ('a','a') order by a,b; + QUERY PLAN +-------------------------------------------------------- + Index Only Scan using test_table_a_b_idx on test_table + Index Cond: (ROW(a, b) > ROW('a'::text, 'a'::text)) +(2 rows) + +select a,b from test_table where (a,b) > ('a','a') order by a,b; + a | b +---+--- + a | b + b | a +(2 rows) + +reset enable_sort; +-- Check row comparisons with IN +select * from int8_tbl i8 where i8 in (row(123,456)); -- fail, type mismatch +ERROR: cannot compare dissimilar column types bigint and integer at record column 1 +explain (costs off) +select * from int8_tbl i8 +where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)'); + QUERY PLAN +------------------------------------------------------------------------------- + Seq Scan on int8_tbl i8 + Filter: (i8.* = ANY ('{"(123,456)","(4567890123456789,123)"}'::int8_tbl[])) +(2 rows) + +select * from int8_tbl i8 +where i8 in (row(123,456)::int8_tbl, '(4567890123456789,123)'); + q1 | q2 +------------------+----- + 123 | 456 + 4567890123456789 | 123 +(2 rows) + +-- Check ability to select columns from an anonymous rowtype +select (row(1, 2.0)).f1; + f1 +---- + 1 +(1 row) + +select (row(1, 2.0)).f2; + f2 +----- + 2.0 +(1 row) + +select (row(1, 2.0)).nosuch; -- fail +ERROR: could not identify column "nosuch" in record data type +LINE 1: select (row(1, 2.0)).nosuch; + ^ +select (row(1, 2.0)).*; + f1 | f2 +----+----- + 1 | 2.0 +(1 row) + +select (r).f1 from (select row(1, 2.0) as r) ss; + f1 +---- + 1 +(1 row) + +select (r).f3 from (select row(1, 2.0) as r) ss; -- fail +ERROR: could not identify column "f3" in record data type +LINE 1: select (r).f3 from (select row(1, 2.0) as r) ss; + ^ +select (r).* from (select row(1, 2.0) as r) ss; + f1 | f2 +----+----- + 1 | 2.0 +(1 row) + +-- Check some corner cases involving empty rowtypes +select ROW(); + row +----- + () +(1 row) + +select ROW() IS NULL; + ?column? +---------- + t +(1 row) + +select ROW() = ROW(); +ERROR: cannot compare rows of zero length +LINE 1: select ROW() = ROW(); + ^ +-- Check ability to create arrays of anonymous rowtypes +select array[ row(1,2), row(3,4), row(5,6) ]; + array +--------------------------- + {"(1,2)","(3,4)","(5,6)"} +(1 row) + +-- Check ability to compare an anonymous row to elements of an array +select row(1,1.1) = any (array[ row(7,7.7), row(1,1.1), row(0,0.0) ]); + ?column? +---------- + t +(1 row) + +select row(1,1.1) = any (array[ row(7,7.7), row(1,1.0), row(0,0.0) ]); + ?column? +---------- + f +(1 row) + +-- Check behavior with a non-comparable rowtype +create type cantcompare as (p point, r float8); +create temp table cc (f1 cantcompare); +insert into cc values('("(1,2)",3)'); +insert into cc values('("(4,5)",6)'); +select * from cc order by f1; -- fail, but should complain about cantcompare +ERROR: could not identify an ordering operator for type cantcompare +LINE 1: select * from cc order by f1; + ^ +HINT: Use an explicit ordering operator or modify the query. +-- +-- Tests for record_{eq,cmp} +-- +create type testtype1 as (a int, b int); +-- all true +select row(1, 2)::testtype1 < row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 <= row(1, 3)::testtype1; + ?column? 
+---------- + t +(1 row) + +select row(1, 2)::testtype1 = row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 <> row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 >= row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 > row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +-- all false +select row(1, -2)::testtype1 < row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 <= row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 = row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 <> row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 >= row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 > row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +-- true, but see *< below +select row(1, -2)::testtype1 < row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +-- mismatches +create type testtype3 as (a int, b text); +select row(1, 2)::testtype1 < row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +select row(1, 2)::testtype1 <> row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +create type testtype5 as (a int); +select row(1, 2)::testtype1 < row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +select row(1, 2)::testtype1 <> row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +-- non-comparable types +create type testtype6 as (a int, b point); +select row(1, '(1,2)')::testtype6 < row(1, '(1,3)')::testtype6; +ERROR: could not identify a comparison function for type point +select row(1, '(1,2)')::testtype6 <> row(1, '(1,3)')::testtype6; +ERROR: could not identify an equality operator for type point +drop type testtype1, testtype3, testtype5, testtype6; +-- +-- Tests for record_image_{eq,cmp} +-- +create type testtype1 as (a int, b int); +-- all true +select row(1, 2)::testtype1 *< row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 *<= row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 *= row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 2)::testtype1 *<> row(1, 3)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 *>= row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +select row(1, 3)::testtype1 *> row(1, 2)::testtype1; + ?column? +---------- + t +(1 row) + +-- all false +select row(1, -2)::testtype1 *< row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 *<= row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 *= row(1, -3)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -2)::testtype1 *<> row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 *>= row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +select row(1, -3)::testtype1 *> row(1, -2)::testtype1; + ?column? +---------- + f +(1 row) + +-- This returns the "wrong" order because record_image_cmp works on +-- unsigned datums without knowing about the actual data type. +select row(1, -2)::testtype1 *< row(1, 3)::testtype1; + ?column? 
+---------- + f +(1 row) + +-- other types +create type testtype2 as (a smallint, b bool); -- byval different sizes +select row(1, true)::testtype2 *< row(2, true)::testtype2; + ?column? +---------- + t +(1 row) + +select row(-2, true)::testtype2 *< row(-1, true)::testtype2; + ?column? +---------- + t +(1 row) + +select row(0, false)::testtype2 *< row(0, true)::testtype2; + ?column? +---------- + t +(1 row) + +select row(0, false)::testtype2 *<> row(0, true)::testtype2; + ?column? +---------- + t +(1 row) + +create type testtype3 as (a int, b text); -- variable length +select row(1, 'abc')::testtype3 *< row(1, 'abd')::testtype3; + ?column? +---------- + t +(1 row) + +select row(1, 'abc')::testtype3 *< row(1, 'abcd')::testtype3; + ?column? +---------- + t +(1 row) + +select row(1, 'abc')::testtype3 *> row(1, 'abd')::testtype3; + ?column? +---------- + f +(1 row) + +select row(1, 'abc')::testtype3 *<> row(1, 'abd')::testtype3; + ?column? +---------- + t +(1 row) + +create type testtype4 as (a int, b point); -- by ref, fixed length +select row(1, '(1,2)')::testtype4 *< row(1, '(1,3)')::testtype4; + ?column? +---------- + t +(1 row) + +select row(1, '(1,2)')::testtype4 *<> row(1, '(1,3)')::testtype4; + ?column? +---------- + t +(1 row) + +-- mismatches +select row(1, 2)::testtype1 *< row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +select row(1, 2)::testtype1 *<> row(1, 'abc')::testtype3; +ERROR: cannot compare dissimilar column types integer and text at record column 2 +create type testtype5 as (a int); +select row(1, 2)::testtype1 *< row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +select row(1, 2)::testtype1 *<> row(1)::testtype5; +ERROR: cannot compare record types with different numbers of columns +-- non-comparable types +create type testtype6 as (a int, b point); +select row(1, '(1,2)')::testtype6 *< row(1, '(1,3)')::testtype6; + ?column? +---------- + t +(1 row) + +select row(1, '(1,2)')::testtype6 *>= row(1, '(1,3)')::testtype6; + ?column? +---------- + f +(1 row) + +select row(1, '(1,2)')::testtype6 *<> row(1, '(1,3)')::testtype6; + ?column? +---------- + t +(1 row) + +-- anonymous rowtypes in coldeflists +select q.a, q.b = row(2), q.c = array[row(3)], q.d = row(row(4)) from + unnest(array[row(1, row(2), array[row(3)], row(row(4))), + row(2, row(3), array[row(4)], row(row(5)))]) + as q(a int, b record, c record[], d record); + a | ?column? | ?column? | ?column? 
+---+----------+----------+---------- + 1 | t | t | t + 2 | f | f | f +(2 rows) + +drop type testtype1, testtype2, testtype3, testtype4, testtype5, testtype6; +-- +-- Test case derived from bug #5716: check multiple uses of a rowtype result +-- +BEGIN; +CREATE TABLE price ( + id SERIAL PRIMARY KEY, + active BOOLEAN NOT NULL, + price NUMERIC +); +CREATE TYPE price_input AS ( + id INTEGER, + price NUMERIC +); +CREATE TYPE price_key AS ( + id INTEGER +); +CREATE FUNCTION price_key_from_table(price) RETURNS price_key AS $$ + SELECT $1.id +$$ LANGUAGE SQL; +CREATE FUNCTION price_key_from_input(price_input) RETURNS price_key AS $$ + SELECT $1.id +$$ LANGUAGE SQL; +insert into price values (1,false,42), (10,false,100), (11,true,17.99); +UPDATE price + SET active = true, price = input_prices.price + FROM unnest(ARRAY[(10, 123.00), (11, 99.99)]::price_input[]) input_prices + WHERE price_key_from_table(price.*) = price_key_from_input(input_prices.*); +select * from price; + id | active | price +----+--------+-------- + 1 | f | 42 + 10 | t | 123.00 + 11 | t | 99.99 +(3 rows) + +rollback; +-- +-- Test case derived from bug #9085: check * qualification of composite +-- parameters for SQL functions +-- +create temp table compos (f1 int, f2 text); +create function fcompos1(v compos) returns void as $$ +insert into compos values (v); -- fail +$$ language sql; +ERROR: column "f1" is of type integer but expression is of type compos +LINE 2: insert into compos values (v); -- fail + ^ +HINT: You will need to rewrite or cast the expression. +create function fcompos1(v compos) returns void as $$ +insert into compos values (v.*); +$$ language sql; +create function fcompos2(v compos) returns void as $$ +select fcompos1(v); +$$ language sql; +create function fcompos3(v compos) returns void as $$ +select fcompos1(fcompos3.v.*); +$$ language sql; +select fcompos1(row(1,'one')); + fcompos1 +---------- + +(1 row) + +select fcompos2(row(2,'two')); + fcompos2 +---------- + +(1 row) + +select fcompos3(row(3,'three')); + fcompos3 +---------- + +(1 row) + +select * from compos; + f1 | f2 +----+------- + 1 | one + 2 | two + 3 | three +(3 rows) + +-- +-- We allow I/O conversion casts from composite types to strings to be +-- invoked via cast syntax, but not functional syntax. This is because +-- the latter is too prone to be invoked unintentionally. +-- +select cast (fullname as text) from fullname; + fullname +---------- +(0 rows) + +select fullname::text from fullname; + fullname +---------- +(0 rows) + +select text(fullname) from fullname; -- error +ERROR: function text(fullname) does not exist +LINE 1: select text(fullname) from fullname; + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select fullname.text from fullname; -- error +ERROR: column fullname.text does not exist +LINE 1: select fullname.text from fullname; + ^ +-- same, but RECORD instead of named composite type: +select cast (row('Jim', 'Beam') as text); + row +------------ + (Jim,Beam) +(1 row) + +select (row('Jim', 'Beam'))::text; + row +------------ + (Jim,Beam) +(1 row) + +select text(row('Jim', 'Beam')); -- error +ERROR: function text(record) does not exist +LINE 1: select text(row('Jim', 'Beam')); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. 
+select (row('Jim', 'Beam')).text; -- error +ERROR: could not identify column "text" in record data type +LINE 1: select (row('Jim', 'Beam')).text; + ^ +-- +-- Check the equivalence of functional and column notation +-- +insert into fullname values ('Joe', 'Blow'); +select f.last from fullname f; + last +------ + Blow +(1 row) + +select last(f) from fullname f; + last +------ + Blow +(1 row) + +create function longname(fullname) returns text language sql +as $$select $1.first || ' ' || $1.last$$; +select f.longname from fullname f; + longname +---------- + Joe Blow +(1 row) + +select longname(f) from fullname f; + longname +---------- + Joe Blow +(1 row) + +-- Starting in v11, the notational form does matter if there's ambiguity +alter table fullname add column longname text; +select f.longname from fullname f; + longname +---------- + +(1 row) + +select longname(f) from fullname f; + longname +---------- + Joe Blow +(1 row) + +-- +-- Test that composite values are seen to have the correct column names +-- (bug #11210 and other reports) +-- +select row_to_json(i) from int8_tbl i; + row_to_json +------------------------------------------------ + {"q1":123,"q2":456} + {"q1":123,"q2":4567890123456789} + {"q1":4567890123456789,"q2":123} + {"q1":4567890123456789,"q2":4567890123456789} + {"q1":4567890123456789,"q2":-4567890123456789} +(5 rows) + +-- since "i" is of type "int8_tbl", attaching aliases doesn't change anything: +select row_to_json(i) from int8_tbl i(x,y); + row_to_json +------------------------------------------------ + {"q1":123,"q2":456} + {"q1":123,"q2":4567890123456789} + {"q1":4567890123456789,"q2":123} + {"q1":4567890123456789,"q2":4567890123456789} + {"q1":4567890123456789,"q2":-4567890123456789} +(5 rows) + +-- in these examples, we'll report the exposed column names of the subselect: +select row_to_json(ss) from + (select q1, q2 from int8_tbl) as ss; + row_to_json +------------------------------------------------ + {"q1":123,"q2":456} + {"q1":123,"q2":4567890123456789} + {"q1":4567890123456789,"q2":123} + {"q1":4567890123456789,"q2":4567890123456789} + {"q1":4567890123456789,"q2":-4567890123456789} +(5 rows) + +select row_to_json(ss) from + (select q1, q2 from int8_tbl offset 0) as ss; + row_to_json +------------------------------------------------ + {"q1":123,"q2":456} + {"q1":123,"q2":4567890123456789} + {"q1":4567890123456789,"q2":123} + {"q1":4567890123456789,"q2":4567890123456789} + {"q1":4567890123456789,"q2":-4567890123456789} +(5 rows) + +select row_to_json(ss) from + (select q1 as a, q2 as b from int8_tbl) as ss; + row_to_json +---------------------------------------------- + {"a":123,"b":456} + {"a":123,"b":4567890123456789} + {"a":4567890123456789,"b":123} + {"a":4567890123456789,"b":4567890123456789} + {"a":4567890123456789,"b":-4567890123456789} +(5 rows) + +select row_to_json(ss) from + (select q1 as a, q2 as b from int8_tbl offset 0) as ss; + row_to_json +---------------------------------------------- + {"a":123,"b":456} + {"a":123,"b":4567890123456789} + {"a":4567890123456789,"b":123} + {"a":4567890123456789,"b":4567890123456789} + {"a":4567890123456789,"b":-4567890123456789} +(5 rows) + +select row_to_json(ss) from + (select q1 as a, q2 as b from int8_tbl) as ss(x,y); + row_to_json +---------------------------------------------- + {"x":123,"y":456} + {"x":123,"y":4567890123456789} + {"x":4567890123456789,"y":123} + {"x":4567890123456789,"y":4567890123456789} + {"x":4567890123456789,"y":-4567890123456789} +(5 rows) + +select row_to_json(ss) from + (select q1 
as a, q2 as b from int8_tbl offset 0) as ss(x,y); + row_to_json +---------------------------------------------- + {"x":123,"y":456} + {"x":123,"y":4567890123456789} + {"x":4567890123456789,"y":123} + {"x":4567890123456789,"y":4567890123456789} + {"x":4567890123456789,"y":-4567890123456789} +(5 rows) + +explain (costs off) +select row_to_json(q) from + (select thousand, tenthous from tenk1 + where thousand = 42 and tenthous < 2000 offset 0) q; + QUERY PLAN +------------------------------------------------------------- + Subquery Scan on q + -> Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: ((thousand = 42) AND (tenthous < 2000)) +(3 rows) + +select row_to_json(q) from + (select thousand, tenthous from tenk1 + where thousand = 42 and tenthous < 2000 offset 0) q; + row_to_json +--------------------------------- + {"thousand":42,"tenthous":42} + {"thousand":42,"tenthous":1042} +(2 rows) + +select row_to_json(q) from + (select thousand as x, tenthous as y from tenk1 + where thousand = 42 and tenthous < 2000 offset 0) q; + row_to_json +------------------- + {"x":42,"y":42} + {"x":42,"y":1042} +(2 rows) + +select row_to_json(q) from + (select thousand as x, tenthous as y from tenk1 + where thousand = 42 and tenthous < 2000 offset 0) q(a,b); + row_to_json +------------------- + {"a":42,"b":42} + {"a":42,"b":1042} +(2 rows) + +create temp table tt1 as select * from int8_tbl limit 2; +create temp table tt2 () inherits(tt1); +insert into tt2 values(0,0); +select row_to_json(r) from (select q2,q1 from tt1 offset 0) r; + row_to_json +---------------------------------- + {"q2":456,"q1":123} + {"q2":4567890123456789,"q1":123} + {"q2":0,"q1":0} +(3 rows) + +-- check no-op rowtype conversions +create temp table tt3 () inherits(tt2); +insert into tt3 values(33,44); +select row_to_json(tt3::tt2::tt1) from tt3; + row_to_json +------------------- + {"q1":33,"q2":44} +(1 row) + +-- +-- IS [NOT] NULL should not recurse into nested composites (bug #14235) +-- +explain (verbose, costs off) +select r, r is null as isnull, r is not null as isnotnull +from (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Values Scan on "*VALUES*" + Output: ROW("*VALUES*".column1, "*VALUES*".column2), (("*VALUES*".column1 IS NULL) AND ("*VALUES*".column2 IS NOT DISTINCT FROM NULL)), (("*VALUES*".column1 IS NOT NULL) AND ("*VALUES*".column2 IS DISTINCT FROM NULL)) +(2 rows) + +select r, r is null as isnull, r is not null as isnotnull +from (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) r(a,b); + r | isnull | isnotnull +-------------+--------+----------- + (1,"(1,2)") | f | t + (1,"(,)") | f | t + (1,) | f | f + (,"(1,2)") | f | f + (,"(,)") | f | f + (,) | t | f +(6 rows) + +explain (verbose, costs off) +with r(a,b) as materialized + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + QUERY PLAN +---------------------------------------------------------- + CTE Scan on r + Output: r.*, (r.* IS NULL), (r.* IS NOT NULL) + CTE r + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1, "*VALUES*".column2 +(5 rows) + +with r(a,b) as 
materialized + (values (1,row(1,2)), (1,row(null,null)), (1,null), + (null,row(1,2)), (null,row(null,null)), (null,null) ) +select r, r is null as isnull, r is not null as isnotnull from r; + r | isnull | isnotnull +-------------+--------+----------- + (1,"(1,2)") | f | t + (1,"(,)") | f | t + (1,) | f | f + (,"(1,2)") | f | f + (,"(,)") | f | f + (,) | t | f +(6 rows) + +-- +-- Check parsing of indirect references to composite values (bug #18077) +-- +explain (verbose, costs off) +with cte(c) as materialized (select row(1, 2)), + cte2(c) as (select * from cte) +select * from cte2 as t +where (select * from (select c as c1) s + where (select (c1).f1 > 0)) is not null; + QUERY PLAN +-------------------------------------------- + CTE Scan on cte + Output: cte.c + Filter: ((SubPlan 3) IS NOT NULL) + CTE cte + -> Result + Output: '(1,2)'::record + SubPlan 3 + -> Result + Output: cte.c + One-Time Filter: $2 + InitPlan 2 (returns $2) + -> Result + Output: ((cte.c).f1 > 0) +(13 rows) + +with cte(c) as materialized (select row(1, 2)), + cte2(c) as (select * from cte) +select * from cte2 as t +where (select * from (select c as c1) s + where (select (c1).f1 > 0)) is not null; + c +------- + (1,2) +(1 row) + +-- Also check deparsing of such cases +create view composite_v as +with cte(c) as materialized (select row(1, 2)), + cte2(c) as (select * from cte) +select 1 as one from cte2 as t +where (select * from (select c as c1) s + where (select (c1).f1 > 0)) is not null; +select pg_get_viewdef('composite_v', true); + pg_get_viewdef +-------------------------------------------------------- + WITH cte(c) AS MATERIALIZED ( + + SELECT ROW(1, 2) AS "row" + + ), cte2(c) AS ( + + SELECT cte.c + + FROM cte + + ) + + SELECT 1 AS one + + FROM cte2 t + + WHERE (( SELECT s.c1 + + FROM ( SELECT t.c AS c1) s + + WHERE ( SELECT (s.c1).f1 > 0))) IS NOT NULL; +(1 row) + +drop view composite_v; +-- +-- Tests for component access / FieldSelect +-- +CREATE TABLE compositetable(a text, b text); +INSERT INTO compositetable(a, b) VALUES('fa', 'fb'); +-- composite type columns can't directly be accessed (error) +SELECT d.a FROM (SELECT compositetable AS d FROM compositetable) s; +ERROR: missing FROM-clause entry for table "d" +LINE 1: SELECT d.a FROM (SELECT compositetable AS d FROM compositeta... + ^ +-- but can be accessed with proper parens +SELECT (d).a, (d).b FROM (SELECT compositetable AS d FROM compositetable) s; + a | b +----+---- + fa | fb +(1 row) + +-- system columns can't be accessed in composite types (error) +SELECT (d).ctid FROM (SELECT compositetable AS d FROM compositetable) s; +ERROR: column "ctid" not found in data type compositetable +LINE 1: SELECT (d).ctid FROM (SELECT compositetable AS d FROM compos... 
+ ^ +-- accessing non-existing column in NULL datum errors out +SELECT (NULL::compositetable).nonexistent; +ERROR: column "nonexistent" not found in data type compositetable +LINE 1: SELECT (NULL::compositetable).nonexistent; + ^ +-- existing column in a NULL composite yields NULL +SELECT (NULL::compositetable).a; + a +--- + +(1 row) + +-- oids can't be accessed in composite types (error) +SELECT (NULL::compositetable).oid; +ERROR: column "oid" not found in data type compositetable +LINE 1: SELECT (NULL::compositetable).oid; + ^ +DROP TABLE compositetable; diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out new file mode 100644 index 0000000..7fd81e6 --- /dev/null +++ b/src/test/regress/expected/rules.out @@ -0,0 +1,3752 @@ +-- +-- RULES +-- From Jan's original setup_ruletest.sql and run_ruletest.sql +-- - thomas 1998-09-13 +-- +-- +-- Tables and rules for the view test +-- +create table rtest_t1 (a int4, b int4); +create table rtest_t2 (a int4, b int4); +create table rtest_t3 (a int4, b int4); +create view rtest_v1 as select * from rtest_t1; +create rule rtest_v1_ins as on insert to rtest_v1 do instead + insert into rtest_t1 values (new.a, new.b); +create rule rtest_v1_upd as on update to rtest_v1 do instead + update rtest_t1 set a = new.a, b = new.b + where a = old.a; +create rule rtest_v1_del as on delete to rtest_v1 do instead + delete from rtest_t1 where a = old.a; +-- Test comments +COMMENT ON RULE rtest_v1_bad ON rtest_v1 IS 'bad rule'; +ERROR: rule "rtest_v1_bad" for relation "rtest_v1" does not exist +COMMENT ON RULE rtest_v1_del ON rtest_v1 IS 'delete rule'; +COMMENT ON RULE rtest_v1_del ON rtest_v1 IS NULL; +-- +-- Tables and rules for the constraint update/delete test +-- +-- Note: +-- Now that we have multiple action rule support, we check +-- both possible syntaxes to define them (The last action +-- can, but need not, have a semicolon at the end).
+-- +create table rtest_system (sysname text, sysdesc text); +create table rtest_interface (sysname text, ifname text); +create table rtest_person (pname text, pdesc text); +create table rtest_admin (pname text, sysname text); +create rule rtest_sys_upd as on update to rtest_system do also ( + update rtest_interface set sysname = new.sysname + where sysname = old.sysname; + update rtest_admin set sysname = new.sysname + where sysname = old.sysname + ); +create rule rtest_sys_del as on delete to rtest_system do also ( + delete from rtest_interface where sysname = old.sysname; + delete from rtest_admin where sysname = old.sysname; + ); +create rule rtest_pers_upd as on update to rtest_person do also + update rtest_admin set pname = new.pname where pname = old.pname; +create rule rtest_pers_del as on delete to rtest_person do also + delete from rtest_admin where pname = old.pname; +-- +-- Tables and rules for the logging test +-- +create table rtest_emp (ename char(20), salary money); +create table rtest_emplog (ename char(20), who name, action char(10), newsal money, oldsal money); +create table rtest_empmass (ename char(20), salary money); +create rule rtest_emp_ins as on insert to rtest_emp do + insert into rtest_emplog values (new.ename, current_user, + 'hired', new.salary, '0.00'); +create rule rtest_emp_upd as on update to rtest_emp where new.salary != old.salary do + insert into rtest_emplog values (new.ename, current_user, + 'honored', new.salary, old.salary); +create rule rtest_emp_del as on delete to rtest_emp do + insert into rtest_emplog values (old.ename, current_user, + 'fired', '0.00', old.salary); +-- +-- Tables and rules for the multiple cascaded qualified instead +-- rule test +-- +create table rtest_t4 (a int4, b text); +create table rtest_t5 (a int4, b text); +create table rtest_t6 (a int4, b text); +create table rtest_t7 (a int4, b text); +create table rtest_t8 (a int4, b text); +create table rtest_t9 (a int4, b text); +create rule rtest_t4_ins1 as on insert to rtest_t4 + where new.a >= 10 and new.a < 20 do instead + insert into rtest_t5 values (new.a, new.b); +create rule rtest_t4_ins2 as on insert to rtest_t4 + where new.a >= 20 and new.a < 30 do + insert into rtest_t6 values (new.a, new.b); +create rule rtest_t5_ins as on insert to rtest_t5 + where new.a > 15 do + insert into rtest_t7 values (new.a, new.b); +create rule rtest_t6_ins as on insert to rtest_t6 + where new.a > 25 do instead + insert into rtest_t8 values (new.a, new.b); +-- +-- Tables and rules for the rule fire order test +-- +-- As of PG 7.3, the rules should fire in order by name, regardless +-- of INSTEAD attributes or creation order. 
+-- +create table rtest_order1 (a int4); +create table rtest_order2 (a int4, b int4, c text); +create sequence rtest_seq; +create rule rtest_order_r3 as on insert to rtest_order1 do instead + insert into rtest_order2 values (new.a, nextval('rtest_seq'), + 'rule 3 - this should run 3rd'); +create rule rtest_order_r4 as on insert to rtest_order1 + where a < 100 do instead + insert into rtest_order2 values (new.a, nextval('rtest_seq'), + 'rule 4 - this should run 4th'); +create rule rtest_order_r2 as on insert to rtest_order1 do + insert into rtest_order2 values (new.a, nextval('rtest_seq'), + 'rule 2 - this should run 2nd'); +create rule rtest_order_r1 as on insert to rtest_order1 do instead + insert into rtest_order2 values (new.a, nextval('rtest_seq'), + 'rule 1 - this should run 1st'); +-- +-- Tables and rules for the instead nothing test +-- +create table rtest_nothn1 (a int4, b text); +create table rtest_nothn2 (a int4, b text); +create table rtest_nothn3 (a int4, b text); +create table rtest_nothn4 (a int4, b text); +create rule rtest_nothn_r1 as on insert to rtest_nothn1 + where new.a >= 10 and new.a < 20 do instead nothing; +create rule rtest_nothn_r2 as on insert to rtest_nothn1 + where new.a >= 30 and new.a < 40 do instead nothing; +create rule rtest_nothn_r3 as on insert to rtest_nothn2 + where new.a >= 100 do instead + insert into rtest_nothn3 values (new.a, new.b); +create rule rtest_nothn_r4 as on insert to rtest_nothn2 + do instead nothing; +-- +-- Tests on a view that is select * of a table +-- and has insert/update/delete instead rules to +-- behave much like the real table. +-- +-- +-- We need test data later +-- +insert into rtest_t2 values (1, 21); +insert into rtest_t2 values (2, 22); +insert into rtest_t2 values (3, 23); +insert into rtest_t3 values (1, 31); +insert into rtest_t3 values (2, 32); +insert into rtest_t3 values (3, 33); +insert into rtest_t3 values (4, 34); +insert into rtest_t3 values (5, 35); +-- insert values +insert into rtest_v1 values (1, 11); +insert into rtest_v1 values (2, 12); +select * from rtest_v1; + a | b +---+---- + 1 | 11 + 2 | 12 +(2 rows) + +-- delete with constant expression +delete from rtest_v1 where a = 1; +select * from rtest_v1; + a | b +---+---- + 2 | 12 +(1 row) + +insert into rtest_v1 values (1, 11); +delete from rtest_v1 where b = 12; +select * from rtest_v1; + a | b +---+---- + 1 | 11 +(1 row) + +insert into rtest_v1 values (2, 12); +insert into rtest_v1 values (2, 13); +select * from rtest_v1; + a | b +---+---- + 1 | 11 + 2 | 12 + 2 | 13 +(3 rows) + +** Remember the delete rule on rtest_v1: It says +** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a +** So this time both rows with a = 2 must get deleted +\p +** Remember the delete rule on rtest_v1: It says +** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a +** So this time both rows with a = 2 must get deleted +\r +delete from rtest_v1 where b = 12; +select * from rtest_v1; + a | b +---+---- + 1 | 11 +(1 row) + +delete from rtest_v1; +-- insert select +insert into rtest_v1 select * from rtest_t2; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 2 | 22 + 3 | 23 +(3 rows) + +delete from rtest_v1; +-- same with swapped targetlist +insert into rtest_v1 (b, a) select b, a from rtest_t2; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 2 | 22 + 3 | 23 +(3 rows) + +-- now with only one target attribute +insert into rtest_v1 (a) select a from rtest_t3; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 2 | 22 + 3 | 23 + 1 | + 2 | + 3 | + 4 | + 5 | +(8 rows) + +select *
from rtest_v1 where b isnull; + a | b +---+--- + 1 | + 2 | + 3 | + 4 | + 5 | +(5 rows) + +-- let attribute a differ (must be done on rtest_t1 - see above) +update rtest_t1 set a = a + 10 where b isnull; +delete from rtest_v1 where b isnull; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 2 | 22 + 3 | 23 +(3 rows) + +-- now updates with constant expression +update rtest_v1 set b = 42 where a = 2; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 3 | 23 + 2 | 42 +(3 rows) + +update rtest_v1 set b = 99 where b = 42; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 3 | 23 + 2 | 99 +(3 rows) + +update rtest_v1 set b = 88 where b < 50; +select * from rtest_v1; + a | b +---+---- + 2 | 99 + 1 | 88 + 3 | 88 +(3 rows) + +delete from rtest_v1; +insert into rtest_v1 select rtest_t2.a, rtest_t3.b + from rtest_t2, rtest_t3 + where rtest_t2.a = rtest_t3.a; +select * from rtest_v1; + a | b +---+---- + 1 | 31 + 2 | 32 + 3 | 33 +(3 rows) + +-- updates in a mergejoin +update rtest_v1 set b = rtest_t2.b from rtest_t2 where rtest_v1.a = rtest_t2.a; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 2 | 22 + 3 | 23 +(3 rows) + +insert into rtest_v1 select * from rtest_t3; +select * from rtest_v1; + a | b +---+---- + 1 | 21 + 2 | 22 + 3 | 23 + 1 | 31 + 2 | 32 + 3 | 33 + 4 | 34 + 5 | 35 +(8 rows) + +update rtest_t1 set a = a + 10 where b > 30; +select * from rtest_v1; + a | b +----+---- + 1 | 21 + 2 | 22 + 3 | 23 + 11 | 31 + 12 | 32 + 13 | 33 + 14 | 34 + 15 | 35 +(8 rows) + +update rtest_v1 set a = rtest_t3.a + 20 from rtest_t3 where rtest_v1.b = rtest_t3.b; +select * from rtest_v1; + a | b +----+---- + 1 | 21 + 2 | 22 + 3 | 23 + 21 | 31 + 22 | 32 + 23 | 33 + 24 | 34 + 25 | 35 +(8 rows) + +-- +-- Test for constraint updates/deletes +-- +insert into rtest_system values ('orion', 'Linux Jan Wieck'); +insert into rtest_system values ('notjw', 'WinNT Jan Wieck (notebook)'); +insert into rtest_system values ('neptun', 'Fileserver'); +insert into rtest_interface values ('orion', 'eth0'); +insert into rtest_interface values ('orion', 'eth1'); +insert into rtest_interface values ('notjw', 'eth0'); +insert into rtest_interface values ('neptun', 'eth0'); +insert into rtest_person values ('jw', 'Jan Wieck'); +insert into rtest_person values ('bm', 'Bruce Momjian'); +insert into rtest_admin values ('jw', 'orion'); +insert into rtest_admin values ('jw', 'notjw'); +insert into rtest_admin values ('bm', 'neptun'); +update rtest_system set sysname = 'pluto' where sysname = 'neptun'; +select * from rtest_interface; + sysname | ifname +---------+-------- + orion | eth0 + orion | eth1 + notjw | eth0 + pluto | eth0 +(4 rows) + +select * from rtest_admin; + pname | sysname +-------+--------- + jw | orion + jw | notjw + bm | pluto +(3 rows) + +update rtest_person set pname = 'jwieck' where pdesc = 'Jan Wieck'; +-- Note: use ORDER BY here to ensure consistent output across all systems. +-- The above UPDATE affects two rows with equal keys, so they could be +-- updated in either order depending on the whim of the local qsort(). 
+select * from rtest_admin order by pname, sysname; + pname | sysname +--------+--------- + bm | pluto + jwieck | notjw + jwieck | orion +(3 rows) + +delete from rtest_system where sysname = 'orion'; +select * from rtest_interface; + sysname | ifname +---------+-------- + notjw | eth0 + pluto | eth0 +(2 rows) + +select * from rtest_admin; + pname | sysname +--------+--------- + bm | pluto + jwieck | notjw +(2 rows) + +-- +-- Rule qualification test +-- +insert into rtest_emp values ('wiecc', '5000.00'); +insert into rtest_emp values ('gates', '80000.00'); +update rtest_emp set ename = 'wiecx' where ename = 'wiecc'; +update rtest_emp set ename = 'wieck', salary = '6000.00' where ename = 'wiecx'; +update rtest_emp set salary = '7000.00' where ename = 'wieck'; +delete from rtest_emp where ename = 'gates'; +select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; + ename | matches user | action | newsal | oldsal +----------------------+--------------+------------+------------+------------ + gates | t | fired | $0.00 | $80,000.00 + gates | t | hired | $80,000.00 | $0.00 + wiecc | t | hired | $5,000.00 | $0.00 + wieck | t | honored | $6,000.00 | $5,000.00 + wieck | t | honored | $7,000.00 | $6,000.00 +(5 rows) + +insert into rtest_empmass values ('meyer', '4000.00'); +insert into rtest_empmass values ('maier', '5000.00'); +insert into rtest_empmass values ('mayr', '6000.00'); +insert into rtest_emp select * from rtest_empmass; +select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; + ename | matches user | action | newsal | oldsal +----------------------+--------------+------------+------------+------------ + gates | t | fired | $0.00 | $80,000.00 + gates | t | hired | $80,000.00 | $0.00 + maier | t | hired | $5,000.00 | $0.00 + mayr | t | hired | $6,000.00 | $0.00 + meyer | t | hired | $4,000.00 | $0.00 + wiecc | t | hired | $5,000.00 | $0.00 + wieck | t | honored | $6,000.00 | $5,000.00 + wieck | t | honored | $7,000.00 | $6,000.00 +(8 rows) + +update rtest_empmass set salary = salary + '1000.00'; +update rtest_emp set salary = rtest_empmass.salary from rtest_empmass where rtest_emp.ename = rtest_empmass.ename; +select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; + ename | matches user | action | newsal | oldsal +----------------------+--------------+------------+------------+------------ + gates | t | fired | $0.00 | $80,000.00 + gates | t | hired | $80,000.00 | $0.00 + maier | t | hired | $5,000.00 | $0.00 + maier | t | honored | $6,000.00 | $5,000.00 + mayr | t | hired | $6,000.00 | $0.00 + mayr | t | honored | $7,000.00 | $6,000.00 + meyer | t | hired | $4,000.00 | $0.00 + meyer | t | honored | $5,000.00 | $4,000.00 + wiecc | t | hired | $5,000.00 | $0.00 + wieck | t | honored | $6,000.00 | $5,000.00 + wieck | t | honored | $7,000.00 | $6,000.00 +(11 rows) + +delete from rtest_emp using rtest_empmass where rtest_emp.ename = rtest_empmass.ename; +select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal; + ename | matches user | action | newsal | oldsal +----------------------+--------------+------------+------------+------------ + gates | t | fired | $0.00 | $80,000.00 + gates | t | hired | $80,000.00 | $0.00 + maier | t | fired | $0.00 | $6,000.00 + maier | t | hired | $5,000.00 | $0.00 + maier | t | honored | 
$6,000.00 | $5,000.00 + mayr | t | fired | $0.00 | $7,000.00 + mayr | t | hired | $6,000.00 | $0.00 + mayr | t | honored | $7,000.00 | $6,000.00 + meyer | t | fired | $0.00 | $5,000.00 + meyer | t | hired | $4,000.00 | $0.00 + meyer | t | honored | $5,000.00 | $4,000.00 + wiecc | t | hired | $5,000.00 | $0.00 + wieck | t | honored | $6,000.00 | $5,000.00 + wieck | t | honored | $7,000.00 | $6,000.00 +(14 rows) + +-- +-- Multiple cascaded qualified instead rule test +-- +insert into rtest_t4 values (1, 'Record should go to rtest_t4'); +insert into rtest_t4 values (2, 'Record should go to rtest_t4'); +insert into rtest_t4 values (10, 'Record should go to rtest_t5'); +insert into rtest_t4 values (15, 'Record should go to rtest_t5'); +insert into rtest_t4 values (19, 'Record should go to rtest_t5 and t7'); +insert into rtest_t4 values (20, 'Record should go to rtest_t4 and t6'); +insert into rtest_t4 values (26, 'Record should go to rtest_t4 and t8'); +insert into rtest_t4 values (28, 'Record should go to rtest_t4 and t8'); +insert into rtest_t4 values (30, 'Record should go to rtest_t4'); +insert into rtest_t4 values (40, 'Record should go to rtest_t4'); +select * from rtest_t4; + a | b +----+------------------------------------- + 1 | Record should go to rtest_t4 + 2 | Record should go to rtest_t4 + 20 | Record should go to rtest_t4 and t6 + 26 | Record should go to rtest_t4 and t8 + 28 | Record should go to rtest_t4 and t8 + 30 | Record should go to rtest_t4 + 40 | Record should go to rtest_t4 +(7 rows) + +select * from rtest_t5; + a | b +----+------------------------------------- + 10 | Record should go to rtest_t5 + 15 | Record should go to rtest_t5 + 19 | Record should go to rtest_t5 and t7 +(3 rows) + +select * from rtest_t6; + a | b +----+------------------------------------- + 20 | Record should go to rtest_t4 and t6 +(1 row) + +select * from rtest_t7; + a | b +----+------------------------------------- + 19 | Record should go to rtest_t5 and t7 +(1 row) + +select * from rtest_t8; + a | b +----+------------------------------------- + 26 | Record should go to rtest_t4 and t8 + 28 | Record should go to rtest_t4 and t8 +(2 rows) + +delete from rtest_t4; +delete from rtest_t5; +delete from rtest_t6; +delete from rtest_t7; +delete from rtest_t8; +insert into rtest_t9 values (1, 'Record should go to rtest_t4'); +insert into rtest_t9 values (2, 'Record should go to rtest_t4'); +insert into rtest_t9 values (10, 'Record should go to rtest_t5'); +insert into rtest_t9 values (15, 'Record should go to rtest_t5'); +insert into rtest_t9 values (19, 'Record should go to rtest_t5 and t7'); +insert into rtest_t9 values (20, 'Record should go to rtest_t4 and t6'); +insert into rtest_t9 values (26, 'Record should go to rtest_t4 and t8'); +insert into rtest_t9 values (28, 'Record should go to rtest_t4 and t8'); +insert into rtest_t9 values (30, 'Record should go to rtest_t4'); +insert into rtest_t9 values (40, 'Record should go to rtest_t4'); +insert into rtest_t4 select * from rtest_t9 where a < 20; +select * from rtest_t4; + a | b +---+------------------------------ + 1 | Record should go to rtest_t4 + 2 | Record should go to rtest_t4 +(2 rows) + +select * from rtest_t5; + a | b +----+------------------------------------- + 10 | Record should go to rtest_t5 + 15 | Record should go to rtest_t5 + 19 | Record should go to rtest_t5 and t7 +(3 rows) + +select * from rtest_t6; + a | b +---+--- +(0 rows) + +select * from rtest_t7; + a | b +----+------------------------------------- + 19 | Record should go to 
rtest_t5 and t7 +(1 row) + +select * from rtest_t8; + a | b +---+--- +(0 rows) + +insert into rtest_t4 select * from rtest_t9 where b ~ 'and t8'; +select * from rtest_t4; + a | b +----+------------------------------------- + 1 | Record should go to rtest_t4 + 2 | Record should go to rtest_t4 + 26 | Record should go to rtest_t4 and t8 + 28 | Record should go to rtest_t4 and t8 +(4 rows) + +select * from rtest_t5; + a | b +----+------------------------------------- + 10 | Record should go to rtest_t5 + 15 | Record should go to rtest_t5 + 19 | Record should go to rtest_t5 and t7 +(3 rows) + +select * from rtest_t6; + a | b +---+--- +(0 rows) + +select * from rtest_t7; + a | b +----+------------------------------------- + 19 | Record should go to rtest_t5 and t7 +(1 row) + +select * from rtest_t8; + a | b +----+------------------------------------- + 26 | Record should go to rtest_t4 and t8 + 28 | Record should go to rtest_t4 and t8 +(2 rows) + +insert into rtest_t4 select a + 1, b from rtest_t9 where a in (20, 30, 40); +select * from rtest_t4; + a | b +----+------------------------------------- + 1 | Record should go to rtest_t4 + 2 | Record should go to rtest_t4 + 26 | Record should go to rtest_t4 and t8 + 28 | Record should go to rtest_t4 and t8 + 21 | Record should go to rtest_t4 and t6 + 31 | Record should go to rtest_t4 + 41 | Record should go to rtest_t4 +(7 rows) + +select * from rtest_t5; + a | b +----+------------------------------------- + 10 | Record should go to rtest_t5 + 15 | Record should go to rtest_t5 + 19 | Record should go to rtest_t5 and t7 +(3 rows) + +select * from rtest_t6; + a | b +----+------------------------------------- + 21 | Record should go to rtest_t4 and t6 +(1 row) + +select * from rtest_t7; + a | b +----+------------------------------------- + 19 | Record should go to rtest_t5 and t7 +(1 row) + +select * from rtest_t8; + a | b +----+------------------------------------- + 26 | Record should go to rtest_t4 and t8 + 28 | Record should go to rtest_t4 and t8 +(2 rows) + +-- +-- Check that the ordering of rules fired is correct +-- +insert into rtest_order1 values (1); +select * from rtest_order2; + a | b | c +---+---+------------------------------ + 1 | 1 | rule 1 - this should run 1st + 1 | 2 | rule 2 - this should run 2nd + 1 | 3 | rule 3 - this should run 3rd + 1 | 4 | rule 4 - this should run 4th +(4 rows) + +-- +-- Check if instead nothing w/without qualification works +-- +insert into rtest_nothn1 values (1, 'want this'); +insert into rtest_nothn1 values (2, 'want this'); +insert into rtest_nothn1 values (10, 'don''t want this'); +insert into rtest_nothn1 values (19, 'don''t want this'); +insert into rtest_nothn1 values (20, 'want this'); +insert into rtest_nothn1 values (29, 'want this'); +insert into rtest_nothn1 values (30, 'don''t want this'); +insert into rtest_nothn1 values (39, 'don''t want this'); +insert into rtest_nothn1 values (40, 'want this'); +insert into rtest_nothn1 values (50, 'want this'); +insert into rtest_nothn1 values (60, 'want this'); +select * from rtest_nothn1; + a | b +----+----------- + 1 | want this + 2 | want this + 20 | want this + 29 | want this + 40 | want this + 50 | want this + 60 | want this +(7 rows) + +insert into rtest_nothn2 values (10, 'too small'); +insert into rtest_nothn2 values (50, 'too small'); +insert into rtest_nothn2 values (100, 'OK'); +insert into rtest_nothn2 values (200, 'OK'); +select * from rtest_nothn2; + a | b +---+--- +(0 rows) + +select * from rtest_nothn3; + a | b +-----+---- + 100 | OK + 200 | 
OK +(2 rows) + +delete from rtest_nothn1; +delete from rtest_nothn2; +delete from rtest_nothn3; +insert into rtest_nothn4 values (1, 'want this'); +insert into rtest_nothn4 values (2, 'want this'); +insert into rtest_nothn4 values (10, 'don''t want this'); +insert into rtest_nothn4 values (19, 'don''t want this'); +insert into rtest_nothn4 values (20, 'want this'); +insert into rtest_nothn4 values (29, 'want this'); +insert into rtest_nothn4 values (30, 'don''t want this'); +insert into rtest_nothn4 values (39, 'don''t want this'); +insert into rtest_nothn4 values (40, 'want this'); +insert into rtest_nothn4 values (50, 'want this'); +insert into rtest_nothn4 values (60, 'want this'); +insert into rtest_nothn1 select * from rtest_nothn4; +select * from rtest_nothn1; + a | b +----+----------- + 1 | want this + 2 | want this + 20 | want this + 29 | want this + 40 | want this + 50 | want this + 60 | want this +(7 rows) + +delete from rtest_nothn4; +insert into rtest_nothn4 values (10, 'too small'); +insert into rtest_nothn4 values (50, 'too small'); +insert into rtest_nothn4 values (100, 'OK'); +insert into rtest_nothn4 values (200, 'OK'); +insert into rtest_nothn2 select * from rtest_nothn4; +select * from rtest_nothn2; + a | b +---+--- +(0 rows) + +select * from rtest_nothn3; + a | b +-----+---- + 100 | OK + 200 | OK +(2 rows) + +create table rtest_view1 (a int4, b text, v bool); +create table rtest_view2 (a int4); +create table rtest_view3 (a int4, b text); +create table rtest_view4 (a int4, b text, c int4); +create view rtest_vview1 as select a, b from rtest_view1 X + where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); +create view rtest_vview2 as select a, b from rtest_view1 where v; +create view rtest_vview3 as select a, b from rtest_vview2 X + where 0 < (select count(*) from rtest_view2 Y where Y.a = X.a); +create view rtest_vview4 as select X.a, X.b, count(Y.a) as refcount + from rtest_view1 X, rtest_view2 Y + where X.a = Y.a + group by X.a, X.b; +create function rtest_viewfunc1(int4) returns int4 as + 'select count(*)::int4 from rtest_view2 where a = $1' + language sql; +create view rtest_vview5 as select a, b, rtest_viewfunc1(a) as refcount + from rtest_view1; +insert into rtest_view1 values (1, 'item 1', 't'); +insert into rtest_view1 values (2, 'item 2', 't'); +insert into rtest_view1 values (3, 'item 3', 't'); +insert into rtest_view1 values (4, 'item 4', 'f'); +insert into rtest_view1 values (5, 'item 5', 't'); +insert into rtest_view1 values (6, 'item 6', 'f'); +insert into rtest_view1 values (7, 'item 7', 't'); +insert into rtest_view1 values (8, 'item 8', 't'); +insert into rtest_view2 values (2); +insert into rtest_view2 values (2); +insert into rtest_view2 values (4); +insert into rtest_view2 values (5); +insert into rtest_view2 values (7); +insert into rtest_view2 values (7); +insert into rtest_view2 values (7); +insert into rtest_view2 values (7); +select * from rtest_vview1; + a | b +---+-------- + 2 | item 2 + 4 | item 4 + 5 | item 5 + 7 | item 7 +(4 rows) + +select * from rtest_vview2; + a | b +---+-------- + 1 | item 1 + 2 | item 2 + 3 | item 3 + 5 | item 5 + 7 | item 7 + 8 | item 8 +(6 rows) + +select * from rtest_vview3; + a | b +---+-------- + 2 | item 2 + 5 | item 5 + 7 | item 7 +(3 rows) + +select * from rtest_vview4 order by a, b; + a | b | refcount +---+--------+---------- + 2 | item 2 | 2 + 4 | item 4 | 1 + 5 | item 5 | 1 + 7 | item 7 | 4 +(4 rows) + +select * from rtest_vview5; + a | b | refcount +---+--------+---------- + 1 | item 1 | 0 + 2 | 
item 2 | 2 + 3 | item 3 | 0 + 4 | item 4 | 1 + 5 | item 5 | 1 + 6 | item 6 | 0 + 7 | item 7 | 4 + 8 | item 8 | 0 +(8 rows) + +insert into rtest_view3 select * from rtest_vview1 where a < 7; +select * from rtest_view3; + a | b +---+-------- + 2 | item 2 + 4 | item 4 + 5 | item 5 +(3 rows) + +delete from rtest_view3; +insert into rtest_view3 select * from rtest_vview2 where a != 5 and b !~ '2'; +select * from rtest_view3; + a | b +---+-------- + 1 | item 1 + 3 | item 3 + 7 | item 7 + 8 | item 8 +(4 rows) + +delete from rtest_view3; +insert into rtest_view3 select * from rtest_vview3; +select * from rtest_view3; + a | b +---+-------- + 2 | item 2 + 5 | item 5 + 7 | item 7 +(3 rows) + +delete from rtest_view3; +insert into rtest_view4 select * from rtest_vview4 where 3 > refcount; +select * from rtest_view4 order by a, b; + a | b | c +---+--------+--- + 2 | item 2 | 2 + 4 | item 4 | 1 + 5 | item 5 | 1 +(3 rows) + +delete from rtest_view4; +insert into rtest_view4 select * from rtest_vview5 where a > 2 and refcount = 0; +select * from rtest_view4; + a | b | c +---+--------+--- + 3 | item 3 | 0 + 6 | item 6 | 0 + 8 | item 8 | 0 +(3 rows) + +delete from rtest_view4; +-- +-- Test for computations in views +-- +create table rtest_comp ( + part text, + unit char(4), + size float +); +create table rtest_unitfact ( + unit char(4), + factor float +); +create view rtest_vcomp as + select X.part, (X.size * Y.factor) as size_in_cm + from rtest_comp X, rtest_unitfact Y + where X.unit = Y.unit; +insert into rtest_unitfact values ('m', 100.0); +insert into rtest_unitfact values ('cm', 1.0); +insert into rtest_unitfact values ('inch', 2.54); +insert into rtest_comp values ('p1', 'm', 5.0); +insert into rtest_comp values ('p2', 'm', 3.0); +insert into rtest_comp values ('p3', 'cm', 5.0); +insert into rtest_comp values ('p4', 'cm', 15.0); +insert into rtest_comp values ('p5', 'inch', 7.0); +insert into rtest_comp values ('p6', 'inch', 4.4); +select * from rtest_vcomp order by part; + part | size_in_cm +------+-------------------- + p1 | 500 + p2 | 300 + p3 | 5 + p4 | 15 + p5 | 17.78 + p6 | 11.176000000000002 +(6 rows) + +select * from rtest_vcomp where size_in_cm > 10.0 order by size_in_cm using >; + part | size_in_cm +------+-------------------- + p1 | 500 + p2 | 300 + p5 | 17.78 + p4 | 15 + p6 | 11.176000000000002 +(5 rows) + +-- +-- In addition run the (slightly modified) queries from the +-- programmers manual section on the rule system. 
+-- +CREATE TABLE shoe_data ( + shoename char(10), -- primary key + sh_avail integer, -- available # of pairs + slcolor char(10), -- preferred shoelace color + slminlen float, -- minimum shoelace length + slmaxlen float, -- maximum shoelace length + slunit char(8) -- length unit +); +CREATE TABLE shoelace_data ( + sl_name char(10), -- primary key + sl_avail integer, -- available # of pairs + sl_color char(10), -- shoelace color + sl_len float, -- shoelace length + sl_unit char(8) -- length unit +); +CREATE TABLE unit ( + un_name char(8), -- the primary key + un_fact float -- factor to transform to cm +); +CREATE VIEW shoe AS + SELECT sh.shoename, + sh.sh_avail, + sh.slcolor, + sh.slminlen, + sh.slminlen * un.un_fact AS slminlen_cm, + sh.slmaxlen, + sh.slmaxlen * un.un_fact AS slmaxlen_cm, + sh.slunit + FROM shoe_data sh, unit un + WHERE sh.slunit = un.un_name; +CREATE VIEW shoelace AS + SELECT s.sl_name, + s.sl_avail, + s.sl_color, + s.sl_len, + s.sl_unit, + s.sl_len * u.un_fact AS sl_len_cm + FROM shoelace_data s, unit u + WHERE s.sl_unit = u.un_name; +CREATE VIEW shoe_ready AS + SELECT rsh.shoename, + rsh.sh_avail, + rsl.sl_name, + rsl.sl_avail, + int4smaller(rsh.sh_avail, rsl.sl_avail) AS total_avail + FROM shoe rsh, shoelace rsl + WHERE rsl.sl_color = rsh.slcolor + AND rsl.sl_len_cm >= rsh.slminlen_cm + AND rsl.sl_len_cm <= rsh.slmaxlen_cm; +INSERT INTO unit VALUES ('cm', 1.0); +INSERT INTO unit VALUES ('m', 100.0); +INSERT INTO unit VALUES ('inch', 2.54); +INSERT INTO shoe_data VALUES ('sh1', 2, 'black', 70.0, 90.0, 'cm'); +INSERT INTO shoe_data VALUES ('sh2', 0, 'black', 30.0, 40.0, 'inch'); +INSERT INTO shoe_data VALUES ('sh3', 4, 'brown', 50.0, 65.0, 'cm'); +INSERT INTO shoe_data VALUES ('sh4', 3, 'brown', 40.0, 50.0, 'inch'); +INSERT INTO shoelace_data VALUES ('sl1', 5, 'black', 80.0, 'cm'); +INSERT INTO shoelace_data VALUES ('sl2', 6, 'black', 100.0, 'cm'); +INSERT INTO shoelace_data VALUES ('sl3', 0, 'black', 35.0 , 'inch'); +INSERT INTO shoelace_data VALUES ('sl4', 8, 'black', 40.0 , 'inch'); +INSERT INTO shoelace_data VALUES ('sl5', 4, 'brown', 1.0 , 'm'); +INSERT INTO shoelace_data VALUES ('sl6', 0, 'brown', 0.9 , 'm'); +INSERT INTO shoelace_data VALUES ('sl7', 7, 'brown', 60 , 'cm'); +INSERT INTO shoelace_data VALUES ('sl8', 1, 'brown', 40 , 'inch'); +-- SELECTs in doc +SELECT * FROM shoelace ORDER BY sl_name; + sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm +------------+----------+------------+--------+----------+----------- + sl1 | 5 | black | 80 | cm | 80 + sl2 | 6 | black | 100 | cm | 100 + sl3 | 0 | black | 35 | inch | 88.9 + sl4 | 8 | black | 40 | inch | 101.6 + sl5 | 4 | brown | 1 | m | 100 + sl6 | 0 | brown | 0.9 | m | 90 + sl7 | 7 | brown | 60 | cm | 60 + sl8 | 1 | brown | 40 | inch | 101.6 +(8 rows) + +SELECT * FROM shoe_ready WHERE total_avail >= 2 ORDER BY 1; + shoename | sh_avail | sl_name | sl_avail | total_avail +------------+----------+------------+----------+------------- + sh1 | 2 | sl1 | 5 | 2 + sh3 | 4 | sl7 | 7 | 4 +(2 rows) + + CREATE TABLE shoelace_log ( + sl_name char(10), -- shoelace changed + sl_avail integer, -- new available value + log_who name, -- who did it + log_when timestamp -- when + ); +-- Want "log_who" to be CURRENT_USER, +-- but that is non-portable for the regression test +-- - thomas 1999-02-21 + CREATE RULE log_shoelace AS ON UPDATE TO shoelace_data + WHERE NEW.sl_avail != OLD.sl_avail + DO INSERT INTO shoelace_log VALUES ( + NEW.sl_name, + NEW.sl_avail, + 'Al Bundy', + 'epoch' + ); +UPDATE shoelace_data SET sl_avail = 
6 WHERE sl_name = 'sl7'; +SELECT * FROM shoelace_log; + sl_name | sl_avail | log_who | log_when +------------+----------+----------+-------------------------- + sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970 +(1 row) + + CREATE RULE shoelace_ins AS ON INSERT TO shoelace + DO INSTEAD + INSERT INTO shoelace_data VALUES ( + NEW.sl_name, + NEW.sl_avail, + NEW.sl_color, + NEW.sl_len, + NEW.sl_unit); + CREATE RULE shoelace_upd AS ON UPDATE TO shoelace + DO INSTEAD + UPDATE shoelace_data SET + sl_name = NEW.sl_name, + sl_avail = NEW.sl_avail, + sl_color = NEW.sl_color, + sl_len = NEW.sl_len, + sl_unit = NEW.sl_unit + WHERE sl_name = OLD.sl_name; + CREATE RULE shoelace_del AS ON DELETE TO shoelace + DO INSTEAD + DELETE FROM shoelace_data + WHERE sl_name = OLD.sl_name; + CREATE TABLE shoelace_arrive ( + arr_name char(10), + arr_quant integer + ); + CREATE TABLE shoelace_ok ( + ok_name char(10), + ok_quant integer + ); + CREATE RULE shoelace_ok_ins AS ON INSERT TO shoelace_ok + DO INSTEAD + UPDATE shoelace SET + sl_avail = sl_avail + NEW.ok_quant + WHERE sl_name = NEW.ok_name; +INSERT INTO shoelace_arrive VALUES ('sl3', 10); +INSERT INTO shoelace_arrive VALUES ('sl6', 20); +INSERT INTO shoelace_arrive VALUES ('sl8', 20); +SELECT * FROM shoelace ORDER BY sl_name; + sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm +------------+----------+------------+--------+----------+----------- + sl1 | 5 | black | 80 | cm | 80 + sl2 | 6 | black | 100 | cm | 100 + sl3 | 0 | black | 35 | inch | 88.9 + sl4 | 8 | black | 40 | inch | 101.6 + sl5 | 4 | brown | 1 | m | 100 + sl6 | 0 | brown | 0.9 | m | 90 + sl7 | 6 | brown | 60 | cm | 60 + sl8 | 1 | brown | 40 | inch | 101.6 +(8 rows) + +insert into shoelace_ok select * from shoelace_arrive; +SELECT * FROM shoelace ORDER BY sl_name; + sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm +------------+----------+------------+--------+----------+----------- + sl1 | 5 | black | 80 | cm | 80 + sl2 | 6 | black | 100 | cm | 100 + sl3 | 10 | black | 35 | inch | 88.9 + sl4 | 8 | black | 40 | inch | 101.6 + sl5 | 4 | brown | 1 | m | 100 + sl6 | 20 | brown | 0.9 | m | 90 + sl7 | 6 | brown | 60 | cm | 60 + sl8 | 21 | brown | 40 | inch | 101.6 +(8 rows) + +SELECT * FROM shoelace_log ORDER BY sl_name; + sl_name | sl_avail | log_who | log_when +------------+----------+----------+-------------------------- + sl3 | 10 | Al Bundy | Thu Jan 01 00:00:00 1970 + sl6 | 20 | Al Bundy | Thu Jan 01 00:00:00 1970 + sl7 | 6 | Al Bundy | Thu Jan 01 00:00:00 1970 + sl8 | 21 | Al Bundy | Thu Jan 01 00:00:00 1970 +(4 rows) + + CREATE VIEW shoelace_obsolete AS + SELECT * FROM shoelace WHERE NOT EXISTS + (SELECT shoename FROM shoe WHERE slcolor = sl_color); + CREATE VIEW shoelace_candelete AS + SELECT * FROM shoelace_obsolete WHERE sl_avail = 0; +insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0); +insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0); +-- Unsupported (even though a similar updatable view construct is) +insert into shoelace values ('sl10', 1000, 'magenta', 40.0, 'inch', 0.0) + on conflict do nothing; +ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules +SELECT * FROM shoelace_obsolete ORDER BY sl_len_cm; + sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm +------------+----------+------------+--------+----------+----------- + sl9 | 0 | pink | 35 | inch | 88.9 + sl10 | 1000 | magenta | 40 | inch | 101.6 +(2 rows) + +SELECT * FROM shoelace_candelete; + sl_name | sl_avail | sl_color 
| sl_len | sl_unit | sl_len_cm +------------+----------+------------+--------+----------+----------- + sl9 | 0 | pink | 35 | inch | 88.9 +(1 row) + +DELETE FROM shoelace WHERE EXISTS + (SELECT * FROM shoelace_candelete + WHERE sl_name = shoelace.sl_name); +SELECT * FROM shoelace ORDER BY sl_name; + sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm +------------+----------+------------+--------+----------+----------- + sl1 | 5 | black | 80 | cm | 80 + sl10 | 1000 | magenta | 40 | inch | 101.6 + sl2 | 6 | black | 100 | cm | 100 + sl3 | 10 | black | 35 | inch | 88.9 + sl4 | 8 | black | 40 | inch | 101.6 + sl5 | 4 | brown | 1 | m | 100 + sl6 | 20 | brown | 0.9 | m | 90 + sl7 | 6 | brown | 60 | cm | 60 + sl8 | 21 | brown | 40 | inch | 101.6 +(9 rows) + +SELECT * FROM shoe ORDER BY shoename; + shoename | sh_avail | slcolor | slminlen | slminlen_cm | slmaxlen | slmaxlen_cm | slunit +------------+----------+------------+----------+-------------+----------+-------------+---------- + sh1 | 2 | black | 70 | 70 | 90 | 90 | cm + sh2 | 0 | black | 30 | 76.2 | 40 | 101.6 | inch + sh3 | 4 | brown | 50 | 50 | 65 | 65 | cm + sh4 | 3 | brown | 40 | 101.6 | 50 | 127 | inch +(4 rows) + +SELECT count(*) FROM shoe; + count +------- + 4 +(1 row) + +-- +-- Simple test of qualified ON INSERT ... this did not work in 7.0 ... +-- +create table rules_foo (f1 int); +create table rules_foo2 (f1 int); +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead nothing; +insert into rules_foo values(1); +insert into rules_foo values(1001); +select * from rules_foo; + f1 +------ + 1001 +(1 row) + +drop rule rules_foorule on rules_foo; +-- this should fail because f1 is not exposed for unqualified reference: +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead insert into rules_foo2 values (f1); +ERROR: column "f1" does not exist +LINE 2: do instead insert into rules_foo2 values (f1); + ^ +DETAIL: There are columns named "f1", but they are in tables that cannot be referenced from this part of the query. +HINT: Try using a table-qualified name. +-- this is the correct way: +create rule rules_foorule as on insert to rules_foo where f1 < 100 +do instead insert into rules_foo2 values (new.f1); +insert into rules_foo values(2); +insert into rules_foo values(100); +select * from rules_foo; + f1 +------ + 1001 + 100 +(2 rows) + +select * from rules_foo2; + f1 +---- + 2 +(1 row) + +drop rule rules_foorule on rules_foo; +drop table rules_foo; +drop table rules_foo2; +-- +-- Test rules containing INSERT ... SELECT, which is a very ugly special +-- case as of 7.1. Example is based on bug report from Joel Burton. 
+-- +create table pparent (pid int, txt text); +insert into pparent values (1,'parent1'); +insert into pparent values (2,'parent2'); +create table cchild (pid int, descrip text); +insert into cchild values (1,'descrip1'); +create view vview as + select pparent.pid, txt, descrip from + pparent left join cchild using (pid); +create rule rrule as + on update to vview do instead +( + insert into cchild (pid, descrip) + select old.pid, new.descrip where old.descrip isnull; + update cchild set descrip = new.descrip where cchild.pid = old.pid; +); +select * from vview; + pid | txt | descrip +-----+---------+---------- + 1 | parent1 | descrip1 + 2 | parent2 | +(2 rows) + +update vview set descrip='test1' where pid=1; +select * from vview; + pid | txt | descrip +-----+---------+--------- + 1 | parent1 | test1 + 2 | parent2 | +(2 rows) + +update vview set descrip='test2' where pid=2; +select * from vview; + pid | txt | descrip +-----+---------+--------- + 1 | parent1 | test1 + 2 | parent2 | test2 +(2 rows) + +update vview set descrip='test3' where pid=3; +select * from vview; + pid | txt | descrip +-----+---------+--------- + 1 | parent1 | test1 + 2 | parent2 | test2 +(2 rows) + +select * from cchild; + pid | descrip +-----+--------- + 1 | test1 + 2 | test2 +(2 rows) + +drop rule rrule on vview; +drop view vview; +drop table pparent; +drop table cchild; +-- +-- Check that ruleutils are working +-- +-- temporarily disable fancy output, so view changes create less diff noise +\a\t +SELECT viewname, definition FROM pg_views +WHERE schemaname = 'pg_catalog' +ORDER BY viewname; +pg_available_extension_versions| SELECT e.name, + e.version, + (x.extname IS NOT NULL) AS installed, + e.superuser, + e.trusted, + e.relocatable, + e.schema, + e.requires, + e.comment + FROM (pg_available_extension_versions() e(name, version, superuser, trusted, relocatable, schema, requires, comment) + LEFT JOIN pg_extension x ON (((e.name = x.extname) AND (e.version = x.extversion)))); +pg_available_extensions| SELECT e.name, + e.default_version, + x.extversion AS installed_version, + e.comment + FROM (pg_available_extensions() e(name, default_version, comment) + LEFT JOIN pg_extension x ON ((e.name = x.extname))); +pg_backend_memory_contexts| SELECT name, + ident, + parent, + level, + total_bytes, + total_nblocks, + free_bytes, + free_chunks, + used_bytes + FROM pg_get_backend_memory_contexts() pg_get_backend_memory_contexts(name, ident, parent, level, total_bytes, total_nblocks, free_bytes, free_chunks, used_bytes); +pg_config| SELECT name, + setting + FROM pg_config() pg_config(name, setting); +pg_cursors| SELECT name, + statement, + is_holdable, + is_binary, + is_scrollable, + creation_time + FROM pg_cursor() c(name, statement, is_holdable, is_binary, is_scrollable, creation_time); +pg_file_settings| SELECT sourcefile, + sourceline, + seqno, + name, + setting, + applied, + error + FROM pg_show_all_file_settings() a(sourcefile, sourceline, seqno, name, setting, applied, error); +pg_group| SELECT rolname AS groname, + oid AS grosysid, + ARRAY( SELECT pg_auth_members.member + FROM pg_auth_members + WHERE (pg_auth_members.roleid = pg_authid.oid)) AS grolist + FROM pg_authid + WHERE (NOT rolcanlogin); +pg_hba_file_rules| SELECT rule_number, + file_name, + line_number, + type, + database, + user_name, + address, + netmask, + auth_method, + options, + error + FROM pg_hba_file_rules() a(rule_number, file_name, line_number, type, database, user_name, address, netmask, auth_method, options, error); +pg_ident_file_mappings| SELECT 
map_number, + file_name, + line_number, + map_name, + sys_name, + pg_username, + error + FROM pg_ident_file_mappings() a(map_number, file_name, line_number, map_name, sys_name, pg_username, error); +pg_indexes| SELECT n.nspname AS schemaname, + c.relname AS tablename, + i.relname AS indexname, + t.spcname AS tablespace, + pg_get_indexdef(i.oid) AS indexdef + FROM ((((pg_index x + JOIN pg_class c ON ((c.oid = x.indrelid))) + JOIN pg_class i ON ((i.oid = x.indexrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + LEFT JOIN pg_tablespace t ON ((t.oid = i.reltablespace))) + WHERE ((c.relkind = ANY (ARRAY['r'::"char", 'm'::"char", 'p'::"char"])) AND (i.relkind = ANY (ARRAY['i'::"char", 'I'::"char"]))); +pg_locks| SELECT locktype, + database, + relation, + page, + tuple, + virtualxid, + transactionid, + classid, + objid, + objsubid, + virtualtransaction, + pid, + mode, + granted, + fastpath, + waitstart + FROM pg_lock_status() l(locktype, database, relation, page, tuple, virtualxid, transactionid, classid, objid, objsubid, virtualtransaction, pid, mode, granted, fastpath, waitstart); +pg_matviews| SELECT n.nspname AS schemaname, + c.relname AS matviewname, + pg_get_userbyid(c.relowner) AS matviewowner, + t.spcname AS tablespace, + c.relhasindex AS hasindexes, + c.relispopulated AS ispopulated, + pg_get_viewdef(c.oid) AS definition + FROM ((pg_class c + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) + WHERE (c.relkind = 'm'::"char"); +pg_policies| SELECT n.nspname AS schemaname, + c.relname AS tablename, + pol.polname AS policyname, + CASE + WHEN pol.polpermissive THEN 'PERMISSIVE'::text + ELSE 'RESTRICTIVE'::text + END AS permissive, + CASE + WHEN (pol.polroles = '{0}'::oid[]) THEN (string_to_array('public'::text, ''::text))::name[] + ELSE ARRAY( SELECT pg_authid.rolname + FROM pg_authid + WHERE (pg_authid.oid = ANY (pol.polroles)) + ORDER BY pg_authid.rolname) + END AS roles, + CASE pol.polcmd + WHEN 'r'::"char" THEN 'SELECT'::text + WHEN 'a'::"char" THEN 'INSERT'::text + WHEN 'w'::"char" THEN 'UPDATE'::text + WHEN 'd'::"char" THEN 'DELETE'::text + WHEN '*'::"char" THEN 'ALL'::text + ELSE NULL::text + END AS cmd, + pg_get_expr(pol.polqual, pol.polrelid) AS qual, + pg_get_expr(pol.polwithcheck, pol.polrelid) AS with_check + FROM ((pg_policy pol + JOIN pg_class c ON ((c.oid = pol.polrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))); +pg_prepared_statements| SELECT name, + statement, + prepare_time, + parameter_types, + result_types, + from_sql, + generic_plans, + custom_plans + FROM pg_prepared_statement() p(name, statement, prepare_time, parameter_types, result_types, from_sql, generic_plans, custom_plans); +pg_prepared_xacts| SELECT p.transaction, + p.gid, + p.prepared, + u.rolname AS owner, + d.datname AS database + FROM ((pg_prepared_xact() p(transaction, gid, prepared, ownerid, dbid) + LEFT JOIN pg_authid u ON ((p.ownerid = u.oid))) + LEFT JOIN pg_database d ON ((p.dbid = d.oid))); +pg_publication_tables| SELECT p.pubname, + n.nspname AS schemaname, + c.relname AS tablename, + ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg + FROM pg_attribute a + WHERE ((a.attrelid = gpt.relid) AND (a.attnum = ANY ((gpt.attrs)::smallint[])))) AS attnames, + pg_get_expr(gpt.qual, gpt.relid) AS rowfilter + FROM pg_publication p, + LATERAL pg_get_publication_tables(VARIADIC ARRAY[(p.pubname)::text]) gpt(pubid, relid, attrs, qual), + (pg_class c + JOIN pg_namespace n ON ((n.oid = c.relnamespace))) 
+ WHERE (c.oid = gpt.relid); +pg_replication_origin_status| SELECT local_id, + external_id, + remote_lsn, + local_lsn + FROM pg_show_replication_origin_status() pg_show_replication_origin_status(local_id, external_id, remote_lsn, local_lsn); +pg_replication_slots| SELECT l.slot_name, + l.plugin, + l.slot_type, + l.datoid, + d.datname AS database, + l.temporary, + l.active, + l.active_pid, + l.xmin, + l.catalog_xmin, + l.restart_lsn, + l.confirmed_flush_lsn, + l.wal_status, + l.safe_wal_size, + l.two_phase, + l.conflicting + FROM (pg_get_replication_slots() l(slot_name, plugin, slot_type, datoid, temporary, active, active_pid, xmin, catalog_xmin, restart_lsn, confirmed_flush_lsn, wal_status, safe_wal_size, two_phase, conflicting) + LEFT JOIN pg_database d ON ((l.datoid = d.oid))); +pg_roles| SELECT pg_authid.rolname, + pg_authid.rolsuper, + pg_authid.rolinherit, + pg_authid.rolcreaterole, + pg_authid.rolcreatedb, + pg_authid.rolcanlogin, + pg_authid.rolreplication, + pg_authid.rolconnlimit, + '********'::text AS rolpassword, + pg_authid.rolvaliduntil, + pg_authid.rolbypassrls, + s.setconfig AS rolconfig, + pg_authid.oid + FROM (pg_authid + LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))); +pg_rules| SELECT n.nspname AS schemaname, + c.relname AS tablename, + r.rulename, + pg_get_ruledef(r.oid) AS definition + FROM ((pg_rewrite r + JOIN pg_class c ON ((c.oid = r.ev_class))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (r.rulename <> '_RETURN'::name); +pg_seclabels| SELECT l.objoid, + l.classoid, + l.objsubid, + CASE + WHEN (rel.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])) THEN 'table'::text + WHEN (rel.relkind = 'v'::"char") THEN 'view'::text + WHEN (rel.relkind = 'm'::"char") THEN 'materialized view'::text + WHEN (rel.relkind = 'S'::"char") THEN 'sequence'::text + WHEN (rel.relkind = 'f'::"char") THEN 'foreign table'::text + ELSE NULL::text + END AS objtype, + rel.relnamespace AS objnamespace, + CASE + WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) + ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) + END AS objname, + l.provider, + l.label + FROM ((pg_seclabel l + JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) + JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + 'column'::text AS objtype, + rel.relnamespace AS objnamespace, + (( + CASE + WHEN pg_table_is_visible(rel.oid) THEN quote_ident((rel.relname)::text) + ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((rel.relname)::text)) + END || '.'::text) || (att.attname)::text) AS objname, + l.provider, + l.label + FROM (((pg_seclabel l + JOIN pg_class rel ON (((l.classoid = rel.tableoid) AND (l.objoid = rel.oid)))) + JOIN pg_attribute att ON (((rel.oid = att.attrelid) AND (l.objsubid = att.attnum)))) + JOIN pg_namespace nsp ON ((rel.relnamespace = nsp.oid))) + WHERE (l.objsubid <> 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + CASE pro.prokind + WHEN 'a'::"char" THEN 'aggregate'::text + WHEN 'f'::"char" THEN 'function'::text + WHEN 'p'::"char" THEN 'procedure'::text + WHEN 'w'::"char" THEN 'window'::text + ELSE NULL::text + END AS objtype, + pro.pronamespace AS objnamespace, + ((( + CASE + WHEN pg_function_is_visible(pro.oid) THEN quote_ident((pro.proname)::text) + ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || 
quote_ident((pro.proname)::text)) + END || '('::text) || pg_get_function_arguments(pro.oid)) || ')'::text) AS objname, + l.provider, + l.label + FROM ((pg_seclabel l + JOIN pg_proc pro ON (((l.classoid = pro.tableoid) AND (l.objoid = pro.oid)))) + JOIN pg_namespace nsp ON ((pro.pronamespace = nsp.oid))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + CASE + WHEN (typ.typtype = 'd'::"char") THEN 'domain'::text + ELSE 'type'::text + END AS objtype, + typ.typnamespace AS objnamespace, + CASE + WHEN pg_type_is_visible(typ.oid) THEN quote_ident((typ.typname)::text) + ELSE ((quote_ident((nsp.nspname)::text) || '.'::text) || quote_ident((typ.typname)::text)) + END AS objname, + l.provider, + l.label + FROM ((pg_seclabel l + JOIN pg_type typ ON (((l.classoid = typ.tableoid) AND (l.objoid = typ.oid)))) + JOIN pg_namespace nsp ON ((typ.typnamespace = nsp.oid))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + 'large object'::text AS objtype, + NULL::oid AS objnamespace, + (l.objoid)::text AS objname, + l.provider, + l.label + FROM (pg_seclabel l + JOIN pg_largeobject_metadata lom ON ((l.objoid = lom.oid))) + WHERE ((l.classoid = ('pg_largeobject'::regclass)::oid) AND (l.objsubid = 0)) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + 'language'::text AS objtype, + NULL::oid AS objnamespace, + quote_ident((lan.lanname)::text) AS objname, + l.provider, + l.label + FROM (pg_seclabel l + JOIN pg_language lan ON (((l.classoid = lan.tableoid) AND (l.objoid = lan.oid)))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + 'schema'::text AS objtype, + nsp.oid AS objnamespace, + quote_ident((nsp.nspname)::text) AS objname, + l.provider, + l.label + FROM (pg_seclabel l + JOIN pg_namespace nsp ON (((l.classoid = nsp.tableoid) AND (l.objoid = nsp.oid)))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + 'event trigger'::text AS objtype, + NULL::oid AS objnamespace, + quote_ident((evt.evtname)::text) AS objname, + l.provider, + l.label + FROM (pg_seclabel l + JOIN pg_event_trigger evt ON (((l.classoid = evt.tableoid) AND (l.objoid = evt.oid)))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + l.objsubid, + 'publication'::text AS objtype, + NULL::oid AS objnamespace, + quote_ident((p.pubname)::text) AS objname, + l.provider, + l.label + FROM (pg_seclabel l + JOIN pg_publication p ON (((l.classoid = p.tableoid) AND (l.objoid = p.oid)))) + WHERE (l.objsubid = 0) +UNION ALL + SELECT l.objoid, + l.classoid, + 0 AS objsubid, + 'subscription'::text AS objtype, + NULL::oid AS objnamespace, + quote_ident((s.subname)::text) AS objname, + l.provider, + l.label + FROM (pg_shseclabel l + JOIN pg_subscription s ON (((l.classoid = s.tableoid) AND (l.objoid = s.oid)))) +UNION ALL + SELECT l.objoid, + l.classoid, + 0 AS objsubid, + 'database'::text AS objtype, + NULL::oid AS objnamespace, + quote_ident((dat.datname)::text) AS objname, + l.provider, + l.label + FROM (pg_shseclabel l + JOIN pg_database dat ON (((l.classoid = dat.tableoid) AND (l.objoid = dat.oid)))) +UNION ALL + SELECT l.objoid, + l.classoid, + 0 AS objsubid, + 'tablespace'::text AS objtype, + NULL::oid AS objnamespace, + quote_ident((spc.spcname)::text) AS objname, + l.provider, + l.label + FROM (pg_shseclabel l + JOIN pg_tablespace spc ON (((l.classoid = spc.tableoid) AND (l.objoid = spc.oid)))) +UNION ALL + SELECT l.objoid, + l.classoid, + 0 AS objsubid, + 'role'::text AS objtype, + 
NULL::oid AS objnamespace, + quote_ident((rol.rolname)::text) AS objname, + l.provider, + l.label + FROM (pg_shseclabel l + JOIN pg_authid rol ON (((l.classoid = rol.tableoid) AND (l.objoid = rol.oid)))); +pg_sequences| SELECT n.nspname AS schemaname, + c.relname AS sequencename, + pg_get_userbyid(c.relowner) AS sequenceowner, + (s.seqtypid)::regtype AS data_type, + s.seqstart AS start_value, + s.seqmin AS min_value, + s.seqmax AS max_value, + s.seqincrement AS increment_by, + s.seqcycle AS cycle, + s.seqcache AS cache_size, + CASE + WHEN has_sequence_privilege(c.oid, 'SELECT,USAGE'::text) THEN pg_sequence_last_value((c.oid)::regclass) + ELSE NULL::bigint + END AS last_value + FROM ((pg_sequence s + JOIN pg_class c ON ((c.oid = s.seqrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE ((NOT pg_is_other_temp_schema(n.oid)) AND (c.relkind = 'S'::"char")); +pg_settings| SELECT name, + setting, + unit, + category, + short_desc, + extra_desc, + context, + vartype, + source, + min_val, + max_val, + enumvals, + boot_val, + reset_val, + sourcefile, + sourceline, + pending_restart + FROM pg_show_all_settings() a(name, setting, unit, category, short_desc, extra_desc, context, vartype, source, min_val, max_val, enumvals, boot_val, reset_val, sourcefile, sourceline, pending_restart); +pg_shadow| SELECT pg_authid.rolname AS usename, + pg_authid.oid AS usesysid, + pg_authid.rolcreatedb AS usecreatedb, + pg_authid.rolsuper AS usesuper, + pg_authid.rolreplication AS userepl, + pg_authid.rolbypassrls AS usebypassrls, + pg_authid.rolpassword AS passwd, + pg_authid.rolvaliduntil AS valuntil, + s.setconfig AS useconfig + FROM (pg_authid + LEFT JOIN pg_db_role_setting s ON (((pg_authid.oid = s.setrole) AND (s.setdatabase = (0)::oid)))) + WHERE pg_authid.rolcanlogin; +pg_shmem_allocations| SELECT name, + off, + size, + allocated_size + FROM pg_get_shmem_allocations() pg_get_shmem_allocations(name, off, size, allocated_size); +pg_stat_activity| SELECT s.datid, + d.datname, + s.pid, + s.leader_pid, + s.usesysid, + u.rolname AS usename, + s.application_name, + s.client_addr, + s.client_hostname, + s.client_port, + s.backend_start, + s.xact_start, + s.query_start, + s.state_change, + s.wait_event_type, + s.wait_event, + s.state, + s.backend_xid, + s.backend_xmin, + s.query_id, + s.query, + s.backend_type + FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) + LEFT JOIN pg_database d ON ((s.datid = d.oid))) + LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); +pg_stat_all_indexes| SELECT c.oid AS relid, + i.oid AS indexrelid, + n.nspname AS schemaname, + c.relname, + i.relname AS indexrelname, + pg_stat_get_numscans(i.oid) AS idx_scan, + pg_stat_get_lastscan(i.oid) AS last_idx_scan, + pg_stat_get_tuples_returned(i.oid) AS idx_tup_read, + pg_stat_get_tuples_fetched(i.oid) AS idx_tup_fetch + FROM (((pg_class c + JOIN pg_index x ON ((c.oid = x.indrelid))) + JOIN pg_class i ON ((i.oid = x.indexrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); +pg_stat_all_tables| SELECT c.oid AS relid, + n.nspname AS schemaname, + c.relname, + 
pg_stat_get_numscans(c.oid) AS seq_scan, + pg_stat_get_lastscan(c.oid) AS last_seq_scan, + pg_stat_get_tuples_returned(c.oid) AS seq_tup_read, + (sum(pg_stat_get_numscans(i.indexrelid)))::bigint AS idx_scan, + max(pg_stat_get_lastscan(i.indexrelid)) AS last_idx_scan, + ((sum(pg_stat_get_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_tuples_fetched(c.oid)) AS idx_tup_fetch, + pg_stat_get_tuples_inserted(c.oid) AS n_tup_ins, + pg_stat_get_tuples_updated(c.oid) AS n_tup_upd, + pg_stat_get_tuples_deleted(c.oid) AS n_tup_del, + pg_stat_get_tuples_hot_updated(c.oid) AS n_tup_hot_upd, + pg_stat_get_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd, + pg_stat_get_live_tuples(c.oid) AS n_live_tup, + pg_stat_get_dead_tuples(c.oid) AS n_dead_tup, + pg_stat_get_mod_since_analyze(c.oid) AS n_mod_since_analyze, + pg_stat_get_ins_since_vacuum(c.oid) AS n_ins_since_vacuum, + pg_stat_get_last_vacuum_time(c.oid) AS last_vacuum, + pg_stat_get_last_autovacuum_time(c.oid) AS last_autovacuum, + pg_stat_get_last_analyze_time(c.oid) AS last_analyze, + pg_stat_get_last_autoanalyze_time(c.oid) AS last_autoanalyze, + pg_stat_get_vacuum_count(c.oid) AS vacuum_count, + pg_stat_get_autovacuum_count(c.oid) AS autovacuum_count, + pg_stat_get_analyze_count(c.oid) AS analyze_count, + pg_stat_get_autoanalyze_count(c.oid) AS autoanalyze_count + FROM ((pg_class c + LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) + GROUP BY c.oid, n.nspname, c.relname; +pg_stat_archiver| SELECT archived_count, + last_archived_wal, + last_archived_time, + failed_count, + last_failed_wal, + last_failed_time, + stats_reset + FROM pg_stat_get_archiver() s(archived_count, last_archived_wal, last_archived_time, failed_count, last_failed_wal, last_failed_time, stats_reset); +pg_stat_bgwriter| SELECT pg_stat_get_bgwriter_timed_checkpoints() AS checkpoints_timed, + pg_stat_get_bgwriter_requested_checkpoints() AS checkpoints_req, + pg_stat_get_checkpoint_write_time() AS checkpoint_write_time, + pg_stat_get_checkpoint_sync_time() AS checkpoint_sync_time, + pg_stat_get_bgwriter_buf_written_checkpoints() AS buffers_checkpoint, + pg_stat_get_bgwriter_buf_written_clean() AS buffers_clean, + pg_stat_get_bgwriter_maxwritten_clean() AS maxwritten_clean, + pg_stat_get_buf_written_backend() AS buffers_backend, + pg_stat_get_buf_fsync_backend() AS buffers_backend_fsync, + pg_stat_get_buf_alloc() AS buffers_alloc, + pg_stat_get_bgwriter_stat_reset_time() AS stats_reset; +pg_stat_database| SELECT oid AS datid, + datname, + CASE + WHEN (oid = (0)::oid) THEN 0 + ELSE pg_stat_get_db_numbackends(oid) + END AS numbackends, + pg_stat_get_db_xact_commit(oid) AS xact_commit, + pg_stat_get_db_xact_rollback(oid) AS xact_rollback, + (pg_stat_get_db_blocks_fetched(oid) - pg_stat_get_db_blocks_hit(oid)) AS blks_read, + pg_stat_get_db_blocks_hit(oid) AS blks_hit, + pg_stat_get_db_tuples_returned(oid) AS tup_returned, + pg_stat_get_db_tuples_fetched(oid) AS tup_fetched, + pg_stat_get_db_tuples_inserted(oid) AS tup_inserted, + pg_stat_get_db_tuples_updated(oid) AS tup_updated, + pg_stat_get_db_tuples_deleted(oid) AS tup_deleted, + pg_stat_get_db_conflict_all(oid) AS conflicts, + pg_stat_get_db_temp_files(oid) AS temp_files, + pg_stat_get_db_temp_bytes(oid) AS temp_bytes, + pg_stat_get_db_deadlocks(oid) AS deadlocks, + pg_stat_get_db_checksum_failures(oid) AS checksum_failures, + pg_stat_get_db_checksum_last_failure(oid) AS 
checksum_last_failure, + pg_stat_get_db_blk_read_time(oid) AS blk_read_time, + pg_stat_get_db_blk_write_time(oid) AS blk_write_time, + pg_stat_get_db_session_time(oid) AS session_time, + pg_stat_get_db_active_time(oid) AS active_time, + pg_stat_get_db_idle_in_transaction_time(oid) AS idle_in_transaction_time, + pg_stat_get_db_sessions(oid) AS sessions, + pg_stat_get_db_sessions_abandoned(oid) AS sessions_abandoned, + pg_stat_get_db_sessions_fatal(oid) AS sessions_fatal, + pg_stat_get_db_sessions_killed(oid) AS sessions_killed, + pg_stat_get_db_stat_reset_time(oid) AS stats_reset + FROM ( SELECT 0 AS oid, + NULL::name AS datname + UNION ALL + SELECT pg_database.oid, + pg_database.datname + FROM pg_database) d; +pg_stat_database_conflicts| SELECT oid AS datid, + datname, + pg_stat_get_db_conflict_tablespace(oid) AS confl_tablespace, + pg_stat_get_db_conflict_lock(oid) AS confl_lock, + pg_stat_get_db_conflict_snapshot(oid) AS confl_snapshot, + pg_stat_get_db_conflict_bufferpin(oid) AS confl_bufferpin, + pg_stat_get_db_conflict_startup_deadlock(oid) AS confl_deadlock, + pg_stat_get_db_conflict_logicalslot(oid) AS confl_active_logicalslot + FROM pg_database d; +pg_stat_gssapi| SELECT pid, + gss_auth AS gss_authenticated, + gss_princ AS principal, + gss_enc AS encrypted, + gss_delegation AS credentials_delegated + FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) + WHERE (client_port IS NOT NULL); +pg_stat_io| SELECT backend_type, + object, + context, + reads, + read_time, + writes, + write_time, + writebacks, + writeback_time, + extends, + extend_time, + op_bytes, + hits, + evictions, + reuses, + fsyncs, + fsync_time, + stats_reset + FROM pg_stat_get_io() b(backend_type, object, context, reads, read_time, writes, write_time, writebacks, writeback_time, extends, extend_time, op_bytes, hits, evictions, reuses, fsyncs, fsync_time, stats_reset); +pg_stat_progress_analyze| SELECT s.pid, + s.datid, + d.datname, + s.relid, + CASE s.param1 + WHEN 0 THEN 'initializing'::text + WHEN 1 THEN 'acquiring sample rows'::text + WHEN 2 THEN 'acquiring inherited sample rows'::text + WHEN 3 THEN 'computing statistics'::text + WHEN 4 THEN 'computing extended statistics'::text + WHEN 5 THEN 'finalizing analyze'::text + ELSE NULL::text + END AS phase, + s.param2 AS sample_blks_total, + s.param3 AS sample_blks_scanned, + s.param4 AS ext_stats_total, + s.param5 AS ext_stats_computed, + s.param6 AS child_tables_total, + s.param7 AS child_tables_done, + (s.param8)::oid AS current_child_table_relid + FROM (pg_stat_get_progress_info('ANALYZE'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) + LEFT JOIN pg_database d ON ((s.datid = d.oid))); +pg_stat_progress_basebackup| SELECT pid, + CASE param1 + WHEN 0 THEN 'initializing'::text + WHEN 1 THEN 'waiting for checkpoint to finish'::text + WHEN 2 THEN 'estimating backup size'::text + WHEN 3 THEN 'streaming database files'::text + WHEN 4 THEN 'waiting for wal archiving to finish'::text + WHEN 5 THEN 'transferring wal files'::text + ELSE NULL::text + END AS phase, + CASE 
param2 + WHEN '-1'::integer THEN NULL::bigint + ELSE param2 + END AS backup_total, + param3 AS backup_streamed, + param4 AS tablespaces_total, + param5 AS tablespaces_streamed + FROM pg_stat_get_progress_info('BASEBACKUP'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20); +pg_stat_progress_cluster| SELECT s.pid, + s.datid, + d.datname, + s.relid, + CASE s.param1 + WHEN 1 THEN 'CLUSTER'::text + WHEN 2 THEN 'VACUUM FULL'::text + ELSE NULL::text + END AS command, + CASE s.param2 + WHEN 0 THEN 'initializing'::text + WHEN 1 THEN 'seq scanning heap'::text + WHEN 2 THEN 'index scanning heap'::text + WHEN 3 THEN 'sorting tuples'::text + WHEN 4 THEN 'writing new heap'::text + WHEN 5 THEN 'swapping relation files'::text + WHEN 6 THEN 'rebuilding index'::text + WHEN 7 THEN 'performing final cleanup'::text + ELSE NULL::text + END AS phase, + (s.param3)::oid AS cluster_index_relid, + s.param4 AS heap_tuples_scanned, + s.param5 AS heap_tuples_written, + s.param6 AS heap_blks_total, + s.param7 AS heap_blks_scanned, + s.param8 AS index_rebuild_count + FROM (pg_stat_get_progress_info('CLUSTER'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) + LEFT JOIN pg_database d ON ((s.datid = d.oid))); +pg_stat_progress_copy| SELECT s.pid, + s.datid, + d.datname, + s.relid, + CASE s.param5 + WHEN 1 THEN 'COPY FROM'::text + WHEN 2 THEN 'COPY TO'::text + ELSE NULL::text + END AS command, + CASE s.param6 + WHEN 1 THEN 'FILE'::text + WHEN 2 THEN 'PROGRAM'::text + WHEN 3 THEN 'PIPE'::text + WHEN 4 THEN 'CALLBACK'::text + ELSE NULL::text + END AS type, + s.param1 AS bytes_processed, + s.param2 AS bytes_total, + s.param3 AS tuples_processed, + s.param4 AS tuples_excluded + FROM (pg_stat_get_progress_info('COPY'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) + LEFT JOIN pg_database d ON ((s.datid = d.oid))); +pg_stat_progress_create_index| SELECT s.pid, + s.datid, + d.datname, + s.relid, + (s.param7)::oid AS index_relid, + CASE s.param1 + WHEN 1 THEN 'CREATE INDEX'::text + WHEN 2 THEN 'CREATE INDEX CONCURRENTLY'::text + WHEN 3 THEN 'REINDEX'::text + WHEN 4 THEN 'REINDEX CONCURRENTLY'::text + ELSE NULL::text + END AS command, + CASE s.param10 + WHEN 0 THEN 'initializing'::text + WHEN 1 THEN 'waiting for writers before build'::text + WHEN 2 THEN ('building index'::text || COALESCE((': '::text || pg_indexam_progress_phasename((s.param9)::oid, s.param11)), ''::text)) + WHEN 3 THEN 'waiting for writers before validation'::text + WHEN 4 THEN 'index validation: scanning index'::text + WHEN 5 THEN 'index validation: sorting tuples'::text + WHEN 6 THEN 'index validation: scanning table'::text + WHEN 7 THEN 'waiting for old snapshots'::text + WHEN 8 THEN 'waiting for readers before marking dead'::text + WHEN 9 THEN 'waiting for readers before dropping'::text + ELSE NULL::text + END AS phase, + s.param4 AS lockers_total, + s.param5 AS lockers_done, + s.param6 AS current_locker_pid, + s.param16 AS blocks_total, + s.param17 AS blocks_done, + s.param12 AS tuples_total, + s.param13 AS tuples_done, + s.param14 AS partitions_total, + s.param15 AS partitions_done + FROM 
(pg_stat_get_progress_info('CREATE INDEX'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) + LEFT JOIN pg_database d ON ((s.datid = d.oid))); +pg_stat_progress_vacuum| SELECT s.pid, + s.datid, + d.datname, + s.relid, + CASE s.param1 + WHEN 0 THEN 'initializing'::text + WHEN 1 THEN 'scanning heap'::text + WHEN 2 THEN 'vacuuming indexes'::text + WHEN 3 THEN 'vacuuming heap'::text + WHEN 4 THEN 'cleaning up indexes'::text + WHEN 5 THEN 'truncating heap'::text + WHEN 6 THEN 'performing final cleanup'::text + ELSE NULL::text + END AS phase, + s.param2 AS heap_blks_total, + s.param3 AS heap_blks_scanned, + s.param4 AS heap_blks_vacuumed, + s.param5 AS index_vacuum_count, + s.param6 AS max_dead_tuples, + s.param7 AS num_dead_tuples + FROM (pg_stat_get_progress_info('VACUUM'::text) s(pid, datid, relid, param1, param2, param3, param4, param5, param6, param7, param8, param9, param10, param11, param12, param13, param14, param15, param16, param17, param18, param19, param20) + LEFT JOIN pg_database d ON ((s.datid = d.oid))); +pg_stat_recovery_prefetch| SELECT stats_reset, + prefetch, + hit, + skip_init, + skip_new, + skip_fpw, + skip_rep, + wal_distance, + block_distance, + io_depth + FROM pg_stat_get_recovery_prefetch() s(stats_reset, prefetch, hit, skip_init, skip_new, skip_fpw, skip_rep, wal_distance, block_distance, io_depth); +pg_stat_replication| SELECT s.pid, + s.usesysid, + u.rolname AS usename, + s.application_name, + s.client_addr, + s.client_hostname, + s.client_port, + s.backend_start, + s.backend_xmin, + w.state, + w.sent_lsn, + w.write_lsn, + w.flush_lsn, + w.replay_lsn, + w.write_lag, + w.flush_lag, + w.replay_lag, + w.sync_priority, + w.sync_state, + w.reply_time + FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) + JOIN pg_stat_get_wal_senders() w(pid, state, sent_lsn, write_lsn, flush_lsn, replay_lsn, write_lag, flush_lag, replay_lag, sync_priority, sync_state, reply_time) ON ((s.pid = w.pid))) + LEFT JOIN pg_authid u ON ((s.usesysid = u.oid))); +pg_stat_replication_slots| SELECT s.slot_name, + s.spill_txns, + s.spill_count, + s.spill_bytes, + s.stream_txns, + s.stream_count, + s.stream_bytes, + s.total_txns, + s.total_bytes, + s.stats_reset + FROM pg_replication_slots r, + LATERAL pg_stat_get_replication_slot((r.slot_name)::text) s(slot_name, spill_txns, spill_count, spill_bytes, stream_txns, stream_count, stream_bytes, total_txns, total_bytes, stats_reset) + WHERE (r.datoid IS NOT NULL); +pg_stat_slru| SELECT name, + blks_zeroed, + blks_hit, + blks_read, + blks_written, + blks_exists, + flushes, + truncates, + stats_reset + FROM pg_stat_get_slru() s(name, blks_zeroed, blks_hit, blks_read, blks_written, blks_exists, flushes, truncates, stats_reset); +pg_stat_ssl| SELECT pid, + ssl, + sslversion AS version, + sslcipher AS cipher, + sslbits AS bits, + ssl_client_dn AS client_dn, + ssl_client_serial AS client_serial, + ssl_issuer_dn AS issuer_dn + FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, 
xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth, gss_princ, gss_enc, gss_delegation, leader_pid, query_id) + WHERE (client_port IS NOT NULL); +pg_stat_subscription| SELECT su.oid AS subid, + su.subname, + st.pid, + st.leader_pid, + st.relid, + st.received_lsn, + st.last_msg_send_time, + st.last_msg_receipt_time, + st.latest_end_lsn, + st.latest_end_time + FROM (pg_subscription su + LEFT JOIN pg_stat_get_subscription(NULL::oid) st(subid, relid, pid, leader_pid, received_lsn, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time) ON ((st.subid = su.oid))); +pg_stat_subscription_stats| SELECT ss.subid, + s.subname, + ss.apply_error_count, + ss.sync_error_count, + ss.stats_reset + FROM pg_subscription s, + LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, stats_reset); +pg_stat_sys_indexes| SELECT relid, + indexrelid, + schemaname, + relname, + indexrelname, + idx_scan, + last_idx_scan, + idx_tup_read, + idx_tup_fetch + FROM pg_stat_all_indexes + WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); +pg_stat_sys_tables| SELECT relid, + schemaname, + relname, + seq_scan, + last_seq_scan, + seq_tup_read, + idx_scan, + last_idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_tup_newpage_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + n_ins_since_vacuum, + last_vacuum, + last_autovacuum, + last_analyze, + last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count + FROM pg_stat_all_tables + WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); +pg_stat_user_functions| SELECT p.oid AS funcid, + n.nspname AS schemaname, + p.proname AS funcname, + pg_stat_get_function_calls(p.oid) AS calls, + pg_stat_get_function_total_time(p.oid) AS total_time, + pg_stat_get_function_self_time(p.oid) AS self_time + FROM (pg_proc p + LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) + WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_function_calls(p.oid) IS NOT NULL)); +pg_stat_user_indexes| SELECT relid, + indexrelid, + schemaname, + relname, + indexrelname, + idx_scan, + last_idx_scan, + idx_tup_read, + idx_tup_fetch + FROM pg_stat_all_indexes + WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_stat_user_tables| SELECT relid, + schemaname, + relname, + seq_scan, + last_seq_scan, + seq_tup_read, + idx_scan, + last_idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_tup_newpage_upd, + n_live_tup, + n_dead_tup, + n_mod_since_analyze, + n_ins_since_vacuum, + last_vacuum, + last_autovacuum, + last_analyze, + last_autoanalyze, + vacuum_count, + autovacuum_count, + analyze_count, + autoanalyze_count + FROM pg_stat_all_tables + WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_stat_wal| SELECT wal_records, + wal_fpi, + wal_bytes, + wal_buffers_full, + wal_write, + wal_sync, + wal_write_time, + wal_sync_time, + stats_reset + FROM pg_stat_get_wal() w(wal_records, wal_fpi, wal_bytes, wal_buffers_full, wal_write, wal_sync, wal_write_time, wal_sync_time, stats_reset); 
+pg_stat_wal_receiver| SELECT pid, + status, + receive_start_lsn, + receive_start_tli, + written_lsn, + flushed_lsn, + received_tli, + last_msg_send_time, + last_msg_receipt_time, + latest_end_lsn, + latest_end_time, + slot_name, + sender_host, + sender_port, + conninfo + FROM pg_stat_get_wal_receiver() s(pid, status, receive_start_lsn, receive_start_tli, written_lsn, flushed_lsn, received_tli, last_msg_send_time, last_msg_receipt_time, latest_end_lsn, latest_end_time, slot_name, sender_host, sender_port, conninfo) + WHERE (pid IS NOT NULL); +pg_stat_xact_all_tables| SELECT c.oid AS relid, + n.nspname AS schemaname, + c.relname, + pg_stat_get_xact_numscans(c.oid) AS seq_scan, + pg_stat_get_xact_tuples_returned(c.oid) AS seq_tup_read, + (sum(pg_stat_get_xact_numscans(i.indexrelid)))::bigint AS idx_scan, + ((sum(pg_stat_get_xact_tuples_fetched(i.indexrelid)))::bigint + pg_stat_get_xact_tuples_fetched(c.oid)) AS idx_tup_fetch, + pg_stat_get_xact_tuples_inserted(c.oid) AS n_tup_ins, + pg_stat_get_xact_tuples_updated(c.oid) AS n_tup_upd, + pg_stat_get_xact_tuples_deleted(c.oid) AS n_tup_del, + pg_stat_get_xact_tuples_hot_updated(c.oid) AS n_tup_hot_upd, + pg_stat_get_xact_tuples_newpage_updated(c.oid) AS n_tup_newpage_upd + FROM ((pg_class c + LEFT JOIN pg_index i ON ((c.oid = i.indrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char", 'p'::"char"])) + GROUP BY c.oid, n.nspname, c.relname; +pg_stat_xact_sys_tables| SELECT relid, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_tup_newpage_upd + FROM pg_stat_xact_all_tables + WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); +pg_stat_xact_user_functions| SELECT p.oid AS funcid, + n.nspname AS schemaname, + p.proname AS funcname, + pg_stat_get_xact_function_calls(p.oid) AS calls, + pg_stat_get_xact_function_total_time(p.oid) AS total_time, + pg_stat_get_xact_function_self_time(p.oid) AS self_time + FROM (pg_proc p + LEFT JOIN pg_namespace n ON ((n.oid = p.pronamespace))) + WHERE ((p.prolang <> (12)::oid) AND (pg_stat_get_xact_function_calls(p.oid) IS NOT NULL)); +pg_stat_xact_user_tables| SELECT relid, + schemaname, + relname, + seq_scan, + seq_tup_read, + idx_scan, + idx_tup_fetch, + n_tup_ins, + n_tup_upd, + n_tup_del, + n_tup_hot_upd, + n_tup_newpage_upd + FROM pg_stat_xact_all_tables + WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_statio_all_indexes| SELECT c.oid AS relid, + i.oid AS indexrelid, + n.nspname AS schemaname, + c.relname, + i.relname AS indexrelname, + (pg_stat_get_blocks_fetched(i.oid) - pg_stat_get_blocks_hit(i.oid)) AS idx_blks_read, + pg_stat_get_blocks_hit(i.oid) AS idx_blks_hit + FROM (((pg_class c + JOIN pg_index x ON ((c.oid = x.indrelid))) + JOIN pg_class i ON ((i.oid = x.indexrelid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); +pg_statio_all_sequences| SELECT c.oid AS relid, + n.nspname AS schemaname, + c.relname, + (pg_stat_get_blocks_fetched(c.oid) - pg_stat_get_blocks_hit(c.oid)) AS blks_read, + pg_stat_get_blocks_hit(c.oid) AS blks_hit + FROM (pg_class c + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (c.relkind = 'S'::"char"); +pg_statio_all_tables| SELECT c.oid AS relid, + n.nspname AS 
schemaname, + c.relname, + (pg_stat_get_blocks_fetched(c.oid) - pg_stat_get_blocks_hit(c.oid)) AS heap_blks_read, + pg_stat_get_blocks_hit(c.oid) AS heap_blks_hit, + i.idx_blks_read, + i.idx_blks_hit, + (pg_stat_get_blocks_fetched(t.oid) - pg_stat_get_blocks_hit(t.oid)) AS toast_blks_read, + pg_stat_get_blocks_hit(t.oid) AS toast_blks_hit, + x.idx_blks_read AS tidx_blks_read, + x.idx_blks_hit AS tidx_blks_hit + FROM ((((pg_class c + LEFT JOIN pg_class t ON ((c.reltoastrelid = t.oid))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, + (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit + FROM pg_index + WHERE (pg_index.indrelid = c.oid)) i ON (true)) + LEFT JOIN LATERAL ( SELECT (sum((pg_stat_get_blocks_fetched(pg_index.indexrelid) - pg_stat_get_blocks_hit(pg_index.indexrelid))))::bigint AS idx_blks_read, + (sum(pg_stat_get_blocks_hit(pg_index.indexrelid)))::bigint AS idx_blks_hit + FROM pg_index + WHERE (pg_index.indrelid = t.oid)) x ON (true)) + WHERE (c.relkind = ANY (ARRAY['r'::"char", 't'::"char", 'm'::"char"])); +pg_statio_sys_indexes| SELECT relid, + indexrelid, + schemaname, + relname, + indexrelname, + idx_blks_read, + idx_blks_hit + FROM pg_statio_all_indexes + WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); +pg_statio_sys_sequences| SELECT relid, + schemaname, + relname, + blks_read, + blks_hit + FROM pg_statio_all_sequences + WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); +pg_statio_sys_tables| SELECT relid, + schemaname, + relname, + heap_blks_read, + heap_blks_hit, + idx_blks_read, + idx_blks_hit, + toast_blks_read, + toast_blks_hit, + tidx_blks_read, + tidx_blks_hit + FROM pg_statio_all_tables + WHERE ((schemaname = ANY (ARRAY['pg_catalog'::name, 'information_schema'::name])) OR (schemaname ~ '^pg_toast'::text)); +pg_statio_user_indexes| SELECT relid, + indexrelid, + schemaname, + relname, + indexrelname, + idx_blks_read, + idx_blks_hit + FROM pg_statio_all_indexes + WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_statio_user_sequences| SELECT relid, + schemaname, + relname, + blks_read, + blks_hit + FROM pg_statio_all_sequences + WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_statio_user_tables| SELECT relid, + schemaname, + relname, + heap_blks_read, + heap_blks_hit, + idx_blks_read, + idx_blks_hit, + toast_blks_read, + toast_blks_hit, + tidx_blks_read, + tidx_blks_hit + FROM pg_statio_all_tables + WHERE ((schemaname <> ALL (ARRAY['pg_catalog'::name, 'information_schema'::name])) AND (schemaname !~ '^pg_toast'::text)); +pg_stats| SELECT n.nspname AS schemaname, + c.relname AS tablename, + a.attname, + s.stainherit AS inherited, + s.stanullfrac AS null_frac, + s.stawidth AS avg_width, + s.stadistinct AS n_distinct, + CASE + WHEN (s.stakind1 = 1) THEN s.stavalues1 + WHEN (s.stakind2 = 1) THEN s.stavalues2 + WHEN (s.stakind3 = 1) THEN s.stavalues3 + WHEN (s.stakind4 = 1) THEN s.stavalues4 + WHEN (s.stakind5 = 1) THEN s.stavalues5 + ELSE NULL::anyarray + END AS most_common_vals, + CASE + WHEN (s.stakind1 = 1) THEN s.stanumbers1 + WHEN (s.stakind2 = 1) THEN s.stanumbers2 + WHEN (s.stakind3 = 
1) THEN s.stanumbers3 + WHEN (s.stakind4 = 1) THEN s.stanumbers4 + WHEN (s.stakind5 = 1) THEN s.stanumbers5 + ELSE NULL::real[] + END AS most_common_freqs, + CASE + WHEN (s.stakind1 = 2) THEN s.stavalues1 + WHEN (s.stakind2 = 2) THEN s.stavalues2 + WHEN (s.stakind3 = 2) THEN s.stavalues3 + WHEN (s.stakind4 = 2) THEN s.stavalues4 + WHEN (s.stakind5 = 2) THEN s.stavalues5 + ELSE NULL::anyarray + END AS histogram_bounds, + CASE + WHEN (s.stakind1 = 3) THEN s.stanumbers1[1] + WHEN (s.stakind2 = 3) THEN s.stanumbers2[1] + WHEN (s.stakind3 = 3) THEN s.stanumbers3[1] + WHEN (s.stakind4 = 3) THEN s.stanumbers4[1] + WHEN (s.stakind5 = 3) THEN s.stanumbers5[1] + ELSE NULL::real + END AS correlation, + CASE + WHEN (s.stakind1 = 4) THEN s.stavalues1 + WHEN (s.stakind2 = 4) THEN s.stavalues2 + WHEN (s.stakind3 = 4) THEN s.stavalues3 + WHEN (s.stakind4 = 4) THEN s.stavalues4 + WHEN (s.stakind5 = 4) THEN s.stavalues5 + ELSE NULL::anyarray + END AS most_common_elems, + CASE + WHEN (s.stakind1 = 4) THEN s.stanumbers1 + WHEN (s.stakind2 = 4) THEN s.stanumbers2 + WHEN (s.stakind3 = 4) THEN s.stanumbers3 + WHEN (s.stakind4 = 4) THEN s.stanumbers4 + WHEN (s.stakind5 = 4) THEN s.stanumbers5 + ELSE NULL::real[] + END AS most_common_elem_freqs, + CASE + WHEN (s.stakind1 = 5) THEN s.stanumbers1 + WHEN (s.stakind2 = 5) THEN s.stanumbers2 + WHEN (s.stakind3 = 5) THEN s.stanumbers3 + WHEN (s.stakind4 = 5) THEN s.stanumbers4 + WHEN (s.stakind5 = 5) THEN s.stanumbers5 + ELSE NULL::real[] + END AS elem_count_histogram + FROM (((pg_statistic s + JOIN pg_class c ON ((c.oid = s.starelid))) + JOIN pg_attribute a ON (((c.oid = a.attrelid) AND (a.attnum = s.staattnum)))) + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE ((NOT a.attisdropped) AND has_column_privilege(c.oid, a.attnum, 'select'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); +pg_stats_ext| SELECT cn.nspname AS schemaname, + c.relname AS tablename, + sn.nspname AS statistics_schemaname, + s.stxname AS statistics_name, + pg_get_userbyid(s.stxowner) AS statistics_owner, + ( SELECT array_agg(a.attname ORDER BY a.attnum) AS array_agg + FROM (unnest(s.stxkeys) k(k) + JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k))))) AS attnames, + pg_get_statisticsobjdef_expressions(s.oid) AS exprs, + s.stxkind AS kinds, + sd.stxdinherit AS inherited, + sd.stxdndistinct AS n_distinct, + sd.stxddependencies AS dependencies, + m.most_common_vals, + m.most_common_val_nulls, + m.most_common_freqs, + m.most_common_base_freqs + FROM (((((pg_statistic_ext s + JOIN pg_class c ON ((c.oid = s.stxrelid))) + JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) + LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) + LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) + LEFT JOIN LATERAL ( SELECT array_agg(pg_mcv_list_items."values") AS most_common_vals, + array_agg(pg_mcv_list_items.nulls) AS most_common_val_nulls, + array_agg(pg_mcv_list_items.frequency) AS most_common_freqs, + array_agg(pg_mcv_list_items.base_frequency) AS most_common_base_freqs + FROM pg_mcv_list_items(sd.stxdmcv) pg_mcv_list_items(index, "values", nulls, frequency, base_frequency)) m ON ((sd.stxdmcv IS NOT NULL))) + WHERE ((NOT (EXISTS ( SELECT 1 + FROM (unnest(s.stxkeys) k(k) + JOIN pg_attribute a ON (((a.attrelid = s.stxrelid) AND (a.attnum = k.k)))) + WHERE (NOT has_column_privilege(c.oid, a.attnum, 'select'::text))))) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); +pg_stats_ext_exprs| SELECT cn.nspname AS 
schemaname, + c.relname AS tablename, + sn.nspname AS statistics_schemaname, + s.stxname AS statistics_name, + pg_get_userbyid(s.stxowner) AS statistics_owner, + stat.expr, + sd.stxdinherit AS inherited, + (stat.a).stanullfrac AS null_frac, + (stat.a).stawidth AS avg_width, + (stat.a).stadistinct AS n_distinct, + CASE + WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stavalues1 + WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stavalues2 + WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stavalues3 + WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stavalues4 + WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stavalues5 + ELSE NULL::anyarray + END AS most_common_vals, + CASE + WHEN ((stat.a).stakind1 = 1) THEN (stat.a).stanumbers1 + WHEN ((stat.a).stakind2 = 1) THEN (stat.a).stanumbers2 + WHEN ((stat.a).stakind3 = 1) THEN (stat.a).stanumbers3 + WHEN ((stat.a).stakind4 = 1) THEN (stat.a).stanumbers4 + WHEN ((stat.a).stakind5 = 1) THEN (stat.a).stanumbers5 + ELSE NULL::real[] + END AS most_common_freqs, + CASE + WHEN ((stat.a).stakind1 = 2) THEN (stat.a).stavalues1 + WHEN ((stat.a).stakind2 = 2) THEN (stat.a).stavalues2 + WHEN ((stat.a).stakind3 = 2) THEN (stat.a).stavalues3 + WHEN ((stat.a).stakind4 = 2) THEN (stat.a).stavalues4 + WHEN ((stat.a).stakind5 = 2) THEN (stat.a).stavalues5 + ELSE NULL::anyarray + END AS histogram_bounds, + CASE + WHEN ((stat.a).stakind1 = 3) THEN (stat.a).stanumbers1[1] + WHEN ((stat.a).stakind2 = 3) THEN (stat.a).stanumbers2[1] + WHEN ((stat.a).stakind3 = 3) THEN (stat.a).stanumbers3[1] + WHEN ((stat.a).stakind4 = 3) THEN (stat.a).stanumbers4[1] + WHEN ((stat.a).stakind5 = 3) THEN (stat.a).stanumbers5[1] + ELSE NULL::real + END AS correlation, + CASE + WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stavalues1 + WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stavalues2 + WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stavalues3 + WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stavalues4 + WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stavalues5 + ELSE NULL::anyarray + END AS most_common_elems, + CASE + WHEN ((stat.a).stakind1 = 4) THEN (stat.a).stanumbers1 + WHEN ((stat.a).stakind2 = 4) THEN (stat.a).stanumbers2 + WHEN ((stat.a).stakind3 = 4) THEN (stat.a).stanumbers3 + WHEN ((stat.a).stakind4 = 4) THEN (stat.a).stanumbers4 + WHEN ((stat.a).stakind5 = 4) THEN (stat.a).stanumbers5 + ELSE NULL::real[] + END AS most_common_elem_freqs, + CASE + WHEN ((stat.a).stakind1 = 5) THEN (stat.a).stanumbers1 + WHEN ((stat.a).stakind2 = 5) THEN (stat.a).stanumbers2 + WHEN ((stat.a).stakind3 = 5) THEN (stat.a).stanumbers3 + WHEN ((stat.a).stakind4 = 5) THEN (stat.a).stanumbers4 + WHEN ((stat.a).stakind5 = 5) THEN (stat.a).stanumbers5 + ELSE NULL::real[] + END AS elem_count_histogram + FROM (((((pg_statistic_ext s + JOIN pg_class c ON ((c.oid = s.stxrelid))) + LEFT JOIN pg_statistic_ext_data sd ON ((s.oid = sd.stxoid))) + LEFT JOIN pg_namespace cn ON ((cn.oid = c.relnamespace))) + LEFT JOIN pg_namespace sn ON ((sn.oid = s.stxnamespace))) + JOIN LATERAL ( SELECT unnest(pg_get_statisticsobjdef_expressions(s.oid)) AS expr, + unnest(sd.stxdexpr) AS a) stat ON ((stat.expr IS NOT NULL))); +pg_tables| SELECT n.nspname AS schemaname, + c.relname AS tablename, + pg_get_userbyid(c.relowner) AS tableowner, + t.spcname AS tablespace, + c.relhasindex AS hasindexes, + c.relhasrules AS hasrules, + c.relhastriggers AS hastriggers, + c.relrowsecurity AS rowsecurity + FROM ((pg_class c + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + LEFT JOIN pg_tablespace t ON ((t.oid = c.reltablespace))) + WHERE (c.relkind = ANY 
(ARRAY['r'::"char", 'p'::"char"])); +pg_timezone_abbrevs| SELECT abbrev, + utc_offset, + is_dst + FROM pg_timezone_abbrevs() pg_timezone_abbrevs(abbrev, utc_offset, is_dst); +pg_timezone_names| SELECT name, + abbrev, + utc_offset, + is_dst + FROM pg_timezone_names() pg_timezone_names(name, abbrev, utc_offset, is_dst); +pg_user| SELECT usename, + usesysid, + usecreatedb, + usesuper, + userepl, + usebypassrls, + '********'::text AS passwd, + valuntil, + useconfig + FROM pg_shadow; +pg_user_mappings| SELECT u.oid AS umid, + s.oid AS srvid, + s.srvname, + u.umuser, + CASE + WHEN (u.umuser = (0)::oid) THEN 'public'::name + ELSE a.rolname + END AS usename, + CASE + WHEN (((u.umuser <> (0)::oid) AND (a.rolname = CURRENT_USER) AND (pg_has_role(s.srvowner, 'USAGE'::text) OR has_server_privilege(s.oid, 'USAGE'::text))) OR ((u.umuser = (0)::oid) AND pg_has_role(s.srvowner, 'USAGE'::text)) OR ( SELECT pg_authid.rolsuper + FROM pg_authid + WHERE (pg_authid.rolname = CURRENT_USER))) THEN u.umoptions + ELSE NULL::text[] + END AS umoptions + FROM ((pg_user_mapping u + JOIN pg_foreign_server s ON ((u.umserver = s.oid))) + LEFT JOIN pg_authid a ON ((a.oid = u.umuser))); +pg_views| SELECT n.nspname AS schemaname, + c.relname AS viewname, + pg_get_userbyid(c.relowner) AS viewowner, + pg_get_viewdef(c.oid) AS definition + FROM (pg_class c + LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) + WHERE (c.relkind = 'v'::"char"); +SELECT tablename, rulename, definition FROM pg_rules +WHERE schemaname = 'pg_catalog' +ORDER BY tablename, rulename; +pg_settings|pg_settings_n|CREATE RULE pg_settings_n AS + ON UPDATE TO pg_catalog.pg_settings DO INSTEAD NOTHING; +pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS + ON UPDATE TO pg_catalog.pg_settings + WHERE (new.name = old.name) DO SELECT set_config(old.name, new.setting, false) AS set_config; +-- restore normal output mode +\a\t +-- +-- CREATE OR REPLACE RULE +-- +CREATE TABLE ruletest_tbl (a int, b int); +CREATE TABLE ruletest_tbl2 (a int, b int); +CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl + DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (10, 10); +INSERT INTO ruletest_tbl VALUES (99, 99); +CREATE OR REPLACE RULE myrule AS ON INSERT TO ruletest_tbl + DO INSTEAD INSERT INTO ruletest_tbl2 VALUES (1000, 1000); +INSERT INTO ruletest_tbl VALUES (99, 99); +SELECT * FROM ruletest_tbl2; + a | b +------+------ + 10 | 10 + 1000 | 1000 +(2 rows) + +-- Check that rewrite rules splitting one INSERT into multiple +-- conditional statements does not disable FK checking. 
+create table rule_and_refint_t1 ( + id1a integer, + id1b integer, + primary key (id1a, id1b) +); +create table rule_and_refint_t2 ( + id2a integer, + id2c integer, + primary key (id2a, id2c) +); +create table rule_and_refint_t3 ( + id3a integer, + id3b integer, + id3c integer, + data text, + primary key (id3a, id3b, id3c), + foreign key (id3a, id3b) references rule_and_refint_t1 (id1a, id1b), + foreign key (id3a, id3c) references rule_and_refint_t2 (id2a, id2c) +); +insert into rule_and_refint_t1 values (1, 11); +insert into rule_and_refint_t1 values (1, 12); +insert into rule_and_refint_t1 values (2, 21); +insert into rule_and_refint_t1 values (2, 22); +insert into rule_and_refint_t2 values (1, 11); +insert into rule_and_refint_t2 values (1, 12); +insert into rule_and_refint_t2 values (2, 21); +insert into rule_and_refint_t2 values (2, 22); +insert into rule_and_refint_t3 values (1, 11, 11, 'row1'); +insert into rule_and_refint_t3 values (1, 11, 12, 'row2'); +insert into rule_and_refint_t3 values (1, 12, 11, 'row3'); +insert into rule_and_refint_t3 values (1, 12, 12, 'row4'); +insert into rule_and_refint_t3 values (1, 11, 13, 'row5'); +ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" +DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". +insert into rule_and_refint_t3 values (1, 13, 11, 'row6'); +ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" +DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". +-- Ordinary table +insert into rule_and_refint_t3 values (1, 13, 11, 'row6') + on conflict do nothing; +ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" +DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". +-- rule not fired, so fk violation +insert into rule_and_refint_t3 values (1, 13, 11, 'row6') + on conflict (id3a, id3b, id3c) do update + set id3b = excluded.id3b; +ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" +DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". +-- rule fired, so unsupported +insert into shoelace values ('sl9', 0, 'pink', 35.0, 'inch', 0.0) + on conflict (sl_name) do update + set sl_avail = excluded.sl_avail; +ERROR: INSERT with ON CONFLICT clause cannot be used with table that has INSERT or UPDATE rules +create rule rule_and_refint_t3_ins as on insert to rule_and_refint_t3 + where (exists (select 1 from rule_and_refint_t3 + where (((rule_and_refint_t3.id3a = new.id3a) + and (rule_and_refint_t3.id3b = new.id3b)) + and (rule_and_refint_t3.id3c = new.id3c)))) + do instead update rule_and_refint_t3 set data = new.data + where (((rule_and_refint_t3.id3a = new.id3a) + and (rule_and_refint_t3.id3b = new.id3b)) + and (rule_and_refint_t3.id3c = new.id3c)); +insert into rule_and_refint_t3 values (1, 11, 13, 'row7'); +ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3c_fkey" +DETAIL: Key (id3a, id3c)=(1, 13) is not present in table "rule_and_refint_t2". +insert into rule_and_refint_t3 values (1, 13, 11, 'row8'); +ERROR: insert or update on table "rule_and_refint_t3" violates foreign key constraint "rule_and_refint_t3_id3a_id3b_fkey" +DETAIL: Key (id3a, id3b)=(1, 13) is not present in table "rule_and_refint_t1". 
+-- +-- disallow dropping a view's rule (bug #5072) +-- +create view rules_fooview as select 'rules_foo'::text; +drop rule "_RETURN" on rules_fooview; +ERROR: cannot drop rule _RETURN on view rules_fooview because view rules_fooview requires it +HINT: You can drop view rules_fooview instead. +drop view rules_fooview; +-- +-- We used to allow converting a table to a view by creating a "_RETURN" +-- rule for it, but no more. +-- +create table rules_fooview (x int, y text); +create rule "_RETURN" as on select to rules_fooview do instead + select 1 as x, 'aaa'::text as y; +ERROR: relation "rules_fooview" cannot have ON SELECT rules +DETAIL: This operation is not supported for tables. +drop table rules_fooview; +-- likewise, converting a partitioned table or partition to view is not allowed +create table rules_fooview (x int, y text) partition by list (x); +create rule "_RETURN" as on select to rules_fooview do instead + select 1 as x, 'aaa'::text as y; +ERROR: relation "rules_fooview" cannot have ON SELECT rules +DETAIL: This operation is not supported for partitioned tables. +create table rules_fooview_part partition of rules_fooview for values in (1); +create rule "_RETURN" as on select to rules_fooview_part do instead + select 1 as x, 'aaa'::text as y; +ERROR: relation "rules_fooview_part" cannot have ON SELECT rules +DETAIL: This operation is not supported for tables. +drop table rules_fooview; +-- +-- check for planner problems with complex inherited UPDATES +-- +create table id (id serial primary key, name text); +-- currently, must respecify PKEY for each inherited subtable +create table test_1 (id integer primary key) inherits (id); +NOTICE: merging column "id" with inherited definition +create table test_2 (id integer primary key) inherits (id); +NOTICE: merging column "id" with inherited definition +create table test_3 (id integer primary key) inherits (id); +NOTICE: merging column "id" with inherited definition +insert into test_1 (name) values ('Test 1'); +insert into test_1 (name) values ('Test 2'); +insert into test_2 (name) values ('Test 3'); +insert into test_2 (name) values ('Test 4'); +insert into test_3 (name) values ('Test 5'); +insert into test_3 (name) values ('Test 6'); +create view id_ordered as select * from id order by id; +create rule update_id_ordered as on update to id_ordered + do instead update id set name = new.name where id = old.id; +select * from id_ordered; + id | name +----+-------- + 1 | Test 1 + 2 | Test 2 + 3 | Test 3 + 4 | Test 4 + 5 | Test 5 + 6 | Test 6 +(6 rows) + +update id_ordered set name = 'update 2' where id = 2; +update id_ordered set name = 'update 4' where id = 4; +update id_ordered set name = 'update 5' where id = 5; +select * from id_ordered; + id | name +----+---------- + 1 | Test 1 + 2 | update 2 + 3 | Test 3 + 4 | update 4 + 5 | update 5 + 6 | Test 6 +(6 rows) + +drop table id cascade; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table test_1 +drop cascades to table test_2 +drop cascades to table test_3 +drop cascades to view id_ordered +-- +-- check corner case where an entirely-dummy subplan is created by +-- constraint exclusion +-- +create temp table t1 (a integer primary key); +create temp table t1_1 (check (a >= 0 and a < 10)) inherits (t1); +create temp table t1_2 (check (a >= 10 and a < 20)) inherits (t1); +create rule t1_ins_1 as on insert to t1 + where new.a >= 0 and new.a < 10 + do instead + insert into t1_1 values (new.a); +create rule t1_ins_2 as on insert to t1 + where new.a >= 10 and new.a < 20 + do 
instead + insert into t1_2 values (new.a); +create rule t1_upd_1 as on update to t1 + where old.a >= 0 and old.a < 10 + do instead + update t1_1 set a = new.a where a = old.a; +create rule t1_upd_2 as on update to t1 + where old.a >= 10 and old.a < 20 + do instead + update t1_2 set a = new.a where a = old.a; +set constraint_exclusion = on; +insert into t1 select * from generate_series(5,19,1) g; +update t1 set a = 4 where a = 5; +select * from only t1; + a +--- +(0 rows) + +select * from only t1_1; + a +--- + 6 + 7 + 8 + 9 + 4 +(5 rows) + +select * from only t1_2; + a +---- + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 +(10 rows) + +reset constraint_exclusion; +-- test FOR UPDATE in rules +create table rules_base(f1 int, f2 int); +insert into rules_base values(1,2), (11,12); +create rule r1 as on update to rules_base do instead + select * from rules_base where f1 = 1 for update; +update rules_base set f2 = f2 + 1; + f1 | f2 +----+---- + 1 | 2 +(1 row) + +create or replace rule r1 as on update to rules_base do instead + select * from rules_base where f1 = 11 for update of rules_base; +update rules_base set f2 = f2 + 1; + f1 | f2 +----+---- + 11 | 12 +(1 row) + +create or replace rule r1 as on update to rules_base do instead + select * from rules_base where f1 = 11 for update of old; -- error +ERROR: relation "old" in FOR UPDATE clause not found in FROM clause +LINE 2: select * from rules_base where f1 = 11 for update of old; + ^ +drop table rules_base; +-- test various flavors of pg_get_viewdef() +select pg_get_viewdef('shoe'::regclass) as unpretty; + unpretty +------------------------------------------------ + SELECT sh.shoename, + + sh.sh_avail, + + sh.slcolor, + + sh.slminlen, + + (sh.slminlen * un.un_fact) AS slminlen_cm,+ + sh.slmaxlen, + + (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+ + sh.slunit + + FROM shoe_data sh, + + unit un + + WHERE (sh.slunit = un.un_name); +(1 row) + +select pg_get_viewdef('shoe'::regclass,true) as pretty; + pretty +---------------------------------------------- + SELECT sh.shoename, + + sh.sh_avail, + + sh.slcolor, + + sh.slminlen, + + sh.slminlen * un.un_fact AS slminlen_cm,+ + sh.slmaxlen, + + sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ + sh.slunit + + FROM shoe_data sh, + + unit un + + WHERE sh.slunit = un.un_name; +(1 row) + +select pg_get_viewdef('shoe'::regclass,0) as prettier; + prettier +---------------------------------------------- + SELECT sh.shoename, + + sh.sh_avail, + + sh.slcolor, + + sh.slminlen, + + sh.slminlen * un.un_fact AS slminlen_cm,+ + sh.slmaxlen, + + sh.slmaxlen * un.un_fact AS slmaxlen_cm,+ + sh.slunit + + FROM shoe_data sh, + + unit un + + WHERE sh.slunit = un.un_name; +(1 row) + +-- +-- check multi-row VALUES in rules +-- +create table rules_src(f1 int, f2 int default 0); +create table rules_log(f1 int, f2 int, tag text, id serial); +insert into rules_src values(1,2), (11,12); +create rule r1 as on update to rules_src do also + insert into rules_log values(old.*, 'old', default), (new.*, 'new', default); +update rules_src set f2 = f2 + 1; +update rules_src set f2 = f2 * 10; +select * from rules_src; + f1 | f2 +----+----- + 1 | 30 + 11 | 130 +(2 rows) + +select * from rules_log; + f1 | f2 | tag | id +----+-----+-----+---- + 1 | 2 | old | 1 + 1 | 3 | new | 2 + 11 | 12 | old | 3 + 11 | 13 | new | 4 + 1 | 3 | old | 5 + 1 | 30 | new | 6 + 11 | 13 | old | 7 + 11 | 130 | new | 8 +(8 rows) + +create rule r2 as on update to rules_src do also + values(old.*, 'old'), (new.*, 'new'); +update rules_src set f2 = f2 / 10; + column1 | column2 | 
column3 +---------+---------+--------- + 1 | 30 | old + 1 | 3 | new + 11 | 130 | old + 11 | 13 | new +(4 rows) + +create rule r3 as on insert to rules_src do also + insert into rules_log values(null, null, '-', default), (new.*, 'new', default); +insert into rules_src values(22,23), (33,default); +select * from rules_src; + f1 | f2 +----+---- + 1 | 3 + 11 | 13 + 22 | 23 + 33 | 0 +(4 rows) + +select * from rules_log; + f1 | f2 | tag | id +----+-----+-----+---- + 1 | 2 | old | 1 + 1 | 3 | new | 2 + 11 | 12 | old | 3 + 11 | 13 | new | 4 + 1 | 3 | old | 5 + 1 | 30 | new | 6 + 11 | 13 | old | 7 + 11 | 130 | new | 8 + 1 | 30 | old | 9 + 1 | 3 | new | 10 + 11 | 130 | old | 11 + 11 | 13 | new | 12 + | | - | 13 + 22 | 23 | new | 14 + | | - | 15 + 33 | 0 | new | 16 +(16 rows) + +create rule r4 as on delete to rules_src do notify rules_src_deletion; +-- +-- Ensure an aliased target relation for insert is correctly deparsed. +-- +create rule r5 as on insert to rules_src do instead insert into rules_log AS trgt SELECT NEW.* RETURNING trgt.f1, trgt.f2; +create rule r6 as on update to rules_src do instead UPDATE rules_log AS trgt SET tag = 'updated' WHERE trgt.f1 = new.f1; +-- +-- Check deparse disambiguation of INSERT/UPDATE/DELETE targets. +-- +create rule r7 as on delete to rules_src do instead + with wins as (insert into int4_tbl as trgt values (0) returning *), + wupd as (update int4_tbl trgt set f1 = f1+1 returning *), + wdel as (delete from int4_tbl trgt where f1 = 0 returning *) + insert into rules_log AS trgt select old.* from wins, wupd, wdel + returning trgt.f1, trgt.f2; +-- check display of all rules added above +\d+ rules_src + Table "public.rules_src" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + f1 | integer | | | | plain | | + f2 | integer | | | 0 | plain | | +Rules: + r1 AS + ON UPDATE TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (old.f1,old.f2,'old'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) + r2 AS + ON UPDATE TO rules_src DO VALUES (old.f1,old.f2,'old'::text), (new.f1,new.f2,'new'::text) + r3 AS + ON INSERT TO rules_src DO INSERT INTO rules_log (f1, f2, tag, id) VALUES (NULL::integer,NULL::integer,'-'::text,DEFAULT), (new.f1,new.f2,'new'::text,DEFAULT) + r4 AS + ON DELETE TO rules_src DO + NOTIFY rules_src_deletion + r5 AS + ON INSERT TO rules_src DO INSTEAD INSERT INTO rules_log AS trgt (f1, f2) SELECT new.f1, + new.f2 + RETURNING trgt.f1, + trgt.f2 + r6 AS + ON UPDATE TO rules_src DO INSTEAD UPDATE rules_log trgt SET tag = 'updated'::text + WHERE trgt.f1 = new.f1 + r7 AS + ON DELETE TO rules_src DO INSTEAD WITH wins AS ( + INSERT INTO int4_tbl AS trgt_1 (f1) + VALUES (0) + RETURNING trgt_1.f1 + ), wupd AS ( + UPDATE int4_tbl trgt_1 SET f1 = trgt_1.f1 + 1 + RETURNING trgt_1.f1 + ), wdel AS ( + DELETE FROM int4_tbl trgt_1 + WHERE trgt_1.f1 = 0 + RETURNING trgt_1.f1 + ) + INSERT INTO rules_log AS trgt (f1, f2) SELECT old.f1, + old.f2 + FROM wins, + wupd, + wdel + RETURNING trgt.f1, + trgt.f2 + +-- +-- Also check multiassignment deparsing. 
+-- +create table rule_t1(f1 int, f2 int); +create table rule_dest(f1 int, f2 int[], tag text); +create rule rr as on update to rule_t1 do instead UPDATE rule_dest trgt + SET (f2[1], f1, tag) = (SELECT new.f2, new.f1, 'updated'::varchar) + WHERE trgt.f1 = new.f1 RETURNING new.*; +\d+ rule_t1 + Table "public.rule_t1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + f1 | integer | | | | plain | | + f2 | integer | | | | plain | | +Rules: + rr AS + ON UPDATE TO rule_t1 DO INSTEAD UPDATE rule_dest trgt SET (f2[1], f1, tag) = ( SELECT new.f2, + new.f1, + 'updated'::character varying AS "varchar") + WHERE trgt.f1 = new.f1 + RETURNING new.f1, + new.f2 + +drop table rule_t1, rule_dest; +-- +-- Test implicit LATERAL references to old/new in rules +-- +CREATE TABLE rule_t1(a int, b text DEFAULT 'xxx', c int); +CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; +CREATE RULE v1_ins AS ON INSERT TO rule_v1 + DO ALSO INSERT INTO rule_t1 + SELECT * FROM (SELECT a + 10 FROM rule_t1 WHERE a = NEW.a) tt; +CREATE RULE v1_upd AS ON UPDATE TO rule_v1 + DO ALSO UPDATE rule_t1 t + SET c = tt.a * 10 + FROM (SELECT a FROM rule_t1 WHERE a = OLD.a) tt WHERE t.a = tt.a; +INSERT INTO rule_v1 VALUES (1, 'a'), (2, 'b'); +UPDATE rule_v1 SET b = upper(b); +SELECT * FROM rule_t1; + a | b | c +----+-----+----- + 1 | A | 10 + 2 | B | 20 + 11 | XXX | 110 + 12 | XXX | 120 +(4 rows) + +DROP TABLE rule_t1 CASCADE; +NOTICE: drop cascades to view rule_v1 +-- +-- check alter rename rule +-- +CREATE TABLE rule_t1 (a INT); +CREATE VIEW rule_v1 AS SELECT * FROM rule_t1; +CREATE RULE InsertRule AS + ON INSERT TO rule_v1 + DO INSTEAD + INSERT INTO rule_t1 VALUES(new.a); +ALTER RULE InsertRule ON rule_v1 RENAME to NewInsertRule; +INSERT INTO rule_v1 VALUES(1); +SELECT * FROM rule_v1; + a +--- + 1 +(1 row) + +\d+ rule_v1 + View "public.rule_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | +View definition: + SELECT a + FROM rule_t1; +Rules: + newinsertrule AS + ON INSERT TO rule_v1 DO INSTEAD INSERT INTO rule_t1 (a) + VALUES (new.a) + +-- +-- error conditions for alter rename rule +-- +ALTER RULE InsertRule ON rule_v1 RENAME TO NewInsertRule; -- doesn't exist +ERROR: rule "insertrule" for relation "rule_v1" does not exist +ALTER RULE NewInsertRule ON rule_v1 RENAME TO "_RETURN"; -- already exists +ERROR: rule "_RETURN" for relation "rule_v1" already exists +ALTER RULE "_RETURN" ON rule_v1 RENAME TO abc; -- ON SELECT rule cannot be renamed +ERROR: renaming an ON SELECT rule is not allowed +DROP VIEW rule_v1; +DROP TABLE rule_t1; +-- +-- check display of VALUES in view definitions +-- +create view rule_v1 as values(1,2); +\d+ rule_v1 + View "public.rule_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +---------+---------+-----------+----------+---------+---------+------------- + column1 | integer | | | | plain | + column2 | integer | | | | plain | +View definition: + VALUES (1,2); + +alter table rule_v1 rename column column2 to q2; +\d+ rule_v1 + View "public.rule_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +---------+---------+-----------+----------+---------+---------+------------- + column1 | integer | | | | plain | + q2 | integer | | | | plain | +View definition: + SELECT column1, + column2 AS q2 + FROM (VALUES (1,2)) 
"*VALUES*"; + +drop view rule_v1; +create view rule_v1(x) as values(1,2); +\d+ rule_v1 + View "public.rule_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +---------+---------+-----------+----------+---------+---------+------------- + x | integer | | | | plain | + column2 | integer | | | | plain | +View definition: + SELECT column1 AS x, + column2 + FROM (VALUES (1,2)) "*VALUES*"; + +drop view rule_v1; +create view rule_v1(x) as select * from (values(1,2)) v; +\d+ rule_v1 + View "public.rule_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +---------+---------+-----------+----------+---------+---------+------------- + x | integer | | | | plain | + column2 | integer | | | | plain | +View definition: + SELECT column1 AS x, + column2 + FROM ( VALUES (1,2)) v; + +drop view rule_v1; +create view rule_v1(x) as select * from (values(1,2)) v(q,w); +\d+ rule_v1 + View "public.rule_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + x | integer | | | | plain | + w | integer | | | | plain | +View definition: + SELECT q AS x, + w + FROM ( VALUES (1,2)) v(q, w); + +drop view rule_v1; +-- +-- Check DO INSTEAD rules with ON CONFLICT +-- +CREATE TABLE hats ( + hat_name char(10) primary key, + hat_color char(10) -- hat color +); +CREATE TABLE hat_data ( + hat_name char(10), + hat_color char(10) -- hat color +); +create unique index hat_data_unique_idx + on hat_data (hat_name COLLATE "C" bpchar_pattern_ops); +-- DO NOTHING with ON CONFLICT +CREATE RULE hat_nosert AS ON INSERT TO hats + DO INSTEAD + INSERT INTO hat_data VALUES ( + NEW.hat_name, + NEW.hat_color) + ON CONFLICT (hat_name COLLATE "C" bpchar_pattern_ops) WHERE hat_color = 'green' + DO NOTHING + RETURNING *; +SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; + definition +--------------------------------------------------------------------------------------------- + CREATE RULE hat_nosert AS + + ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ + WHERE (hat_color = 'green'::bpchar) DO NOTHING + + RETURNING hat_data.hat_name, + + hat_data.hat_color; +(1 row) + +-- Works (projects row) +INSERT INTO hats VALUES ('h7', 'black') RETURNING *; + hat_name | hat_color +------------+------------ + h7 | black +(1 row) + +-- Works (does nothing) +INSERT INTO hats VALUES ('h7', 'black') RETURNING *; + hat_name | hat_color +----------+----------- +(0 rows) + +SELECT tablename, rulename, definition FROM pg_rules + WHERE tablename = 'hats'; + tablename | rulename | definition +-----------+------------+--------------------------------------------------------------------------------------------- + hats | hat_nosert | CREATE RULE hat_nosert AS + + | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name COLLATE "C" bpchar_pattern_ops)+ + | | WHERE (hat_color = 'green'::bpchar) DO NOTHING + + | | RETURNING hat_data.hat_name, + + | | hat_data.hat_color; +(1 row) + +DROP RULE hat_nosert ON hats; +-- DO NOTHING without ON CONFLICT +CREATE RULE hat_nosert_all AS ON INSERT TO hats + DO INSTEAD + INSERT INTO hat_data VALUES ( + NEW.hat_name, + NEW.hat_color) + ON CONFLICT + DO NOTHING + RETURNING *; +SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; + 
definition +------------------------------------------------------------------------------------- + CREATE RULE hat_nosert_all AS + + ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color)+ + VALUES (new.hat_name, new.hat_color) ON CONFLICT DO NOTHING + + RETURNING hat_data.hat_name, + + hat_data.hat_color; +(1 row) + +DROP RULE hat_nosert_all ON hats; +-- Works (does nothing) +INSERT INTO hats VALUES ('h7', 'black') RETURNING *; + hat_name | hat_color +------------+------------ + h7 | black +(1 row) + +-- DO UPDATE with a WHERE clause +CREATE RULE hat_upsert AS ON INSERT TO hats + DO INSTEAD + INSERT INTO hat_data VALUES ( + NEW.hat_name, + NEW.hat_color) + ON CONFLICT (hat_name) + DO UPDATE + SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color + WHERE excluded.hat_color <> 'forbidden' AND hat_data.* != excluded.* + RETURNING *; +SELECT definition FROM pg_rules WHERE tablename = 'hats' ORDER BY rulename; + definition +----------------------------------------------------------------------------------------------------------------------------------------- + CREATE RULE hat_upsert AS + + ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ + WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + + RETURNING hat_data.hat_name, + + hat_data.hat_color; +(1 row) + +-- Works (does upsert) +INSERT INTO hats VALUES ('h8', 'black') RETURNING *; + hat_name | hat_color +------------+------------ + h8 | black +(1 row) + +SELECT * FROM hat_data WHERE hat_name = 'h8'; + hat_name | hat_color +------------+------------ + h8 | black +(1 row) + +INSERT INTO hats VALUES ('h8', 'white') RETURNING *; + hat_name | hat_color +------------+------------ + h8 | white +(1 row) + +SELECT * FROM hat_data WHERE hat_name = 'h8'; + hat_name | hat_color +------------+------------ + h8 | white +(1 row) + +INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; + hat_name | hat_color +----------+----------- +(0 rows) + +SELECT * FROM hat_data WHERE hat_name = 'h8'; + hat_name | hat_color +------------+------------ + h8 | white +(1 row) + +SELECT tablename, rulename, definition FROM pg_rules + WHERE tablename = 'hats'; + tablename | rulename | definition +-----------+------------+----------------------------------------------------------------------------------------------------------------------------------------- + hats | hat_upsert | CREATE RULE hat_upsert AS + + | | ON INSERT TO public.hats DO INSTEAD INSERT INTO hat_data (hat_name, hat_color) + + | | VALUES (new.hat_name, new.hat_color) ON CONFLICT(hat_name) DO UPDATE SET hat_name = hat_data.hat_name, hat_color = excluded.hat_color+ + | | WHERE ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + + | | RETURNING hat_data.hat_name, + + | | hat_data.hat_color; +(1 row) + +-- ensure explain works for on insert conflict rules +explain (costs off) INSERT INTO hats VALUES ('h8', 'forbidden') RETURNING *; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Insert on hat_data + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: hat_data_unique_idx + Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + -> Result +(5 rows) + +-- ensure upserting into a rule, with a CTE (different offsets!) 
works +WITH data(hat_name, hat_color) AS MATERIALIZED ( + VALUES ('h8', 'green'), + ('h9', 'blue'), + ('h7', 'forbidden') +) +INSERT INTO hats + SELECT * FROM data +RETURNING *; + hat_name | hat_color +------------+------------ + h8 | green + h9 | blue +(2 rows) + +EXPLAIN (costs off) +WITH data(hat_name, hat_color) AS MATERIALIZED ( + VALUES ('h8', 'green'), + ('h9', 'blue'), + ('h7', 'forbidden') +) +INSERT INTO hats + SELECT * FROM data +RETURNING *; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Insert on hat_data + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: hat_data_unique_idx + Conflict Filter: ((excluded.hat_color <> 'forbidden'::bpchar) AND (hat_data.* <> excluded.*)) + CTE data + -> Values Scan on "*VALUES*" + -> CTE Scan on data +(7 rows) + +SELECT * FROM hat_data WHERE hat_name IN ('h8', 'h9', 'h7') ORDER BY hat_name; + hat_name | hat_color +------------+------------ + h7 | black + h8 | green + h9 | blue +(3 rows) + +DROP RULE hat_upsert ON hats; +drop table hats; +drop table hat_data; +-- test for pg_get_functiondef properly regurgitating SET parameters +-- Note that the function is kept around to stress pg_dump. +CREATE FUNCTION func_with_set_params() RETURNS integer + AS 'select 1;' + LANGUAGE SQL + SET search_path TO PG_CATALOG + SET extra_float_digits TO 2 + SET work_mem TO '4MB' + SET datestyle to iso, mdy + SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' + IMMUTABLE STRICT; +SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); + pg_get_functiondef +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE OR REPLACE FUNCTION public.func_with_set_params() + + RETURNS integer + + LANGUAGE sql + + IMMUTABLE STRICT + + SET search_path TO 'pg_catalog' + + SET extra_float_digits TO '2' + + SET work_mem TO '4MB' + + SET "DateStyle" TO 'iso, mdy' + + SET local_preload_libraries TO 'Mixed/Case', 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'+ + AS $function$select 1;$function$ + + +(1 row) + +-- tests for pg_get_*def with invalid objects +SELECT pg_get_constraintdef(0); + pg_get_constraintdef +---------------------- + +(1 row) + +SELECT pg_get_functiondef(0); + pg_get_functiondef +-------------------- + +(1 row) + +SELECT pg_get_indexdef(0); + pg_get_indexdef +----------------- + +(1 row) + +SELECT pg_get_ruledef(0); + pg_get_ruledef +---------------- + +(1 row) + +SELECT pg_get_statisticsobjdef(0); + pg_get_statisticsobjdef +------------------------- + +(1 row) + +SELECT pg_get_triggerdef(0); + pg_get_triggerdef +------------------- + +(1 row) + +SELECT pg_get_viewdef(0); + pg_get_viewdef +---------------- + +(1 row) + +SELECT pg_get_function_arguments(0); + pg_get_function_arguments +--------------------------- + +(1 row) + +SELECT pg_get_function_identity_arguments(0); + pg_get_function_identity_arguments +------------------------------------ + +(1 row) + +SELECT pg_get_function_result(0); + pg_get_function_result +------------------------ + +(1 row) + +SELECT pg_get_function_arg_default(0, 0); + pg_get_function_arg_default +----------------------------- + +(1 row) + +SELECT pg_get_function_arg_default('pg_class'::regclass, 0); + pg_get_function_arg_default 
+----------------------------- + +(1 row) + +SELECT pg_get_partkeydef(0); + pg_get_partkeydef +------------------- + +(1 row) + +-- test rename for a rule defined on a partitioned table +CREATE TABLE rules_parted_table (a int) PARTITION BY LIST (a); +CREATE TABLE rules_parted_table_1 PARTITION OF rules_parted_table FOR VALUES IN (1); +CREATE RULE rules_parted_table_insert AS ON INSERT to rules_parted_table + DO INSTEAD INSERT INTO rules_parted_table_1 VALUES (NEW.*); +ALTER RULE rules_parted_table_insert ON rules_parted_table RENAME TO rules_parted_table_insert_redirect; +DROP TABLE rules_parted_table; +-- +-- test MERGE +-- +CREATE TABLE rule_merge1 (a int, b text); +CREATE TABLE rule_merge2 (a int, b text); +CREATE RULE rule1 AS ON INSERT TO rule_merge1 + DO INSTEAD INSERT INTO rule_merge2 VALUES (NEW.*); +CREATE RULE rule2 AS ON UPDATE TO rule_merge1 + DO INSTEAD UPDATE rule_merge2 SET a = NEW.a, b = NEW.b + WHERE a = OLD.a; +CREATE RULE rule3 AS ON DELETE TO rule_merge1 + DO INSTEAD DELETE FROM rule_merge2 WHERE a = OLD.a; +-- MERGE not supported for table with rules +MERGE INTO rule_merge1 t USING (SELECT 1 AS a) s + ON t.a = s.a + WHEN MATCHED AND t.a < 2 THEN + UPDATE SET b = b || ' updated by merge' + WHEN MATCHED AND t.a > 2 THEN + DELETE + WHEN NOT MATCHED THEN + INSERT VALUES (s.a, ''); +ERROR: cannot execute MERGE on relation "rule_merge1" +DETAIL: MERGE is not supported for relations with rules. +-- should be ok with the other table though +MERGE INTO rule_merge2 t USING (SELECT 1 AS a) s + ON t.a = s.a + WHEN MATCHED AND t.a < 2 THEN + UPDATE SET b = b || ' updated by merge' + WHEN MATCHED AND t.a > 2 THEN + DELETE + WHEN NOT MATCHED THEN + INSERT VALUES (s.a, ''); +-- test deparsing +CREATE TABLE sf_target(id int, data text, filling int[]); +CREATE FUNCTION merge_sf_test() + RETURNS void + LANGUAGE sql +BEGIN ATOMIC + MERGE INTO sf_target t + USING rule_merge1 s + ON (s.a = t.id) +WHEN MATCHED + AND (s.a + t.id) = 42 + THEN UPDATE SET data = repeat(t.data, s.a) || s.b, id = length(s.b) +WHEN NOT MATCHED + AND (s.b IS NOT NULL) + THEN INSERT (data, id) + VALUES (s.b, s.a) +WHEN MATCHED + AND length(s.b || t.data) > 10 + THEN UPDATE SET data = s.b +WHEN MATCHED + AND s.a > 200 + THEN UPDATE SET filling[s.a] = t.id +WHEN MATCHED + AND s.a > 100 + THEN DELETE +WHEN MATCHED + THEN DO NOTHING +WHEN NOT MATCHED + AND s.a > 200 + THEN INSERT DEFAULT VALUES +WHEN NOT MATCHED + AND s.a > 100 + THEN INSERT (id, data) OVERRIDING USER VALUE + VALUES (s.a, DEFAULT) +WHEN NOT MATCHED + AND s.a > 0 + THEN INSERT + VALUES (s.a, s.b, DEFAULT) +WHEN NOT MATCHED + THEN INSERT (filling[1], id) + VALUES (s.a, s.a); +END; +\sf merge_sf_test +CREATE OR REPLACE FUNCTION public.merge_sf_test() + RETURNS void + LANGUAGE sql +BEGIN ATOMIC + MERGE INTO sf_target t + USING rule_merge1 s + ON (s.a = t.id) + WHEN MATCHED + AND ((s.a + t.id) = 42) + THEN UPDATE SET data = (repeat(t.data, s.a) || s.b), id = length(s.b) + WHEN NOT MATCHED + AND (s.b IS NOT NULL) + THEN INSERT (data, id) + VALUES (s.b, s.a) + WHEN MATCHED + AND (length((s.b || t.data)) > 10) + THEN UPDATE SET data = s.b + WHEN MATCHED + AND (s.a > 200) + THEN UPDATE SET filling[s.a] = t.id + WHEN MATCHED + AND (s.a > 100) + THEN DELETE + WHEN MATCHED + THEN DO NOTHING + WHEN NOT MATCHED + AND (s.a > 200) + THEN INSERT DEFAULT VALUES + WHEN NOT MATCHED + AND (s.a > 100) + THEN INSERT (id, data) OVERRIDING USER VALUE + VALUES (s.a, DEFAULT) + WHEN NOT MATCHED + AND (s.a > 0) + THEN INSERT (id, data, filling) + VALUES (s.a, s.b, DEFAULT) + WHEN 
NOT MATCHED + THEN INSERT (filling[1], id) + VALUES (s.a, s.a); +END +DROP FUNCTION merge_sf_test; +DROP TABLE sf_target; +-- +-- Test enabling/disabling +-- +CREATE TABLE ruletest1 (a int); +CREATE TABLE ruletest2 (b int); +CREATE RULE rule1 AS ON INSERT TO ruletest1 + DO INSTEAD INSERT INTO ruletest2 VALUES (NEW.*); +INSERT INTO ruletest1 VALUES (1); +ALTER TABLE ruletest1 DISABLE RULE rule1; +INSERT INTO ruletest1 VALUES (2); +ALTER TABLE ruletest1 ENABLE RULE rule1; +SET session_replication_role = replica; +INSERT INTO ruletest1 VALUES (3); +ALTER TABLE ruletest1 ENABLE REPLICA RULE rule1; +INSERT INTO ruletest1 VALUES (4); +RESET session_replication_role; +INSERT INTO ruletest1 VALUES (5); +SELECT * FROM ruletest1; + a +--- + 2 + 3 + 5 +(3 rows) + +SELECT * FROM ruletest2; + b +--- + 1 + 4 +(2 rows) + +DROP TABLE ruletest1; +DROP TABLE ruletest2; +-- +-- Test non-SELECT rule on security invoker view. +-- Should use view owner's permissions. +-- +CREATE USER regress_rule_user1; +CREATE TABLE ruletest_t1 (x int); +CREATE TABLE ruletest_t2 (x int); +CREATE VIEW ruletest_v1 WITH (security_invoker=true) AS + SELECT * FROM ruletest_t1; +GRANT INSERT ON ruletest_v1 TO regress_rule_user1; +CREATE RULE rule1 AS ON INSERT TO ruletest_v1 + DO INSTEAD INSERT INTO ruletest_t2 VALUES (NEW.*); +SET SESSION AUTHORIZATION regress_rule_user1; +INSERT INTO ruletest_v1 VALUES (1); +RESET SESSION AUTHORIZATION; +-- Test that main query's relation's permissions are checked before +-- the rule action's relation's. +CREATE TABLE ruletest_t3 (x int); +CREATE RULE rule2 AS ON UPDATE TO ruletest_t1 + DO INSTEAD INSERT INTO ruletest_t2 VALUES (OLD.*); +REVOKE ALL ON ruletest_t2 FROM regress_rule_user1; +REVOKE ALL ON ruletest_t3 FROM regress_rule_user1; +ALTER TABLE ruletest_t1 OWNER TO regress_rule_user1; +SET SESSION AUTHORIZATION regress_rule_user1; +UPDATE ruletest_t1 t1 SET x = 0 FROM ruletest_t3 t3 WHERE t1.x = t3.x; +ERROR: permission denied for table ruletest_t3 +RESET SESSION AUTHORIZATION; +SELECT * FROM ruletest_t1; + x +--- +(0 rows) + +SELECT * FROM ruletest_t2; + x +--- + 1 +(1 row) + +DROP VIEW ruletest_v1; +DROP RULE rule2 ON ruletest_t1; +DROP TABLE ruletest_t3; +DROP TABLE ruletest_t2; +DROP TABLE ruletest_t1; +DROP USER regress_rule_user1; diff --git a/src/test/regress/expected/sanity_check.out b/src/test/regress/expected/sanity_check.out new file mode 100644 index 0000000..c5c675b --- /dev/null +++ b/src/test/regress/expected/sanity_check.out @@ -0,0 +1,56 @@ +VACUUM; +-- +-- Sanity check: every system catalog that has OIDs should have +-- a unique index on OID. This ensures that the OIDs will be unique, +-- even after the OID counter wraps around. +-- We exclude non-system tables from the check by looking at nspname. +-- +SELECT relname, nspname + FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = relnamespace JOIN pg_attribute a ON (attrelid = c.oid AND attname = 'oid') + WHERE relkind = 'r' and c.oid < 16384 + AND ((nspname ~ '^pg_') IS NOT FALSE) + AND NOT EXISTS (SELECT 1 FROM pg_index i WHERE indrelid = c.oid + AND indkey[0] = a.attnum AND indnatts = 1 + AND indisunique AND indimmediate); + relname | nspname +---------+--------- +(0 rows) + +-- check that relations without storage don't have relfilenode +SELECT relname, relkind + FROM pg_class + WHERE relkind IN ('v', 'c', 'f', 'p', 'I') + AND relfilenode <> 0; + relname | relkind +---------+--------- +(0 rows) + +-- +-- When ALIGNOF_DOUBLE==4 (e.g. 
AIX), the C ABI may impose 8-byte alignment on +-- some of the C types that correspond to TYPALIGN_DOUBLE SQL types. To ensure +-- catalog C struct layout matches catalog tuple layout, arrange for the tuple +-- offset of each fixed-width, attalign='d' catalog column to be divisible by 8 +-- unconditionally. Keep such columns before the first NameData column of the +-- catalog, since packagers can override NAMEDATALEN to an odd number. +-- +WITH check_columns AS ( + SELECT relname, attname, + array( + SELECT t.oid + FROM pg_type t JOIN pg_attribute pa ON t.oid = pa.atttypid + WHERE pa.attrelid = a.attrelid AND + pa.attnum > 0 AND pa.attnum < a.attnum + ORDER BY pa.attnum) AS coltypes + FROM pg_attribute a JOIN pg_class c ON c.oid = attrelid + JOIN pg_namespace n ON c.relnamespace = n.oid + WHERE attalign = 'd' AND relkind = 'r' AND + attnotnull AND attlen <> -1 AND n.nspname = 'pg_catalog' +) +SELECT relname, attname, coltypes, get_columns_length(coltypes) + FROM check_columns + WHERE get_columns_length(coltypes) % 8 != 0 OR + 'name'::regtype::oid = ANY(coltypes); + relname | attname | coltypes | get_columns_length +---------+---------+----------+-------------------- +(0 rows) + diff --git a/src/test/regress/expected/security_label.out b/src/test/regress/expected/security_label.out new file mode 100644 index 0000000..a8e01a6 --- /dev/null +++ b/src/test/regress/expected/security_label.out @@ -0,0 +1,44 @@ +-- +-- Test for facilities of security label +-- +-- initial setups +SET client_min_messages TO 'warning'; +DROP ROLE IF EXISTS regress_seclabel_user1; +DROP ROLE IF EXISTS regress_seclabel_user2; +RESET client_min_messages; +CREATE USER regress_seclabel_user1 WITH CREATEROLE; +CREATE USER regress_seclabel_user2; +CREATE TABLE seclabel_tbl1 (a int, b text); +CREATE TABLE seclabel_tbl2 (x int, y text); +CREATE VIEW seclabel_view1 AS SELECT * FROM seclabel_tbl2; +CREATE FUNCTION seclabel_four() RETURNS integer AS $$SELECT 4$$ language sql; +CREATE DOMAIN seclabel_domain AS text; +ALTER TABLE seclabel_tbl1 OWNER TO regress_seclabel_user1; +ALTER TABLE seclabel_tbl2 OWNER TO regress_seclabel_user2; +-- +-- Test of SECURITY LABEL statement without a plugin +-- +SECURITY LABEL ON TABLE seclabel_tbl1 IS 'classified'; -- fail +ERROR: no security label providers have been loaded +SECURITY LABEL FOR 'dummy' ON TABLE seclabel_tbl1 IS 'classified'; -- fail +ERROR: security label provider "dummy" is not loaded +SECURITY LABEL ON TABLE seclabel_tbl1 IS '...invalid label...'; -- fail +ERROR: no security label providers have been loaded +SECURITY LABEL ON TABLE seclabel_tbl3 IS 'unclassified'; -- fail +ERROR: no security label providers have been loaded +SECURITY LABEL ON ROLE regress_seclabel_user1 IS 'classified'; -- fail +ERROR: no security label providers have been loaded +SECURITY LABEL FOR 'dummy' ON ROLE regress_seclabel_user1 IS 'classified'; -- fail +ERROR: security label provider "dummy" is not loaded +SECURITY LABEL ON ROLE regress_seclabel_user1 IS '...invalid label...'; -- fail +ERROR: no security label providers have been loaded +SECURITY LABEL ON ROLE regress_seclabel_user3 IS 'unclassified'; -- fail +ERROR: no security label providers have been loaded +-- clean up objects +DROP FUNCTION seclabel_four(); +DROP DOMAIN seclabel_domain; +DROP VIEW seclabel_view1; +DROP TABLE seclabel_tbl1; +DROP TABLE seclabel_tbl2; +DROP USER regress_seclabel_user1; +DROP USER regress_seclabel_user2; diff --git a/src/test/regress/expected/select.out b/src/test/regress/expected/select.out new file mode 100644 
index 0000000..33a6dce --- /dev/null +++ b/src/test/regress/expected/select.out @@ -0,0 +1,970 @@ +-- +-- SELECT +-- +-- btree index +-- awk '{if($1<10){print;}else{next;}}' onek.data | sort +0n -1 +-- +SELECT * FROM onek + WHERE onek.unique1 < 10 + ORDER BY onek.unique1; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 0 | 998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | AAAAAA | KMBAAA | OOOOxx + 1 | 214 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | GIAAAA | OOOOxx + 2 | 326 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | OMAAAA | OOOOxx + 3 | 431 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 6 | 7 | DAAAAA | PQAAAA | VVVVxx + 4 | 833 | 0 | 0 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 8 | 9 | EAAAAA | BGBAAA | HHHHxx + 5 | 541 | 1 | 1 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 10 | 11 | FAAAAA | VUAAAA | HHHHxx + 6 | 978 | 0 | 2 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 12 | 13 | GAAAAA | QLBAAA | OOOOxx + 7 | 647 | 1 | 3 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 14 | 15 | HAAAAA | XYAAAA | VVVVxx + 8 | 653 | 0 | 0 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 16 | 17 | IAAAAA | DZAAAA | HHHHxx + 9 | 49 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 18 | 19 | JAAAAA | XBAAAA | HHHHxx +(10 rows) + +-- +-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1 +-- +SELECT onek.unique1, onek.stringu1 FROM onek + WHERE onek.unique1 < 20 + ORDER BY unique1 using >; + unique1 | stringu1 +---------+---------- + 19 | TAAAAA + 18 | SAAAAA + 17 | RAAAAA + 16 | QAAAAA + 15 | PAAAAA + 14 | OAAAAA + 13 | NAAAAA + 12 | MAAAAA + 11 | LAAAAA + 10 | KAAAAA + 9 | JAAAAA + 8 | IAAAAA + 7 | HAAAAA + 6 | GAAAAA + 5 | FAAAAA + 4 | EAAAAA + 3 | DAAAAA + 2 | CAAAAA + 1 | BAAAAA + 0 | AAAAAA +(20 rows) + +-- +-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2 +-- +SELECT onek.unique1, onek.stringu1 FROM onek + WHERE onek.unique1 > 980 + ORDER BY stringu1 using <; + unique1 | stringu1 +---------+---------- + 988 | AMAAAA + 989 | BMAAAA + 990 | CMAAAA + 991 | DMAAAA + 992 | EMAAAA + 993 | FMAAAA + 994 | GMAAAA + 995 | HMAAAA + 996 | IMAAAA + 997 | JMAAAA + 998 | KMAAAA + 999 | LMAAAA + 981 | TLAAAA + 982 | ULAAAA + 983 | VLAAAA + 984 | WLAAAA + 985 | XLAAAA + 986 | YLAAAA + 987 | ZLAAAA +(19 rows) + +-- +-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data | +-- sort +1d -2 +0nr -1 +-- +SELECT onek.unique1, onek.string4 FROM onek + WHERE onek.unique1 > 980 + ORDER BY string4 using <, unique1 using >; + unique1 | string4 +---------+--------- + 999 | AAAAxx + 995 | AAAAxx + 983 | AAAAxx + 982 | AAAAxx + 981 | AAAAxx + 998 | HHHHxx + 997 | HHHHxx + 993 | HHHHxx + 990 | HHHHxx + 986 | HHHHxx + 996 | OOOOxx + 991 | OOOOxx + 988 | OOOOxx + 987 | OOOOxx + 985 | OOOOxx + 994 | VVVVxx + 992 | VVVVxx + 989 | VVVVxx + 984 | VVVVxx +(19 rows) + +-- +-- awk '{if($1>980){print $1,$16;}else{next;}}' onek.data | +-- sort +1dr -2 +0n -1 +-- +SELECT onek.unique1, onek.string4 FROM onek + WHERE onek.unique1 > 980 + ORDER BY string4 using >, unique1 using <; + unique1 | string4 +---------+--------- + 984 | VVVVxx + 989 | VVVVxx + 992 | VVVVxx + 994 | VVVVxx + 985 | OOOOxx + 987 | OOOOxx + 988 | OOOOxx + 991 | OOOOxx + 996 | OOOOxx + 986 | HHHHxx + 990 | HHHHxx + 993 | HHHHxx + 997 | HHHHxx + 998 | HHHHxx + 981 | AAAAxx + 982 | AAAAxx + 983 | AAAAxx + 995 | AAAAxx + 999 | AAAAxx +(19 rows) + +-- +-- awk 
'{if($1<20){print $1,$16;}else{next;}}' onek.data | +-- sort +0nr -1 +1d -2 +-- +SELECT onek.unique1, onek.string4 FROM onek + WHERE onek.unique1 < 20 + ORDER BY unique1 using >, string4 using <; + unique1 | string4 +---------+--------- + 19 | OOOOxx + 18 | VVVVxx + 17 | HHHHxx + 16 | OOOOxx + 15 | VVVVxx + 14 | AAAAxx + 13 | OOOOxx + 12 | AAAAxx + 11 | OOOOxx + 10 | AAAAxx + 9 | HHHHxx + 8 | HHHHxx + 7 | VVVVxx + 6 | OOOOxx + 5 | HHHHxx + 4 | HHHHxx + 3 | VVVVxx + 2 | OOOOxx + 1 | OOOOxx + 0 | OOOOxx +(20 rows) + +-- +-- awk '{if($1<20){print $1,$16;}else{next;}}' onek.data | +-- sort +0n -1 +1dr -2 +-- +SELECT onek.unique1, onek.string4 FROM onek + WHERE onek.unique1 < 20 + ORDER BY unique1 using <, string4 using >; + unique1 | string4 +---------+--------- + 0 | OOOOxx + 1 | OOOOxx + 2 | OOOOxx + 3 | VVVVxx + 4 | HHHHxx + 5 | HHHHxx + 6 | OOOOxx + 7 | VVVVxx + 8 | HHHHxx + 9 | HHHHxx + 10 | AAAAxx + 11 | OOOOxx + 12 | AAAAxx + 13 | OOOOxx + 14 | AAAAxx + 15 | VVVVxx + 16 | OOOOxx + 17 | HHHHxx + 18 | VVVVxx + 19 | OOOOxx +(20 rows) + +-- +-- test partial btree indexes +-- +-- As of 7.2, planner probably won't pick an indexscan without stats, +-- so ANALYZE first. Also, we want to prevent it from picking a bitmapscan +-- followed by sort, because that could hide index ordering problems. +-- +ANALYZE onek2; +SET enable_seqscan TO off; +SET enable_bitmapscan TO off; +SET enable_sort TO off; +-- +-- awk '{if($1<10){print $0;}else{next;}}' onek.data | sort +0n -1 +-- +SELECT onek2.* FROM onek2 WHERE onek2.unique1 < 10; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 0 | 998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | AAAAAA | KMBAAA | OOOOxx + 1 | 214 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | GIAAAA | OOOOxx + 2 | 326 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | OMAAAA | OOOOxx + 3 | 431 | 1 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 6 | 7 | DAAAAA | PQAAAA | VVVVxx + 4 | 833 | 0 | 0 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 8 | 9 | EAAAAA | BGBAAA | HHHHxx + 5 | 541 | 1 | 1 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 10 | 11 | FAAAAA | VUAAAA | HHHHxx + 6 | 978 | 0 | 2 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 12 | 13 | GAAAAA | QLBAAA | OOOOxx + 7 | 647 | 1 | 3 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 14 | 15 | HAAAAA | XYAAAA | VVVVxx + 8 | 653 | 0 | 0 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 16 | 17 | IAAAAA | DZAAAA | HHHHxx + 9 | 49 | 1 | 1 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 18 | 19 | JAAAAA | XBAAAA | HHHHxx +(10 rows) + +-- +-- awk '{if($1<20){print $1,$14;}else{next;}}' onek.data | sort +0nr -1 +-- +SELECT onek2.unique1, onek2.stringu1 FROM onek2 + WHERE onek2.unique1 < 20 + ORDER BY unique1 using >; + unique1 | stringu1 +---------+---------- + 19 | TAAAAA + 18 | SAAAAA + 17 | RAAAAA + 16 | QAAAAA + 15 | PAAAAA + 14 | OAAAAA + 13 | NAAAAA + 12 | MAAAAA + 11 | LAAAAA + 10 | KAAAAA + 9 | JAAAAA + 8 | IAAAAA + 7 | HAAAAA + 6 | GAAAAA + 5 | FAAAAA + 4 | EAAAAA + 3 | DAAAAA + 2 | CAAAAA + 1 | BAAAAA + 0 | AAAAAA +(20 rows) + +-- +-- awk '{if($1>980){print $1,$14;}else{next;}}' onek.data | sort +1d -2 +-- +SELECT onek2.unique1, onek2.stringu1 FROM onek2 + WHERE onek2.unique1 > 980; + unique1 | stringu1 +---------+---------- + 981 | TLAAAA + 982 | ULAAAA + 983 | VLAAAA + 984 | WLAAAA + 985 | XLAAAA + 986 | YLAAAA + 987 | ZLAAAA + 988 | AMAAAA + 989 | BMAAAA + 990 | CMAAAA + 991 | DMAAAA 
+ 992 | EMAAAA + 993 | FMAAAA + 994 | GMAAAA + 995 | HMAAAA + 996 | IMAAAA + 997 | JMAAAA + 998 | KMAAAA + 999 | LMAAAA +(19 rows) + +RESET enable_seqscan; +RESET enable_bitmapscan; +RESET enable_sort; +-- +-- awk '{print $1,$2;}' person.data | +-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data | +-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data | +-- awk 'BEGIN{FS=" ";}{if(NF!=2){print $4,$5;}else{print;}}' - stud_emp.data +-- +-- SELECT name, age FROM person*; ??? check if different +SELECT p.name, p.age FROM person* p; + name | age +---------+----- + mike | 40 + joe | 20 + sally | 34 + sandra | 19 + alex | 30 + sue | 50 + denise | 24 + sarah | 88 + teresa | 38 + nan | 28 + leah | 68 + wendy | 78 + melissa | 28 + joan | 18 + mary | 8 + jane | 58 + liza | 38 + jean | 28 + jenifer | 38 + juanita | 58 + susan | 78 + zena | 98 + martie | 88 + chris | 78 + pat | 18 + zola | 58 + louise | 98 + edna | 18 + bertha | 88 + sumi | 38 + koko | 88 + gina | 18 + rean | 48 + sharon | 78 + paula | 68 + julie | 68 + belinda | 38 + karen | 48 + carina | 58 + diane | 18 + esther | 98 + trudy | 88 + fanny | 8 + carmen | 78 + lita | 25 + pamela | 48 + sandy | 38 + trisha | 88 + uma | 78 + velma | 68 + sharon | 25 + sam | 30 + bill | 20 + fred | 28 + larry | 60 + jeff | 23 + cim | 30 + linda | 19 +(58 rows) + +-- +-- awk '{print $1,$2;}' person.data | +-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data | +-- awk '{if(NF!=2){print $3,$2;}else{print;}}' - student.data | +-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $4,$5;}else{print;}}' - stud_emp.data | +-- sort +1nr -2 +-- +SELECT p.name, p.age FROM person* p ORDER BY age using >, name; + name | age +---------+----- + esther | 98 + louise | 98 + zena | 98 + bertha | 88 + koko | 88 + martie | 88 + sarah | 88 + trisha | 88 + trudy | 88 + carmen | 78 + chris | 78 + sharon | 78 + susan | 78 + uma | 78 + wendy | 78 + julie | 68 + leah | 68 + paula | 68 + velma | 68 + larry | 60 + carina | 58 + jane | 58 + juanita | 58 + zola | 58 + sue | 50 + karen | 48 + pamela | 48 + rean | 48 + mike | 40 + belinda | 38 + jenifer | 38 + liza | 38 + sandy | 38 + sumi | 38 + teresa | 38 + sally | 34 + alex | 30 + cim | 30 + sam | 30 + fred | 28 + jean | 28 + melissa | 28 + nan | 28 + lita | 25 + sharon | 25 + denise | 24 + jeff | 23 + bill | 20 + joe | 20 + linda | 19 + sandra | 19 + diane | 18 + edna | 18 + gina | 18 + joan | 18 + pat | 18 + fanny | 8 + mary | 8 +(58 rows) + +-- +-- Test some cases involving whole-row Var referencing a subquery +-- +select foo from (select 1 offset 0) as foo; + foo +----- + (1) +(1 row) + +select foo from (select null offset 0) as foo; + foo +----- + () +(1 row) + +select foo from (select 'xyzzy',1,null offset 0) as foo; + foo +------------ + (xyzzy,1,) +(1 row) + +-- +-- Test VALUES lists +-- +select * from onek, (values(147, 'RFAAAA'), (931, 'VJAAAA')) as v (i, j) + WHERE onek.unique1 = v.i and onek.stringu1 = v.j; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 | i | j +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+-----+-------- + 147 | 0 | 1 | 3 | 7 | 7 | 7 | 47 | 147 | 147 | 147 | 14 | 15 | RFAAAA | AAAAAA | AAAAxx | 147 | RFAAAA + 931 | 1 | 1 | 3 | 1 | 11 | 1 | 31 | 131 | 431 | 931 | 2 | 3 | VJAAAA | BAAAAA | HHHHxx | 931 | VJAAAA +(2 rows) + +-- a more complex case +-- looks like we're coding lisp :-) +select * 
from onek, + (values ((select i from + (values(10000), (2), (389), (1000), (2000), ((select 10029))) as foo(i) + order by i asc limit 1))) bar (i) + where onek.unique1 = bar.i; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 | i +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+--- + 2 | 326 | 0 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 4 | 5 | CAAAAA | OMAAAA | OOOOxx | 2 +(1 row) + +-- try VALUES in a subquery +select * from onek + where (unique1,ten) in (values (1,1), (20,0), (99,9), (17,99)) + order by unique1; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 1 | 214 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | GIAAAA | OOOOxx + 20 | 306 | 0 | 0 | 0 | 0 | 0 | 20 | 20 | 20 | 20 | 0 | 1 | UAAAAA | ULAAAA | OOOOxx + 99 | 101 | 1 | 3 | 9 | 19 | 9 | 99 | 99 | 99 | 99 | 18 | 19 | VDAAAA | XDAAAA | HHHHxx +(3 rows) + +-- VALUES is also legal as a standalone query or a set-operation member +VALUES (1,2), (3,4+4), (7,77.7); + column1 | column2 +---------+--------- + 1 | 2 + 3 | 8 + 7 | 77.7 +(3 rows) + +VALUES (1,2), (3,4+4), (7,77.7) +UNION ALL +SELECT 2+2, 57 +UNION ALL +TABLE int8_tbl; + column1 | column2 +------------------+------------------- + 1 | 2 + 3 | 8 + 7 | 77.7 + 4 | 57 + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(9 rows) + +-- corner case: VALUES with no columns +CREATE TEMP TABLE nocols(); +INSERT INTO nocols DEFAULT VALUES; +SELECT * FROM nocols n, LATERAL (VALUES(n.*)) v; +-- +(1 row) + +-- +-- Test ORDER BY options +-- +CREATE TEMP TABLE foo (f1 int); +INSERT INTO foo VALUES (42),(3),(10),(7),(null),(null),(1); +SELECT * FROM foo ORDER BY f1; + f1 +---- + 1 + 3 + 7 + 10 + 42 + + +(7 rows) + +SELECT * FROM foo ORDER BY f1 ASC; -- same thing + f1 +---- + 1 + 3 + 7 + 10 + 42 + + +(7 rows) + +SELECT * FROM foo ORDER BY f1 NULLS FIRST; + f1 +---- + + + 1 + 3 + 7 + 10 + 42 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC; + f1 +---- + + + 42 + 10 + 7 + 3 + 1 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC NULLS LAST; + f1 +---- + 42 + 10 + 7 + 3 + 1 + + +(7 rows) + +-- check if indexscans do the right things +CREATE INDEX fooi ON foo (f1); +SET enable_sort = false; +SELECT * FROM foo ORDER BY f1; + f1 +---- + 1 + 3 + 7 + 10 + 42 + + +(7 rows) + +SELECT * FROM foo ORDER BY f1 NULLS FIRST; + f1 +---- + + + 1 + 3 + 7 + 10 + 42 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC; + f1 +---- + + + 42 + 10 + 7 + 3 + 1 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC NULLS LAST; + f1 +---- + 42 + 10 + 7 + 3 + 1 + + +(7 rows) + +DROP INDEX fooi; +CREATE INDEX fooi ON foo (f1 DESC); +SELECT * FROM foo ORDER BY f1; + f1 +---- + 1 + 3 + 7 + 10 + 42 + + +(7 rows) + +SELECT * FROM foo ORDER BY f1 NULLS FIRST; + f1 +---- + + + 1 + 3 + 7 + 10 + 42 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC; + f1 +---- + + + 42 + 10 + 7 + 3 + 1 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC NULLS LAST; + f1 +---- + 42 + 10 + 7 + 3 + 1 + + +(7 rows) + +DROP INDEX fooi; +CREATE INDEX fooi ON foo (f1 DESC NULLS LAST); +SELECT * FROM foo ORDER BY f1; + f1 +---- + 1 + 3 + 
7 + 10 + 42 + + +(7 rows) + +SELECT * FROM foo ORDER BY f1 NULLS FIRST; + f1 +---- + + + 1 + 3 + 7 + 10 + 42 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC; + f1 +---- + + + 42 + 10 + 7 + 3 + 1 +(7 rows) + +SELECT * FROM foo ORDER BY f1 DESC NULLS LAST; + f1 +---- + 42 + 10 + 7 + 3 + 1 + + +(7 rows) + +-- +-- Test planning of some cases with partial indexes +-- +-- partial index is usable +explain (costs off) +select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA'; + QUERY PLAN +----------------------------------------- + Index Scan using onek2_u2_prtl on onek2 + Index Cond: (unique2 = 11) + Filter: (stringu1 = 'ATAAAA'::name) +(3 rows) + +select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA'; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 494 | 11 | 0 | 2 | 4 | 14 | 4 | 94 | 94 | 494 | 494 | 8 | 9 | ATAAAA | LAAAAA | VVVVxx +(1 row) + +-- actually run the query with an analyze to use the partial index +explain (costs off, analyze on, timing off, summary off) +select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA'; + QUERY PLAN +----------------------------------------------------------------- + Index Scan using onek2_u2_prtl on onek2 (actual rows=1 loops=1) + Index Cond: (unique2 = 11) + Filter: (stringu1 = 'ATAAAA'::name) +(3 rows) + +explain (costs off) +select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA'; + QUERY PLAN +----------------------------------------- + Index Scan using onek2_u2_prtl on onek2 + Index Cond: (unique2 = 11) + Filter: (stringu1 = 'ATAAAA'::name) +(3 rows) + +select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA'; + unique2 +--------- + 11 +(1 row) + +-- partial index predicate implies clause, so no need for retest +explain (costs off) +select * from onek2 where unique2 = 11 and stringu1 < 'B'; + QUERY PLAN +----------------------------------------- + Index Scan using onek2_u2_prtl on onek2 + Index Cond: (unique2 = 11) +(2 rows) + +select * from onek2 where unique2 = 11 and stringu1 < 'B'; + unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 +---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+--------- + 494 | 11 | 0 | 2 | 4 | 14 | 4 | 94 | 94 | 494 | 494 | 8 | 9 | ATAAAA | LAAAAA | VVVVxx +(1 row) + +explain (costs off) +select unique2 from onek2 where unique2 = 11 and stringu1 < 'B'; + QUERY PLAN +---------------------------------------------- + Index Only Scan using onek2_u2_prtl on onek2 + Index Cond: (unique2 = 11) +(2 rows) + +select unique2 from onek2 where unique2 = 11 and stringu1 < 'B'; + unique2 +--------- + 11 +(1 row) + +-- but if it's an update target, must retest anyway +explain (costs off) +select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update; + QUERY PLAN +----------------------------------------------- + LockRows + -> Index Scan using onek2_u2_prtl on onek2 + Index Cond: (unique2 = 11) + Filter: (stringu1 < 'B'::name) +(4 rows) + +select unique2 from onek2 where unique2 = 11 and stringu1 < 'B' for update; + unique2 +--------- + 11 +(1 row) + +-- partial index is not applicable +explain (costs off) +select unique2 from onek2 where 
unique2 = 11 and stringu1 < 'C'; + QUERY PLAN +------------------------------------------------------- + Seq Scan on onek2 + Filter: ((stringu1 < 'C'::name) AND (unique2 = 11)) +(2 rows) + +select unique2 from onek2 where unique2 = 11 and stringu1 < 'C'; + unique2 +--------- + 11 +(1 row) + +-- partial index implies clause, but bitmap scan must recheck predicate anyway +SET enable_indexscan TO off; +explain (costs off) +select unique2 from onek2 where unique2 = 11 and stringu1 < 'B'; + QUERY PLAN +------------------------------------------------------------- + Bitmap Heap Scan on onek2 + Recheck Cond: ((unique2 = 11) AND (stringu1 < 'B'::name)) + -> Bitmap Index Scan on onek2_u2_prtl + Index Cond: (unique2 = 11) +(4 rows) + +select unique2 from onek2 where unique2 = 11 and stringu1 < 'B'; + unique2 +--------- + 11 +(1 row) + +RESET enable_indexscan; +-- check multi-index cases too +explain (costs off) +select unique1, unique2 from onek2 + where (unique2 = 11 or unique1 = 0) and stringu1 < 'B'; + QUERY PLAN +-------------------------------------------------------------------------------- + Bitmap Heap Scan on onek2 + Recheck Cond: (((unique2 = 11) AND (stringu1 < 'B'::name)) OR (unique1 = 0)) + Filter: (stringu1 < 'B'::name) + -> BitmapOr + -> Bitmap Index Scan on onek2_u2_prtl + Index Cond: (unique2 = 11) + -> Bitmap Index Scan on onek2_u1_prtl + Index Cond: (unique1 = 0) +(8 rows) + +select unique1, unique2 from onek2 + where (unique2 = 11 or unique1 = 0) and stringu1 < 'B'; + unique1 | unique2 +---------+--------- + 494 | 11 + 0 | 998 +(2 rows) + +explain (costs off) +select unique1, unique2 from onek2 + where (unique2 = 11 and stringu1 < 'B') or unique1 = 0; + QUERY PLAN +-------------------------------------------------------------------------------- + Bitmap Heap Scan on onek2 + Recheck Cond: (((unique2 = 11) AND (stringu1 < 'B'::name)) OR (unique1 = 0)) + -> BitmapOr + -> Bitmap Index Scan on onek2_u2_prtl + Index Cond: (unique2 = 11) + -> Bitmap Index Scan on onek2_u1_prtl + Index Cond: (unique1 = 0) +(7 rows) + +select unique1, unique2 from onek2 + where (unique2 = 11 and stringu1 < 'B') or unique1 = 0; + unique1 | unique2 +---------+--------- + 494 | 11 + 0 | 998 +(2 rows) + +-- +-- Test some corner cases that have been known to confuse the planner +-- +-- ORDER BY on a constant doesn't really need any sorting +SELECT 1 AS x ORDER BY x; + x +--- + 1 +(1 row) + +-- But ORDER BY on a set-valued expression does +create function sillysrf(int) returns setof int as + 'values (1),(10),(2),($1)' language sql immutable; +select sillysrf(42); + sillysrf +---------- + 1 + 10 + 2 + 42 +(4 rows) + +select sillysrf(-1) order by 1; + sillysrf +---------- + -1 + 1 + 2 + 10 +(4 rows) + +drop function sillysrf(int); +-- X = X isn't a no-op, it's effectively X IS NOT NULL assuming = is strict +-- (see bug #5084) +select * from (values (2),(null),(1)) v(k) where k = k order by k; + k +--- + 1 + 2 +(2 rows) + +select * from (values (2),(null),(1)) v(k) where k = k; + k +--- + 2 + 1 +(2 rows) + +-- Test partitioned tables with no partitions, which should be handled the +-- same as the non-inheritance case when expanding its RTE. 
+create table list_parted_tbl (a int,b int) partition by list (a); +create table list_parted_tbl1 partition of list_parted_tbl + for values in (1) partition by list(b); +explain (costs off) select * from list_parted_tbl; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +drop table list_parted_tbl; diff --git a/src/test/regress/expected/select_distinct.out b/src/test/regress/expected/select_distinct.out new file mode 100644 index 0000000..9d44ea8 --- /dev/null +++ b/src/test/regress/expected/select_distinct.out @@ -0,0 +1,446 @@ +-- +-- SELECT_DISTINCT +-- +-- +-- awk '{print $3;}' onek.data | sort -n | uniq +-- +SELECT DISTINCT two FROM onek ORDER BY 1; + two +----- + 0 + 1 +(2 rows) + +-- +-- awk '{print $5;}' onek.data | sort -n | uniq +-- +SELECT DISTINCT ten FROM onek ORDER BY 1; + ten +----- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + +-- +-- awk '{print $16;}' onek.data | sort -d | uniq +-- +SELECT DISTINCT string4 FROM onek ORDER BY 1; + string4 +--------- + AAAAxx + HHHHxx + OOOOxx + VVVVxx +(4 rows) + +-- +-- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq | +-- sort +0n -1 +1d -2 +2n -3 +-- +SELECT DISTINCT two, string4, ten + FROM onek + ORDER BY two using <, string4 using <, ten using <; + two | string4 | ten +-----+---------+----- + 0 | AAAAxx | 0 + 0 | AAAAxx | 2 + 0 | AAAAxx | 4 + 0 | AAAAxx | 6 + 0 | AAAAxx | 8 + 0 | HHHHxx | 0 + 0 | HHHHxx | 2 + 0 | HHHHxx | 4 + 0 | HHHHxx | 6 + 0 | HHHHxx | 8 + 0 | OOOOxx | 0 + 0 | OOOOxx | 2 + 0 | OOOOxx | 4 + 0 | OOOOxx | 6 + 0 | OOOOxx | 8 + 0 | VVVVxx | 0 + 0 | VVVVxx | 2 + 0 | VVVVxx | 4 + 0 | VVVVxx | 6 + 0 | VVVVxx | 8 + 1 | AAAAxx | 1 + 1 | AAAAxx | 3 + 1 | AAAAxx | 5 + 1 | AAAAxx | 7 + 1 | AAAAxx | 9 + 1 | HHHHxx | 1 + 1 | HHHHxx | 3 + 1 | HHHHxx | 5 + 1 | HHHHxx | 7 + 1 | HHHHxx | 9 + 1 | OOOOxx | 1 + 1 | OOOOxx | 3 + 1 | OOOOxx | 5 + 1 | OOOOxx | 7 + 1 | OOOOxx | 9 + 1 | VVVVxx | 1 + 1 | VVVVxx | 3 + 1 | VVVVxx | 5 + 1 | VVVVxx | 7 + 1 | VVVVxx | 9 +(40 rows) + +-- +-- awk '{print $2;}' person.data | +-- awk '{if(NF!=1){print $2;}else{print;}}' - emp.data | +-- awk '{if(NF!=1){print $2;}else{print;}}' - student.data | +-- awk 'BEGIN{FS=" ";}{if(NF!=1){print $5;}else{print;}}' - stud_emp.data | +-- sort -n -r | uniq +-- +SELECT DISTINCT p.age FROM person* p ORDER BY age using >; + age +----- + 98 + 88 + 78 + 68 + 60 + 58 + 50 + 48 + 40 + 38 + 34 + 30 + 28 + 25 + 24 + 23 + 20 + 19 + 18 + 8 +(20 rows) + +-- +-- Check mentioning same column more than once +-- +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM + (SELECT DISTINCT two, four, two FROM tenk1) ss; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: count(*) + -> HashAggregate + Output: tenk1.two, tenk1.four, tenk1.two + Group Key: tenk1.two, tenk1.four + -> Seq Scan on public.tenk1 + Output: tenk1.two, tenk1.four, tenk1.two +(7 rows) + +SELECT count(*) FROM + (SELECT DISTINCT two, four, two FROM tenk1) ss; + count +------- + 4 +(1 row) + +-- +-- Compare results between plans using sorting and plans using hash +-- aggregation. Force spilling in both cases by setting work_mem low. +-- +SET work_mem='64kB'; +-- Produce results with sorting. 
+SET enable_hashagg=FALSE; +SET jit_above_cost=0; +EXPLAIN (costs off) +SELECT DISTINCT g%1000 FROM generate_series(0,9999) g; + QUERY PLAN +------------------------------------------------ + Unique + -> Sort + Sort Key: ((g % 1000)) + -> Function Scan on generate_series g +(4 rows) + +CREATE TABLE distinct_group_1 AS +SELECT DISTINCT g%1000 FROM generate_series(0,9999) g; +SET jit_above_cost TO DEFAULT; +CREATE TABLE distinct_group_2 AS +SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g; +SET enable_seqscan = 0; +-- Check to see we get an incremental sort plan +EXPLAIN (costs off) +SELECT DISTINCT hundred, two FROM tenk1; + QUERY PLAN +----------------------------------------------------- + Unique + -> Incremental Sort + Sort Key: hundred, two + Presorted Key: hundred + -> Index Scan using tenk1_hundred on tenk1 +(5 rows) + +RESET enable_seqscan; +SET enable_hashagg=TRUE; +-- Produce results with hash aggregation. +SET enable_sort=FALSE; +SET jit_above_cost=0; +EXPLAIN (costs off) +SELECT DISTINCT g%1000 FROM generate_series(0,9999) g; + QUERY PLAN +------------------------------------------ + HashAggregate + Group Key: (g % 1000) + -> Function Scan on generate_series g +(3 rows) + +CREATE TABLE distinct_hash_1 AS +SELECT DISTINCT g%1000 FROM generate_series(0,9999) g; +SET jit_above_cost TO DEFAULT; +CREATE TABLE distinct_hash_2 AS +SELECT DISTINCT (g%1000)::text FROM generate_series(0,9999) g; +SET enable_sort=TRUE; +SET work_mem TO DEFAULT; +-- Compare results +(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1) + UNION ALL +(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1); + ?column? +---------- +(0 rows) + +(SELECT * FROM distinct_hash_1 EXCEPT SELECT * FROM distinct_group_1) + UNION ALL +(SELECT * FROM distinct_group_1 EXCEPT SELECT * FROM distinct_hash_1); + ?column? 
+---------- +(0 rows) + +DROP TABLE distinct_hash_1; +DROP TABLE distinct_hash_2; +DROP TABLE distinct_group_1; +DROP TABLE distinct_group_2; +-- Test parallel DISTINCT +SET parallel_tuple_cost=0; +SET parallel_setup_cost=0; +SET min_parallel_table_scan_size=0; +SET max_parallel_workers_per_gather=2; +-- Ensure we get a parallel plan +EXPLAIN (costs off) +SELECT DISTINCT four FROM tenk1; + QUERY PLAN +---------------------------------------------------- + Unique + -> Sort + Sort Key: four + -> Gather + Workers Planned: 2 + -> HashAggregate + Group Key: four + -> Parallel Seq Scan on tenk1 +(8 rows) + +-- Ensure the parallel plan produces the correct results +SELECT DISTINCT four FROM tenk1; + four +------ + 0 + 1 + 2 + 3 +(4 rows) + +CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$ + BEGIN + RETURN a; + END; +$$ LANGUAGE plpgsql PARALLEL UNSAFE; +-- Ensure we don't do parallel distinct with a parallel unsafe function +EXPLAIN (COSTS OFF) +SELECT DISTINCT distinct_func(1) FROM tenk1; + QUERY PLAN +---------------------------------------------------------- + Unique + -> Sort + Sort Key: (distinct_func(1)) + -> Index Only Scan using tenk1_hundred on tenk1 +(4 rows) + +-- make the function parallel safe +CREATE OR REPLACE FUNCTION distinct_func(a INT) RETURNS INT AS $$ + BEGIN + RETURN a; + END; +$$ LANGUAGE plpgsql PARALLEL SAFE; +-- Ensure we do parallel distinct now that the function is parallel safe +EXPLAIN (COSTS OFF) +SELECT DISTINCT distinct_func(1) FROM tenk1; + QUERY PLAN +---------------------------------------------------- + Unique + -> Gather Merge + Workers Planned: 2 + -> Unique + -> Sort + Sort Key: (distinct_func(1)) + -> Parallel Seq Scan on tenk1 +(7 rows) + +RESET max_parallel_workers_per_gather; +RESET min_parallel_table_scan_size; +RESET parallel_setup_cost; +RESET parallel_tuple_cost; +-- +-- Test the planner's ability to use a LIMIT 1 instead of a Unique node when +-- all of the distinct_pathkeys have been marked as redundant +-- +-- Ensure we get a plan with a Limit 1 +EXPLAIN (COSTS OFF) +SELECT DISTINCT four FROM tenk1 WHERE four = 0; + QUERY PLAN +---------------------------- + Limit + -> Seq Scan on tenk1 + Filter: (four = 0) +(3 rows) + +-- Ensure the above gives us the correct result +SELECT DISTINCT four FROM tenk1 WHERE four = 0; + four +------ + 0 +(1 row) + +-- Ensure we get a plan with a Limit 1 +EXPLAIN (COSTS OFF) +SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0; + QUERY PLAN +--------------------------------------------- + Limit + -> Seq Scan on tenk1 + Filter: ((two <> 0) AND (four = 0)) +(3 rows) + +-- Ensure no rows are returned +SELECT DISTINCT four FROM tenk1 WHERE four = 0 AND two <> 0; + four +------ +(0 rows) + +-- Ensure we get a plan with a Limit 1 when the SELECT list contains constants +EXPLAIN (COSTS OFF) +SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0; + QUERY PLAN +---------------------------- + Limit + -> Seq Scan on tenk1 + Filter: (four = 0) +(3 rows) + +-- Ensure we only get 1 row +SELECT DISTINCT four,1,2,3 FROM tenk1 WHERE four = 0; + four | ?column? | ?column? | ?column? +------+----------+----------+---------- + 0 | 1 | 2 | 3 +(1 row) + +-- +-- Also, some tests of IS DISTINCT FROM, which doesn't quite deserve its +-- very own regression file. 
+-- +CREATE TEMP TABLE disttable (f1 integer); +INSERT INTO DISTTABLE VALUES(1); +INSERT INTO DISTTABLE VALUES(2); +INSERT INTO DISTTABLE VALUES(3); +INSERT INTO DISTTABLE VALUES(NULL); +-- basic cases +SELECT f1, f1 IS DISTINCT FROM 2 as "not 2" FROM disttable; + f1 | not 2 +----+------- + 1 | t + 2 | f + 3 | t + | t +(4 rows) + +SELECT f1, f1 IS DISTINCT FROM NULL as "not null" FROM disttable; + f1 | not null +----+---------- + 1 | t + 2 | t + 3 | t + | f +(4 rows) + +SELECT f1, f1 IS DISTINCT FROM f1 as "false" FROM disttable; + f1 | false +----+------- + 1 | f + 2 | f + 3 | f + | f +(4 rows) + +SELECT f1, f1 IS DISTINCT FROM f1+1 as "not null" FROM disttable; + f1 | not null +----+---------- + 1 | t + 2 | t + 3 | t + | f +(4 rows) + +-- check that optimizer constant-folds it properly +SELECT 1 IS DISTINCT FROM 2 as "yes"; + yes +----- + t +(1 row) + +SELECT 2 IS DISTINCT FROM 2 as "no"; + no +---- + f +(1 row) + +SELECT 2 IS DISTINCT FROM null as "yes"; + yes +----- + t +(1 row) + +SELECT null IS DISTINCT FROM null as "no"; + no +---- + f +(1 row) + +-- negated form +SELECT 1 IS NOT DISTINCT FROM 2 as "no"; + no +---- + f +(1 row) + +SELECT 2 IS NOT DISTINCT FROM 2 as "yes"; + yes +----- + t +(1 row) + +SELECT 2 IS NOT DISTINCT FROM null as "no"; + no +---- + f +(1 row) + +SELECT null IS NOT DISTINCT FROM null as "yes"; + yes +----- + t +(1 row) + diff --git a/src/test/regress/expected/select_distinct_on.out b/src/test/regress/expected/select_distinct_on.out new file mode 100644 index 0000000..b2978c1 --- /dev/null +++ b/src/test/regress/expected/select_distinct_on.out @@ -0,0 +1,125 @@ +-- +-- SELECT_DISTINCT_ON +-- +SELECT DISTINCT ON (string4) string4, two, ten + FROM onek + ORDER BY string4 using <, two using >, ten using <; + string4 | two | ten +---------+-----+----- + AAAAxx | 1 | 1 + HHHHxx | 1 | 1 + OOOOxx | 1 | 1 + VVVVxx | 1 | 1 +(4 rows) + +-- this will fail due to conflict of ordering requirements +SELECT DISTINCT ON (string4, ten) string4, two, ten + FROM onek + ORDER BY string4 using <, two using <, ten using <; +ERROR: SELECT DISTINCT ON expressions must match initial ORDER BY expressions +LINE 1: SELECT DISTINCT ON (string4, ten) string4, two, ten + ^ +SELECT DISTINCT ON (string4, ten) string4, ten, two + FROM onek + ORDER BY string4 using <, ten using >, two using <; + string4 | ten | two +---------+-----+----- + AAAAxx | 9 | 1 + AAAAxx | 8 | 0 + AAAAxx | 7 | 1 + AAAAxx | 6 | 0 + AAAAxx | 5 | 1 + AAAAxx | 4 | 0 + AAAAxx | 3 | 1 + AAAAxx | 2 | 0 + AAAAxx | 1 | 1 + AAAAxx | 0 | 0 + HHHHxx | 9 | 1 + HHHHxx | 8 | 0 + HHHHxx | 7 | 1 + HHHHxx | 6 | 0 + HHHHxx | 5 | 1 + HHHHxx | 4 | 0 + HHHHxx | 3 | 1 + HHHHxx | 2 | 0 + HHHHxx | 1 | 1 + HHHHxx | 0 | 0 + OOOOxx | 9 | 1 + OOOOxx | 8 | 0 + OOOOxx | 7 | 1 + OOOOxx | 6 | 0 + OOOOxx | 5 | 1 + OOOOxx | 4 | 0 + OOOOxx | 3 | 1 + OOOOxx | 2 | 0 + OOOOxx | 1 | 1 + OOOOxx | 0 | 0 + VVVVxx | 9 | 1 + VVVVxx | 8 | 0 + VVVVxx | 7 | 1 + VVVVxx | 6 | 0 + VVVVxx | 5 | 1 + VVVVxx | 4 | 0 + VVVVxx | 3 | 1 + VVVVxx | 2 | 0 + VVVVxx | 1 | 1 + VVVVxx | 0 | 0 +(40 rows) + +-- bug #5049: early 8.4.x chokes on volatile DISTINCT ON clauses +select distinct on (1) floor(random()) as r, f1 from int4_tbl order by 1,2; + r | f1 +---+------------- + 0 | -2147483647 +(1 row) + +-- +-- Test the planner's ability to use a LIMIT 1 instead of a Unique node when +-- all of the distinct_pathkeys have been marked as redundant +-- +-- Ensure we also get a LIMIT plan with DISTINCT ON +EXPLAIN (COSTS OFF) +SELECT DISTINCT ON (four) four,two + FROM tenk1 WHERE four = 0 
ORDER BY 1; + QUERY PLAN +---------------------------------- + Result + -> Limit + -> Seq Scan on tenk1 + Filter: (four = 0) +(4 rows) + +-- and check the result of the above query is correct +SELECT DISTINCT ON (four) four,two + FROM tenk1 WHERE four = 0 ORDER BY 1; + four | two +------+----- + 0 | 0 +(1 row) + +-- Ensure a Sort -> Limit is used when the ORDER BY contains additional cols +EXPLAIN (COSTS OFF) +SELECT DISTINCT ON (four) four,two + FROM tenk1 WHERE four = 0 ORDER BY 1,2; + QUERY PLAN +---------------------------------- + Limit + -> Sort + Sort Key: two + -> Seq Scan on tenk1 + Filter: (four = 0) +(5 rows) + +-- Same again but use a column that is indexed so that we get an index scan +-- then a limit +EXPLAIN (COSTS OFF) +SELECT DISTINCT ON (four) four,hundred + FROM tenk1 WHERE four = 0 ORDER BY 1,2; + QUERY PLAN +----------------------------------------------------- + Result + -> Limit + -> Index Scan using tenk1_hundred on tenk1 + Filter: (four = 0) +(4 rows) + diff --git a/src/test/regress/expected/select_having.out b/src/test/regress/expected/select_having.out new file mode 100644 index 0000000..3950c0b --- /dev/null +++ b/src/test/regress/expected/select_having.out @@ -0,0 +1,93 @@ +-- +-- SELECT_HAVING +-- +-- load test data +CREATE TABLE test_having (a int, b int, c char(8), d char); +INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A'); +INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b'); +INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c'); +INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D'); +INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e'); +INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F'); +INSERT INTO test_having VALUES (6, 4, 'cccc', 'g'); +INSERT INTO test_having VALUES (7, 4, 'cccc', 'h'); +INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I'); +INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j'); +SELECT b, c FROM test_having + GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c; + b | c +---+---------- + 1 | XXXX + 3 | bbbb +(2 rows) + +-- HAVING is effectively equivalent to WHERE in this case +SELECT b, c FROM test_having + GROUP BY b, c HAVING b = 3 ORDER BY b, c; + b | c +---+---------- + 3 | BBBB + 3 | bbbb +(2 rows) + +SELECT lower(c), count(c) FROM test_having + GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a) + ORDER BY lower(c); + lower | count +-------+------- + bbbb | 3 + cccc | 4 + xxxx | 1 +(3 rows) + +SELECT c, max(a) FROM test_having + GROUP BY c HAVING count(*) > 2 OR min(a) = max(a) + ORDER BY c; + c | max +----------+----- + XXXX | 0 + bbbb | 5 +(2 rows) + +-- test degenerate cases involving HAVING without GROUP BY +-- Per SQL spec, these should generate 0 or 1 row, even without aggregates +SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a); + min | max +-----+----- +(0 rows) + +SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a); + min | max +-----+----- + 0 | 9 +(1 row) + +-- errors: ungrouped column references +SELECT a FROM test_having HAVING min(a) < max(a); +ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT a FROM test_having HAVING min(a) < max(a); + ^ +SELECT 1 AS one FROM test_having HAVING a > 1; +ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT 1 AS one FROM test_having HAVING a > 1; + ^ +-- the really degenerate case: need not scan table at all +SELECT 1 AS one FROM test_having HAVING 1 > 2; + one +----- +(0 rows) + +SELECT 1 AS one FROM test_having HAVING 1 < 2; + one 
+----- + 1 +(1 row) + +-- and just to prove that we aren't scanning the table: +SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2; + one +----- + 1 +(1 row) + +DROP TABLE test_having; diff --git a/src/test/regress/expected/select_having_1.out b/src/test/regress/expected/select_having_1.out new file mode 100644 index 0000000..5c58da1 --- /dev/null +++ b/src/test/regress/expected/select_having_1.out @@ -0,0 +1,93 @@ +-- +-- SELECT_HAVING +-- +-- load test data +CREATE TABLE test_having (a int, b int, c char(8), d char); +INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A'); +INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b'); +INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c'); +INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D'); +INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e'); +INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F'); +INSERT INTO test_having VALUES (6, 4, 'cccc', 'g'); +INSERT INTO test_having VALUES (7, 4, 'cccc', 'h'); +INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I'); +INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j'); +SELECT b, c FROM test_having + GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c; + b | c +---+---------- + 1 | XXXX + 3 | bbbb +(2 rows) + +-- HAVING is effectively equivalent to WHERE in this case +SELECT b, c FROM test_having + GROUP BY b, c HAVING b = 3 ORDER BY b, c; + b | c +---+---------- + 3 | BBBB + 3 | bbbb +(2 rows) + +SELECT lower(c), count(c) FROM test_having + GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a) + ORDER BY lower(c); + lower | count +-------+------- + bbbb | 3 + cccc | 4 + xxxx | 1 +(3 rows) + +SELECT c, max(a) FROM test_having + GROUP BY c HAVING count(*) > 2 OR min(a) = max(a) + ORDER BY c; + c | max +----------+----- + bbbb | 5 + XXXX | 0 +(2 rows) + +-- test degenerate cases involving HAVING without GROUP BY +-- Per SQL spec, these should generate 0 or 1 row, even without aggregates +SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a); + min | max +-----+----- +(0 rows) + +SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a); + min | max +-----+----- + 0 | 9 +(1 row) + +-- errors: ungrouped column references +SELECT a FROM test_having HAVING min(a) < max(a); +ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT a FROM test_having HAVING min(a) < max(a); + ^ +SELECT 1 AS one FROM test_having HAVING a > 1; +ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT 1 AS one FROM test_having HAVING a > 1; + ^ +-- the really degenerate case: need not scan table at all +SELECT 1 AS one FROM test_having HAVING 1 > 2; + one +----- +(0 rows) + +SELECT 1 AS one FROM test_having HAVING 1 < 2; + one +----- + 1 +(1 row) + +-- and just to prove that we aren't scanning the table: +SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2; + one +----- + 1 +(1 row) + +DROP TABLE test_having; diff --git a/src/test/regress/expected/select_having_2.out b/src/test/regress/expected/select_having_2.out new file mode 100644 index 0000000..7087fb1 --- /dev/null +++ b/src/test/regress/expected/select_having_2.out @@ -0,0 +1,93 @@ +-- +-- SELECT_HAVING +-- +-- load test data +CREATE TABLE test_having (a int, b int, c char(8), d char); +INSERT INTO test_having VALUES (0, 1, 'XXXX', 'A'); +INSERT INTO test_having VALUES (1, 2, 'AAAA', 'b'); +INSERT INTO test_having VALUES (2, 2, 'AAAA', 'c'); +INSERT INTO test_having VALUES (3, 3, 'BBBB', 'D'); +INSERT INTO test_having VALUES (4, 3, 'BBBB', 'e'); 
+INSERT INTO test_having VALUES (5, 3, 'bbbb', 'F'); +INSERT INTO test_having VALUES (6, 4, 'cccc', 'g'); +INSERT INTO test_having VALUES (7, 4, 'cccc', 'h'); +INSERT INTO test_having VALUES (8, 4, 'CCCC', 'I'); +INSERT INTO test_having VALUES (9, 4, 'CCCC', 'j'); +SELECT b, c FROM test_having + GROUP BY b, c HAVING count(*) = 1 ORDER BY b, c; + b | c +---+---------- + 1 | XXXX + 3 | bbbb +(2 rows) + +-- HAVING is effectively equivalent to WHERE in this case +SELECT b, c FROM test_having + GROUP BY b, c HAVING b = 3 ORDER BY b, c; + b | c +---+---------- + 3 | bbbb + 3 | BBBB +(2 rows) + +SELECT lower(c), count(c) FROM test_having + GROUP BY lower(c) HAVING count(*) > 2 OR min(a) = max(a) + ORDER BY lower(c); + lower | count +-------+------- + bbbb | 3 + cccc | 4 + xxxx | 1 +(3 rows) + +SELECT c, max(a) FROM test_having + GROUP BY c HAVING count(*) > 2 OR min(a) = max(a) + ORDER BY c; + c | max +----------+----- + bbbb | 5 + XXXX | 0 +(2 rows) + +-- test degenerate cases involving HAVING without GROUP BY +-- Per SQL spec, these should generate 0 or 1 row, even without aggregates +SELECT min(a), max(a) FROM test_having HAVING min(a) = max(a); + min | max +-----+----- +(0 rows) + +SELECT min(a), max(a) FROM test_having HAVING min(a) < max(a); + min | max +-----+----- + 0 | 9 +(1 row) + +-- errors: ungrouped column references +SELECT a FROM test_having HAVING min(a) < max(a); +ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT a FROM test_having HAVING min(a) < max(a); + ^ +SELECT 1 AS one FROM test_having HAVING a > 1; +ERROR: column "test_having.a" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: SELECT 1 AS one FROM test_having HAVING a > 1; + ^ +-- the really degenerate case: need not scan table at all +SELECT 1 AS one FROM test_having HAVING 1 > 2; + one +----- +(0 rows) + +SELECT 1 AS one FROM test_having HAVING 1 < 2; + one +----- + 1 +(1 row) + +-- and just to prove that we aren't scanning the table: +SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2; + one +----- + 1 +(1 row) + +DROP TABLE test_having; diff --git a/src/test/regress/expected/select_implicit.out b/src/test/regress/expected/select_implicit.out new file mode 100644 index 0000000..27c07de --- /dev/null +++ b/src/test/regress/expected/select_implicit.out @@ -0,0 +1,338 @@ +-- +-- SELECT_IMPLICIT +-- Test cases for queries with ordering terms missing from the target list. +-- This used to be called "junkfilter.sql". +-- The parser uses the term "resjunk" to handle these cases. 
+-- - thomas 1998-07-09 +-- +-- load test data +CREATE TABLE test_missing_target (a int, b int, c char(8), d char); +INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A'); +INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b'); +INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c'); +INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D'); +INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e'); +INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F'); +INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g'); +INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h'); +INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I'); +INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j'); +-- w/ existing GROUP BY target +SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c; + c | count +----------+------- + ABAB | 2 + BBBB | 2 + CCCC | 2 + XXXX | 1 + bbbb | 1 + cccc | 2 +(6 rows) + +-- w/o existing GROUP BY target using a relation name in GROUP BY clause +SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c; + count +------- + 2 + 2 + 2 + 1 + 1 + 2 +(6 rows) + +-- w/o existing GROUP BY target and w/o existing a different ORDER BY target +-- failure expected +SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b; +ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: ...ECT count(*) FROM test_missing_target GROUP BY a ORDER BY b; + ^ +-- w/o existing GROUP BY target and w/o existing same ORDER BY target +SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- w/ existing GROUP BY target using a relation name in target +SELECT test_missing_target.b, count(*) + FROM test_missing_target GROUP BY b ORDER BY b; + b | count +---+------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(4 rows) + +-- w/o existing GROUP BY target +SELECT c FROM test_missing_target ORDER BY a; + c +---------- + XXXX + ABAB + ABAB + BBBB + BBBB + bbbb + cccc + cccc + CCCC + CCCC +(10 rows) + +-- w/o existing ORDER BY target +SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc; + count +------- + 4 + 3 + 2 + 1 +(4 rows) + +-- group using reference number +SELECT count(*) FROM test_missing_target ORDER BY 1 desc; + count +------- + 10 +(1 row) + +-- order using reference number +SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1; + c | count +----------+------- + ABAB | 2 + BBBB | 2 + CCCC | 2 + XXXX | 1 + bbbb | 1 + cccc | 2 +(6 rows) + +-- group using reference number out of range +-- failure expected +SELECT c, count(*) FROM test_missing_target GROUP BY 3; +ERROR: GROUP BY position 3 is not in select list +LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3; + ^ +-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition +-- failure expected +SELECT count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY b ORDER BY b; +ERROR: column reference "b" is ambiguous +LINE 3: GROUP BY b ORDER BY b; + ^ +-- order w/ target under ambiguous condition +-- failure NOT expected +SELECT a, a FROM test_missing_target + ORDER BY a; + a | a +---+--- + 0 | 0 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 +(10 rows) + +-- order expression w/ target under ambiguous condition +-- failure NOT expected +SELECT a/2, a/2 FROM test_missing_target + ORDER BY a/2; + ?column? | ?column? 
+----------+---------- + 0 | 0 + 0 | 0 + 1 | 1 + 1 | 1 + 2 | 2 + 2 | 2 + 3 | 3 + 3 | 3 + 4 | 4 + 4 | 4 +(10 rows) + +-- group expression w/ target under ambiguous condition +-- failure NOT expected +SELECT a/2, a/2 FROM test_missing_target + GROUP BY a/2 ORDER BY a/2; + ?column? | ?column? +----------+---------- + 0 | 0 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(5 rows) + +-- group w/ existing GROUP BY target under ambiguous condition +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; + b | count +---+------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(4 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +SELECT count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +-- into a table +CREATE TABLE test_missing_target2 AS +SELECT count(*) +FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; +SELECT * FROM test_missing_target2; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- Functions and expressions +-- w/ existing GROUP BY target +SELECT a%2, count(b) FROM test_missing_target +GROUP BY test_missing_target.a%2 +ORDER BY test_missing_target.a%2; + ?column? | count +----------+------- + 0 | 5 + 1 | 5 +(2 rows) + +-- w/o existing GROUP BY target using a relation name in GROUP BY clause +SELECT count(c) FROM test_missing_target +GROUP BY lower(test_missing_target.c) +ORDER BY lower(test_missing_target.c); + count +------- + 2 + 3 + 4 + 1 +(4 rows) + +-- w/o existing GROUP BY target and w/o existing a different ORDER BY target +-- failure expected +SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b; +ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: ...ECT count(a) FROM test_missing_target GROUP BY a ORDER BY b; + ^ +-- w/o existing GROUP BY target and w/o existing same ORDER BY target +SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2; + count +------- + 1 + 5 + 4 +(3 rows) + +-- w/ existing GROUP BY target using a relation name in target +SELECT lower(test_missing_target.c), count(c) + FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c); + lower | count +-------+------- + abab | 2 + bbbb | 3 + cccc | 4 + xxxx | 1 +(4 rows) + +-- w/o existing GROUP BY target +SELECT a FROM test_missing_target ORDER BY upper(d); + a +--- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + +-- w/o existing ORDER BY target +SELECT count(b) FROM test_missing_target + GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc; + count +------- + 7 + 3 +(2 rows) + +-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition +-- failure expected +SELECT count(x.a) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY b/2 ORDER BY b/2; +ERROR: column reference "b" is ambiguous +LINE 3: GROUP BY b/2 ORDER BY b/2; + ^ +-- group w/ existing GROUP BY target under ambiguous condition +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2 ORDER BY x.b/2; + ?column? 
| count +----------+------- + 0 | 1 + 1 | 5 + 2 | 4 +(3 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +-- failure expected due to ambiguous b in count(b) +SELECT count(b) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2; +ERROR: column reference "b" is ambiguous +LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar... + ^ +-- group w/o existing GROUP BY target under ambiguous condition +-- into a table +CREATE TABLE test_missing_target3 AS +SELECT count(x.b) +FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2 ORDER BY x.b/2; +SELECT * FROM test_missing_target3; + count +------- + 1 + 5 + 4 +(3 rows) + +-- Cleanup +DROP TABLE test_missing_target; +DROP TABLE test_missing_target2; +DROP TABLE test_missing_target3; diff --git a/src/test/regress/expected/select_implicit_1.out b/src/test/regress/expected/select_implicit_1.out new file mode 100644 index 0000000..d67521e --- /dev/null +++ b/src/test/regress/expected/select_implicit_1.out @@ -0,0 +1,338 @@ +-- +-- SELECT_IMPLICIT +-- Test cases for queries with ordering terms missing from the target list. +-- This used to be called "junkfilter.sql". +-- The parser uses the term "resjunk" to handle these cases. +-- - thomas 1998-07-09 +-- +-- load test data +CREATE TABLE test_missing_target (a int, b int, c char(8), d char); +INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A'); +INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b'); +INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c'); +INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D'); +INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e'); +INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F'); +INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g'); +INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h'); +INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I'); +INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j'); +-- w/ existing GROUP BY target +SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c; + c | count +----------+------- + ABAB | 2 + BBBB | 2 + bbbb | 1 + CCCC | 2 + cccc | 2 + XXXX | 1 +(6 rows) + +-- w/o existing GROUP BY target using a relation name in GROUP BY clause +SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c; + count +------- + 2 + 2 + 1 + 2 + 2 + 1 +(6 rows) + +-- w/o existing GROUP BY target and w/o existing a different ORDER BY target +-- failure expected +SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b; +ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: ...ECT count(*) FROM test_missing_target GROUP BY a ORDER BY b; + ^ +-- w/o existing GROUP BY target and w/o existing same ORDER BY target +SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- w/ existing GROUP BY target using a relation name in target +SELECT test_missing_target.b, count(*) + FROM test_missing_target GROUP BY b ORDER BY b; + b | count +---+------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(4 rows) + +-- w/o existing GROUP BY target +SELECT c FROM test_missing_target ORDER BY a; + c +---------- + XXXX + ABAB + ABAB + BBBB + BBBB + bbbb + cccc + cccc + CCCC + CCCC +(10 rows) + +-- w/o existing ORDER BY target +SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc; + count +------- + 4 + 3 + 2 + 1 +(4 rows) + +-- group using 
reference number +SELECT count(*) FROM test_missing_target ORDER BY 1 desc; + count +------- + 10 +(1 row) + +-- order using reference number +SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1; + c | count +----------+------- + ABAB | 2 + BBBB | 2 + bbbb | 1 + CCCC | 2 + cccc | 2 + XXXX | 1 +(6 rows) + +-- group using reference number out of range +-- failure expected +SELECT c, count(*) FROM test_missing_target GROUP BY 3; +ERROR: GROUP BY position 3 is not in select list +LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3; + ^ +-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition +-- failure expected +SELECT count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY b ORDER BY b; +ERROR: column reference "b" is ambiguous +LINE 3: GROUP BY b ORDER BY b; + ^ +-- order w/ target under ambiguous condition +-- failure NOT expected +SELECT a, a FROM test_missing_target + ORDER BY a; + a | a +---+--- + 0 | 0 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 +(10 rows) + +-- order expression w/ target under ambiguous condition +-- failure NOT expected +SELECT a/2, a/2 FROM test_missing_target + ORDER BY a/2; + ?column? | ?column? +----------+---------- + 0 | 0 + 0 | 0 + 1 | 1 + 1 | 1 + 2 | 2 + 2 | 2 + 3 | 3 + 3 | 3 + 4 | 4 + 4 | 4 +(10 rows) + +-- group expression w/ target under ambiguous condition +-- failure NOT expected +SELECT a/2, a/2 FROM test_missing_target + GROUP BY a/2 ORDER BY a/2; + ?column? | ?column? +----------+---------- + 0 | 0 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(5 rows) + +-- group w/ existing GROUP BY target under ambiguous condition +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; + b | count +---+------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(4 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +SELECT count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +-- into a table +CREATE TABLE test_missing_target2 AS +SELECT count(*) +FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; +SELECT * FROM test_missing_target2; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- Functions and expressions +-- w/ existing GROUP BY target +SELECT a%2, count(b) FROM test_missing_target +GROUP BY test_missing_target.a%2 +ORDER BY test_missing_target.a%2; + ?column? 
| count +----------+------- + 0 | 5 + 1 | 5 +(2 rows) + +-- w/o existing GROUP BY target using a relation name in GROUP BY clause +SELECT count(c) FROM test_missing_target +GROUP BY lower(test_missing_target.c) +ORDER BY lower(test_missing_target.c); + count +------- + 2 + 3 + 4 + 1 +(4 rows) + +-- w/o existing GROUP BY target and w/o existing a different ORDER BY target +-- failure expected +SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b; +ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: ...ECT count(a) FROM test_missing_target GROUP BY a ORDER BY b; + ^ +-- w/o existing GROUP BY target and w/o existing same ORDER BY target +SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2; + count +------- + 1 + 5 + 4 +(3 rows) + +-- w/ existing GROUP BY target using a relation name in target +SELECT lower(test_missing_target.c), count(c) + FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c); + lower | count +-------+------- + abab | 2 + bbbb | 3 + cccc | 4 + xxxx | 1 +(4 rows) + +-- w/o existing GROUP BY target +SELECT a FROM test_missing_target ORDER BY upper(d); + a +--- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + +-- w/o existing ORDER BY target +SELECT count(b) FROM test_missing_target + GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc; + count +------- + 7 + 3 +(2 rows) + +-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition +-- failure expected +SELECT count(x.a) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY b/2 ORDER BY b/2; +ERROR: column reference "b" is ambiguous +LINE 3: GROUP BY b/2 ORDER BY b/2; + ^ +-- group w/ existing GROUP BY target under ambiguous condition +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2 ORDER BY x.b/2; + ?column? | count +----------+------- + 0 | 1 + 1 | 5 + 2 | 4 +(3 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +-- failure expected due to ambiguous b in count(b) +SELECT count(b) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2; +ERROR: column reference "b" is ambiguous +LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar... + ^ +-- group w/o existing GROUP BY target under ambiguous condition +-- into a table +CREATE TABLE test_missing_target3 AS +SELECT count(x.b) +FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2 ORDER BY x.b/2; +SELECT * FROM test_missing_target3; + count +------- + 1 + 5 + 4 +(3 rows) + +-- Cleanup +DROP TABLE test_missing_target; +DROP TABLE test_missing_target2; +DROP TABLE test_missing_target3; diff --git a/src/test/regress/expected/select_implicit_2.out b/src/test/regress/expected/select_implicit_2.out new file mode 100644 index 0000000..7a353d0 --- /dev/null +++ b/src/test/regress/expected/select_implicit_2.out @@ -0,0 +1,338 @@ +-- +-- SELECT_IMPLICIT +-- Test cases for queries with ordering terms missing from the target list. +-- This used to be called "junkfilter.sql". +-- The parser uses the term "resjunk" to handle these cases. 
+-- - thomas 1998-07-09 +-- +-- load test data +CREATE TABLE test_missing_target (a int, b int, c char(8), d char); +INSERT INTO test_missing_target VALUES (0, 1, 'XXXX', 'A'); +INSERT INTO test_missing_target VALUES (1, 2, 'ABAB', 'b'); +INSERT INTO test_missing_target VALUES (2, 2, 'ABAB', 'c'); +INSERT INTO test_missing_target VALUES (3, 3, 'BBBB', 'D'); +INSERT INTO test_missing_target VALUES (4, 3, 'BBBB', 'e'); +INSERT INTO test_missing_target VALUES (5, 3, 'bbbb', 'F'); +INSERT INTO test_missing_target VALUES (6, 4, 'cccc', 'g'); +INSERT INTO test_missing_target VALUES (7, 4, 'cccc', 'h'); +INSERT INTO test_missing_target VALUES (8, 4, 'CCCC', 'I'); +INSERT INTO test_missing_target VALUES (9, 4, 'CCCC', 'j'); +-- w/ existing GROUP BY target +SELECT c, count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c; + c | count +----------+------- + ABAB | 2 + bbbb | 1 + BBBB | 2 + cccc | 2 + CCCC | 2 + XXXX | 1 +(6 rows) + +-- w/o existing GROUP BY target using a relation name in GROUP BY clause +SELECT count(*) FROM test_missing_target GROUP BY test_missing_target.c ORDER BY c; + count +------- + 2 + 1 + 2 + 2 + 2 + 1 +(6 rows) + +-- w/o existing GROUP BY target and w/o existing a different ORDER BY target +-- failure expected +SELECT count(*) FROM test_missing_target GROUP BY a ORDER BY b; +ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: ...ECT count(*) FROM test_missing_target GROUP BY a ORDER BY b; + ^ +-- w/o existing GROUP BY target and w/o existing same ORDER BY target +SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- w/ existing GROUP BY target using a relation name in target +SELECT test_missing_target.b, count(*) + FROM test_missing_target GROUP BY b ORDER BY b; + b | count +---+------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(4 rows) + +-- w/o existing GROUP BY target +SELECT c FROM test_missing_target ORDER BY a; + c +---------- + XXXX + ABAB + ABAB + BBBB + BBBB + bbbb + cccc + cccc + CCCC + CCCC +(10 rows) + +-- w/o existing ORDER BY target +SELECT count(*) FROM test_missing_target GROUP BY b ORDER BY b desc; + count +------- + 4 + 3 + 2 + 1 +(4 rows) + +-- group using reference number +SELECT count(*) FROM test_missing_target ORDER BY 1 desc; + count +------- + 10 +(1 row) + +-- order using reference number +SELECT c, count(*) FROM test_missing_target GROUP BY 1 ORDER BY 1; + c | count +----------+------- + ABAB | 2 + bbbb | 1 + BBBB | 2 + cccc | 2 + CCCC | 2 + XXXX | 1 +(6 rows) + +-- group using reference number out of range +-- failure expected +SELECT c, count(*) FROM test_missing_target GROUP BY 3; +ERROR: GROUP BY position 3 is not in select list +LINE 1: SELECT c, count(*) FROM test_missing_target GROUP BY 3; + ^ +-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition +-- failure expected +SELECT count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY b ORDER BY b; +ERROR: column reference "b" is ambiguous +LINE 3: GROUP BY b ORDER BY b; + ^ +-- order w/ target under ambiguous condition +-- failure NOT expected +SELECT a, a FROM test_missing_target + ORDER BY a; + a | a +---+--- + 0 | 0 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 + 9 | 9 +(10 rows) + +-- order expression w/ target under ambiguous condition +-- failure NOT expected +SELECT a/2, a/2 FROM test_missing_target + ORDER BY a/2; + ?column? | ?column? 
+----------+---------- + 0 | 0 + 0 | 0 + 1 | 1 + 1 | 1 + 2 | 2 + 2 | 2 + 3 | 3 + 3 | 3 + 4 | 4 + 4 | 4 +(10 rows) + +-- group expression w/ target under ambiguous condition +-- failure NOT expected +SELECT a/2, a/2 FROM test_missing_target + GROUP BY a/2 ORDER BY a/2; + ?column? | ?column? +----------+---------- + 0 | 0 + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(5 rows) + +-- group w/ existing GROUP BY target under ambiguous condition +SELECT x.b, count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; + b | count +---+------- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 +(4 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +SELECT count(*) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +-- into a table +CREATE TABLE test_missing_target2 AS +SELECT count(*) +FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b ORDER BY x.b; +SELECT * FROM test_missing_target2; + count +------- + 1 + 2 + 3 + 4 +(4 rows) + +-- Functions and expressions +-- w/ existing GROUP BY target +SELECT a%2, count(b) FROM test_missing_target +GROUP BY test_missing_target.a%2 +ORDER BY test_missing_target.a%2; + ?column? | count +----------+------- + 0 | 5 + 1 | 5 +(2 rows) + +-- w/o existing GROUP BY target using a relation name in GROUP BY clause +SELECT count(c) FROM test_missing_target +GROUP BY lower(test_missing_target.c) +ORDER BY lower(test_missing_target.c); + count +------- + 2 + 3 + 4 + 1 +(4 rows) + +-- w/o existing GROUP BY target and w/o existing a different ORDER BY target +-- failure expected +SELECT count(a) FROM test_missing_target GROUP BY a ORDER BY b; +ERROR: column "test_missing_target.b" must appear in the GROUP BY clause or be used in an aggregate function +LINE 1: ...ECT count(a) FROM test_missing_target GROUP BY a ORDER BY b; + ^ +-- w/o existing GROUP BY target and w/o existing same ORDER BY target +SELECT count(b) FROM test_missing_target GROUP BY b/2 ORDER BY b/2; + count +------- + 1 + 5 + 4 +(3 rows) + +-- w/ existing GROUP BY target using a relation name in target +SELECT lower(test_missing_target.c), count(c) + FROM test_missing_target GROUP BY lower(c) ORDER BY lower(c); + lower | count +-------+------- + abab | 2 + bbbb | 3 + cccc | 4 + xxxx | 1 +(4 rows) + +-- w/o existing GROUP BY target +SELECT a FROM test_missing_target ORDER BY upper(d); + a +--- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + +-- w/o existing ORDER BY target +SELECT count(b) FROM test_missing_target + GROUP BY (b + 1) / 2 ORDER BY (b + 1) / 2 desc; + count +------- + 7 + 3 +(2 rows) + +-- group w/o existing GROUP BY and ORDER BY target under ambiguous condition +-- failure expected +SELECT count(x.a) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY b/2 ORDER BY b/2; +ERROR: column reference "b" is ambiguous +LINE 3: GROUP BY b/2 ORDER BY b/2; + ^ +-- group w/ existing GROUP BY target under ambiguous condition +SELECT x.b/2, count(x.b) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2 ORDER BY x.b/2; + ?column? 
| count +----------+------- + 0 | 1 + 1 | 5 + 2 | 4 +(3 rows) + +-- group w/o existing GROUP BY target under ambiguous condition +-- failure expected due to ambiguous b in count(b) +SELECT count(b) FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2; +ERROR: column reference "b" is ambiguous +LINE 1: SELECT count(b) FROM test_missing_target x, test_missing_tar... + ^ +-- group w/o existing GROUP BY target under ambiguous condition +-- into a table +CREATE TABLE test_missing_target3 AS +SELECT count(x.b) +FROM test_missing_target x, test_missing_target y + WHERE x.a = y.a + GROUP BY x.b/2 ORDER BY x.b/2; +SELECT * FROM test_missing_target3; + count +------- + 1 + 5 + 4 +(3 rows) + +-- Cleanup +DROP TABLE test_missing_target; +DROP TABLE test_missing_target2; +DROP TABLE test_missing_target3; diff --git a/src/test/regress/expected/select_into.out b/src/test/regress/expected/select_into.out new file mode 100644 index 0000000..b79fe9a --- /dev/null +++ b/src/test/regress/expected/select_into.out @@ -0,0 +1,222 @@ +-- +-- SELECT_INTO +-- +SELECT * + INTO TABLE sitmp1 + FROM onek + WHERE onek.unique1 < 2; +DROP TABLE sitmp1; +SELECT * + INTO TABLE sitmp1 + FROM onek2 + WHERE onek2.unique1 < 2; +DROP TABLE sitmp1; +-- +-- SELECT INTO and INSERT permission, if owner is not allowed to insert. +-- +CREATE SCHEMA selinto_schema; +CREATE USER regress_selinto_user; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user + REVOKE INSERT ON TABLES FROM regress_selinto_user; +GRANT ALL ON SCHEMA selinto_schema TO public; +SET SESSION AUTHORIZATION regress_selinto_user; +-- WITH DATA, passes. +CREATE TABLE selinto_schema.tbl_withdata1 (a) + AS SELECT generate_series(1,3) WITH DATA; +INSERT INTO selinto_schema.tbl_withdata1 VALUES (4); +ERROR: permission denied for table tbl_withdata1 +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE selinto_schema.tbl_withdata2 (a) AS + SELECT generate_series(1,3) WITH DATA; + QUERY PLAN +-------------------------------------- + ProjectSet (actual rows=3 loops=1) + -> Result (actual rows=1 loops=1) +(2 rows) + +-- WITH NO DATA, passes. +CREATE TABLE selinto_schema.tbl_nodata1 (a) AS + SELECT generate_series(1,3) WITH NO DATA; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE selinto_schema.tbl_nodata2 (a) AS + SELECT generate_series(1,3) WITH NO DATA; + QUERY PLAN +------------------------------- + ProjectSet (never executed) + -> Result (never executed) +(2 rows) + +-- EXECUTE and WITH DATA, passes. +PREPARE data_sel AS SELECT generate_series(1,3); +CREATE TABLE selinto_schema.tbl_withdata3 (a) AS + EXECUTE data_sel WITH DATA; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE selinto_schema.tbl_withdata4 (a) AS + EXECUTE data_sel WITH DATA; + QUERY PLAN +-------------------------------------- + ProjectSet (actual rows=3 loops=1) + -> Result (actual rows=1 loops=1) +(2 rows) + +-- EXECUTE and WITH NO DATA, passes. 
+CREATE TABLE selinto_schema.tbl_nodata3 (a) AS + EXECUTE data_sel WITH NO DATA; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE selinto_schema.tbl_nodata4 (a) AS + EXECUTE data_sel WITH NO DATA; + QUERY PLAN +------------------------------- + ProjectSet (never executed) + -> Result (never executed) +(2 rows) + +RESET SESSION AUTHORIZATION; +ALTER DEFAULT PRIVILEGES FOR ROLE regress_selinto_user + GRANT INSERT ON TABLES TO regress_selinto_user; +SET SESSION AUTHORIZATION regress_selinto_user; +RESET SESSION AUTHORIZATION; +DEALLOCATE data_sel; +DROP SCHEMA selinto_schema CASCADE; +NOTICE: drop cascades to 8 other objects +DETAIL: drop cascades to table selinto_schema.tbl_withdata1 +drop cascades to table selinto_schema.tbl_withdata2 +drop cascades to table selinto_schema.tbl_nodata1 +drop cascades to table selinto_schema.tbl_nodata2 +drop cascades to table selinto_schema.tbl_withdata3 +drop cascades to table selinto_schema.tbl_withdata4 +drop cascades to table selinto_schema.tbl_nodata3 +drop cascades to table selinto_schema.tbl_nodata4 +DROP USER regress_selinto_user; +-- Tests for WITH NO DATA and column name consistency +CREATE TABLE ctas_base (i int, j int); +INSERT INTO ctas_base VALUES (1, 2); +CREATE TABLE ctas_nodata (ii, jj, kk) AS SELECT i, j FROM ctas_base; -- Error +ERROR: too many column names were specified +CREATE TABLE ctas_nodata (ii, jj, kk) AS SELECT i, j FROM ctas_base WITH NO DATA; -- Error +ERROR: too many column names were specified +CREATE TABLE ctas_nodata (ii, jj) AS SELECT i, j FROM ctas_base; -- OK +CREATE TABLE ctas_nodata_2 (ii, jj) AS SELECT i, j FROM ctas_base WITH NO DATA; -- OK +CREATE TABLE ctas_nodata_3 (ii) AS SELECT i, j FROM ctas_base; -- OK +CREATE TABLE ctas_nodata_4 (ii) AS SELECT i, j FROM ctas_base WITH NO DATA; -- OK +SELECT * FROM ctas_nodata; + ii | jj +----+---- + 1 | 2 +(1 row) + +SELECT * FROM ctas_nodata_2; + ii | jj +----+---- +(0 rows) + +SELECT * FROM ctas_nodata_3; + ii | j +----+--- + 1 | 2 +(1 row) + +SELECT * FROM ctas_nodata_4; + ii | j +----+--- +(0 rows) + +DROP TABLE ctas_base; +DROP TABLE ctas_nodata; +DROP TABLE ctas_nodata_2; +DROP TABLE ctas_nodata_3; +DROP TABLE ctas_nodata_4; +-- +-- CREATE TABLE AS/SELECT INTO as last command in a SQL function +-- have been known to cause problems +-- +CREATE FUNCTION make_table() RETURNS VOID +AS $$ + CREATE TABLE created_table AS SELECT * FROM int8_tbl; +$$ LANGUAGE SQL; +SELECT make_table(); + make_table +------------ + +(1 row) + +SELECT * FROM created_table; + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +-- Try EXPLAIN ANALYZE SELECT INTO and EXPLAIN ANALYZE CREATE TABLE AS +-- WITH NO DATA, but hide the outputs since they won't be stable. +DO $$ +BEGIN + EXECUTE 'EXPLAIN ANALYZE SELECT * INTO TABLE easi FROM int8_tbl'; + EXECUTE 'EXPLAIN ANALYZE CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA'; +END$$; +DROP TABLE created_table; +DROP TABLE easi, easi2; +-- +-- Disallowed uses of SELECT ... INTO. All should fail +-- +DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl; +ERROR: SELECT ... INTO is not allowed here +LINE 1: DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl; + ^ +COPY (SELECT 1 INTO frak UNION SELECT 2) TO 'blob'; +ERROR: COPY (SELECT INTO) is not supported +SELECT * FROM (SELECT 1 INTO f) bar; +ERROR: SELECT ... 
INTO is not allowed here +LINE 1: SELECT * FROM (SELECT 1 INTO f) bar; + ^ +CREATE VIEW foo AS SELECT 1 INTO int4_tbl; +ERROR: views must not contain SELECT INTO +INSERT INTO int4_tbl SELECT 1 INTO f; +ERROR: SELECT ... INTO is not allowed here +LINE 1: INSERT INTO int4_tbl SELECT 1 INTO f; + ^ +-- Test CREATE TABLE AS ... IF NOT EXISTS +CREATE TABLE ctas_ine_tbl AS SELECT 1; +CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error +ERROR: relation "ctas_ine_tbl" already exists +CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0; -- ok +NOTICE: relation "ctas_ine_tbl" already exists, skipping +CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- error +ERROR: relation "ctas_ine_tbl" already exists +CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- ok +NOTICE: relation "ctas_ine_tbl" already exists, skipping +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error +ERROR: relation "ctas_ine_tbl" already exists +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0; -- ok +NOTICE: relation "ctas_ine_tbl" already exists, skipping + QUERY PLAN +------------ +(0 rows) + +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- error +ERROR: relation "ctas_ine_tbl" already exists +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS SELECT 1 / 0 WITH NO DATA; -- ok +NOTICE: relation "ctas_ine_tbl" already exists, skipping + QUERY PLAN +------------ +(0 rows) + +PREPARE ctas_ine_query AS SELECT 1 / 0; +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE ctas_ine_tbl AS EXECUTE ctas_ine_query; -- error +ERROR: relation "ctas_ine_tbl" already exists +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) + CREATE TABLE IF NOT EXISTS ctas_ine_tbl AS EXECUTE ctas_ine_query; -- ok +NOTICE: relation "ctas_ine_tbl" already exists, skipping + QUERY PLAN +------------ +(0 rows) + +DROP TABLE ctas_ine_tbl; diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out new file mode 100644 index 0000000..d88353d --- /dev/null +++ b/src/test/regress/expected/select_parallel.out @@ -0,0 +1,1221 @@ +-- +-- PARALLEL +-- +create function sp_parallel_restricted(int) returns int as + $$begin return $1; end$$ language plpgsql parallel restricted; +begin; +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; +-- Parallel Append with partial-subplans +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN +-------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 3 + -> Partial Aggregate + -> Parallel Append + -> Parallel Seq Scan on d_star a_star_4 + -> Parallel Seq Scan on f_star a_star_6 + -> Parallel Seq Scan on e_star a_star_5 + -> Parallel Seq Scan on b_star a_star_2 + -> Parallel Seq Scan on c_star a_star_3 + -> Parallel Seq Scan on a_star a_star_1 +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a1; + round | sum +-------+----- + 14 | 355 +(1 row) + +-- Parallel Append with both partial and non-partial subplans +alter table c_star set (parallel_workers = 0); +alter table d_star set (parallel_workers = 0); +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN 
+-------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 3 + -> Partial Aggregate + -> Parallel Append + -> Seq Scan on d_star a_star_4 + -> Seq Scan on c_star a_star_3 + -> Parallel Seq Scan on f_star a_star_6 + -> Parallel Seq Scan on e_star a_star_5 + -> Parallel Seq Scan on b_star a_star_2 + -> Parallel Seq Scan on a_star a_star_1 +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a2; + round | sum +-------+----- + 14 | 355 +(1 row) + +-- Parallel Append with only non-partial subplans +alter table a_star set (parallel_workers = 0); +alter table b_star set (parallel_workers = 0); +alter table e_star set (parallel_workers = 0); +alter table f_star set (parallel_workers = 0); +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN +----------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 3 + -> Partial Aggregate + -> Parallel Append + -> Seq Scan on d_star a_star_4 + -> Seq Scan on f_star a_star_6 + -> Seq Scan on e_star a_star_5 + -> Seq Scan on b_star a_star_2 + -> Seq Scan on c_star a_star_3 + -> Seq Scan on a_star a_star_1 +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a3; + round | sum +-------+----- + 14 | 355 +(1 row) + +-- Disable Parallel Append +alter table a_star reset (parallel_workers); +alter table b_star reset (parallel_workers); +alter table c_star reset (parallel_workers); +alter table d_star reset (parallel_workers); +alter table e_star reset (parallel_workers); +alter table f_star reset (parallel_workers); +set enable_parallel_append to off; +explain (costs off) + select round(avg(aa)), sum(aa) from a_star; + QUERY PLAN +-------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 1 + -> Partial Aggregate + -> Append + -> Parallel Seq Scan on a_star a_star_1 + -> Parallel Seq Scan on b_star a_star_2 + -> Parallel Seq Scan on c_star a_star_3 + -> Parallel Seq Scan on d_star a_star_4 + -> Parallel Seq Scan on e_star a_star_5 + -> Parallel Seq Scan on f_star a_star_6 +(11 rows) + +select round(avg(aa)), sum(aa) from a_star a4; + round | sum +-------+----- + 14 | 355 +(1 row) + +reset enable_parallel_append; +-- Parallel Append that runs serially +create function sp_test_func() returns setof text as +$$ select 'foo'::varchar union all select 'bar'::varchar $$ +language sql stable; +select sp_test_func() order by 1; + sp_test_func +-------------- + bar + foo +(2 rows) + +-- Parallel Append is not to be used when the subpath depends on the outer param +create table part_pa_test(a int, b int) partition by range(a); +create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0); +create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue); +explain (costs off) + select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a))) + from part_pa_test pa2; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Gather + Workers Planned: 3 + -> Parallel Append + -> Parallel Seq Scan on part_pa_test_p1 pa2_1 + -> Parallel Seq Scan on part_pa_test_p2 pa2_2 + SubPlan 2 + -> Result + SubPlan 1 + -> Append + -> Seq Scan on part_pa_test_p1 pa1_1 + Filter: (a = pa2.a) + -> Seq Scan on part_pa_test_p2 pa1_2 + Filter: (a = pa2.a) +(14 rows) + +drop table part_pa_test; +-- test with leader participation disabled +set parallel_leader_participation = off; +explain (costs off) + select count(*) 
from tenk1 where stringu1 = 'GRAAAA'; + QUERY PLAN +--------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (stringu1 = 'GRAAAA'::name) +(6 rows) + +select count(*) from tenk1 where stringu1 = 'GRAAAA'; + count +------- + 15 +(1 row) + +-- test with leader participation disabled, but no workers available (so +-- the leader will have to run the plan despite the setting) +set max_parallel_workers = 0; +explain (costs off) + select count(*) from tenk1 where stringu1 = 'GRAAAA'; + QUERY PLAN +--------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (stringu1 = 'GRAAAA'::name) +(6 rows) + +select count(*) from tenk1 where stringu1 = 'GRAAAA'; + count +------- + 15 +(1 row) + +reset max_parallel_workers; +reset parallel_leader_participation; +-- test that parallel_restricted function doesn't run in worker +alter table tenk1 set (parallel_workers = 4); +explain (verbose, costs off) +select sp_parallel_restricted(unique1) from tenk1 + where stringu1 = 'GRAAAA' order by 1; + QUERY PLAN +--------------------------------------------------------- + Sort + Output: (sp_parallel_restricted(unique1)) + Sort Key: (sp_parallel_restricted(tenk1.unique1)) + -> Gather + Output: sp_parallel_restricted(unique1) + Workers Planned: 4 + -> Parallel Seq Scan on public.tenk1 + Output: unique1 + Filter: (tenk1.stringu1 = 'GRAAAA'::name) +(9 rows) + +-- test parallel plan when group by expression is in target list. +explain (costs off) + select length(stringu1) from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +select length(stringu1) from tenk1 group by length(stringu1); + length +-------- + 6 +(1 row) + +explain (costs off) + select stringu1, count(*) from tenk1 group by stringu1 order by stringu1; + QUERY PLAN +---------------------------------------------------- + Sort + Sort Key: stringu1 + -> Finalize HashAggregate + Group Key: stringu1 + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: stringu1 + -> Parallel Seq Scan on tenk1 +(9 rows) + +-- test that parallel plan for aggregates is not selected when +-- target list contains parallel restricted clause. +explain (costs off) + select sum(sp_parallel_restricted(unique1)) from tenk1 + group by(sp_parallel_restricted(unique1)); + QUERY PLAN +------------------------------------------------------------------- + HashAggregate + Group Key: sp_parallel_restricted(unique1) + -> Gather + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 +(5 rows) + +-- test prepared statement +prepare tenk1_count(integer) As select count((unique1)) from tenk1 where hundred > $1; +explain (costs off) execute tenk1_count(1); + QUERY PLAN +---------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (hundred > 1) +(6 rows) + +execute tenk1_count(1); + count +------- + 9800 +(1 row) + +deallocate tenk1_count; +-- test parallel plans for queries containing un-correlated subplans. 
+alter table tenk2 set (parallel_workers = 0); +explain (costs off) + select count(*) from tenk1 where (two, four) not in + (select hundred, thousand from tenk2 where thousand > 100); + QUERY PLAN +------------------------------------------------------ + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Seq Scan on tenk1 + Filter: (NOT (hashed SubPlan 1)) + SubPlan 1 + -> Seq Scan on tenk2 + Filter: (thousand > 100) +(9 rows) + +select count(*) from tenk1 where (two, four) not in + (select hundred, thousand from tenk2 where thousand > 100); + count +------- + 10000 +(1 row) + +-- this is not parallel-safe due to use of random() within SubLink's testexpr: +explain (costs off) + select * from tenk1 where (unique1 + random())::integer not in + (select ten from tenk2); + QUERY PLAN +------------------------------------ + Seq Scan on tenk1 + Filter: (NOT (hashed SubPlan 1)) + SubPlan 1 + -> Seq Scan on tenk2 +(4 rows) + +alter table tenk2 reset (parallel_workers); +-- test parallel plan for a query containing initplan. +set enable_indexscan = off; +set enable_indexonlyscan = off; +set enable_bitmapscan = off; +alter table tenk2 set (parallel_workers = 2); +explain (costs off) + select count(*) from tenk1 + where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); + QUERY PLAN +------------------------------------------------------ + Aggregate + InitPlan 1 (returns $2) + -> Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Partial Aggregate + -> Parallel Seq Scan on tenk2 + -> Gather + Workers Planned: 4 + Params Evaluated: $2 + -> Parallel Seq Scan on tenk1 + Filter: (unique1 = $2) +(12 rows) + +select count(*) from tenk1 + where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2); + count +------- + 1 +(1 row) + +reset enable_indexscan; +reset enable_indexonlyscan; +reset enable_bitmapscan; +alter table tenk2 reset (parallel_workers); +-- test parallel index scans. +set enable_seqscan to off; +set enable_bitmapscan to off; +explain (costs off) + select count((unique1)) from tenk1 where hundred > 1; + QUERY PLAN +-------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Scan using tenk1_hundred on tenk1 + Index Cond: (hundred > 1) +(6 rows) + +select count((unique1)) from tenk1 where hundred > 1; + count +------- + 9800 +(1 row) + +-- test parallel index-only scans. 
+explain (costs off) + select count(*) from tenk1 where thousand > 95; + QUERY PLAN +-------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: (thousand > 95) +(6 rows) + +select count(*) from tenk1 where thousand > 95; + count +------- + 9040 +(1 row) + +-- test rescan cases too +set enable_material = false; +explain (costs off) +select * from + (select count(unique1) from tenk1 where hundred > 10) ss + right join (values (1),(2),(3)) v(x) on true; + QUERY PLAN +-------------------------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Scan using tenk1_hundred on tenk1 + Index Cond: (hundred > 10) +(8 rows) + +select * from + (select count(unique1) from tenk1 where hundred > 10) ss + right join (values (1),(2),(3)) v(x) on true; + count | x +-------+--- + 8900 | 1 + 8900 | 2 + 8900 | 3 +(3 rows) + +explain (costs off) +select * from + (select count(*) from tenk1 where thousand > 99) ss + right join (values (1),(2),(3)) v(x) on true; + QUERY PLAN +-------------------------------------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_thous_tenthous on tenk1 + Index Cond: (thousand > 99) +(8 rows) + +select * from + (select count(*) from tenk1 where thousand > 99) ss + right join (values (1),(2),(3)) v(x) on true; + count | x +-------+--- + 9000 | 1 + 9000 | 2 + 9000 | 3 +(3 rows) + +-- test rescans for a Limit node with a parallel node beneath it. +reset enable_seqscan; +set enable_indexonlyscan to off; +set enable_indexscan to off; +alter table tenk1 set (parallel_workers = 0); +alter table tenk2 set (parallel_workers = 1); +explain (costs off) +select count(*) from tenk1 + left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss + on tenk1.unique1 < ss.unique1 + 1 + where tenk1.unique1 < 2; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Nested Loop Left Join + Join Filter: (tenk1.unique1 < (tenk2.unique1 + 1)) + -> Seq Scan on tenk1 + Filter: (unique1 < 2) + -> Limit + -> Gather Merge + Workers Planned: 1 + -> Sort + Sort Key: tenk2.unique1 + -> Parallel Seq Scan on tenk2 +(11 rows) + +select count(*) from tenk1 + left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss + on tenk1.unique1 < ss.unique1 + 1 + where tenk1.unique1 < 2; + count +------- + 1999 +(1 row) + +--reset the value of workers for each table as it was before this test. +alter table tenk1 set (parallel_workers = 4); +alter table tenk2 reset (parallel_workers); +reset enable_material; +reset enable_bitmapscan; +reset enable_indexonlyscan; +reset enable_indexscan; +-- test parallel bitmap heap scan. 
+set enable_seqscan to off; +set enable_indexscan to off; +set enable_hashjoin to off; +set enable_mergejoin to off; +set enable_material to off; +-- test prefetching, if the platform allows it +DO $$ +BEGIN + SET effective_io_concurrency = 50; +EXCEPTION WHEN invalid_parameter_value THEN +END $$; +set work_mem='64kB'; --set small work mem to force lossy pages +explain (costs off) + select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0; + QUERY PLAN +------------------------------------------------------------ + Aggregate + -> Nested Loop + -> Seq Scan on tenk2 + Filter: (thousand = 0) + -> Gather + Workers Planned: 4 + -> Parallel Bitmap Heap Scan on tenk1 + Recheck Cond: (hundred > 1) + -> Bitmap Index Scan on tenk1_hundred + Index Cond: (hundred > 1) +(10 rows) + +select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0; + count +------- + 98000 +(1 row) + +create table bmscantest (a int, t text); +insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,100000) r; +create index i_bmtest ON bmscantest(a); +select count(*) from bmscantest where a>1; + count +------- + 99999 +(1 row) + +-- test accumulation of stats for parallel nodes +reset enable_seqscan; +alter table tenk2 set (parallel_workers = 0); +explain (analyze, timing off, summary off, costs off) + select count(*) from tenk1, tenk2 where tenk1.hundred > 1 + and tenk2.thousand=0; + QUERY PLAN +-------------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Nested Loop (actual rows=98000 loops=1) + -> Seq Scan on tenk2 (actual rows=10 loops=1) + Filter: (thousand = 0) + Rows Removed by Filter: 9990 + -> Gather (actual rows=9800 loops=10) + Workers Planned: 4 + Workers Launched: 4 + -> Parallel Seq Scan on tenk1 (actual rows=1960 loops=50) + Filter: (hundred > 1) + Rows Removed by Filter: 40 +(11 rows) + +alter table tenk2 reset (parallel_workers); +reset work_mem; +create function explain_parallel_sort_stats() returns setof text +language plpgsql as +$$ +declare ln text; +begin + for ln in + explain (analyze, timing off, summary off, costs off) + select * from + (select ten from tenk1 where ten < 100 order by ten) ss + right join (values (1),(2),(3)) v(x) on true + loop + ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); + return next ln; + end loop; +end; +$$; +select * from explain_parallel_sort_stats(); + explain_parallel_sort_stats +-------------------------------------------------------------------------- + Nested Loop Left Join (actual rows=30000 loops=1) + -> Values Scan on "*VALUES*" (actual rows=3 loops=1) + -> Gather Merge (actual rows=10000 loops=3) + Workers Planned: 4 + Workers Launched: 4 + -> Sort (actual rows=2000 loops=15) + Sort Key: tenk1.ten + Sort Method: quicksort Memory: xxx + Worker 0: Sort Method: quicksort Memory: xxx + Worker 1: Sort Method: quicksort Memory: xxx + Worker 2: Sort Method: quicksort Memory: xxx + Worker 3: Sort Method: quicksort Memory: xxx + -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=15) + Filter: (ten < 100) +(14 rows) + +reset enable_indexscan; +reset enable_hashjoin; +reset enable_mergejoin; +reset enable_material; +reset effective_io_concurrency; +drop table bmscantest; +drop function explain_parallel_sort_stats(); +-- test parallel merge join path. 
+set enable_hashjoin to off; +set enable_nestloop to off; +explain (costs off) + select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1; + QUERY PLAN +------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Merge Join + Merge Cond: (tenk1.unique1 = tenk2.unique1) + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 + -> Index Only Scan using tenk2_unique1 on tenk2 +(8 rows) + +select count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1; + count +------- + 10000 +(1 row) + +reset enable_hashjoin; +reset enable_nestloop; +-- test gather merge +set enable_hashagg = false; +explain (costs off) + select count(*) from tenk1 group by twenty; + QUERY PLAN +---------------------------------------------------- + Finalize GroupAggregate + Group Key: twenty + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: twenty + -> Sort + Sort Key: twenty + -> Parallel Seq Scan on tenk1 +(9 rows) + +select count(*) from tenk1 group by twenty; + count +------- + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 +(20 rows) + +--test expressions in targetlist are pushed down for gather merge +create function sp_simple_func(var1 integer) returns integer +as $$ +begin + return var1 + 10; +end; +$$ language plpgsql PARALLEL SAFE; +explain (costs off, verbose) + select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten; + QUERY PLAN +----------------------------------------------------- + Gather Merge + Output: ten, (sp_simple_func(ten)) + Workers Planned: 4 + -> Result + Output: ten, sp_simple_func(ten) + -> Sort + Output: ten + Sort Key: tenk1.ten + -> Parallel Seq Scan on public.tenk1 + Output: ten + Filter: (tenk1.ten < 100) +(11 rows) + +drop function sp_simple_func(integer); +-- test handling of SRFs in targetlist (bug in 10.0) +explain (costs off) + select count(*), generate_series(1,2) from tenk1 group by twenty; + QUERY PLAN +---------------------------------------------------------- + ProjectSet + -> Finalize GroupAggregate + Group Key: twenty + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: twenty + -> Sort + Sort Key: twenty + -> Parallel Seq Scan on tenk1 +(10 rows) + +select count(*), generate_series(1,2) from tenk1 group by twenty; + count | generate_series +-------+----------------- + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 + 500 | 1 + 500 | 2 +(40 rows) + +-- test gather merge with parallel leader participation disabled +set parallel_leader_participation = off; +explain (costs off) + select count(*) from tenk1 group by twenty; + QUERY PLAN +---------------------------------------------------- + Finalize GroupAggregate + Group Key: twenty + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: twenty + -> Sort + Sort Key: twenty + -> Parallel Seq Scan on tenk1 +(9 rows) + +select count(*) from tenk1 group by twenty; + count +------- + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 + 500 +(20 rows) + +reset 
parallel_leader_participation; +--test rescan behavior of gather merge +set enable_material = false; +explain (costs off) +select * from + (select string4, count(unique2) + from tenk1 group by string4 order by string4) ss + right join (values (1),(2),(3)) v(x) on true; + QUERY PLAN +---------------------------------------------------------- + Nested Loop Left Join + -> Values Scan on "*VALUES*" + -> Finalize GroupAggregate + Group Key: tenk1.string4 + -> Gather Merge + Workers Planned: 4 + -> Partial GroupAggregate + Group Key: tenk1.string4 + -> Sort + Sort Key: tenk1.string4 + -> Parallel Seq Scan on tenk1 +(11 rows) + +select * from + (select string4, count(unique2) + from tenk1 group by string4 order by string4) ss + right join (values (1),(2),(3)) v(x) on true; + string4 | count | x +---------+-------+--- + AAAAxx | 2500 | 1 + HHHHxx | 2500 | 1 + OOOOxx | 2500 | 1 + VVVVxx | 2500 | 1 + AAAAxx | 2500 | 2 + HHHHxx | 2500 | 2 + OOOOxx | 2500 | 2 + VVVVxx | 2500 | 2 + AAAAxx | 2500 | 3 + HHHHxx | 2500 | 3 + OOOOxx | 2500 | 3 + VVVVxx | 2500 | 3 +(12 rows) + +reset enable_material; +reset enable_hashagg; +-- check parallelized int8 aggregate (bug #14897) +explain (costs off) +select avg(unique1::int8) from tenk1; + QUERY PLAN +------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 4 + -> Partial Aggregate + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 +(5 rows) + +select avg(unique1::int8) from tenk1; + avg +----------------------- + 4999.5000000000000000 +(1 row) + +-- gather merge test with a LIMIT +explain (costs off) + select fivethous from tenk1 order by fivethous limit 4; + QUERY PLAN +---------------------------------------------- + Limit + -> Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: fivethous + -> Parallel Seq Scan on tenk1 +(6 rows) + +select fivethous from tenk1 order by fivethous limit 4; + fivethous +----------- + 0 + 0 + 1 + 1 +(4 rows) + +-- gather merge test with 0 worker +set max_parallel_workers = 0; +explain (costs off) + select string4 from tenk1 order by string4 limit 5; + QUERY PLAN +---------------------------------------------- + Limit + -> Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: string4 + -> Parallel Seq Scan on tenk1 +(6 rows) + +select string4 from tenk1 order by string4 limit 5; + string4 +--------- + AAAAxx + AAAAxx + AAAAxx + AAAAxx + AAAAxx +(5 rows) + +-- gather merge test with 0 workers, with parallel leader +-- participation disabled (the leader will have to run the plan +-- despite the setting) +set parallel_leader_participation = off; +explain (costs off) + select string4 from tenk1 order by string4 limit 5; + QUERY PLAN +---------------------------------------------- + Limit + -> Gather Merge + Workers Planned: 4 + -> Sort + Sort Key: string4 + -> Parallel Seq Scan on tenk1 +(6 rows) + +select string4 from tenk1 order by string4 limit 5; + string4 +--------- + AAAAxx + AAAAxx + AAAAxx + AAAAxx + AAAAxx +(5 rows) + +reset parallel_leader_participation; +reset max_parallel_workers; +SAVEPOINT settings; +SET LOCAL debug_parallel_query = 1; +explain (costs off) + select stringu1::int2 from tenk1 where unique1 = 1; + QUERY PLAN +----------------------------------------------- + Gather + Workers Planned: 1 + Single Copy: true + -> Index Scan using tenk1_unique1 on tenk1 + Index Cond: (unique1 = 1) +(5 rows) + +ROLLBACK TO SAVEPOINT settings; +-- exercise record typmod remapping between backends +CREATE FUNCTION make_record(n int) + RETURNS RECORD 
LANGUAGE plpgsql PARALLEL SAFE AS +$$ +BEGIN + RETURN CASE n + WHEN 1 THEN ROW(1) + WHEN 2 THEN ROW(1, 2) + WHEN 3 THEN ROW(1, 2, 3) + WHEN 4 THEN ROW(1, 2, 3, 4) + ELSE ROW(1, 2, 3, 4, 5) + END; +END; +$$; +SAVEPOINT settings; +SET LOCAL debug_parallel_query = 1; +SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x; + make_record +------------- + (1) + (1,2) + (1,2,3) + (1,2,3,4) + (1,2,3,4,5) +(5 rows) + +ROLLBACK TO SAVEPOINT settings; +DROP function make_record(n int); +-- test the sanity of parallel query after the active role is dropped. +drop role if exists regress_parallel_worker; +NOTICE: role "regress_parallel_worker" does not exist, skipping +create role regress_parallel_worker; +set role regress_parallel_worker; +reset session authorization; +drop role regress_parallel_worker; +set debug_parallel_query = 1; +select count(*) from tenk1; + count +------- + 10000 +(1 row) + +reset debug_parallel_query; +reset role; +-- Window function calculation can't be pushed to workers. +explain (costs off, verbose) + select count(*) from tenk1 a where (unique1, two) in + (select unique1, row_number() over() from tenk1 b); + QUERY PLAN +---------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Hash Semi Join + Hash Cond: ((a.unique1 = b.unique1) AND (a.two = (row_number() OVER (?)))) + -> Gather + Output: a.unique1, a.two + Workers Planned: 4 + -> Parallel Seq Scan on public.tenk1 a + Output: a.unique1, a.two + -> Hash + Output: b.unique1, (row_number() OVER (?)) + -> WindowAgg + Output: b.unique1, row_number() OVER (?) + -> Gather + Output: b.unique1 + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_unique1 on public.tenk1 b + Output: b.unique1 +(18 rows) + +-- LIMIT/OFFSET within sub-selects can't be pushed to workers. 
+explain (costs off) + select * from tenk1 a where two in + (select two from tenk1 b where stringu1 like '%AAAA' limit 3); + QUERY PLAN +--------------------------------------------------------------- + Hash Semi Join + Hash Cond: (a.two = b.two) + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 a + -> Hash + -> Limit + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 b + Filter: (stringu1 ~~ '%AAAA'::text) +(11 rows) + +-- to increase the parallel query test coverage +SAVEPOINT settings; +SET LOCAL debug_parallel_query = 1; +EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1; + QUERY PLAN +------------------------------------------------------------- + Gather (actual rows=10000 loops=1) + Workers Planned: 4 + Workers Launched: 4 + -> Parallel Seq Scan on tenk1 (actual rows=2000 loops=5) +(4 rows) + +ROLLBACK TO SAVEPOINT settings; +-- provoke error in worker +-- (make the error message long enough to require multiple bufferloads) +SAVEPOINT settings; +SET LOCAL debug_parallel_query = 1; +select (stringu1 || repeat('abcd', 5000))::int2 from tenk1 where unique1 = 1; +ERROR: invalid input syntax for type smallint: "BAAAAAabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcda
bcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd
abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabc
dabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdab
cdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" +CONTEXT: parallel worker +ROLLBACK TO SAVEPOINT settings; +-- test interaction with set-returning functions +SAVEPOINT settings; +-- 
multiple subqueries under a single Gather node +-- must set parallel_setup_cost > 0 to discourage multiple Gather nodes +SET LOCAL parallel_setup_cost = 10; +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1 +UNION ALL +SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1; + QUERY PLAN +---------------------------------------------------- + Gather + Workers Planned: 4 + -> Parallel Append + -> Parallel Seq Scan on tenk1 + Filter: (fivethous = (tenthous + 1)) + -> Parallel Seq Scan on tenk1 tenk1_1 + Filter: (fivethous = (tenthous + 1)) +(7 rows) + +ROLLBACK TO SAVEPOINT settings; +-- can't use multiple subqueries under a single Gather node due to initPlans +EXPLAIN (COSTS OFF) +SELECT unique1 FROM tenk1 WHERE fivethous = + (SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1) +UNION ALL +SELECT unique1 FROM tenk1 WHERE fivethous = + (SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1) +ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: tenk1.unique1 + -> Append + -> Gather + Workers Planned: 4 + Params Evaluated: $1 + InitPlan 1 (returns $1) + -> Limit + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 tenk1_2 + Filter: (fivethous = 1) + -> Parallel Seq Scan on tenk1 + Filter: (fivethous = $1) + -> Gather + Workers Planned: 4 + Params Evaluated: $3 + InitPlan 2 (returns $3) + -> Limit + -> Gather + Workers Planned: 4 + -> Parallel Seq Scan on tenk1 tenk1_3 + Filter: (fivethous = 1) + -> Parallel Seq Scan on tenk1 tenk1_1 + Filter: (fivethous = $3) +(25 rows) + +-- test interaction with SRFs +SELECT * FROM information_schema.foreign_data_wrapper_options +ORDER BY 1, 2, 3; + foreign_data_wrapper_catalog | foreign_data_wrapper_name | option_name | option_value +------------------------------+---------------------------+-------------+-------------- +(0 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT generate_series(1, two), array(select generate_series(1, two)) + FROM tenk1 ORDER BY tenthous; + QUERY PLAN +---------------------------------------------------------------------- + ProjectSet + Output: generate_series(1, tenk1.two), (SubPlan 1), tenk1.tenthous + -> Gather Merge + Output: tenk1.two, tenk1.tenthous + Workers Planned: 4 + -> Result + Output: tenk1.two, tenk1.tenthous + -> Sort + Output: tenk1.tenthous, tenk1.two + Sort Key: tenk1.tenthous + -> Parallel Seq Scan on public.tenk1 + Output: tenk1.tenthous, tenk1.two + SubPlan 1 + -> ProjectSet + Output: generate_series(1, tenk1.two) + -> Result +(16 rows) + +-- must disallow pushing sort below gather when pathkey contains an SRF +EXPLAIN (VERBOSE, COSTS OFF) +SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey + FROM tenk1 t1 JOIN tenk1 t2 ON TRUE + ORDER BY pathkey; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Sort + Output: (((unnest('{}'::integer[])) + 1)) + Sort Key: (((unnest('{}'::integer[])) + 1)) + -> Result + Output: ((unnest('{}'::integer[])) + 1) + -> ProjectSet + Output: unnest('{}'::integer[]) + -> Nested Loop + -> Gather + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t1 + -> Materialize + -> Gather + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_hundred on public.tenk1 t2 +(15 rows) + +-- test passing expanded-value representations to workers +CREATE FUNCTION make_some_array(int,int) returns int[] as +$$declare x int[]; + begin + x[1] := $1; + x[2] := $2; + return x; + end$$ language plpgsql 
parallel safe; +CREATE TABLE fooarr(f1 text, f2 int[], f3 text); +INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one'); +PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2; +EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2)); + QUERY PLAN +------------------------------------------------------------------ + Gather + Workers Planned: 3 + -> Parallel Seq Scan on fooarr + Filter: ((f1 = '1'::text) AND (f2 = '{1,2}'::integer[])) +(4 rows) + +EXECUTE pstmt('1', make_some_array(1,2)); + f1 | f2 | f3 +----+-------+----- + 1 | {1,2} | one +(1 row) + +DEALLOCATE pstmt; +-- test interaction between subquery and partial_paths +CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1; +EXPLAIN (COSTS OFF) +SELECT 1 FROM tenk1_vw_sec + WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100; + QUERY PLAN +------------------------------------------------------------------- + Subquery Scan on tenk1_vw_sec + Filter: ((SubPlan 1) < 100) + -> Gather + Workers Planned: 4 + -> Parallel Index Only Scan using tenk1_unique1 on tenk1 + SubPlan 1 + -> Aggregate + -> Seq Scan on int4_tbl + Filter: (f1 < tenk1_vw_sec.unique1) +(9 rows) + +rollback; diff --git a/src/test/regress/expected/select_views.out b/src/test/regress/expected/select_views.out new file mode 100644 index 0000000..1aeed84 --- /dev/null +++ b/src/test/regress/expected/select_views.out @@ -0,0 +1,1552 @@ +-- +-- SELECT_VIEWS +-- test the views defined in CREATE_VIEWS +-- +SELECT * FROM street; + name | thepath | cname +------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------- + Access Rd 25 | [(-121.9283,37.894),(-121.9283,37.9)] | Oakland + Ada St | [(-122.2487,37.398),(-122.2496,37.401)] | Lafayette + Agua Fria Creek | [(-121.9254,37.922),(-121.9281,37.889)] | Oakland + Allen Ct | [(-122.0131,37.602),(-122.0117,37.597)] | Berkeley + Alvarado Niles Road | [(-122.0325,37.903),(-122.0316,37.9)] | Berkeley + Andrea Cir | [(-121.733218,37.88641),(-121.733286,37.90617)] | Oakland + Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland + Apricot Lane | [(-121.9471,37.401),(-121.9456,37.392)] | Oakland + Arden Road | [(-122.0978,37.177),(-122.1,37.177)] | Oakland + Arizona St | [(-122.0381,37.901),(-122.0367,37.898)] | Berkeley + Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland + Arlington Dr | [(-121.8802,37.408),(-121.8807,37.394)] | Oakland + Arlington Road | [(-121.7957,37.898),(-121.7956,37.906)] | Oakland + Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland + Arroyo Las Positas | [(-121.7973,37.997),(-121.7957,37.005)] | Oakland + Arroyo Seco | [(-121.7073,37.766),(-121.6997,37.729)] | Oakland + Ash St | [(-122.0408,37.31),(-122.04,37.292)] | Oakland + Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Oakland + Avenue 134th | [(-122.1823,37.002),(-122.1851,37.992)] | Berkeley + Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Oakland + Avenue 140th | [(-122.1656,37.003),(-122.1691,37.988)] | Berkeley + Avenue D | [(-122.298,37.848),(-122.3024,37.849)] | Berkeley + B St | [(-122.1749,37.451),(-122.1743,37.443)] | Oakland + 
Bancroft Ave | [(-122.15714,37.4242),(-122.156,37.409)] | Oakland + Bancroft Ave | [(-122.1643,37.523),(-122.1631,37.508),(-122.1621,37.493)] | Oakland + Birch St | [(-122.1617,37.425),(-122.1614,37.417)] | Oakland + Birch St | [(-122.1673,37.509),(-122.1661,37.492)] | Oakland + Blacow Road | [(-122.0179,37.469),(-122.0167,37.465)] | Oakland + Bridgepointe Dr | [(-122.0514,37.305),(-122.0509,37.299)] | Oakland + Broadmore Ave | [(-122.095,37.522),(-122.0936,37.497)] | Oakland + Broadway | [(-122.2409,37.586),(-122.2395,37.601)] | Berkeley + Buckingham Blvd | [(-122.2231,37.59),(-122.2214,37.606)] | Berkeley + Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland + Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Oakland + Butterfield Dr | [(-122.0838,37.002),(-122.0834,37.987)] | Berkeley + C St | [(-122.1768,37.46),(-122.1749,37.435)] | Oakland + Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland + Calaveras Creek | [(-121.8203,37.035),(-121.8207,37.931)] | Oakland + California St | [(-122.2032,37.005),(-122.2016,37.996)] | Berkeley + California St | [(-122.2032,37.005),(-122.2016,37.996)] | Lafayette + Cameron Ave | [(-122.1316,37.502),(-122.1327,37.481)] | Oakland + Campus Dr | [(-122.1704,37.905),(-122.1678,37.868),(-122.1671,37.865)] | Berkeley + Capricorn Ave | [(-122.2176,37.404),(-122.2164,37.384)] | Lafayette + Carson St | [(-122.1846,37.9),(-122.1843,37.901)] | Berkeley + Cedar Blvd | [(-122.0282,37.446),(-122.0265,37.43)] | Oakland + Cedar St | [(-122.3011,37.737),(-122.2999,37.739)] | Berkeley + Celia St | [(-122.0611,37.3),(-122.0616,37.299)] | Oakland + Central Ave | [(-122.2343,37.602),(-122.2331,37.595)] | Berkeley + Chambers Dr | [(-122.2004,37.352),(-122.1972,37.368)] | Lafayette + Chambers Lane | [(-122.2001,37.359),(-122.1975,37.371)] | Lafayette + Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Berkeley + Champion St | [(-122.214,37.991),(-122.2147,37.002)] | Lafayette + Chapman Dr | [(-122.0421,37.504),(-122.0414,37.498)] | Oakland + Charles St | [(-122.0255,37.505),(-122.0252,37.499)] | Oakland + Cherry St | [(-122.0437,37.42),(-122.0434,37.413)] | Oakland + Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland + Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Oakland + Claremont Pl | [(-122.0542,37.995),(-122.0542,37.008)] | Berkeley + Coliseum Way | [(-122.2001,37.47),(-122.1978,37.516)] | Oakland + Coliseum Way | [(-122.2113,37.626),(-122.2085,37.592),(-122.2063,37.568)] | Berkeley + Coolidge Ave | [(-122.2007,37.058),(-122.1992,37.06)] | Lafayette + Cornell Ave | [(-122.2956,37.925),(-122.2949,37.906),(-122.2939,37.875)] | Berkeley + Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland + Corriea Way | [(-121.9501,37.402),(-121.9505,37.398)] | Oakland + Cowing Road | [(-122.0002,37.934),(-121.9772,37.782)] | Oakland + Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Berkeley + Creston Road | [(-122.2639,37.002),(-122.2613,37.986),(-122.2602,37.978),(-122.2598,37.973)] | Lafayette + Crow Canyon Creek | [(-122.043,37.905),(-122.0368,37.71)] | Berkeley + Crystaline Dr | [(-121.925856,37),(-121.925869,37.00527)] | Oakland + Cull Canyon Road | [(-122.0536,37.435),(-122.0499,37.315)] | Oakland + Cull Creek | [(-122.0624,37.875),(-122.0582,37.527)] | Berkeley + D St | [(-122.1811,37.505),(-122.1805,37.497)] | Oakland + Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland + Decoto Road | 
[(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Oakland + Decoto Road | [(-122.0159,37.006),(-122.016,37.002),(-122.0164,37.993)] | Berkeley + Deering St | [(-122.2146,37.904),(-122.2126,37.897)] | Berkeley + Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Berkeley + Dimond Ave | [(-122.2167,37.994),(-122.2162,37.006)] | Lafayette + Donna Way | [(-122.1333,37.606),(-122.1316,37.599)] | Berkeley + Driftwood Dr | [(-122.0109,37.482),(-122.0113,37.477)] | Oakland + Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland + Driscoll Road | [(-121.9482,37.403),(-121.948451,37.39995)] | Oakland + E St | [(-122.1832,37.505),(-122.1826,37.498),(-122.182,37.49)] | Oakland + Eden Ave | [(-122.1143,37.505),(-122.1142,37.491)] | Oakland + Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland + Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Oakland + Eden Creek | [(-122.022037,37.00675),(-122.0221,37.998)] | Berkeley + Edgewater Dr | [(-122.201,37.379),(-122.2042,37.41)] | Lafayette + Enos Way | [(-121.7677,37.896),(-121.7673,37.91)] | Oakland + Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Berkeley + Euclid Ave | [(-122.2671,37.009),(-122.2666,37.987)] | Lafayette + Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland + Fairview Ave | [(-121.999,37.428),(-121.9863,37.351)] | Oakland + Foothill Blvd | [(-122.2414,37.9),(-122.2403,37.893)] | Berkeley + Fountain St | [(-122.2306,37.593),(-122.2293,37.605)] | Berkeley + Gading Road | [(-122.0801,37.343),(-122.08,37.336)] | Oakland + Grizzly Peak Blvd | [(-122.2213,37.638),(-122.2127,37.581)] | Berkeley + Grove Way | [(-122.0643,37.884),(-122.062679,37.89162),(-122.061796,37.89578),(-122.0609,37.9)] | Berkeley + Harris Road | [(-122.0659,37.372),(-122.0675,37.363)] | Oakland + Heartwood Dr | [(-122.2006,37.341),(-122.1992,37.338)] | Lafayette + Hegenberger Exwy | [(-122.1946,37.52),(-122.1947,37.497)] | Oakland + Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Oakland + Herrier St | [(-122.1943,37.006),(-122.1936,37.998)] | Berkeley + Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland + Hesperian Blvd | [(-122.097,37.333),(-122.0956,37.31),(-122.0946,37.293)] | Oakland + Hesperian Blvd | [(-122.1132,37.6),(-122.1123,37.586)] | Berkeley + Hollis St | [(-122.2885,37.397),(-122.289,37.414)] | Lafayette + I- 580 | [(-121.727,37.074),(-121.7229,37.093),(-121.722301,37.09522),(-121.721001,37.10005),(-121.7194,37.106),(-121.7188,37.109),(-121.7168,37.12),(-121.7163,37.123),(-121.7145,37.127),(-121.7096,37.148),(-121.707731,37.1568),(-121.7058,37.166),(-121.7055,37.168),(-121.7044,37.174),(-121.7038,37.172),(-121.7037,37.172),(-121.7027,37.175),(-121.7001,37.181),(-121.6957,37.191),(-121.6948,37.192),(-121.6897,37.204),(-121.6697,37.185)] | Oakland + I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland + I- 580 | [(-121.9322,37.989),(-121.9243,37.006),(-121.9217,37.014)] | Oakland + I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland + I- 580 | [(-122.018,37.019),(-122.0009,37.032),(-121.9787,37.983),(-121.958,37.984),(-121.9571,37.986)] | Oakland + I- 580 | [(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Oakland + I- 580 | 
[(-122.1108,37.023),(-122.1101,37.02),(-122.108103,37.00764),(-122.108,37.007),(-122.1069,37.998),(-122.1064,37.994),(-122.1053,37.982),(-122.1048,37.977),(-122.1032,37.958),(-122.1026,37.953),(-122.1013,37.938),(-122.0989,37.911),(-122.0984,37.91),(-122.098,37.908)] | Berkeley + I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Oakland + I- 580 | [(-122.1543,37.703),(-122.1535,37.694),(-122.1512,37.655),(-122.1475,37.603),(-122.1468,37.583),(-122.1472,37.569),(-122.149044,37.54874),(-122.1493,37.546),(-122.1501,37.532),(-122.1506,37.509),(-122.1495,37.482),(-122.1487,37.467),(-122.1477,37.447),(-122.1414,37.383),(-122.1404,37.376),(-122.1398,37.372),(-122.139,37.356),(-122.1388,37.353),(-122.1385,37.34),(-122.1382,37.33),(-122.1378,37.316)] | Berkeley + I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Berkeley + I- 580 | [(-122.2197,37.99),(-122.22,37.99),(-122.222092,37.99523),(-122.2232,37.998),(-122.224146,37.99963),(-122.2261,37.003),(-122.2278,37.007),(-122.2302,37.026),(-122.2323,37.043),(-122.2344,37.059),(-122.235405,37.06427),(-122.2365,37.07)] | Lafayette + I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland + I- 580 Ramp | [(-121.8521,37.011),(-121.8479,37.999),(-121.8476,37.999),(-121.8456,37.01),(-121.8455,37.011)] | Oakland + I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland + I- 580 Ramp | [(-121.8743,37.014),(-121.8722,37.999),(-121.8714,37.999)] | Oakland + I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland + I- 580 Ramp | [(-121.9043,37.998),(-121.9036,37.013),(-121.902632,37.0174),(-121.9025,37.018)] | Oakland + I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland + I- 580 Ramp | [(-121.9368,37.986),(-121.936483,37.98832),(-121.9353,37.997),(-121.93504,37.00035),(-121.9346,37.006),(-121.933764,37.00031),(-121.9333,37.997),(-121.9322,37.989)] | Oakland + I- 580 Ramp | [(-122.093241,37.90351),(-122.09364,37.89634),(-122.093788,37.89212)] | Berkeley + I- 580 Ramp | [(-122.0934,37.896),(-122.09257,37.89961),(-122.0911,37.906)] | Berkeley + I- 580 Ramp | [(-122.0941,37.897),(-122.0943,37.902)] | Berkeley + I- 580 Ramp | [(-122.096,37.888),(-122.0962,37.891),(-122.0964,37.9)] | Berkeley + I- 580 Ramp | [(-122.101,37.898),(-122.1005,37.902),(-122.0989,37.911)] | Berkeley + I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Oakland + I- 580 Ramp | [(-122.1086,37.003),(-122.1068,37.993),(-122.1066,37.992),(-122.1053,37.982)] | Berkeley + I- 580 Ramp | [(-122.1414,37.383),(-122.1407,37.376),(-122.1403,37.372),(-122.139,37.356)] | Oakland + I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland + I- 580/I-680 Ramp | ((-121.9207,37.988),(-121.9192,37.016)) | Oakland + I- 680 | 
((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland + I- 680 | ((-121.939,37.15),(-121.9387,37.145),(-121.9373,37.125),(-121.934242,37.07643),(-121.933886,37.0709),(-121.9337,37.068),(-121.933122,37.06139),(-121.932736,37.05698),(-121.93222,37.05108),(-121.931844,37.04678),(-121.930113,37.027),(-121.926829,37),(-121.9265,37.998),(-121.9217,37.96),(-121.9203,37.949),(-121.9184,37.934)) | Oakland + I- 680 | [(-121.9101,37.715),(-121.911269,37.74682),(-121.9119,37.764),(-121.9124,37.776),(-121.9174,37.905),(-121.9194,37.957),(-121.9207,37.988)] | Oakland + I- 680 | [(-121.9184,37.934),(-121.917,37.913),(-121.9122,37.83),(-121.9052,37.702)] | Oakland + I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland + I- 680 Ramp | [(-121.8833,37.376),(-121.8833,37.392),(-121.883,37.4),(-121.8835,37.402),(-121.8852,37.422)] | Oakland + I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland + I- 680 Ramp | [(-121.92,37.438),(-121.9218,37.424),(-121.9238,37.408),(-121.9252,37.392)] | Oakland + I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland + I- 680 Ramp | [(-121.9238,37.402),(-121.9234,37.395),(-121.923,37.399)] | Oakland + I- 80 | ((-122.2937,37.277),(-122.3016,37.262)) | Lafayette + I- 80 | ((-122.2962,37.273),(-122.3004,37.264)) | Lafayette + I- 80 Ramp | [(-122.2962,37.413),(-122.2959,37.382),(-122.2951,37.372)] | Lafayette + I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland + I- 880 | ((-121.9669,37.075),(-121.9663,37.071),(-121.9656,37.065),(-121.9618,37.037),(-121.95689,37),(-121.948,37.933)) | Oakland + I- 880 | [(-121.948,37.933),(-121.9471,37.925),(-121.9467,37.923),(-121.946,37.918),(-121.9452,37.912),(-121.937,37.852)] | Oakland + I- 880 | [(-122.0219,37.466),(-122.0205,37.447),(-122.020331,37.44447),(-122.020008,37.43962),(-122.0195,37.432),(-122.0193,37.429),(-122.0164,37.393),(-122.010219,37.34771),(-122.0041,37.313)] | Oakland + I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Oakland + I- 880 | [(-122.0375,37.632),(-122.0359,37.619),(-122.0358,37.616),(-122.034514,37.60409),(-122.031876,37.57965),(-122.031193,37.57332),(-122.03016,37.56375),(-122.02943,37.55698),(-122.028689,37.54929),(-122.027833,37.53908),(-122.025979,37.51698),(-122.0238,37.491)] | Berkeley + I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland + I- 880 | [(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Oakland + I- 880 | 
[(-122.0612,37.003),(-122.0604,37.991),(-122.0596,37.982),(-122.0585,37.967),(-122.0583,37.961),(-122.0553,37.918),(-122.053635,37.89475),(-122.050759,37.8546),(-122.05,37.844),(-122.0485,37.817),(-122.0483,37.813),(-122.0482,37.811)] | Berkeley + I- 880 | [(-122.0831,37.312),(-122.0819,37.296),(-122.081,37.285),(-122.0786,37.248),(-122.078,37.24),(-122.077642,37.23496),(-122.076983,37.22567),(-122.076599,37.22026),(-122.076229,37.21505),(-122.0758,37.209)] | Oakland + I- 880 | [(-122.0978,37.528),(-122.096,37.496),(-122.0931,37.453),(-122.09277,37.4496),(-122.090189,37.41442),(-122.0896,37.405),(-122.085,37.34)] | Oakland + I- 880 | [(-122.1365,37.902),(-122.1358,37.898),(-122.1333,37.881),(-122.1323,37.874),(-122.1311,37.866),(-122.1308,37.865),(-122.1307,37.864),(-122.1289,37.851),(-122.1277,37.843),(-122.1264,37.834),(-122.1231,37.812),(-122.1165,37.766),(-122.1104,37.72),(-122.109695,37.71094),(-122.109,37.702),(-122.108312,37.69168),(-122.1076,37.681)] | Berkeley + I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Oakland + I- 880 | [(-122.1755,37.185),(-122.1747,37.178),(-122.1742,37.173),(-122.1692,37.126),(-122.167792,37.11594),(-122.16757,37.11435),(-122.1671,37.111),(-122.1655,37.1),(-122.165169,37.09811),(-122.1641,37.092),(-122.1596,37.061),(-122.158381,37.05275),(-122.155991,37.03657),(-122.1531,37.017),(-122.1478,37.98),(-122.1407,37.932),(-122.1394,37.924),(-122.1389,37.92),(-122.1376,37.91)] | Berkeley + I- 880 | [(-122.2214,37.711),(-122.2202,37.699),(-122.2199,37.695),(-122.219,37.682),(-122.2184,37.672),(-122.2173,37.652),(-122.2159,37.638),(-122.2144,37.616),(-122.2138,37.612),(-122.2135,37.609),(-122.212,37.592),(-122.2116,37.586),(-122.2111,37.581)] | Berkeley + I- 880 | [(-122.2707,37.975),(-122.2693,37.972),(-122.2681,37.966),(-122.267,37.962),(-122.2659,37.957),(-122.2648,37.952),(-122.2636,37.946),(-122.2625,37.935),(-122.2617,37.927),(-122.2607,37.921),(-122.2593,37.916),(-122.258,37.911),(-122.2536,37.898),(-122.2432,37.858),(-122.2408,37.845),(-122.2386,37.827),(-122.2374,37.811)] | Berkeley + I- 880 Ramp | [(-122.0019,37.301),(-122.002,37.293)] | Oakland + I- 880 Ramp | [(-122.0041,37.313),(-122.0018,37.315),(-122.0007,37.315),(-122.0005,37.313),(-122.0002,37.308),(-121.9995,37.289)] | Oakland + I- 880 Ramp | [(-122.0041,37.313),(-122.0038,37.308),(-122.0039,37.284),(-122.0013,37.287),(-121.9995,37.289)] | Oakland + I- 880 Ramp | [(-122.0236,37.488),(-122.0231,37.458),(-122.0227,37.458),(-122.0223,37.452),(-122.0205,37.447)] | Oakland + I- 880 Ramp | [(-122.0238,37.491),(-122.0215,37.483),(-122.0211,37.477),(-122.0205,37.447)] | Oakland + I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland + I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Oakland + I- 880 Ramp | [(-122.059,37.982),(-122.0577,37.984),(-122.0612,37.003)] | Berkeley + I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland + I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Oakland + I- 880 Ramp | [(-122.0618,37.011),(-122.0631,37.982),(-122.0585,37.967)] | Berkeley + I- 880 Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland + I- 880 
Ramp | [(-122.085,37.34),(-122.0801,37.316),(-122.081,37.285)] | Oakland + I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland + I- 880 Ramp | [(-122.085,37.34),(-122.0866,37.316),(-122.0819,37.296)] | Oakland + I- 880 Ramp | [(-122.1029,37.61),(-122.1013,37.587),(-122.0999,37.569)] | Berkeley + I- 880 Ramp | [(-122.1379,37.891),(-122.1383,37.897),(-122.1377,37.902)] | Berkeley + I- 880 Ramp | [(-122.1379,37.931),(-122.137597,37.92736),(-122.1374,37.925),(-122.1373,37.924),(-122.1369,37.914),(-122.1358,37.905),(-122.1365,37.908),(-122.1358,37.898)] | Berkeley + I- 880 Ramp | [(-122.2536,37.898),(-122.254,37.902)] | Berkeley + I- 880 Ramp | [(-122.2771,37.002),(-122.278,37)] | Lafayette + Indian Way | [(-122.2066,37.398),(-122.2045,37.411)] | Lafayette + Jackson St | [(-122.0845,37.6),(-122.0842,37.606)] | Berkeley + Johnson Dr | [(-121.9145,37.901),(-121.915,37.877)] | Oakland + Joyce St | [(-122.0792,37.604),(-122.0774,37.581)] | Berkeley + Juniper St | [(-121.7823,37.897),(-121.7815,37.9)] | Oakland + Kaiser Dr | [(-122.067163,37.47821),(-122.060402,37.51961)] | Oakland + Keeler Ave | [(-122.2578,37.906),(-122.2579,37.899)] | Berkeley + Kildare Road | [(-122.0968,37.016),(-122.0959,37)] | Oakland + La Playa Dr | [(-122.1039,37.545),(-122.101,37.493)] | Oakland + Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Berkeley + Laguna Ave | [(-122.2099,37.989),(-122.2089,37)] | Lafayette + Lakehurst Cir | [(-122.284729,37.89025),(-122.286096,37.90364)] | Berkeley + Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Berkeley + Lakeshore Ave | [(-122.2586,37.99),(-122.2556,37.006)] | Lafayette + Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland + Las Positas Road | [(-121.764488,37.99199),(-121.75569,37.02022)] | Oakland + Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Berkeley + Linden St | [(-122.2867,37.998),(-122.2864,37.008)] | Lafayette + Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland + Livermore Ave | [(-121.7687,37.448),(-121.769,37.375)] | Oakland + Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland + Livermore Ave | [(-121.772719,37.99085),(-121.7728,37.001)] | Oakland + Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Oakland + Locust St | [(-122.1606,37.007),(-122.1593,37.987)] | Berkeley + Logan Ct | [(-122.0053,37.492),(-122.0061,37.484)] | Oakland + Magnolia St | [(-122.0971,37.5),(-122.0962,37.484)] | Oakland + Mandalay Road | [(-122.2322,37.397),(-122.2321,37.403)] | Lafayette + Marin Ave | [(-122.2741,37.894),(-122.272,37.901)] | Berkeley + Martin Luther King Jr Way | [(-122.2712,37.608),(-122.2711,37.599)] | Berkeley + Mattos Dr | [(-122.0005,37.502),(-122.000898,37.49683)] | Oakland + Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Oakland + Maubert Ave | [(-122.1114,37.009),(-122.1096,37.995)] | Berkeley + McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Oakland + McClure Ave | [(-122.1431,37.001),(-122.1436,37.998)] | Berkeley + Medlar Dr | [(-122.0627,37.378),(-122.0625,37.375)] | Oakland + Mildred Ct | [(-122.0002,37.388),(-121.9998,37.386)] | Oakland + Miller Road | [(-122.0902,37.645),(-122.0865,37.545)] | Berkeley + Miramar Ave | [(-122.1009,37.025),(-122.099089,37.03209)] | Oakland + Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland + Mission Blvd | [(-121.918886,37),(-121.9194,37.976),(-121.9198,37.975)] | Oakland + Mission Blvd | [(-122.0006,37.896),(-121.9989,37.88)] | Oakland + Mission Blvd | 
[(-122.0006,37.896),(-121.9989,37.88)] | Berkeley + Moores Ave | [(-122.0087,37.301),(-122.0094,37.292)] | Oakland + National Ave | [(-122.1192,37.5),(-122.1281,37.489)] | Oakland + Navajo Ct | [(-121.8779,37.901),(-121.8783,37.9)] | Oakland + Newark Blvd | [(-122.0352,37.438),(-122.0341,37.423)] | Oakland + Oakland Inner Harbor | [(-122.2625,37.913),(-122.260016,37.89484)] | Berkeley + Oakridge Road | [(-121.8316,37.049),(-121.828382,37)] | Oakland + Oneil Ave | [(-122.076754,37.62476),(-122.0745,37.595)] | Berkeley + Parkridge Dr | [(-122.1438,37.884),(-122.1428,37.9)] | Berkeley + Parkside Dr | [(-122.0475,37.603),(-122.0443,37.596)] | Berkeley + Paseo Padre Pkwy | [(-121.9143,37.005),(-121.913522,37)] | Oakland + Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Oakland + Paseo Padre Pkwy | [(-122.0021,37.639),(-121.996,37.628)] | Berkeley + Pearl St | [(-122.2383,37.594),(-122.2366,37.615)] | Berkeley + Periwinkle Road | [(-122.0451,37.301),(-122.044758,37.29844)] | Oakland + Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland + Pimlico Dr | [(-121.8616,37.998),(-121.8618,37.008)] | Oakland + Portsmouth Ave | [(-122.1064,37.315),(-122.1064,37.308)] | Oakland + Proctor Ave | [(-122.2267,37.406),(-122.2251,37.386)] | Lafayette + Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland + Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Oakland + Railroad Ave | [(-122.0245,37.013),(-122.0234,37.003),(-122.0223,37.993)] | Berkeley + Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland + Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Oakland + Ranspot Dr | [(-122.0972,37.999),(-122.0959,37)] | Berkeley + Redding St | [(-122.1978,37.901),(-122.1975,37.895)] | Berkeley + Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Oakland + Redwood Road | [(-122.1493,37.98),(-122.1437,37.001)] | Berkeley + Roca Dr | [(-122.0335,37.609),(-122.0314,37.599)] | Berkeley + Rosedale Ct | [(-121.9232,37.9),(-121.924,37.897)] | Oakland + Sacramento St | [(-122.2799,37.606),(-122.2797,37.597)] | Berkeley + Saddle Brook Dr | [(-122.1478,37.909),(-122.1454,37.904),(-122.1451,37.888)] | Berkeley + Saginaw Ct | [(-121.8803,37.898),(-121.8806,37.901)] | Oakland + San Andreas Dr | [(-122.0609,37.9),(-122.0614,37.895)] | Berkeley + Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland + Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Oakland + Santa Maria Ave | [(-122.0773,37),(-122.0773,37.98)] | Berkeley + Shattuck Ave | [(-122.2686,37.904),(-122.2686,37.897)] | Berkeley + Sheridan Road | [(-122.2279,37.425),(-122.2253,37.411),(-122.2223,37.377)] | Lafayette + Shoreline Dr | [(-122.2657,37.603),(-122.2648,37.6)] | Berkeley + Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Oakland + Skyline Blvd | [(-122.1738,37.01),(-122.1714,37.996)] | Berkeley + Skyline Dr | [(-122.0277,37.5),(-122.0284,37.498)] | Oakland + Skywest Dr | [(-122.1161,37.62),(-122.1123,37.586)] | Berkeley + Southern Pacific Railroad | [(-122.3002,37.674),(-122.2999,37.661)] | Berkeley + Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland + Sp Railroad | [(-121.893564,37.99009),(-121.897,37.016)] | Oakland + Sp Railroad | [(-121.9565,37.898),(-121.9562,37.9)] | Oakland + Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland + Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Oakland + Sp Railroad | [(-122.0734,37.001),(-122.0734,37.997)] | Berkeley + Sp Railroad | 
[(-122.0914,37.601),(-122.087,37.56),(-122.086408,37.5551)] | Berkeley + Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Oakland + Sp Railroad | [(-122.137792,37.003),(-122.1365,37.992),(-122.131257,37.94612)] | Berkeley + Sp Railroad | [(-122.1947,37.497),(-122.193328,37.4848)] | Oakland + Stanton Ave | [(-122.100392,37.0697),(-122.099513,37.06052)] | Oakland + State Hwy 123 | [(-122.3004,37.986),(-122.2998,37.969),(-122.2995,37.962),(-122.2992,37.952),(-122.299,37.942),(-122.2987,37.935),(-122.2984,37.924),(-122.2982,37.92),(-122.2976,37.904),(-122.297,37.88),(-122.2966,37.869),(-122.2959,37.848),(-122.2961,37.843)] | Berkeley + State Hwy 13 | [(-122.1797,37.943),(-122.179871,37.91849),(-122.18,37.9),(-122.179023,37.86615),(-122.1787,37.862),(-122.1781,37.851),(-122.1777,37.845),(-122.1773,37.839),(-122.177,37.833)] | Berkeley + State Hwy 13 | [(-122.2049,37.2),(-122.20328,37.17975),(-122.1989,37.125),(-122.198078,37.11641),(-122.1975,37.11)] | Lafayette + State Hwy 13 Ramp | [(-122.2244,37.427),(-122.223,37.414),(-122.2214,37.396),(-122.2213,37.388)] | Lafayette + State Hwy 238 | ((-122.098,37.908),(-122.0983,37.907),(-122.099,37.905),(-122.101,37.898),(-122.101535,37.89711),(-122.103173,37.89438),(-122.1046,37.892),(-122.106,37.89)) | Berkeley + State Hwy 238 Ramp | [(-122.1288,37.9),(-122.1293,37.895),(-122.1296,37.906)] | Berkeley + State Hwy 24 | [(-122.2674,37.246),(-122.2673,37.248),(-122.267,37.261),(-122.2668,37.271),(-122.2663,37.298),(-122.2659,37.315),(-122.2655,37.336),(-122.265007,37.35882),(-122.264443,37.37286),(-122.2641,37.381),(-122.2638,37.388),(-122.2631,37.396),(-122.2617,37.405),(-122.2615,37.407),(-122.2605,37.412)] | Lafayette + State Hwy 84 | [(-121.9565,37.898),(-121.956589,37.89911),(-121.9569,37.903),(-121.956,37.91),(-121.9553,37.919)] | Oakland + State Hwy 84 | [(-122.0671,37.426),(-122.07,37.402),(-122.074,37.37),(-122.0773,37.338)] | Oakland + State Hwy 92 | [(-122.1085,37.326),(-122.1095,37.322),(-122.1111,37.316),(-122.1119,37.313),(-122.1125,37.311),(-122.1131,37.308),(-122.1167,37.292),(-122.1187,37.285),(-122.12,37.28)] | Oakland + State Hwy 92 Ramp | [(-122.1086,37.321),(-122.1089,37.315),(-122.1111,37.316)] | Oakland + Stuart St | [(-122.2518,37.6),(-122.2507,37.601),(-122.2491,37.606)] | Berkeley + Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland + Sunol Ridge Trl | [(-121.9419,37.455),(-121.9345,37.38)] | Oakland + Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland + Tassajara Creek | [(-121.87866,37.98898),(-121.8782,37.015)] | Oakland + Taurus Ave | [(-122.2159,37.416),(-122.2128,37.389)] | Lafayette + Tennyson Road | [(-122.0891,37.317),(-122.0927,37.317)] | Oakland + Thackeray Ave | [(-122.072,37.305),(-122.0715,37.298)] | Oakland + Theresa Way | [(-121.7289,37.906),(-121.728,37.899)] | Oakland + Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland + Tissiack Way | [(-121.920364,37),(-121.9208,37.995)] | Oakland + Tupelo Ter | [(-122.059087,37.6113),(-122.057021,37.59942)] | Berkeley + Vallecitos Road | [(-121.8699,37.916),(-121.8703,37.891)] | Oakland + Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland + Warm Springs Blvd | [(-121.933956,37),(-121.9343,37.97)] | Oakland + Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland + Welch Creek Road | [(-121.7695,37.386),(-121.7737,37.413)] | Oakland + West Loop Road | [(-122.0576,37.604),(-122.0602,37.586)] | Berkeley + Western Pacific Railroad Spur | 
[(-122.0394,37.018),(-122.0394,37.961)] | Oakland + Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Oakland + Western Pacific Railroad Spur | [(-122.0394,37.018),(-122.0394,37.961)] | Berkeley + Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland + Whitlock Creek | [(-121.74683,37.91276),(-121.733107,37)] | Oakland + Willimet Way | [(-122.0964,37.517),(-122.0949,37.493)] | Oakland + Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Oakland + Wisconsin St | [(-122.1994,37.017),(-122.1975,37.998),(-122.1971,37.994)] | Berkeley + Wp Railroad | [(-122.254,37.902),(-122.2506,37.891)] | Berkeley + 100th Ave | [(-122.1657,37.429),(-122.1647,37.432)] | Oakland + 107th Ave | [(-122.1555,37.403),(-122.1531,37.41)] | Oakland + 14th St | [(-122.299,37.147),(-122.3,37.148)] | Lafayette + 19th Ave | [(-122.2366,37.897),(-122.2359,37.905)] | Berkeley + 1st St | [(-121.75508,37.89294),(-121.753581,37.90031)] | Oakland + 5th St | [(-122.278,37),(-122.2792,37.005),(-122.2803,37.009)] | Lafayette + 5th St | [(-122.296,37.615),(-122.2953,37.598)] | Berkeley + 82nd Ave | [(-122.1695,37.596),(-122.1681,37.603)] | Berkeley + 85th Ave | [(-122.1877,37.466),(-122.186,37.476)] | Oakland + 89th Ave | [(-122.1822,37.459),(-122.1803,37.471)] | Oakland + 98th Ave | [(-122.1568,37.498),(-122.1558,37.502)] | Oakland + 98th Ave | [(-122.1693,37.438),(-122.1682,37.444)] | Oakland + 98th Ave | [(-122.2001,37.258),(-122.1974,37.27)] | Lafayette +(333 rows) + +SELECT name, #thepath FROM iexit ORDER BY name COLLATE "C", 2; + name | ?column? +------------------------------------+---------- + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 2 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 3 + I- 580 | 4 + I- 580 | 4 + I- 580 | 4 + I- 580 | 4 + I- 580 | 5 + I- 580 | 5 + I- 580 | 5 + I- 580 | 5 + I- 580 | 5 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 6 + I- 580 | 7 + I- 580 | 7 + I- 580 | 7 + I- 580 | 7 + I- 580 | 7 + I- 580 | 7 + I- 580 | 7 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 8 + I- 580 | 9 + I- 580 | 9 + I- 580 | 9 + I- 580 | 9 + I- 580 | 9 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 12 + I- 580 | 13 + I- 580 | 13 + I- 580 | 13 + I- 580 | 13 + I- 580 | 13 + I- 580 | 13 + I- 580 | 14 + I- 580 | 14 + I- 580 | 14 + I- 580 | 14 + I- 580 | 14 + I- 580 | 14 + I- 580 | 14 + I- 580 | 14 + I- 580 | 18 + I- 580 | 18 + I- 580 | 18 + I- 580 | 18 + I- 580 | 18 + I- 580 | 18 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 21 + I- 580 | 22 + I- 580 | 22 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + 
I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 2 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 3 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 
Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 4 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 5 + I- 580 Ramp | 6 + I- 580 Ramp | 6 + I- 580 Ramp | 6 + I- 580 Ramp | 7 + I- 580 Ramp | 8 + I- 580 Ramp | 8 + I- 580 Ramp | 8 + I- 580 Ramp | 8 + I- 580 Ramp | 8 + I- 580 Ramp | 8 + I- 580/I-680 Ramp | 2 + I- 580/I-680 Ramp | 2 + I- 580/I-680 Ramp | 2 + I- 580/I-680 Ramp | 2 + I- 580/I-680 Ramp | 2 + I- 580/I-680 Ramp | 2 + I- 580/I-680 Ramp | 4 + I- 580/I-680 Ramp | 4 + I- 580/I-680 Ramp | 4 + I- 580/I-680 Ramp | 4 + I- 580/I-680 Ramp | 5 + I- 580/I-680 Ramp | 6 + I- 580/I-680 Ramp | 6 + I- 580/I-680 Ramp | 6 + I- 680 | 2 + I- 680 | 2 + I- 680 | 2 + I- 680 | 2 + I- 680 | 2 + I- 680 | 2 + I- 680 | 2 + I- 680 | 3 + I- 680 | 3 + I- 680 | 3 + I- 680 | 4 + I- 680 | 4 + I- 680 | 4 + I- 680 | 5 + I- 680 | 5 + I- 680 | 5 + I- 680 | 7 + I- 680 | 7 + I- 680 | 7 + I- 680 | 7 + I- 680 | 8 + I- 680 | 8 + I- 680 | 8 + I- 680 | 8 + I- 680 | 10 + I- 680 | 10 + I- 680 | 10 + I- 680 | 10 + I- 680 | 10 + I- 680 | 10 + I- 680 | 10 + I- 680 | 16 + I- 680 | 16 + I- 680 | 16 + I- 680 | 16 + I- 680 | 16 + I- 680 | 16 + I- 680 | 16 + I- 680 | 16 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 2 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 3 + I- 680 Ramp | 4 + I- 680 Ramp | 4 + I- 680 Ramp | 4 + I- 680 Ramp | 5 + I- 680 Ramp | 5 + I- 680 Ramp | 5 + I- 680 Ramp | 5 + I- 680 Ramp | 5 + I- 680 Ramp | 5 + I- 680 Ramp | 6 + I- 680 Ramp | 6 + I- 680 Ramp | 6 + I- 680 Ramp | 6 + I- 680 Ramp | 7 + I- 680 Ramp | 7 + I- 680 Ramp | 7 + I- 680 Ramp | 7 + I- 680 Ramp | 8 + I- 680 Ramp | 8 + I- 680 Ramp | 8 + I- 680 Ramp | 8 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 2 + I- 80 | 3 + I- 80 | 3 + I- 80 | 3 + I- 80 | 4 + I- 80 | 4 + I- 80 | 4 + I- 80 | 4 + I- 80 | 4 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 5 + I- 80 | 11 + I- 80 | 11 + I- 80 | 11 + I- 80 | 11 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 2 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 80 Ramp | 3 + I- 
80 Ramp | 3 + I- 80 Ramp | 4 + I- 80 Ramp | 4 + I- 80 Ramp | 4 + I- 80 Ramp | 4 + I- 80 Ramp | 5 + I- 80 Ramp | 5 + I- 80 Ramp | 5 + I- 80 Ramp | 5 + I- 80 Ramp | 5 + I- 80 Ramp | 5 + I- 80 Ramp | 5 + I- 80 Ramp | 7 + I- 80 Ramp | 7 + I- 80 Ramp | 7 + I- 80 Ramp | 7 + I- 880 | 2 + I- 880 | 2 + I- 880 | 2 + I- 880 | 2 + I- 880 | 2 + I- 880 | 5 + I- 880 | 5 + I- 880 | 5 + I- 880 | 5 + I- 880 | 5 + I- 880 | 5 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 6 + I- 880 | 7 + I- 880 | 7 + I- 880 | 7 + I- 880 | 7 + I- 880 | 7 + I- 880 | 7 + I- 880 | 7 + I- 880 | 9 + I- 880 | 9 + I- 880 | 9 + I- 880 | 9 + I- 880 | 9 + I- 880 | 9 + I- 880 | 9 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 10 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 12 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 13 + I- 880 | 14 + I- 880 | 14 + I- 880 | 14 + I- 880 | 14 + I- 880 | 14 + I- 880 | 14 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 17 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 | 19 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 2 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 
Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 3 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 4 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 5 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 6 + I- 880 Ramp | 8 + I- 880 Ramp | 8 + I- 880 Ramp | 8 + I- 980 | 2 + I- 980 | 2 + I- 980 | 2 + I- 980 | 2 + I- 980 | 2 + I- 980 | 2 + I- 980 | 2 + I- 980 | 2 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 3 + I- 980 | 4 + I- 980 | 4 + I- 980 | 5 + I- 980 | 5 + I- 980 | 7 + I- 980 | 7 + I- 980 | 7 + I- 980 | 7 + I- 980 | 12 + I- 980 Ramp | 3 + I- 980 Ramp | 3 + I- 980 Ramp | 3 + I- 980 Ramp | 7 +(896 rows) + +SELECT * FROM toyemp WHERE name = 'sharon'; + name | age | location | annualsal +--------+-----+----------+----------- + sharon | 25 | (15,12) | 12000 +(1 row) + +-- +-- Test for Leaky view scenario +-- +CREATE ROLE regress_alice; +CREATE FUNCTION f_leak (text) + RETURNS bool LANGUAGE 'plpgsql' COST 0.0000001 + AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END'; +CREATE TABLE customer ( + cid int primary key, + name text not null, + tel text, + passwd text +); +CREATE TABLE credit_card ( + cid int references customer(cid), + cnum text, + climit int +); +CREATE TABLE credit_usage ( + cid int references customer(cid), + ymd date, + usage int +); +INSERT INTO customer + VALUES (101, 'regress_alice', '+81-12-3456-7890', 'passwd123'), + (102, 'regress_bob', '+01-234-567-8901', 'beafsteak'), + (103, 'regress_eve', '+49-8765-43210', 'hamburger'); +INSERT INTO credit_card + VALUES (101, '1111-2222-3333-4444', 4000), + (102, '5555-6666-7777-8888', 3000), + (103, '9801-2345-6789-0123', 2000); +INSERT INTO credit_usage + VALUES (101, '2011-09-15', 120), + (101, '2011-10-05', 90), + (101, '2011-10-18', 110), + (101, '2011-10-21', 200), + (101, '2011-11-10', 80), + (102, '2011-09-22', 300), + (102, '2011-10-12', 120), + (102, '2011-10-28', 200), + (103, '2011-10-15', 480); +CREATE VIEW my_property_normal AS + SELECT * FROM customer WHERE name = current_user; +CREATE VIEW my_property_secure WITH (security_barrier) AS + SELECT * FROM customer WHERE name = current_user; +CREATE VIEW 
my_credit_card_normal AS + SELECT * FROM customer l NATURAL JOIN credit_card r + WHERE l.name = current_user; +CREATE VIEW my_credit_card_secure WITH (security_barrier) AS + SELECT * FROM customer l NATURAL JOIN credit_card r + WHERE l.name = current_user; +CREATE VIEW my_credit_card_usage_normal AS + SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r; +CREATE VIEW my_credit_card_usage_secure WITH (security_barrier) AS + SELECT * FROM my_credit_card_secure l NATURAL JOIN credit_usage r; +GRANT SELECT ON my_property_normal TO public; +GRANT SELECT ON my_property_secure TO public; +GRANT SELECT ON my_credit_card_normal TO public; +GRANT SELECT ON my_credit_card_secure TO public; +GRANT SELECT ON my_credit_card_usage_normal TO public; +GRANT SELECT ON my_credit_card_usage_secure TO public; +-- +-- Run leaky view scenarios +-- +SET SESSION AUTHORIZATION regress_alice; +-- +-- scenario: if a qualifier with tiny-cost is given, it shall be launched +-- prior to the security policy of the view. +-- +SELECT * FROM my_property_normal WHERE f_leak(passwd); +NOTICE: f_leak => passwd123 +NOTICE: f_leak => beafsteak +NOTICE: f_leak => hamburger + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal WHERE f_leak(passwd); + QUERY PLAN +------------------------------------------------------ + Seq Scan on customer + Filter: (f_leak(passwd) AND (name = CURRENT_USER)) +(2 rows) + +SELECT * FROM my_property_secure WHERE f_leak(passwd); +NOTICE: f_leak => passwd123 + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure WHERE f_leak(passwd); + QUERY PLAN +--------------------------------------------- + Subquery Scan on my_property_secure + Filter: f_leak(my_property_secure.passwd) + -> Seq Scan on customer + Filter: (name = CURRENT_USER) +(4 rows) + +-- +-- scenario: qualifiers can be pushed down if they contain leaky functions, +-- provided they aren't passed data from inside the view. 
+-- +SELECT * FROM my_property_normal v + WHERE f_leak('passwd') AND f_leak(passwd); +NOTICE: f_leak => passwd +NOTICE: f_leak => passwd123 +NOTICE: f_leak => passwd +NOTICE: f_leak => beafsteak +NOTICE: f_leak => passwd +NOTICE: f_leak => hamburger + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM my_property_normal v + WHERE f_leak('passwd') AND f_leak(passwd); + QUERY PLAN +--------------------------------------------------------------------------------- + Seq Scan on customer + Filter: (f_leak('passwd'::text) AND f_leak(passwd) AND (name = CURRENT_USER)) +(2 rows) + +SELECT * FROM my_property_secure v + WHERE f_leak('passwd') AND f_leak(passwd); +NOTICE: f_leak => passwd +NOTICE: f_leak => passwd123 +NOTICE: f_leak => passwd +NOTICE: f_leak => passwd + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM my_property_secure v + WHERE f_leak('passwd') AND f_leak(passwd); + QUERY PLAN +-------------------------------------------------------------------- + Subquery Scan on v + Filter: f_leak(v.passwd) + -> Seq Scan on customer + Filter: (f_leak('passwd'::text) AND (name = CURRENT_USER)) +(4 rows) + +-- +-- scenario: if a qualifier references only one-side of a particular join- +-- tree, it shall be distributed to the most deep scan plan as +-- possible as we can. +-- +SELECT * FROM my_credit_card_normal WHERE f_leak(cnum); +NOTICE: f_leak => 1111-2222-3333-4444 +NOTICE: f_leak => 5555-6666-7777-8888 +NOTICE: f_leak => 9801-2345-6789-0123 + cid | name | tel | passwd | cnum | climit +-----+---------------+------------------+-----------+---------------------+-------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_normal WHERE f_leak(cnum); + QUERY PLAN +--------------------------------------------- + Hash Join + Hash Cond: (r.cid = l.cid) + -> Seq Scan on credit_card r + Filter: f_leak(cnum) + -> Hash + -> Seq Scan on customer l + Filter: (name = CURRENT_USER) +(7 rows) + +SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); +NOTICE: f_leak => 1111-2222-3333-4444 + cid | name | tel | passwd | cnum | climit +-----+---------------+------------------+-----------+---------------------+-------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 +(1 row) + +EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_secure WHERE f_leak(cnum); + QUERY PLAN +--------------------------------------------------- + Subquery Scan on my_credit_card_secure + Filter: f_leak(my_credit_card_secure.cnum) + -> Hash Join + Hash Cond: (r.cid = l.cid) + -> Seq Scan on credit_card r + -> Hash + -> Seq Scan on customer l + Filter: (name = CURRENT_USER) +(8 rows) + +-- +-- scenario: an external qualifier can be pushed-down by in-front-of the +-- views with "security_barrier" attribute, except for operators +-- implemented with leakproof functions. 
+-- +SELECT * FROM my_credit_card_usage_normal + WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01'; +NOTICE: f_leak => 1111-2222-3333-4444 + cid | name | tel | passwd | cnum | climit | ymd | usage +-----+---------------+------------------+-----------+---------------------+--------+------------+------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-05-2011 | 90 + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-18-2011 | 110 + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-21-2011 | 200 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_normal + WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01'; + QUERY PLAN +------------------------------------------------------------------------------ + Nested Loop + Join Filter: (l.cid = r.cid) + -> Seq Scan on credit_usage r + Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date)) + -> Materialize + -> Subquery Scan on l + Filter: f_leak(l.cnum) + -> Hash Join + Hash Cond: (r_1.cid = l_1.cid) + -> Seq Scan on credit_card r_1 + -> Hash + -> Seq Scan on customer l_1 + Filter: (name = CURRENT_USER) +(13 rows) + +SELECT * FROM my_credit_card_usage_secure + WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01'; +NOTICE: f_leak => 1111-2222-3333-4444 +NOTICE: f_leak => 1111-2222-3333-4444 +NOTICE: f_leak => 1111-2222-3333-4444 + cid | name | tel | passwd | cnum | climit | ymd | usage +-----+---------------+------------------+-----------+---------------------+--------+------------+------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-05-2011 | 90 + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-18-2011 | 110 + 101 | regress_alice | +81-12-3456-7890 | passwd123 | 1111-2222-3333-4444 | 4000 | 10-21-2011 | 200 +(3 rows) + +EXPLAIN (COSTS OFF) SELECT * FROM my_credit_card_usage_secure + WHERE f_leak(cnum) AND ymd >= '2011-10-01' AND ymd < '2011-11-01'; + QUERY PLAN +------------------------------------------------------------------------------------ + Subquery Scan on my_credit_card_usage_secure + Filter: f_leak(my_credit_card_usage_secure.cnum) + -> Nested Loop + Join Filter: (l.cid = r.cid) + -> Seq Scan on credit_usage r + Filter: ((ymd >= '10-01-2011'::date) AND (ymd < '11-01-2011'::date)) + -> Materialize + -> Hash Join + Hash Cond: (r_1.cid = l.cid) + -> Seq Scan on credit_card r_1 + -> Hash + -> Seq Scan on customer l + Filter: (name = CURRENT_USER) +(13 rows) + +-- +-- Test for the case when security_barrier gets changed between rewriter +-- and planner stage. 
+-- +PREPARE p1 AS SELECT * FROM my_property_normal WHERE f_leak(passwd); +PREPARE p2 AS SELECT * FROM my_property_secure WHERE f_leak(passwd); +EXECUTE p1; +NOTICE: f_leak => passwd123 +NOTICE: f_leak => beafsteak +NOTICE: f_leak => hamburger + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +EXECUTE p2; +NOTICE: f_leak => passwd123 + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +RESET SESSION AUTHORIZATION; +ALTER VIEW my_property_normal SET (security_barrier=true); +ALTER VIEW my_property_secure SET (security_barrier=false); +SET SESSION AUTHORIZATION regress_alice; +EXECUTE p1; -- To be perform as a view with security-barrier +NOTICE: f_leak => passwd123 + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +EXECUTE p2; -- To be perform as a view without security-barrier +NOTICE: f_leak => passwd123 +NOTICE: f_leak => beafsteak +NOTICE: f_leak => hamburger + cid | name | tel | passwd +-----+---------------+------------------+----------- + 101 | regress_alice | +81-12-3456-7890 | passwd123 +(1 row) + +-- Cleanup. +RESET SESSION AUTHORIZATION; +DROP ROLE regress_alice; diff --git a/src/test/regress/expected/sequence.out b/src/test/regress/expected/sequence.out new file mode 100644 index 0000000..7cb2f7c --- /dev/null +++ b/src/test/regress/expected/sequence.out @@ -0,0 +1,841 @@ +-- +-- CREATE SEQUENCE +-- +-- various error cases +CREATE SEQUENCE sequence_testx INCREMENT BY 0; +ERROR: INCREMENT must not be zero +CREATE SEQUENCE sequence_testx INCREMENT BY -1 MINVALUE 20; +ERROR: MINVALUE (20) must be less than MAXVALUE (-1) +CREATE SEQUENCE sequence_testx INCREMENT BY 1 MAXVALUE -20; +ERROR: MINVALUE (1) must be less than MAXVALUE (-20) +CREATE SEQUENCE sequence_testx INCREMENT BY -1 START 10; +ERROR: START value (10) cannot be greater than MAXVALUE (-1) +CREATE SEQUENCE sequence_testx INCREMENT BY 1 START -10; +ERROR: START value (-10) cannot be less than MINVALUE (1) +CREATE SEQUENCE sequence_testx CACHE 0; +ERROR: CACHE (0) must be greater than zero +-- OWNED BY errors +CREATE SEQUENCE sequence_testx OWNED BY nobody; -- nonsense word +ERROR: invalid OWNED BY option +HINT: Specify OWNED BY table.column or OWNED BY NONE. +CREATE SEQUENCE sequence_testx OWNED BY pg_class_oid_index.oid; -- not a table +ERROR: sequence cannot be owned by relation "pg_class_oid_index" +DETAIL: This operation is not supported for indexes. 
+CREATE SEQUENCE sequence_testx OWNED BY pg_class.relname; -- not same schema +ERROR: sequence must be in same schema as table it is linked to +CREATE TABLE sequence_test_table (a int); +CREATE SEQUENCE sequence_testx OWNED BY sequence_test_table.b; -- wrong column +ERROR: column "b" of relation "sequence_test_table" does not exist +DROP TABLE sequence_test_table; +-- sequence data types +CREATE SEQUENCE sequence_test5 AS integer; +CREATE SEQUENCE sequence_test6 AS smallint; +CREATE SEQUENCE sequence_test7 AS bigint; +CREATE SEQUENCE sequence_test8 AS integer MAXVALUE 100000; +CREATE SEQUENCE sequence_test9 AS integer INCREMENT BY -1; +CREATE SEQUENCE sequence_test10 AS integer MINVALUE -100000 START 1; +CREATE SEQUENCE sequence_test11 AS smallint; +CREATE SEQUENCE sequence_test12 AS smallint INCREMENT -1; +CREATE SEQUENCE sequence_test13 AS smallint MINVALUE -32768; +CREATE SEQUENCE sequence_test14 AS smallint MAXVALUE 32767 INCREMENT -1; +CREATE SEQUENCE sequence_testx AS text; +ERROR: sequence type must be smallint, integer, or bigint +CREATE SEQUENCE sequence_testx AS nosuchtype; +ERROR: type "nosuchtype" does not exist +LINE 1: CREATE SEQUENCE sequence_testx AS nosuchtype; + ^ +CREATE SEQUENCE sequence_testx AS smallint MAXVALUE 100000; +ERROR: MAXVALUE (100000) is out of range for sequence data type smallint +CREATE SEQUENCE sequence_testx AS smallint MINVALUE -100000; +ERROR: MINVALUE (-100000) is out of range for sequence data type smallint +ALTER SEQUENCE sequence_test5 AS smallint; -- success, max will be adjusted +ALTER SEQUENCE sequence_test8 AS smallint; -- fail, max has to be adjusted +ERROR: MAXVALUE (100000) is out of range for sequence data type smallint +ALTER SEQUENCE sequence_test8 AS smallint MAXVALUE 20000; -- ok now +ALTER SEQUENCE sequence_test9 AS smallint; -- success, min will be adjusted +ALTER SEQUENCE sequence_test10 AS smallint; -- fail, min has to be adjusted +ERROR: MINVALUE (-100000) is out of range for sequence data type smallint +ALTER SEQUENCE sequence_test10 AS smallint MINVALUE -20000; -- ok now +ALTER SEQUENCE sequence_test11 AS int; -- max will be adjusted +ALTER SEQUENCE sequence_test12 AS int; -- min will be adjusted +ALTER SEQUENCE sequence_test13 AS int; -- min and max will be adjusted +ALTER SEQUENCE sequence_test14 AS int; -- min and max will be adjusted +--- +--- test creation of SERIAL column +--- +CREATE TABLE serialTest1 (f1 text, f2 serial); +INSERT INTO serialTest1 VALUES ('foo'); +INSERT INTO serialTest1 VALUES ('bar'); +INSERT INTO serialTest1 VALUES ('force', 100); +INSERT INTO serialTest1 VALUES ('wrong', NULL); +ERROR: null value in column "f2" of relation "serialtest1" violates not-null constraint +DETAIL: Failing row contains (wrong, null). 
+SELECT * FROM serialTest1; + f1 | f2 +-------+----- + foo | 1 + bar | 2 + force | 100 +(3 rows) + +SELECT pg_get_serial_sequence('serialTest1', 'f2'); + pg_get_serial_sequence +--------------------------- + public.serialtest1_f2_seq +(1 row) + +-- test smallserial / bigserial +CREATE TABLE serialTest2 (f1 text, f2 serial, f3 smallserial, f4 serial2, + f5 bigserial, f6 serial8); +INSERT INTO serialTest2 (f1) + VALUES ('test_defaults'); +INSERT INTO serialTest2 (f1, f2, f3, f4, f5, f6) + VALUES ('test_max_vals', 2147483647, 32767, 32767, 9223372036854775807, + 9223372036854775807), + ('test_min_vals', -2147483648, -32768, -32768, -9223372036854775808, + -9223372036854775808); +-- All these INSERTs should fail: +INSERT INTO serialTest2 (f1, f3) + VALUES ('bogus', -32769); +ERROR: smallint out of range +INSERT INTO serialTest2 (f1, f4) + VALUES ('bogus', -32769); +ERROR: smallint out of range +INSERT INTO serialTest2 (f1, f3) + VALUES ('bogus', 32768); +ERROR: smallint out of range +INSERT INTO serialTest2 (f1, f4) + VALUES ('bogus', 32768); +ERROR: smallint out of range +INSERT INTO serialTest2 (f1, f5) + VALUES ('bogus', -9223372036854775809); +ERROR: bigint out of range +INSERT INTO serialTest2 (f1, f6) + VALUES ('bogus', -9223372036854775809); +ERROR: bigint out of range +INSERT INTO serialTest2 (f1, f5) + VALUES ('bogus', 9223372036854775808); +ERROR: bigint out of range +INSERT INTO serialTest2 (f1, f6) + VALUES ('bogus', 9223372036854775808); +ERROR: bigint out of range +SELECT * FROM serialTest2 ORDER BY f2 ASC; + f1 | f2 | f3 | f4 | f5 | f6 +---------------+-------------+--------+--------+----------------------+---------------------- + test_min_vals | -2147483648 | -32768 | -32768 | -9223372036854775808 | -9223372036854775808 + test_defaults | 1 | 1 | 1 | 1 | 1 + test_max_vals | 2147483647 | 32767 | 32767 | 9223372036854775807 | 9223372036854775807 +(3 rows) + +SELECT nextval('serialTest2_f2_seq'); + nextval +--------- + 2 +(1 row) + +SELECT nextval('serialTest2_f3_seq'); + nextval +--------- + 2 +(1 row) + +SELECT nextval('serialTest2_f4_seq'); + nextval +--------- + 2 +(1 row) + +SELECT nextval('serialTest2_f5_seq'); + nextval +--------- + 2 +(1 row) + +SELECT nextval('serialTest2_f6_seq'); + nextval +--------- + 2 +(1 row) + +-- basic sequence operations using both text and oid references +CREATE SEQUENCE sequence_test; +CREATE SEQUENCE IF NOT EXISTS sequence_test; +NOTICE: relation "sequence_test" already exists, skipping +SELECT nextval('sequence_test'::text); + nextval +--------- + 1 +(1 row) + +SELECT nextval('sequence_test'::regclass); + nextval +--------- + 2 +(1 row) + +SELECT currval('sequence_test'::text); + currval +--------- + 2 +(1 row) + +SELECT currval('sequence_test'::regclass); + currval +--------- + 2 +(1 row) + +SELECT setval('sequence_test'::text, 32); + setval +-------- + 32 +(1 row) + +SELECT nextval('sequence_test'::regclass); + nextval +--------- + 33 +(1 row) + +SELECT setval('sequence_test'::text, 99, false); + setval +-------- + 99 +(1 row) + +SELECT nextval('sequence_test'::regclass); + nextval +--------- + 99 +(1 row) + +SELECT setval('sequence_test'::regclass, 32); + setval +-------- + 32 +(1 row) + +SELECT nextval('sequence_test'::text); + nextval +--------- + 33 +(1 row) + +SELECT setval('sequence_test'::regclass, 99, false); + setval +-------- + 99 +(1 row) + +SELECT nextval('sequence_test'::text); + nextval +--------- + 99 +(1 row) + +DISCARD SEQUENCES; +SELECT currval('sequence_test'::regclass); +ERROR: currval of sequence "sequence_test" is not 
yet defined in this session +DROP SEQUENCE sequence_test; +-- renaming sequences +CREATE SEQUENCE foo_seq; +ALTER TABLE foo_seq RENAME TO foo_seq_new; +SELECT * FROM foo_seq_new; + last_value | log_cnt | is_called +------------+---------+----------- + 1 | 0 | f +(1 row) + +SELECT nextval('foo_seq_new'); + nextval +--------- + 1 +(1 row) + +SELECT nextval('foo_seq_new'); + nextval +--------- + 2 +(1 row) + +-- log_cnt can be higher if there is a checkpoint just at the right +-- time, so just test for the expected range +SELECT last_value, log_cnt IN (31, 32) AS log_cnt_ok, is_called FROM foo_seq_new; + last_value | log_cnt_ok | is_called +------------+------------+----------- + 2 | t | t +(1 row) + +DROP SEQUENCE foo_seq_new; +-- renaming serial sequences +ALTER TABLE serialtest1_f2_seq RENAME TO serialtest1_f2_foo; +INSERT INTO serialTest1 VALUES ('more'); +SELECT * FROM serialTest1; + f1 | f2 +-------+----- + foo | 1 + bar | 2 + force | 100 + more | 3 +(4 rows) + +-- +-- Check dependencies of serial and ordinary sequences +-- +CREATE TEMP SEQUENCE myseq2; +CREATE TEMP SEQUENCE myseq3; +CREATE TEMP TABLE t1 ( + f1 serial, + f2 int DEFAULT nextval('myseq2'), + f3 int DEFAULT nextval('myseq3'::text) +); +-- Both drops should fail, but with different error messages: +DROP SEQUENCE t1_f1_seq; +ERROR: cannot drop sequence t1_f1_seq because other objects depend on it +DETAIL: default value for column f1 of table t1 depends on sequence t1_f1_seq +HINT: Use DROP ... CASCADE to drop the dependent objects too. +DROP SEQUENCE myseq2; +ERROR: cannot drop sequence myseq2 because other objects depend on it +DETAIL: default value for column f2 of table t1 depends on sequence myseq2 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +-- This however will work: +DROP SEQUENCE myseq3; +DROP TABLE t1; +-- Fails because no longer existent: +DROP SEQUENCE t1_f1_seq; +ERROR: sequence "t1_f1_seq" does not exist +-- Now OK: +DROP SEQUENCE myseq2; +-- +-- Alter sequence +-- +ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 24 + INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; +NOTICE: relation "sequence_test2" does not exist, skipping +ALTER SEQUENCE serialTest1 CYCLE; -- error, not a sequence +ERROR: "serialtest1" is not a sequence +CREATE SEQUENCE sequence_test2 START WITH 32; +CREATE SEQUENCE sequence_test4 INCREMENT BY -1; +SELECT nextval('sequence_test2'); + nextval +--------- + 32 +(1 row) + +SELECT nextval('sequence_test4'); + nextval +--------- + -1 +(1 row) + +ALTER SEQUENCE sequence_test2 RESTART; +SELECT nextval('sequence_test2'); + nextval +--------- + 32 +(1 row) + +ALTER SEQUENCE sequence_test2 RESTART WITH 0; -- error +ERROR: RESTART value (0) cannot be less than MINVALUE (1) +ALTER SEQUENCE sequence_test4 RESTART WITH 40; -- error +ERROR: RESTART value (40) cannot be greater than MAXVALUE (-1) +-- test CYCLE and NO CYCLE +ALTER SEQUENCE sequence_test2 RESTART WITH 24 + INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; +SELECT nextval('sequence_test2'); + nextval +--------- + 24 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + 28 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + 32 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + 36 +(1 row) + +SELECT nextval('sequence_test2'); -- cycled + nextval +--------- + 5 +(1 row) + +ALTER SEQUENCE sequence_test2 RESTART WITH 24 + NO CYCLE; +SELECT nextval('sequence_test2'); + nextval +--------- + 24 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + 28 +(1 row) + +SELECT 
nextval('sequence_test2'); + nextval +--------- + 32 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + 36 +(1 row) + +SELECT nextval('sequence_test2'); -- error +ERROR: nextval: reached maximum value of sequence "sequence_test2" (36) +ALTER SEQUENCE sequence_test2 RESTART WITH -24 START WITH -24 + INCREMENT BY -4 MINVALUE -36 MAXVALUE -5 CYCLE; +SELECT nextval('sequence_test2'); + nextval +--------- + -24 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + -28 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + -32 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + -36 +(1 row) + +SELECT nextval('sequence_test2'); -- cycled + nextval +--------- + -5 +(1 row) + +ALTER SEQUENCE sequence_test2 RESTART WITH -24 + NO CYCLE; +SELECT nextval('sequence_test2'); + nextval +--------- + -24 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + -28 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + -32 +(1 row) + +SELECT nextval('sequence_test2'); + nextval +--------- + -36 +(1 row) + +SELECT nextval('sequence_test2'); -- error +ERROR: nextval: reached minimum value of sequence "sequence_test2" (-36) +-- reset +ALTER SEQUENCE IF EXISTS sequence_test2 RESTART WITH 32 START WITH 32 + INCREMENT BY 4 MAXVALUE 36 MINVALUE 5 CYCLE; +SELECT setval('sequence_test2', -100); -- error +ERROR: setval: value -100 is out of bounds for sequence "sequence_test2" (5..36) +SELECT setval('sequence_test2', 100); -- error +ERROR: setval: value 100 is out of bounds for sequence "sequence_test2" (5..36) +SELECT setval('sequence_test2', 5); + setval +-------- + 5 +(1 row) + +CREATE SEQUENCE sequence_test3; -- not read from, to test is_called +-- Information schema +SELECT * FROM information_schema.sequences + WHERE sequence_name ~ ANY(ARRAY['sequence_test', 'serialtest']) + ORDER BY sequence_name ASC; + sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option +------------------+-----------------+--------------------+-----------+-------------------+-------------------------+---------------+-------------+----------------------+---------------------+-----------+-------------- + regression | public | sequence_test10 | smallint | 16 | 2 | 0 | 1 | -20000 | 32767 | 1 | NO + regression | public | sequence_test11 | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO + regression | public | sequence_test12 | integer | 32 | 2 | 0 | -1 | -2147483648 | -1 | -1 | NO + regression | public | sequence_test13 | integer | 32 | 2 | 0 | -32768 | -2147483648 | 2147483647 | 1 | NO + regression | public | sequence_test14 | integer | 32 | 2 | 0 | 32767 | -2147483648 | 2147483647 | -1 | NO + regression | public | sequence_test2 | bigint | 64 | 2 | 0 | 32 | 5 | 36 | 4 | YES + regression | public | sequence_test3 | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO + regression | public | sequence_test4 | bigint | 64 | 2 | 0 | -1 | -9223372036854775808 | -1 | -1 | NO + regression | public | sequence_test5 | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO + regression | public | sequence_test6 | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO + regression | public | sequence_test7 | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO + regression | public | sequence_test8 | smallint | 16 | 2 | 0 | 1 | 1 | 20000 | 1 | NO + regression | public | sequence_test9 | smallint | 16 | 2 | 0 | -1 | -32768 | -1 
| -1 | NO + regression | public | serialtest1_f2_foo | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO + regression | public | serialtest2_f2_seq | integer | 32 | 2 | 0 | 1 | 1 | 2147483647 | 1 | NO + regression | public | serialtest2_f3_seq | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO + regression | public | serialtest2_f4_seq | smallint | 16 | 2 | 0 | 1 | 1 | 32767 | 1 | NO + regression | public | serialtest2_f5_seq | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO + regression | public | serialtest2_f6_seq | bigint | 64 | 2 | 0 | 1 | 1 | 9223372036854775807 | 1 | NO +(19 rows) + +SELECT schemaname, sequencename, start_value, min_value, max_value, increment_by, cycle, cache_size, last_value +FROM pg_sequences +WHERE sequencename ~ ANY(ARRAY['sequence_test', 'serialtest']) + ORDER BY sequencename ASC; + schemaname | sequencename | start_value | min_value | max_value | increment_by | cycle | cache_size | last_value +------------+--------------------+-------------+----------------------+---------------------+--------------+-------+------------+------------ + public | sequence_test10 | 1 | -20000 | 32767 | 1 | f | 1 | + public | sequence_test11 | 1 | 1 | 2147483647 | 1 | f | 1 | + public | sequence_test12 | -1 | -2147483648 | -1 | -1 | f | 1 | + public | sequence_test13 | -32768 | -2147483648 | 2147483647 | 1 | f | 1 | + public | sequence_test14 | 32767 | -2147483648 | 2147483647 | -1 | f | 1 | + public | sequence_test2 | 32 | 5 | 36 | 4 | t | 1 | 5 + public | sequence_test3 | 1 | 1 | 9223372036854775807 | 1 | f | 1 | + public | sequence_test4 | -1 | -9223372036854775808 | -1 | -1 | f | 1 | -1 + public | sequence_test5 | 1 | 1 | 32767 | 1 | f | 1 | + public | sequence_test6 | 1 | 1 | 32767 | 1 | f | 1 | + public | sequence_test7 | 1 | 1 | 9223372036854775807 | 1 | f | 1 | + public | sequence_test8 | 1 | 1 | 20000 | 1 | f | 1 | + public | sequence_test9 | -1 | -32768 | -1 | -1 | f | 1 | + public | serialtest1_f2_foo | 1 | 1 | 2147483647 | 1 | f | 1 | 3 + public | serialtest2_f2_seq | 1 | 1 | 2147483647 | 1 | f | 1 | 2 + public | serialtest2_f3_seq | 1 | 1 | 32767 | 1 | f | 1 | 2 + public | serialtest2_f4_seq | 1 | 1 | 32767 | 1 | f | 1 | 2 + public | serialtest2_f5_seq | 1 | 1 | 9223372036854775807 | 1 | f | 1 | 2 + public | serialtest2_f6_seq | 1 | 1 | 9223372036854775807 | 1 | f | 1 | 2 +(19 rows) + +SELECT * FROM pg_sequence_parameters('sequence_test4'::regclass); + start_value | minimum_value | maximum_value | increment | cycle_option | cache_size | data_type +-------------+----------------------+---------------+-----------+--------------+------------+----------- + -1 | -9223372036854775808 | -1 | -1 | f | 1 | 20 +(1 row) + +\d sequence_test4 + Sequence "public.sequence_test4" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +--------+-------+----------------------+---------+-----------+---------+------- + bigint | -1 | -9223372036854775808 | -1 | -1 | no | 1 + +\d serialtest2_f2_seq + Sequence "public.serialtest2_f2_seq" + Type | Start | Minimum | Maximum | Increment | Cycles? 
| Cache +---------+-------+---------+------------+-----------+---------+------- + integer | 1 | 1 | 2147483647 | 1 | no | 1 +Owned by: public.serialtest2.f2 + +-- Test comments +COMMENT ON SEQUENCE asdf IS 'won''t work'; +ERROR: relation "asdf" does not exist +COMMENT ON SEQUENCE sequence_test2 IS 'will work'; +COMMENT ON SEQUENCE sequence_test2 IS NULL; +-- Test lastval() +CREATE SEQUENCE seq; +SELECT nextval('seq'); + nextval +--------- + 1 +(1 row) + +SELECT lastval(); + lastval +--------- + 1 +(1 row) + +SELECT setval('seq', 99); + setval +-------- + 99 +(1 row) + +SELECT lastval(); + lastval +--------- + 99 +(1 row) + +DISCARD SEQUENCES; +SELECT lastval(); +ERROR: lastval is not yet defined in this session +CREATE SEQUENCE seq2; +SELECT nextval('seq2'); + nextval +--------- + 1 +(1 row) + +SELECT lastval(); + lastval +--------- + 1 +(1 row) + +DROP SEQUENCE seq2; +-- should fail +SELECT lastval(); +ERROR: lastval is not yet defined in this session +-- unlogged sequences +-- (more tests in src/test/recovery/) +CREATE UNLOGGED SEQUENCE sequence_test_unlogged; +ALTER SEQUENCE sequence_test_unlogged SET LOGGED; +\d sequence_test_unlogged + Sequence "public.sequence_test_unlogged" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +--------+-------+---------+---------------------+-----------+---------+------- + bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1 + +ALTER SEQUENCE sequence_test_unlogged SET UNLOGGED; +\d sequence_test_unlogged + Unlogged sequence "public.sequence_test_unlogged" + Type | Start | Minimum | Maximum | Increment | Cycles? | Cache +--------+-------+---------+---------------------+-----------+---------+------- + bigint | 1 | 1 | 9223372036854775807 | 1 | no | 1 + +DROP SEQUENCE sequence_test_unlogged; +-- Test sequences in read-only transactions +CREATE TEMPORARY SEQUENCE sequence_test_temp1; +START TRANSACTION READ ONLY; +SELECT nextval('sequence_test_temp1'); -- ok + nextval +--------- + 1 +(1 row) + +SELECT nextval('sequence_test2'); -- error +ERROR: cannot execute nextval() in a read-only transaction +ROLLBACK; +START TRANSACTION READ ONLY; +SELECT setval('sequence_test_temp1', 1); -- ok + setval +-------- + 1 +(1 row) + +SELECT setval('sequence_test2', 1); -- error +ERROR: cannot execute setval() in a read-only transaction +ROLLBACK; +-- privileges tests +CREATE USER regress_seq_user; +-- nextval +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT SELECT ON seq3 TO regress_seq_user; +SELECT nextval('seq3'); +ERROR: permission denied for sequence seq3 +ROLLBACK; +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT UPDATE ON seq3 TO regress_seq_user; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +ROLLBACK; +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT USAGE ON seq3 TO regress_seq_user; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +ROLLBACK; +-- currval +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT SELECT ON seq3 TO regress_seq_user; +SELECT currval('seq3'); + currval +--------- + 1 +(1 row) + +ROLLBACK; +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +SELECT nextval('seq3'); + nextval +--------- + 1 
+(1 row) + +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT UPDATE ON seq3 TO regress_seq_user; +SELECT currval('seq3'); +ERROR: permission denied for sequence seq3 +ROLLBACK; +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT USAGE ON seq3 TO regress_seq_user; +SELECT currval('seq3'); + currval +--------- + 1 +(1 row) + +ROLLBACK; +-- lastval +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT SELECT ON seq3 TO regress_seq_user; +SELECT lastval(); + lastval +--------- + 1 +(1 row) + +ROLLBACK; +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT UPDATE ON seq3 TO regress_seq_user; +SELECT lastval(); +ERROR: permission denied for sequence seq3 +ROLLBACK; +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +SELECT nextval('seq3'); + nextval +--------- + 1 +(1 row) + +REVOKE ALL ON seq3 FROM regress_seq_user; +GRANT USAGE ON seq3 TO regress_seq_user; +SELECT lastval(); + lastval +--------- + 1 +(1 row) + +ROLLBACK; +-- setval +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +CREATE SEQUENCE seq3; +REVOKE ALL ON seq3 FROM regress_seq_user; +SAVEPOINT save; +SELECT setval('seq3', 5); +ERROR: permission denied for sequence seq3 +ROLLBACK TO save; +GRANT UPDATE ON seq3 TO regress_seq_user; +SELECT setval('seq3', 5); + setval +-------- + 5 +(1 row) + +SELECT nextval('seq3'); + nextval +--------- + 6 +(1 row) + +ROLLBACK; +-- ALTER SEQUENCE +BEGIN; +SET LOCAL SESSION AUTHORIZATION regress_seq_user; +ALTER SEQUENCE sequence_test2 START WITH 1; +ERROR: must be owner of sequence sequence_test2 +ROLLBACK; +-- Sequences should get wiped out as well: +DROP TABLE serialTest1, serialTest2; +-- Make sure sequences are gone: +SELECT * FROM information_schema.sequences WHERE sequence_name IN + ('sequence_test2', 'serialtest2_f2_seq', 'serialtest2_f3_seq', + 'serialtest2_f4_seq', 'serialtest2_f5_seq', 'serialtest2_f6_seq') + ORDER BY sequence_name ASC; + sequence_catalog | sequence_schema | sequence_name | data_type | numeric_precision | numeric_precision_radix | numeric_scale | start_value | minimum_value | maximum_value | increment | cycle_option +------------------+-----------------+----------------+-----------+-------------------+-------------------------+---------------+-------------+---------------+---------------+-----------+-------------- + regression | public | sequence_test2 | bigint | 64 | 2 | 0 | 32 | 5 | 36 | 4 | YES +(1 row) + +DROP USER regress_seq_user; +DROP SEQUENCE seq; +-- cache tests +CREATE SEQUENCE test_seq1 CACHE 10; +SELECT nextval('test_seq1'); + nextval +--------- + 1 +(1 row) + +SELECT nextval('test_seq1'); + nextval +--------- + 2 +(1 row) + +SELECT nextval('test_seq1'); + nextval +--------- + 3 +(1 row) + +DROP SEQUENCE test_seq1; diff --git a/src/test/regress/expected/spgist.out b/src/test/regress/expected/spgist.out new file mode 100644 index 0000000..2e91128 --- /dev/null +++ b/src/test/regress/expected/spgist.out @@ -0,0 +1,96 @@ +-- +-- Test SP-GiST indexes. +-- +-- There are other tests to test different SP-GiST opclasses. This is for +-- testing SP-GiST code itself. 
+create table spgist_point_tbl(id int4, p point); +create index spgist_point_idx on spgist_point_tbl using spgist(p) with (fillfactor = 75); +-- Test vacuum-root operation. It gets invoked when the root is also a leaf, +-- i.e. the index is very small. +insert into spgist_point_tbl (id, p) +select g, point(g*10, g*10) from generate_series(1, 10) g; +delete from spgist_point_tbl where id < 5; +vacuum spgist_point_tbl; +-- Insert more data, to make the index a few levels deep. +insert into spgist_point_tbl (id, p) +select g, point(g*10, g*10) from generate_series(1, 10000) g; +insert into spgist_point_tbl (id, p) +select g+100000, point(g*10+1, g*10+1) from generate_series(1, 10000) g; +-- To test vacuum, delete some entries from all over the index. +delete from spgist_point_tbl where id % 2 = 1; +-- And also delete some concentration of values. (SP-GiST doesn't currently +-- attempt to delete pages even when they become empty, but if it did, this +-- would exercise it) +delete from spgist_point_tbl where id < 10000; +vacuum spgist_point_tbl; +-- Test rescan paths (cf. bug #15378) +-- use box and && rather than point, so that rescan happens when the +-- traverse stack is non-empty +create table spgist_box_tbl(id serial, b box); +insert into spgist_box_tbl(b) +select box(point(i,j),point(i+s,j+s)) + from generate_series(1,100,5) i, + generate_series(1,100,5) j, + generate_series(1,10) s; +create index spgist_box_idx on spgist_box_tbl using spgist (b); +select count(*) + from (values (point(5,5)),(point(8,8)),(point(12,12))) v(p) + where exists(select * from spgist_box_tbl b where b.b && box(v.p,v.p)); + count +------- + 3 +(1 row) + +-- The point opclass's choose method only uses the spgMatchNode action, +-- so the other actions are not tested by the above. Create an index using +-- text opclass, which uses the others actions. +create table spgist_text_tbl(id int4, t text); +create index spgist_text_idx on spgist_text_tbl using spgist(t); +insert into spgist_text_tbl (id, t) +select g, 'f' || repeat('o', 100) || g from generate_series(1, 10000) g +union all +select g, 'baaaaaaaaaaaaaar' || g from generate_series(1, 1000) g; +-- Do a lot of insertions that have to split an existing node. Hopefully +-- one of these will cause the page to run out of space, causing the inner +-- tuple to be moved to another page. +insert into spgist_text_tbl (id, t) +select -g, 'f' || repeat('o', 100-g) || 'surprise' from generate_series(1, 100) g; +-- Test out-of-range fillfactor values +create index spgist_point_idx2 on spgist_point_tbl using spgist(p) with (fillfactor = 9); +ERROR: value 9 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". +create index spgist_point_idx2 on spgist_point_tbl using spgist(p) with (fillfactor = 101); +ERROR: value 101 out of bounds for option "fillfactor" +DETAIL: Valid values are between "10" and "100". 
+-- Modify fillfactor in existing index +alter index spgist_point_idx set (fillfactor = 90); +reindex index spgist_point_idx; +-- Test index over a domain +create domain spgist_text as varchar; +create table spgist_domain_tbl (f1 spgist_text); +create index spgist_domain_idx on spgist_domain_tbl using spgist(f1); +insert into spgist_domain_tbl values('fee'), ('fi'), ('fo'), ('fum'); +explain (costs off) +select * from spgist_domain_tbl where f1 = 'fo'; + QUERY PLAN +----------------------------------------------- + Bitmap Heap Scan on spgist_domain_tbl + Recheck Cond: ((f1)::text = 'fo'::text) + -> Bitmap Index Scan on spgist_domain_idx + Index Cond: ((f1)::text = 'fo'::text) +(4 rows) + +select * from spgist_domain_tbl where f1 = 'fo'; + f1 +---- + fo +(1 row) + +-- test an unlogged table, mostly to get coverage of spgistbuildempty +create unlogged table spgist_unlogged_tbl(id serial, b box); +create index spgist_unlogged_idx on spgist_unlogged_tbl using spgist (b); +insert into spgist_unlogged_tbl(b) +select box(point(i,j)) + from generate_series(1,100,5) i, + generate_series(1,10,5) j; +-- leave this table around, to help in testing dump/restore diff --git a/src/test/regress/expected/sqljson.out b/src/test/regress/expected/sqljson.out new file mode 100644 index 0000000..fa2abdb --- /dev/null +++ b/src/test/regress/expected/sqljson.out @@ -0,0 +1,948 @@ +-- JSON_OBJECT() +SELECT JSON_OBJECT(); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING json); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING json FORMAT JSON); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING jsonb); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING jsonb FORMAT JSON); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING text); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING text FORMAT JSON); + json_object +------------- + {} +(1 row) + +SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8); +ERROR: cannot set JSON encoding for non-bytea output types +LINE 1: SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING UTF8)... + ^ +SELECT JSON_OBJECT(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING); +ERROR: unrecognized JSON encoding: invalid_encoding +SELECT JSON_OBJECT(RETURNING bytea); + json_object +------------- + \x7b7d +(1 row) + +SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON); + json_object +------------- + \x7b7d +(1 row) + +SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF8); + json_object +------------- + \x7b7d +(1 row) + +SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF16); +ERROR: unsupported JSON encoding +LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF1... + ^ +HINT: Only UTF8 JSON encoding is supported. +SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF32); +ERROR: unsupported JSON encoding +LINE 1: SELECT JSON_OBJECT(RETURNING bytea FORMAT JSON ENCODING UTF3... + ^ +HINT: Only UTF8 JSON encoding is supported. +SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON); +ERROR: cannot use non-string types with explicit FORMAT JSON clause +LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON); + ^ +SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF8); +ERROR: JSON ENCODING clause is only allowed for bytea input type +LINE 1: SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON ENCODING UTF... 
+ ^ +SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON); + json_object +---------------- + {"foo" : null} +(1 row) + +SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UTF8); +ERROR: JSON ENCODING clause is only allowed for bytea input type +LINE 1: SELECT JSON_OBJECT('foo': NULL::json FORMAT JSON ENCODING UT... + ^ +SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON); + json_object +--------------- + {"foo": null} +(1 row) + +SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING UTF8); +ERROR: JSON ENCODING clause is only allowed for bytea input type +LINE 1: SELECT JSON_OBJECT('foo': NULL::jsonb FORMAT JSON ENCODING U... + ^ +SELECT JSON_OBJECT(NULL: 1); +ERROR: null value not allowed for object key +SELECT JSON_OBJECT('a': 2 + 3); + json_object +------------- + {"a" : 5} +(1 row) + +SELECT JSON_OBJECT('a' VALUE 2 + 3); + json_object +------------- + {"a" : 5} +(1 row) + +--SELECT JSON_OBJECT(KEY 'a' VALUE 2 + 3); +SELECT JSON_OBJECT('a' || 2: 1); + json_object +------------- + {"a2" : 1} +(1 row) + +SELECT JSON_OBJECT(('a' || 2) VALUE 1); + json_object +------------- + {"a2" : 1} +(1 row) + +--SELECT JSON_OBJECT('a' || 2 VALUE 1); +--SELECT JSON_OBJECT(KEY 'a' || 2 VALUE 1); +SELECT JSON_OBJECT('a': 2::text); + json_object +------------- + {"a" : "2"} +(1 row) + +SELECT JSON_OBJECT('a' VALUE 2::text); + json_object +------------- + {"a" : "2"} +(1 row) + +--SELECT JSON_OBJECT(KEY 'a' VALUE 2::text); +SELECT JSON_OBJECT(1::text: 2); + json_object +------------- + {"1" : 2} +(1 row) + +SELECT JSON_OBJECT((1::text) VALUE 2); + json_object +------------- + {"1" : 2} +(1 row) + +--SELECT JSON_OBJECT(1::text VALUE 2); +--SELECT JSON_OBJECT(KEY 1::text VALUE 2); +SELECT JSON_OBJECT(json '[1]': 123); +ERROR: key value must be scalar, not array, composite, or json +SELECT JSON_OBJECT(ARRAY[1,2,3]: 'aaa'); +ERROR: key value must be scalar, not array, composite, or json +SELECT JSON_OBJECT( + 'a': '123', + 1.23: 123, + 'c': json '[ 1,true,{ } ]', + 'd': jsonb '{ "x" : 123.45 }' +); + json_object +------------------------------------------------------------------- + {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123} +(1 row) + +SELECT JSON_OBJECT( + 'a': '123', + 1.23: 123, + 'c': json '[ 1,true,{ } ]', + 'd': jsonb '{ "x" : 123.45 }' + RETURNING jsonb +); + json_object +------------------------------------------------------------------- + {"a": "123", "c": [1, true, {}], "d": {"x": 123.45}, "1.23": 123} +(1 row) + +/* +SELECT JSON_OBJECT( + 'a': '123', + KEY 1.23 VALUE 123, + 'c' VALUE json '[1, true, {}]' +); +*/ +SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa')); + json_object +----------------------------------------------- + {"a" : "123", "b" : {"a" : 111, "b" : "aaa"}} +(1 row) + +SELECT JSON_OBJECT('a': '123', 'b': JSON_OBJECT('a': 111, 'b': 'aaa' RETURNING jsonb)); + json_object +------------------------------------------- + {"a": "123", "b": {"a": 111, "b": "aaa"}} +(1 row) + +SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text)); + json_object +----------------------- + {"a" : "{\"b\" : 1}"} +(1 row) + +SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING text) FORMAT JSON); + json_object +------------------- + {"a" : {"b" : 1}} +(1 row) + +SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea)); + json_object +--------------------------------- + {"a" : "\\x7b226222203a20317d"} +(1 row) + +SELECT JSON_OBJECT('a': JSON_OBJECT('b': 1 RETURNING bytea) FORMAT JSON); + json_object +------------------- + {"a" : {"b" : 1}} +(1 row) + 
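-- Illustrative sketch (not part of the expected regression output): the nested
-- JSON_OBJECT cases above hinge on FORMAT JSON, which makes the constructor
-- treat a string input as JSON rather than quoting and escaping it.  The same
-- distinction shown with a hypothetical text column:
CREATE TEMP TABLE json_object_demo(payload text);
INSERT INTO json_object_demo VALUES ('{"b": 1}');
SELECT JSON_OBJECT('a': payload) FROM json_object_demo;
-- roughly {"a" : "{\"b\": 1}"}    (payload quoted as a JSON string)
SELECT JSON_OBJECT('a': payload FORMAT JSON) FROM json_object_demo;
-- roughly {"a" : {"b": 1}}        (payload validated and embedded as JSON)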
+SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2); + json_object +---------------------------------- + {"a" : "1", "b" : null, "c" : 2} +(1 row) + +SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 NULL ON NULL); + json_object +---------------------------------- + {"a" : "1", "b" : null, "c" : 2} +(1 row) + +SELECT JSON_OBJECT('a': '1', 'b': NULL, 'c': 2 ABSENT ON NULL); + json_object +---------------------- + {"a" : "1", "c" : 2} +(1 row) + +SELECT JSON_OBJECT(1: 1, '1': NULL WITH UNIQUE); +ERROR: duplicate JSON object key value: "1" +SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE); +ERROR: duplicate JSON object key value: "1" +SELECT JSON_OBJECT(1: 1, '1': NULL NULL ON NULL WITH UNIQUE RETURNING jsonb); +ERROR: duplicate JSON object key value +SELECT JSON_OBJECT(1: 1, '1': NULL ABSENT ON NULL WITH UNIQUE RETURNING jsonb); +ERROR: duplicate JSON object key value +SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 NULL ON NULL WITH UNIQUE); +ERROR: duplicate JSON object key value: "1" +SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE); +ERROR: duplicate JSON object key value: "1" +SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE); + json_object +-------------------- + {"1" : 1, "1" : 1} +(1 row) + +SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITH UNIQUE RETURNING jsonb); +ERROR: duplicate JSON object key value +SELECT JSON_OBJECT(1: 1, '2': NULL, '1': 1 ABSENT ON NULL WITHOUT UNIQUE RETURNING jsonb); + json_object +------------- + {"1": 1} +(1 row) + +SELECT JSON_OBJECT(1: 1, '2': NULL, '3': 1, 4: NULL, '5': 'a' ABSENT ON NULL WITH UNIQUE RETURNING jsonb); + json_object +---------------------------- + {"1": 1, "3": 1, "5": "a"} +(1 row) + +-- JSON_ARRAY() +SELECT JSON_ARRAY(); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING json); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING json FORMAT JSON); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING jsonb); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING jsonb FORMAT JSON); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING text); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING text FORMAT JSON); + json_array +------------ + [] +(1 row) + +SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8); +ERROR: cannot set JSON encoding for non-bytea output types +LINE 1: SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING UTF8); + ^ +SELECT JSON_ARRAY(RETURNING text FORMAT JSON ENCODING INVALID_ENCODING); +ERROR: unrecognized JSON encoding: invalid_encoding +SELECT JSON_ARRAY(RETURNING bytea); + json_array +------------ + \x5b5d +(1 row) + +SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON); + json_array +------------ + \x5b5d +(1 row) + +SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF8); + json_array +------------ + \x5b5d +(1 row) + +SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16); +ERROR: unsupported JSON encoding +LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF16... + ^ +HINT: Only UTF8 JSON encoding is supported. +SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32); +ERROR: unsupported JSON encoding +LINE 1: SELECT JSON_ARRAY(RETURNING bytea FORMAT JSON ENCODING UTF32... + ^ +HINT: Only UTF8 JSON encoding is supported. 
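-- Illustrative sketch (not part of the expected regression output): the ON NULL
-- and UNIQUE options exercised above, in the more typical form of building one
-- object per row while omitting null-valued attributes.  Table and data are
-- hypothetical; note that, as the errors above show, WITH UNIQUE KEYS rejects a
-- duplicate key even when its null value would be dropped by ABSENT ON NULL.
CREATE TEMP TABLE json_object_people(name text, email text, phone text);
INSERT INTO json_object_people VALUES ('alice', 'alice@example.org', NULL);
SELECT JSON_OBJECT('name': name,
                   'email': email,
                   'phone': phone
                   ABSENT ON NULL
                   WITH UNIQUE KEYS
                   RETURNING jsonb)
  FROM json_object_people;
-- roughly {"name": "alice", "email": "alice@example.org"}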
+SELECT JSON_ARRAY('aaa', 111, true, array[1,2,3], NULL, json '{"a": [1]}', jsonb '["a",3]'); + json_array +----------------------------------------------------- + ["aaa", 111, true, [1, 2, 3], {"a": [1]}, ["a", 3]] +(1 row) + +SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL); + json_array +------------------ + ["a", null, "b"] +(1 row) + +SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL); + json_array +------------ + ["a", "b"] +(1 row) + +SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL); + json_array +------------ + ["b"] +(1 row) + +SELECT JSON_ARRAY('a', NULL, 'b' NULL ON NULL RETURNING jsonb); + json_array +------------------ + ["a", null, "b"] +(1 row) + +SELECT JSON_ARRAY('a', NULL, 'b' ABSENT ON NULL RETURNING jsonb); + json_array +------------ + ["a", "b"] +(1 row) + +SELECT JSON_ARRAY(NULL, NULL, 'b' ABSENT ON NULL RETURNING jsonb); + json_array +------------ + ["b"] +(1 row) + +SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' RETURNING text)); + json_array +------------------------------- + ["[\"{ \\\"a\\\" : 123 }\"]"] +(1 row) + +SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text)); + json_array +----------------------- + ["[{ \"a\" : 123 }]"] +(1 row) + +SELECT JSON_ARRAY(JSON_ARRAY('{ "a" : 123 }' FORMAT JSON RETURNING text) FORMAT JSON); + json_array +------------------- + [[{ "a" : 123 }]] +(1 row) + +SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i)); + json_array +------------ + [1, 2, 4] +(1 row) + +SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i)); + json_array +------------ + [[1,2], + + [3,4]] +(1 row) + +SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) RETURNING jsonb); + json_array +------------------ + [[1, 2], [3, 4]] +(1 row) + +--SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL); +--SELECT JSON_ARRAY(SELECT i FROM (VALUES (NULL::int[]), ('{1,2}'), (NULL), (NULL), ('{3,4}'), (NULL)) foo(i) NULL ON NULL RETURNING jsonb); +SELECT JSON_ARRAY(SELECT i FROM (VALUES (3), (1), (NULL), (2)) foo(i) ORDER BY i); + json_array +------------ + [1, 2, 3] +(1 row) + +-- Should fail +SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i)); +ERROR: subquery must return only one column +LINE 1: SELECT JSON_ARRAY(SELECT FROM (VALUES (1)) foo(i)); + ^ +SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i)); +ERROR: subquery must return only one column +LINE 1: SELECT JSON_ARRAY(SELECT i, i FROM (VALUES (1)) foo(i)); + ^ +SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j)); +ERROR: subquery must return only one column +LINE 1: SELECT JSON_ARRAY(SELECT * FROM (VALUES (1, 2)) foo(i, j)); + ^ +-- JSON_ARRAYAGG() +SELECT JSON_ARRAYAGG(i) IS NULL, + JSON_ARRAYAGG(i RETURNING jsonb) IS NULL +FROM generate_series(1, 0) i; + ?column? | ?column? 
+----------+---------- + t | t +(1 row) + +SELECT JSON_ARRAYAGG(i), + JSON_ARRAYAGG(i RETURNING jsonb) +FROM generate_series(1, 5) i; + json_arrayagg | json_arrayagg +-----------------+----------------- + [1, 2, 3, 4, 5] | [1, 2, 3, 4, 5] +(1 row) + +SELECT JSON_ARRAYAGG(i ORDER BY i DESC) +FROM generate_series(1, 5) i; + json_arrayagg +----------------- + [5, 4, 3, 2, 1] +(1 row) + +SELECT JSON_ARRAYAGG(i::text::json) +FROM generate_series(1, 5) i; + json_arrayagg +----------------- + [1, 2, 3, 4, 5] +(1 row) + +SELECT JSON_ARRAYAGG(JSON_ARRAY(i, i + 1 RETURNING text) FORMAT JSON) +FROM generate_series(1, 5) i; + json_arrayagg +------------------------------------------ + [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]] +(1 row) + +SELECT JSON_ARRAYAGG(NULL), + JSON_ARRAYAGG(NULL RETURNING jsonb) +FROM generate_series(1, 5); + json_arrayagg | json_arrayagg +---------------+--------------- + [] | [] +(1 row) + +SELECT JSON_ARRAYAGG(NULL NULL ON NULL), + JSON_ARRAYAGG(NULL NULL ON NULL RETURNING jsonb) +FROM generate_series(1, 5); + json_arrayagg | json_arrayagg +--------------------------------+-------------------------------- + [null, null, null, null, null] | [null, null, null, null, null] +(1 row) + +\x +SELECT + JSON_ARRAYAGG(bar) as no_options, + JSON_ARRAYAGG(bar RETURNING jsonb) as returning_jsonb, + JSON_ARRAYAGG(bar ABSENT ON NULL) as absent_on_null, + JSON_ARRAYAGG(bar ABSENT ON NULL RETURNING jsonb) as absentonnull_returning_jsonb, + JSON_ARRAYAGG(bar NULL ON NULL) as null_on_null, + JSON_ARRAYAGG(bar NULL ON NULL RETURNING jsonb) as nullonnull_returning_jsonb, + JSON_ARRAYAGG(foo) as row_no_options, + JSON_ARRAYAGG(foo RETURNING jsonb) as row_returning_jsonb, + JSON_ARRAYAGG(foo ORDER BY bar) FILTER (WHERE bar > 2) as row_filtered_agg, + JSON_ARRAYAGG(foo ORDER BY bar RETURNING jsonb) FILTER (WHERE bar > 2) as row_filtered_agg_returning_jsonb +FROM + (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL)) foo(bar); +-[ RECORD 1 ]--------------------+------------------------------------------------------------------------------------------------------------------------- +no_options | [1, 2, 3, 4, 5] +returning_jsonb | [1, 2, 3, 4, 5] +absent_on_null | [1, 2, 3, 4, 5] +absentonnull_returning_jsonb | [1, 2, 3, 4, 5] +null_on_null | [1, 2, 3, 4, 5, null, null, null, null] +nullonnull_returning_jsonb | [1, 2, 3, 4, 5, null, null, null, null] +row_no_options | [{"bar":1}, + + | {"bar":2}, + + | {"bar":3}, + + | {"bar":4}, + + | {"bar":5}, + + | {"bar":null}, + + | {"bar":null}, + + | {"bar":null}, + + | {"bar":null}] +row_returning_jsonb | [{"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 5}, {"bar": null}, {"bar": null}, {"bar": null}, {"bar": null}] +row_filtered_agg | [{"bar":3}, + + | {"bar":4}, + + | {"bar":5}] +row_filtered_agg_returning_jsonb | [{"bar": 3}, {"bar": 4}, {"bar": 5}] + +\x +SELECT + bar, JSON_ARRAYAGG(bar) FILTER (WHERE bar > 2) OVER (PARTITION BY foo.bar % 2) +FROM + (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL), (5), (4)) foo(bar); + bar | json_arrayagg +-----+--------------- + 4 | [4, 4] + 4 | [4, 4] + 2 | [4, 4] + 5 | [5, 3, 5] + 3 | [5, 3, 5] + 1 | [5, 3, 5] + 5 | [5, 3, 5] + | + | + | + | +(11 rows) + +-- JSON_OBJECTAGG() +SELECT JSON_OBJECTAGG('key': 1) IS NULL, + JSON_OBJECTAGG('key': 1 RETURNING jsonb) IS NULL +WHERE FALSE; + ?column? | ?column? 
+----------+---------- + t | t +(1 row) + +SELECT JSON_OBJECTAGG(NULL: 1); +ERROR: null value not allowed for object key +SELECT JSON_OBJECTAGG(NULL: 1 RETURNING jsonb); +ERROR: field name must not be null +SELECT + JSON_OBJECTAGG(i: i), +-- JSON_OBJECTAGG(i VALUE i), +-- JSON_OBJECTAGG(KEY i VALUE i), + JSON_OBJECTAGG(i: i RETURNING jsonb) +FROM + generate_series(1, 5) i; + json_objectagg | json_objectagg +-------------------------------------------------+------------------------------------------ + { "1" : 1, "2" : 2, "3" : 3, "4" : 4, "5" : 5 } | {"1": 1, "2": 2, "3": 3, "4": 4, "5": 5} +(1 row) + +SELECT + JSON_OBJECTAGG(k: v), + JSON_OBJECTAGG(k: v NULL ON NULL), + JSON_OBJECTAGG(k: v ABSENT ON NULL), + JSON_OBJECTAGG(k: v RETURNING jsonb), + JSON_OBJECTAGG(k: v NULL ON NULL RETURNING jsonb), + JSON_OBJECTAGG(k: v ABSENT ON NULL RETURNING jsonb) +FROM + (VALUES (1, 1), (1, NULL), (2, NULL), (3, 3)) foo(k, v); + json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg | json_objectagg +----------------------------------------------+----------------------------------------------+----------------------+--------------------------------+--------------------------------+------------------ + { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "1" : null, "2" : null, "3" : 3 } | { "1" : 1, "3" : 3 } | {"1": null, "2": null, "3": 3} | {"1": null, "2": null, "3": 3} | {"1": 1, "3": 3} +(1 row) + +SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS) +FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); +ERROR: duplicate JSON object key value: "1" +SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS) +FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); +ERROR: duplicate JSON object key value: "1" +SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS) +FROM (VALUES (1, 1), (0, NULL), (3, NULL), (2, 2), (4, NULL)) foo(k, v); + json_objectagg +---------------------- + { "1" : 1, "2" : 2 } +(1 row) + +SELECT JSON_OBJECTAGG(k: v WITH UNIQUE KEYS RETURNING jsonb) +FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); +ERROR: duplicate JSON object key value +SELECT JSON_OBJECTAGG(k: v ABSENT ON NULL WITH UNIQUE KEYS RETURNING jsonb) +FROM (VALUES (1, 1), (1, NULL), (2, 2)) foo(k, v); +ERROR: duplicate JSON object key value +-- Test JSON_OBJECT deparsing +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); + QUERY PLAN +------------------------------------------------------------------------------ + Result + Output: JSON_OBJECT('foo' : '1'::json, 'bar' : 'baz'::text RETURNING json) +(2 rows) + +CREATE VIEW json_object_view AS +SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); +\sv json_object_view +CREATE OR REPLACE VIEW public.json_object_view AS + SELECT JSON_OBJECT('foo' : '1'::text FORMAT JSON, 'bar' : 'baz'::text RETURNING json) AS "json_object" +DROP VIEW json_object_view; +-- Test JSON_ARRAY deparsing +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); + QUERY PLAN +--------------------------------------------------- + Result + Output: JSON_ARRAY('1'::json, 2 RETURNING json) +(2 rows) + +CREATE VIEW json_array_view AS +SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); +\sv json_array_view +CREATE OR REPLACE VIEW public.json_array_view AS + SELECT JSON_ARRAY('1'::text FORMAT JSON, 2 RETURNING json) AS "json_array" +DROP VIEW json_array_view; +-- Test JSON_OBJECTAGG deparsing +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea 
FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) +FROM generate_series(1,5) i; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE (i > 3)) + -> Function Scan on pg_catalog.generate_series i + Output: i + Function Call: generate_series(1, 5) +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) OVER (PARTITION BY i % 2) +FROM generate_series(1,5) i; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + WindowAgg + Output: JSON_OBJECTAGG(i : (('111'::text || (i)::text))::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) OVER (?), ((i % 2)) + -> Sort + Output: ((i % 2)), i + Sort Key: ((i.i % 2)) + -> Function Scan on pg_catalog.generate_series i + Output: (i % 2), i + Function Call: generate_series(1, 5) +(8 rows) + +CREATE VIEW json_objectagg_view AS +SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) +FROM generate_series(1,5) i; +\sv json_objectagg_view +CREATE OR REPLACE VIEW public.json_objectagg_view AS + SELECT JSON_OBJECTAGG(i : ('111'::text || i)::bytea FORMAT JSON WITH UNIQUE KEYS RETURNING text) FILTER (WHERE i > 3) AS "json_objectagg" + FROM generate_series(1, 5) i(i) +DROP VIEW json_objectagg_view; +-- Test JSON_ARRAYAGG deparsing +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) +FROM generate_series(1,5) i; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE (i > 3)) + -> Function Scan on pg_catalog.generate_series i + Output: i + Function Call: generate_series(1, 5) +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (PARTITION BY i % 2) +FROM generate_series(1,5) i; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + WindowAgg + Output: JSON_ARRAYAGG((('111'::text || (i)::text))::bytea FORMAT JSON NULL ON NULL RETURNING text) OVER (?), ((i % 2)) + -> Sort + Output: ((i % 2)), i + Sort Key: ((i.i % 2)) + -> Function Scan on pg_catalog.generate_series i + Output: (i % 2), i + Function Call: generate_series(1, 5) +(8 rows) + +CREATE VIEW json_arrayagg_view AS +SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) +FROM generate_series(1,5) i; +\sv json_arrayagg_view +CREATE OR REPLACE VIEW public.json_arrayagg_view AS + SELECT JSON_ARRAYAGG(('111'::text || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) AS "json_arrayagg" + FROM generate_series(1, 5) i(i) +DROP VIEW json_arrayagg_view; +-- Test JSON_ARRAY(subquery) deparsing +EXPLAIN (VERBOSE, COSTS OFF) +SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); + QUERY PLAN +--------------------------------------------------------------------- + Result + Output: $0 + InitPlan 1 (returns $0) + -> Aggregate + 
Output: JSON_ARRAYAGG("*VALUES*".column1 RETURNING jsonb) + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 +(7 rows) + +CREATE VIEW json_array_subquery_view AS +SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); +\sv json_array_subquery_view +CREATE OR REPLACE VIEW public.json_array_subquery_view AS + SELECT ( SELECT JSON_ARRAYAGG(q.a RETURNING jsonb) AS "json_arrayagg" + FROM ( SELECT foo.i + FROM ( VALUES (1), (2), (NULL::integer), (4)) foo(i)) q(a)) AS "json_array" +DROP VIEW json_array_subquery_view; +-- IS JSON predicate +SELECT NULL IS JSON; + ?column? +---------- + +(1 row) + +SELECT NULL IS NOT JSON; + ?column? +---------- + +(1 row) + +SELECT NULL::json IS JSON; + ?column? +---------- + +(1 row) + +SELECT NULL::jsonb IS JSON; + ?column? +---------- + +(1 row) + +SELECT NULL::text IS JSON; + ?column? +---------- + +(1 row) + +SELECT NULL::bytea IS JSON; + ?column? +---------- + +(1 row) + +SELECT NULL::int IS JSON; +ERROR: cannot use type integer in IS JSON predicate +SELECT '' IS JSON; + ?column? +---------- + f +(1 row) + +SELECT bytea '\x00' IS JSON; +ERROR: invalid byte sequence for encoding "UTF8": 0x00 +CREATE TABLE test_is_json (js text); +INSERT INTO test_is_json VALUES + (NULL), + (''), + ('123'), + ('"aaa "'), + ('true'), + ('null'), + ('[]'), + ('[1, "2", {}]'), + ('{}'), + ('{ "a": 1, "b": null }'), + ('{ "a": 1, "a": null }'), + ('{ "a": 1, "b": [{ "a": 1 }, { "a": 2 }] }'), + ('{ "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] }'), + ('aaa'), + ('{a:1}'), + ('["a",]'); +SELECT + js, + js IS JSON "IS JSON", + js IS NOT JSON "IS NOT JSON", + js IS JSON VALUE "IS VALUE", + js IS JSON OBJECT "IS OBJECT", + js IS JSON ARRAY "IS ARRAY", + js IS JSON SCALAR "IS SCALAR", + js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", + js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" +FROM + test_is_json; + js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE +-----------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- + | | | | | | | | + | f | t | f | f | f | f | f | f + 123 | t | f | t | f | f | t | t | t + "aaa " | t | f | t | f | f | t | t | t + true | t | f | t | f | f | t | t | t + null | t | f | t | f | f | t | t | t + [] | t | f | t | f | t | f | t | t + [1, "2", {}] | t | f | t | f | t | f | t | t + {} | t | f | t | t | f | f | t | t + { "a": 1, "b": null } | t | f | t | t | f | f | t | t + { "a": 1, "a": null } | t | f | t | t | f | f | t | f + { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t + { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f + aaa | f | t | f | f | f | f | f | f + {a:1} | f | t | f | f | f | f | f | f + ["a",] | f | t | f | f | f | f | f | f +(16 rows) + +SELECT + js, + js IS JSON "IS JSON", + js IS NOT JSON "IS NOT JSON", + js IS JSON VALUE "IS VALUE", + js IS JSON OBJECT "IS OBJECT", + js IS JSON ARRAY "IS ARRAY", + js IS JSON SCALAR "IS SCALAR", + js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", + js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" +FROM + (SELECT js::json FROM test_is_json WHERE js IS JSON) foo(js); + js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE +-----------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- + 123 | t | f | t | f | f | t | t | t + "aaa " | t | f | t | f | f | t | t | 
t + true | t | f | t | f | f | t | t | t + null | t | f | t | f | f | t | t | t + [] | t | f | t | f | t | f | t | t + [1, "2", {}] | t | f | t | f | t | f | t | t + {} | t | f | t | t | f | f | t | t + { "a": 1, "b": null } | t | f | t | t | f | f | t | t + { "a": 1, "a": null } | t | f | t | t | f | f | t | f + { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t + { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f +(11 rows) + +SELECT + js0, + js IS JSON "IS JSON", + js IS NOT JSON "IS NOT JSON", + js IS JSON VALUE "IS VALUE", + js IS JSON OBJECT "IS OBJECT", + js IS JSON ARRAY "IS ARRAY", + js IS JSON SCALAR "IS SCALAR", + js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", + js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" +FROM + (SELECT js, js::bytea FROM test_is_json WHERE js IS JSON) foo(js0, js); + js0 | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE +-----------------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- + 123 | t | f | t | f | f | t | t | t + "aaa " | t | f | t | f | f | t | t | t + true | t | f | t | f | f | t | t | t + null | t | f | t | f | f | t | t | t + [] | t | f | t | f | t | f | t | t + [1, "2", {}] | t | f | t | f | t | f | t | t + {} | t | f | t | t | f | f | t | t + { "a": 1, "b": null } | t | f | t | t | f | f | t | t + { "a": 1, "a": null } | t | f | t | t | f | f | t | f + { "a": 1, "b": [{ "a": 1 }, { "a": 2 }] } | t | f | t | t | f | f | t | t + { "a": 1, "b": [{ "a": 1, "b": 0, "a": 2 }] } | t | f | t | t | f | f | t | f +(11 rows) + +SELECT + js, + js IS JSON "IS JSON", + js IS NOT JSON "IS NOT JSON", + js IS JSON VALUE "IS VALUE", + js IS JSON OBJECT "IS OBJECT", + js IS JSON ARRAY "IS ARRAY", + js IS JSON SCALAR "IS SCALAR", + js IS JSON WITHOUT UNIQUE KEYS "WITHOUT UNIQUE", + js IS JSON WITH UNIQUE KEYS "WITH UNIQUE" +FROM + (SELECT js::jsonb FROM test_is_json WHERE js IS JSON) foo(js); + js | IS JSON | IS NOT JSON | IS VALUE | IS OBJECT | IS ARRAY | IS SCALAR | WITHOUT UNIQUE | WITH UNIQUE +-------------------------------------+---------+-------------+----------+-----------+----------+-----------+----------------+------------- + 123 | t | f | t | f | f | t | t | t + "aaa " | t | f | t | f | f | t | t | t + true | t | f | t | f | f | t | t | t + null | t | f | t | f | f | t | t | t + [] | t | f | t | f | t | f | t | t + [1, "2", {}] | t | f | t | f | t | f | t | t + {} | t | f | t | t | f | f | t | t + {"a": 1, "b": null} | t | f | t | t | f | f | t | t + {"a": null} | t | f | t | t | f | f | t | t + {"a": 1, "b": [{"a": 1}, {"a": 2}]} | t | f | t | t | f | f | t | t + {"a": 1, "b": [{"a": 2, "b": 0}]} | t | f | t | t | f | f | t | t +(11 rows) + +-- Test IS JSON deparsing +EXPLAIN (VERBOSE, COSTS OFF) +SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------- + Function Scan on pg_catalog.generate_series i + Output: ('1'::text IS JSON), (('1'::text || (i)::text) IS JSON SCALAR), (NOT ('[]'::text IS JSON ARRAY)), ('{}'::text IS JSON OBJECT WITH UNIQUE KEYS) + Function Call: generate_series(1, 3) +(3 rows) + +CREATE VIEW is_json_view AS +SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS 
"scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM generate_series(1, 3) i; +\sv is_json_view +CREATE OR REPLACE VIEW public.is_json_view AS + SELECT '1'::text IS JSON AS "any", + ('1'::text || i) IS JSON SCALAR AS scalar, + NOT '[]'::text IS JSON ARRAY AS "array", + '{}'::text IS JSON OBJECT WITH UNIQUE KEYS AS object + FROM generate_series(1, 3) i(i) +DROP VIEW is_json_view; diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out new file mode 100644 index 0000000..94187e5 --- /dev/null +++ b/src/test/regress/expected/stats.out @@ -0,0 +1,1631 @@ +-- +-- Test cumulative stats system +-- +-- Must be run after tenk2 has been created (by create_table), +-- populated (by create_misc) and indexed (by create_index). +-- +-- conditio sine qua non +SHOW track_counts; -- must be on + track_counts +-------------- + on +(1 row) + +-- ensure that both seqscan and indexscan plans are allowed +SET enable_seqscan TO on; +SET enable_indexscan TO on; +-- for the moment, we don't want index-only scans here +SET enable_indexonlyscan TO off; +-- not enabled by default, but we want to test it... +SET track_functions TO 'all'; +-- record dboid for later use +SELECT oid AS dboid from pg_database where datname = current_database() \gset +-- save counters +BEGIN; +SET LOCAL stats_fetch_consistency = snapshot; +CREATE TABLE prevstats AS +SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch, + (b.heap_blks_read + b.heap_blks_hit) AS heap_blks, + (b.idx_blks_read + b.idx_blks_hit) AS idx_blks, + pg_stat_get_snapshot_timestamp() as snap_ts + FROM pg_catalog.pg_stat_user_tables AS t, + pg_catalog.pg_statio_user_tables AS b + WHERE t.relname='tenk2' AND b.relname='tenk2'; +COMMIT; +-- test effects of TRUNCATE on n_live_tup/n_dead_tup counters +CREATE TABLE trunc_stats_test(id serial); +CREATE TABLE trunc_stats_test1(id serial, stuff text); +CREATE TABLE trunc_stats_test2(id serial); +CREATE TABLE trunc_stats_test3(id serial, stuff text); +CREATE TABLE trunc_stats_test4(id serial); +-- check that n_live_tup is reset to 0 after truncate +INSERT INTO trunc_stats_test DEFAULT VALUES; +INSERT INTO trunc_stats_test DEFAULT VALUES; +INSERT INTO trunc_stats_test DEFAULT VALUES; +TRUNCATE trunc_stats_test; +-- test involving a truncate in a transaction; 4 ins but only 1 live +INSERT INTO trunc_stats_test1 DEFAULT VALUES; +INSERT INTO trunc_stats_test1 DEFAULT VALUES; +INSERT INTO trunc_stats_test1 DEFAULT VALUES; +UPDATE trunc_stats_test1 SET id = id + 10 WHERE id IN (1, 2); +DELETE FROM trunc_stats_test1 WHERE id = 3; +BEGIN; +UPDATE trunc_stats_test1 SET id = id + 100; +TRUNCATE trunc_stats_test1; +INSERT INTO trunc_stats_test1 DEFAULT VALUES; +COMMIT; +-- use a savepoint: 1 insert, 1 live +BEGIN; +INSERT INTO trunc_stats_test2 DEFAULT VALUES; +INSERT INTO trunc_stats_test2 DEFAULT VALUES; +SAVEPOINT p1; +INSERT INTO trunc_stats_test2 DEFAULT VALUES; +TRUNCATE trunc_stats_test2; +INSERT INTO trunc_stats_test2 DEFAULT VALUES; +RELEASE SAVEPOINT p1; +COMMIT; +-- rollback a savepoint: this should count 4 inserts and have 2 +-- live tuples after commit (and 2 dead ones due to aborted subxact) +BEGIN; +INSERT INTO trunc_stats_test3 DEFAULT VALUES; +INSERT INTO trunc_stats_test3 DEFAULT VALUES; +SAVEPOINT p1; +INSERT INTO trunc_stats_test3 DEFAULT VALUES; +INSERT INTO trunc_stats_test3 DEFAULT VALUES; +TRUNCATE trunc_stats_test3; +INSERT INTO trunc_stats_test3 DEFAULT VALUES; +ROLLBACK TO SAVEPOINT p1; +COMMIT; +-- rollback a truncate: this should 
count 2 inserts and produce 2 dead tuples +BEGIN; +INSERT INTO trunc_stats_test4 DEFAULT VALUES; +INSERT INTO trunc_stats_test4 DEFAULT VALUES; +TRUNCATE trunc_stats_test4; +INSERT INTO trunc_stats_test4 DEFAULT VALUES; +ROLLBACK; +-- do a seqscan +SELECT count(*) FROM tenk2; + count +------- + 10000 +(1 row) + +-- do an indexscan +-- make sure it is not a bitmap scan, which might skip fetching heap tuples +SET enable_bitmapscan TO off; +SELECT count(*) FROM tenk2 WHERE unique1 = 1; + count +------- + 1 +(1 row) + +RESET enable_bitmapscan; +-- ensure pending stats are flushed +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +-- check effects +BEGIN; +SET LOCAL stats_fetch_consistency = snapshot; +SELECT relname, n_tup_ins, n_tup_upd, n_tup_del, n_live_tup, n_dead_tup + FROM pg_stat_user_tables + WHERE relname like 'trunc_stats_test%' order by relname; + relname | n_tup_ins | n_tup_upd | n_tup_del | n_live_tup | n_dead_tup +-------------------+-----------+-----------+-----------+------------+------------ + trunc_stats_test | 3 | 0 | 0 | 0 | 0 + trunc_stats_test1 | 4 | 2 | 1 | 1 | 0 + trunc_stats_test2 | 1 | 0 | 0 | 1 | 0 + trunc_stats_test3 | 4 | 0 | 0 | 2 | 2 + trunc_stats_test4 | 2 | 0 | 0 | 0 | 2 +(5 rows) + +SELECT st.seq_scan >= pr.seq_scan + 1, + st.seq_tup_read >= pr.seq_tup_read + cl.reltuples, + st.idx_scan >= pr.idx_scan + 1, + st.idx_tup_fetch >= pr.idx_tup_fetch + 1 + FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr + WHERE st.relname='tenk2' AND cl.relname='tenk2'; + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + t | t | t | t +(1 row) + +SELECT st.heap_blks_read + st.heap_blks_hit >= pr.heap_blks + cl.relpages, + st.idx_blks_read + st.idx_blks_hit >= pr.idx_blks + 1 + FROM pg_statio_user_tables AS st, pg_class AS cl, prevstats AS pr + WHERE st.relname='tenk2' AND cl.relname='tenk2'; + ?column? | ?column? 
+----------+---------- + t | t +(1 row) + +SELECT pr.snap_ts < pg_stat_get_snapshot_timestamp() as snapshot_newer +FROM prevstats AS pr; + snapshot_newer +---------------- + t +(1 row) + +COMMIT; +---- +-- Basic tests for track_functions +--- +CREATE FUNCTION stats_test_func1() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$; +SELECT 'stats_test_func1()'::regprocedure::oid AS stats_test_func1_oid \gset +CREATE FUNCTION stats_test_func2() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$; +SELECT 'stats_test_func2()'::regprocedure::oid AS stats_test_func2_oid \gset +-- test that stats are accumulated +BEGIN; +SET LOCAL stats_fetch_consistency = none; +SELECT pg_stat_get_function_calls(:stats_test_func1_oid); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid); + pg_stat_get_xact_function_calls +--------------------------------- + +(1 row) + +SELECT stats_test_func1(); + stats_test_func1 +------------------ + +(1 row) + +SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid); + pg_stat_get_xact_function_calls +--------------------------------- + 1 +(1 row) + +SELECT stats_test_func1(); + stats_test_func1 +------------------ + +(1 row) + +SELECT pg_stat_get_xact_function_calls(:stats_test_func1_oid); + pg_stat_get_xact_function_calls +--------------------------------- + 2 +(1 row) + +SELECT pg_stat_get_function_calls(:stats_test_func1_oid); + pg_stat_get_function_calls +---------------------------- + 0 +(1 row) + +COMMIT; +-- Verify that function stats are not transactional +-- rolled back savepoint in committing transaction +BEGIN; +SELECT stats_test_func2(); + stats_test_func2 +------------------ + +(1 row) + +SAVEPOINT foo; +SELECT stats_test_func2(); + stats_test_func2 +------------------ + +(1 row) + +ROLLBACK TO SAVEPOINT foo; +SELECT pg_stat_get_xact_function_calls(:stats_test_func2_oid); + pg_stat_get_xact_function_calls +--------------------------------- + 2 +(1 row) + +SELECT stats_test_func2(); + stats_test_func2 +------------------ + +(1 row) + +COMMIT; +-- rolled back transaction +BEGIN; +SELECT stats_test_func2(); + stats_test_func2 +------------------ + +(1 row) + +ROLLBACK; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +-- check collected stats +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; + funcname | calls +------------------+------- + stats_test_func1 | 2 +(1 row) + +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func2_oid; + funcname | calls +------------------+------- + stats_test_func2 | 4 +(1 row) + +-- check that a rolled back drop function stats leaves stats alive +BEGIN; +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; + funcname | calls +------------------+------- + stats_test_func1 | 2 +(1 row) + +DROP FUNCTION stats_test_func1(); +-- shouldn't be visible via view +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; + funcname | calls +----------+------- +(0 rows) + +-- but still via oid access +SELECT pg_stat_get_function_calls(:stats_test_func1_oid); + pg_stat_get_function_calls +---------------------------- + 2 +(1 row) + +ROLLBACK; +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; + funcname | calls +------------------+------- + stats_test_func1 | 2 +(1 row) + +SELECT pg_stat_get_function_calls(:stats_test_func1_oid); + 
pg_stat_get_function_calls +---------------------------- + 2 +(1 row) + +-- check that function dropped in main transaction leaves no stats behind +BEGIN; +DROP FUNCTION stats_test_func1(); +COMMIT; +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func1_oid; + funcname | calls +----------+------- +(0 rows) + +SELECT pg_stat_get_function_calls(:stats_test_func1_oid); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +-- check that function dropped in a subtransaction leaves no stats behind +BEGIN; +SELECT stats_test_func2(); + stats_test_func2 +------------------ + +(1 row) + +SAVEPOINT a; +SELECT stats_test_func2(); + stats_test_func2 +------------------ + +(1 row) + +SAVEPOINT b; +DROP FUNCTION stats_test_func2(); +COMMIT; +SELECT funcname, calls FROM pg_stat_user_functions WHERE funcid = :stats_test_func2_oid; + funcname | calls +----------+------- +(0 rows) + +SELECT pg_stat_get_function_calls(:stats_test_func2_oid); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +-- Check that stats for relations are dropped. For that we need to access stats +-- by oid after the DROP TABLE. Save oids. +CREATE TABLE drop_stats_test(); +INSERT INTO drop_stats_test DEFAULT VALUES; +SELECT 'drop_stats_test'::regclass::oid AS drop_stats_test_oid \gset +CREATE TABLE drop_stats_test_xact(); +INSERT INTO drop_stats_test_xact DEFAULT VALUES; +SELECT 'drop_stats_test_xact'::regclass::oid AS drop_stats_test_xact_oid \gset +CREATE TABLE drop_stats_test_subxact(); +INSERT INTO drop_stats_test_subxact DEFAULT VALUES; +SELECT 'drop_stats_test_subxact'::regclass::oid AS drop_stats_test_subxact_oid \gset +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT pg_stat_get_live_tuples(:drop_stats_test_oid); + pg_stat_get_live_tuples +------------------------- + 1 +(1 row) + +DROP TABLE drop_stats_test; +SELECT pg_stat_get_live_tuples(:drop_stats_test_oid); + pg_stat_get_live_tuples +------------------------- + 0 +(1 row) + +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 0 +(1 row) + +-- check that rollback protects against having stats dropped and that local +-- modifications don't pose a problem +SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); + pg_stat_get_live_tuples +------------------------- + 1 +(1 row) + +SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_tuples_inserted +----------------------------- + 1 +(1 row) + +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 0 +(1 row) + +BEGIN; +INSERT INTO drop_stats_test_xact DEFAULT VALUES; +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 1 +(1 row) + +DROP TABLE drop_stats_test_xact; +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 0 +(1 row) + +ROLLBACK; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); + pg_stat_get_live_tuples +------------------------- + 1 +(1 row) + +SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_tuples_inserted +----------------------------- + 2 +(1 row) + +-- transactional drop 
+SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); + pg_stat_get_live_tuples +------------------------- + 1 +(1 row) + +SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_tuples_inserted +----------------------------- + 2 +(1 row) + +BEGIN; +INSERT INTO drop_stats_test_xact DEFAULT VALUES; +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 1 +(1 row) + +DROP TABLE drop_stats_test_xact; +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 0 +(1 row) + +COMMIT; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT pg_stat_get_live_tuples(:drop_stats_test_xact_oid); + pg_stat_get_live_tuples +------------------------- + 0 +(1 row) + +SELECT pg_stat_get_tuples_inserted(:drop_stats_test_xact_oid); + pg_stat_get_tuples_inserted +----------------------------- + 0 +(1 row) + +-- savepoint rollback (2 levels) +SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); + pg_stat_get_live_tuples +------------------------- + 1 +(1 row) + +BEGIN; +INSERT INTO drop_stats_test_subxact DEFAULT VALUES; +SAVEPOINT sp1; +INSERT INTO drop_stats_test_subxact DEFAULT VALUES; +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_subxact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 2 +(1 row) + +SAVEPOINT sp2; +DROP TABLE drop_stats_test_subxact; +ROLLBACK TO SAVEPOINT sp2; +SELECT pg_stat_get_xact_tuples_inserted(:drop_stats_test_subxact_oid); + pg_stat_get_xact_tuples_inserted +---------------------------------- + 2 +(1 row) + +COMMIT; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); + pg_stat_get_live_tuples +------------------------- + 3 +(1 row) + +-- savepoint rolback (1 level) +SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); + pg_stat_get_live_tuples +------------------------- + 3 +(1 row) + +BEGIN; +SAVEPOINT sp1; +DROP TABLE drop_stats_test_subxact; +SAVEPOINT sp2; +ROLLBACK TO SAVEPOINT sp1; +COMMIT; +SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); + pg_stat_get_live_tuples +------------------------- + 3 +(1 row) + +-- and now actually drop +SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); + pg_stat_get_live_tuples +------------------------- + 3 +(1 row) + +BEGIN; +SAVEPOINT sp1; +DROP TABLE drop_stats_test_subxact; +SAVEPOINT sp2; +RELEASE SAVEPOINT sp1; +COMMIT; +SELECT pg_stat_get_live_tuples(:drop_stats_test_subxact_oid); + pg_stat_get_live_tuples +------------------------- + 0 +(1 row) + +DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4; +DROP TABLE prevstats; +----- +-- Test that last_seq_scan, last_idx_scan are correctly maintained +-- +-- Perform test using a temporary table. That way autovacuum etc won't +-- interfere. To be able to check that timestamps increase, we sleep for 100ms +-- between tests, assuming that there aren't systems with a coarser timestamp +-- granularity. 
+----- +BEGIN; +CREATE TEMPORARY TABLE test_last_scan(idx_col int primary key, noidx_col int); +INSERT INTO test_last_scan(idx_col, noidx_col) VALUES(1, 1); +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT last_seq_scan, last_idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; + last_seq_scan | last_idx_scan +---------------+--------------- + | +(1 row) + +COMMIT; +SELECT pg_stat_reset_single_table_counters('test_last_scan'::regclass); + pg_stat_reset_single_table_counters +------------------------------------- + +(1 row) + +SELECT seq_scan, idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; + seq_scan | idx_scan +----------+---------- + 0 | 0 +(1 row) + +-- ensure we start out with exactly one index and sequential scan +BEGIN; +SET LOCAL enable_seqscan TO on; +SET LOCAL enable_indexscan TO on; +SET LOCAL enable_bitmapscan TO off; +EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; + QUERY PLAN +---------------------------------- + Aggregate + -> Seq Scan on test_last_scan + Filter: (noidx_col = 1) +(3 rows) + +SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; + count +------- + 1 +(1 row) + +SET LOCAL enable_seqscan TO off; +EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Index Scan using test_last_scan_pkey on test_last_scan + Index Cond: (idx_col = 1) +(3 rows) + +SELECT count(*) FROM test_last_scan WHERE idx_col = 1; + count +------- + 1 +(1 row) + +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +COMMIT; +-- fetch timestamps from before the next test +SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset +SELECT pg_sleep(0.1); -- assume a minimum timestamp granularity of 100ms + pg_sleep +---------- + +(1 row) + +-- cause one sequential scan +BEGIN; +SET LOCAL enable_seqscan TO on; +SET LOCAL enable_indexscan TO off; +SET LOCAL enable_bitmapscan TO off; +EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; + QUERY PLAN +---------------------------------- + Aggregate + -> Seq Scan on test_last_scan + Filter: (noidx_col = 1) +(3 rows) + +SELECT count(*) FROM test_last_scan WHERE noidx_col = 1; + count +------- + 1 +(1 row) + +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +COMMIT; +-- check that just sequential scan stats were incremented +SELECT seq_scan, :'test_last_seq' < last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' = last_idx_scan AS idx_ok +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; + seq_scan | seq_ok | idx_scan | idx_ok +----------+--------+----------+-------- + 2 | t | 1 | t +(1 row) + +-- fetch timestamps from before the next test +SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +-- cause one index scan +BEGIN; +SET LOCAL enable_seqscan TO off; +SET LOCAL enable_indexscan TO on; +SET LOCAL enable_bitmapscan TO off; +EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Index Scan using test_last_scan_pkey on 
test_last_scan + Index Cond: (idx_col = 1) +(3 rows) + +SELECT count(*) FROM test_last_scan WHERE idx_col = 1; + count +------- + 1 +(1 row) + +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +COMMIT; +-- check that just index scan stats were incremented +SELECT seq_scan, :'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' < last_idx_scan AS idx_ok +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; + seq_scan | seq_ok | idx_scan | idx_ok +----------+--------+----------+-------- + 2 | t | 2 | t +(1 row) + +-- fetch timestamps from before the next test +SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass \gset +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +-- cause one bitmap index scan +BEGIN; +SET LOCAL enable_seqscan TO off; +SET LOCAL enable_indexscan TO off; +SET LOCAL enable_bitmapscan TO on; +EXPLAIN (COSTS off) SELECT count(*) FROM test_last_scan WHERE idx_col = 1; + QUERY PLAN +------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on test_last_scan + Recheck Cond: (idx_col = 1) + -> Bitmap Index Scan on test_last_scan_pkey + Index Cond: (idx_col = 1) +(5 rows) + +SELECT count(*) FROM test_last_scan WHERE idx_col = 1; + count +------- + 1 +(1 row) + +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +COMMIT; +-- check that just index scan stats were incremented +SELECT seq_scan, :'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, :'test_last_idx' < last_idx_scan AS idx_ok +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; + seq_scan | seq_ok | idx_scan | idx_ok +----------+--------+----------+-------- + 2 | t | 3 | t +(1 row) + +----- +-- Test reset of some stats for shared table +----- +-- This updates the comment of the database currently in use in +-- pg_shdescription with a fake value, then sets it back to its +-- original value. +SELECT shobj_description(d.oid, 'pg_database') as description_before + FROM pg_database d WHERE datname = current_database() \gset +-- force some stats in pg_shdescription. +BEGIN; +SELECT current_database() as datname \gset +COMMENT ON DATABASE :"datname" IS 'This is a test comment'; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +COMMIT; +-- check that the stats are reset. 
+SELECT (n_tup_ins + n_tup_upd) > 0 AS has_data FROM pg_stat_all_tables + WHERE relid = 'pg_shdescription'::regclass; + has_data +---------- + t +(1 row) + +SELECT pg_stat_reset_single_table_counters('pg_shdescription'::regclass); + pg_stat_reset_single_table_counters +------------------------------------- + +(1 row) + +SELECT (n_tup_ins + n_tup_upd) > 0 AS has_data FROM pg_stat_all_tables + WHERE relid = 'pg_shdescription'::regclass; + has_data +---------- + f +(1 row) + +-- set back comment +\if :{?description_before} + COMMENT ON DATABASE :"datname" IS :'description_before'; +\else + COMMENT ON DATABASE :"datname" IS NULL; +\endif +----- +-- Test that various stats views are being properly populated +----- +-- Test that sessions is incremented when a new session is started in pg_stat_database +SELECT sessions AS db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset +\c +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sessions > :db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()); + ?column? +---------- + t +(1 row) + +-- Test pg_stat_bgwriter checkpointer-related stats, together with pg_stat_wal +SELECT checkpoints_req AS rqst_ckpts_before FROM pg_stat_bgwriter \gset +-- Test pg_stat_wal (and make a temp table so our temp schema exists) +SELECT wal_bytes AS wal_bytes_before FROM pg_stat_wal \gset +CREATE TEMP TABLE test_stats_temp AS SELECT 17; +DROP TABLE test_stats_temp; +-- Checkpoint twice: The checkpointer reports stats after reporting completion +-- of the checkpoint. But after a second checkpoint we'll see at least the +-- results of the first. +CHECKPOINT; +CHECKPOINT; +SELECT checkpoints_req > :rqst_ckpts_before FROM pg_stat_bgwriter; + ?column? +---------- + t +(1 row) + +SELECT wal_bytes > :wal_bytes_before FROM pg_stat_wal; + ?column? +---------- + t +(1 row) + +-- Test pg_stat_get_backend_idset() and some allied functions. +-- In particular, verify that their notion of backend ID matches +-- our temp schema index. +SELECT (current_schemas(true))[1] = ('pg_temp_' || beid::text) AS match +FROM pg_stat_get_backend_idset() beid +WHERE pg_stat_get_backend_pid(beid) = pg_backend_pid(); + match +------- + t +(1 row) + +----- +-- Test that resetting stats works for reset timestamp +----- +-- Test that reset_slru with a specified SLRU works. +SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'CommitTs' \gset +SELECT stats_reset AS slru_notify_reset_ts FROM pg_stat_slru WHERE name = 'Notify' \gset +SELECT pg_stat_reset_slru('CommitTs'); + pg_stat_reset_slru +-------------------- + +(1 row) + +SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'CommitTs'; + ?column? +---------- + t +(1 row) + +SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'CommitTs' \gset +-- Test that multiple SLRUs are reset when no specific SLRU provided to reset function +SELECT pg_stat_reset_slru(NULL); + pg_stat_reset_slru +-------------------- + +(1 row) + +SELECT stats_reset > :'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'CommitTs'; + ?column? +---------- + t +(1 row) + +SELECT stats_reset > :'slru_notify_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'Notify'; + ?column? 
+---------- + t +(1 row) + +-- Test that reset_shared with archiver specified as the stats type works +SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver \gset +SELECT pg_stat_reset_shared('archiver'); + pg_stat_reset_shared +---------------------- + +(1 row) + +SELECT stats_reset > :'archiver_reset_ts'::timestamptz FROM pg_stat_archiver; + ?column? +---------- + t +(1 row) + +SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver \gset +-- Test that reset_shared with bgwriter specified as the stats type works +SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter \gset +SELECT pg_stat_reset_shared('bgwriter'); + pg_stat_reset_shared +---------------------- + +(1 row) + +SELECT stats_reset > :'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter; + ?column? +---------- + t +(1 row) + +SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter \gset +-- Test that reset_shared with wal specified as the stats type works +SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal \gset +SELECT pg_stat_reset_shared('wal'); + pg_stat_reset_shared +---------------------- + +(1 row) + +SELECT stats_reset > :'wal_reset_ts'::timestamptz FROM pg_stat_wal; + ?column? +---------- + t +(1 row) + +SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal \gset +-- Test that reset_shared with no specified stats type doesn't reset anything +SELECT pg_stat_reset_shared(NULL); + pg_stat_reset_shared +---------------------- + +(1 row) + +SELECT stats_reset = :'archiver_reset_ts'::timestamptz FROM pg_stat_archiver; + ?column? +---------- + t +(1 row) + +SELECT stats_reset = :'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter; + ?column? +---------- + t +(1 row) + +SELECT stats_reset = :'wal_reset_ts'::timestamptz FROM pg_stat_wal; + ?column? +---------- + t +(1 row) + +-- Test that reset works for pg_stat_database +-- Since pg_stat_database stats_reset starts out as NULL, reset it once first so we have something to compare it to +SELECT pg_stat_reset(); + pg_stat_reset +--------------- + +(1 row) + +SELECT stats_reset AS db_reset_ts FROM pg_stat_database WHERE datname = (SELECT current_database()) \gset +SELECT pg_stat_reset(); + pg_stat_reset +--------------- + +(1 row) + +SELECT stats_reset > :'db_reset_ts'::timestamptz FROM pg_stat_database WHERE datname = (SELECT current_database()); + ?column? +---------- + t +(1 row) + +---- +-- pg_stat_get_snapshot_timestamp behavior +---- +BEGIN; +SET LOCAL stats_fetch_consistency = snapshot; +-- no snapshot yet, return NULL +SELECT pg_stat_get_snapshot_timestamp(); + pg_stat_get_snapshot_timestamp +-------------------------------- + +(1 row) + +-- any attempt at accessing stats will build snapshot +SELECT pg_stat_get_function_calls(0); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +SELECT pg_stat_get_snapshot_timestamp() >= NOW(); + ?column? +---------- + t +(1 row) + +-- shows NULL again after clearing +SELECT pg_stat_clear_snapshot(); + pg_stat_clear_snapshot +------------------------ + +(1 row) + +SELECT pg_stat_get_snapshot_timestamp(); + pg_stat_get_snapshot_timestamp +-------------------------------- + +(1 row) + +COMMIT; +---- +-- Changing stats_fetch_consistency in a transaction. 
+---- +BEGIN; +-- Stats filled under the cache mode +SET LOCAL stats_fetch_consistency = cache; +SELECT pg_stat_get_function_calls(0); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; + snapshot_ok +------------- + f +(1 row) + +-- Success in accessing pre-existing snapshot data. +SET LOCAL stats_fetch_consistency = snapshot; +SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; + snapshot_ok +------------- + f +(1 row) + +SELECT pg_stat_get_function_calls(0); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; + snapshot_ok +------------- + t +(1 row) + +-- Snapshot cleared. +SET LOCAL stats_fetch_consistency = none; +SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; + snapshot_ok +------------- + f +(1 row) + +SELECT pg_stat_get_function_calls(0); + pg_stat_get_function_calls +---------------------------- + +(1 row) + +SELECT pg_stat_get_snapshot_timestamp() IS NOT NULL AS snapshot_ok; + snapshot_ok +------------- + f +(1 row) + +ROLLBACK; +---- +-- pg_stat_have_stats behavior +---- +-- fixed-numbered stats exist +SELECT pg_stat_have_stats('bgwriter', 0, 0); + pg_stat_have_stats +-------------------- + t +(1 row) + +-- unknown stats kinds error out +SELECT pg_stat_have_stats('zaphod', 0, 0); +ERROR: invalid statistics kind: "zaphod" +-- db stats have objoid 0 +SELECT pg_stat_have_stats('database', :dboid, 1); + pg_stat_have_stats +-------------------- + f +(1 row) + +SELECT pg_stat_have_stats('database', :dboid, 0); + pg_stat_have_stats +-------------------- + t +(1 row) + +-- pg_stat_have_stats returns true for committed index creation +CREATE table stats_test_tab1 as select generate_series(1,10) a; +CREATE index stats_test_idx1 on stats_test_tab1(a); +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset +SET enable_seqscan TO off; +select a from stats_test_tab1 where a = 3; + a +--- + 3 +(1 row) + +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +-- pg_stat_have_stats returns false for dropped index with stats +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +DROP index stats_test_idx1; +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + f +(1 row) + +-- pg_stat_have_stats returns false for rolled back index creation +BEGIN; +CREATE index stats_test_idx1 on stats_test_tab1(a); +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset +select a from stats_test_tab1 where a = 3; + a +--- + 3 +(1 row) + +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +ROLLBACK; +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + f +(1 row) + +-- pg_stat_have_stats returns true for reindex CONCURRENTLY +CREATE index stats_test_idx1 on stats_test_tab1(a); +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset +select a from stats_test_tab1 where a = 3; + a +--- + 3 +(1 row) + +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +REINDEX index CONCURRENTLY stats_test_idx1; +-- false for previous oid +SELECT 
pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + f +(1 row) + +-- true for new oid +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid \gset +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +-- pg_stat_have_stats returns true for a rolled back drop index with stats +BEGIN; +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +DROP index stats_test_idx1; +ROLLBACK; +SELECT pg_stat_have_stats('relation', :dboid, :stats_test_idx1_oid); + pg_stat_have_stats +-------------------- + t +(1 row) + +-- put enable_seqscan back to on +SET enable_seqscan TO on; +-- ensure that stats accessors handle NULL input correctly +SELECT pg_stat_get_replication_slot(NULL); + pg_stat_get_replication_slot +------------------------------ + +(1 row) + +SELECT pg_stat_get_subscription_stats(NULL); + pg_stat_get_subscription_stats +-------------------------------- + +(1 row) + +-- Test that the following operations are tracked in pg_stat_io: +-- - reads of target blocks into shared buffers +-- - writes of shared buffers to permanent storage +-- - extends of relations using shared buffers +-- - fsyncs done to ensure the durability of data dirtying shared buffers +-- - shared buffer hits +-- There is no test for blocks evicted from shared buffers, because we cannot +-- be sure of the state of shared buffers at the point the test is run. +-- Create a regular table and insert some data to generate IOCONTEXT_NORMAL +-- extends. +SELECT sum(extends) AS io_sum_shared_before_extends + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs + FROM pg_stat_io + WHERE object = 'relation' \gset io_sum_shared_before_ +CREATE TABLE test_io_shared(a int); +INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(extends) AS io_sum_shared_after_extends + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +SELECT :io_sum_shared_after_extends > :io_sum_shared_before_extends; + ?column? +---------- + t +(1 row) + +-- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes +-- and fsyncs. +-- See comment above for rationale for two explicit CHECKPOINTs. +CHECKPOINT; +CHECKPOINT; +SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs + FROM pg_stat_io + WHERE object = 'relation' \gset io_sum_shared_after_ +SELECT :io_sum_shared_after_writes > :io_sum_shared_before_writes; + ?column? +---------- + t +(1 row) + +SELECT current_setting('fsync') = 'off' + OR :io_sum_shared_after_fsyncs > :io_sum_shared_before_fsyncs; + ?column? +---------- + t +(1 row) + +-- Change the tablespace so that the table is rewritten directly, then SELECT +-- from it to cause it to be read back into shared buffers. +SELECT sum(reads) AS io_sum_shared_before_reads + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +-- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly +-- rewritten table, e.g. by autovacuum. +BEGIN; +ALTER TABLE test_io_shared SET TABLESPACE regress_tblspace; +-- SELECT from the table so that the data is read into shared buffers and +-- context 'normal', object 'relation' reads are counted. 
+SELECT COUNT(*) FROM test_io_shared; + count +------- + 100 +(1 row) + +COMMIT; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(reads) AS io_sum_shared_after_reads + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +SELECT :io_sum_shared_after_reads > :io_sum_shared_before_reads; + ?column? +---------- + t +(1 row) + +SELECT sum(hits) AS io_sum_shared_before_hits + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +-- Select from the table again to count hits. +-- Ensure we generate hits by forcing a nested loop self-join with no +-- materialize node. The outer side's buffer will stay pinned, preventing its +-- eviction, while we loop through the inner side and generate hits. +BEGIN; +SET LOCAL enable_nestloop TO on; SET LOCAL enable_mergejoin TO off; +SET LOCAL enable_hashjoin TO off; SET LOCAL enable_material TO off; +-- ensure plan stays as we expect it to +EXPLAIN (COSTS OFF) SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); + QUERY PLAN +------------------------------------------- + Aggregate + -> Nested Loop + Join Filter: (t1.a = t2.a) + -> Seq Scan on test_io_shared t1 + -> Seq Scan on test_io_shared t2 +(5 rows) + +SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); + count +------- + 100 +(1 row) + +COMMIT; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(hits) AS io_sum_shared_after_hits + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' \gset +SELECT :io_sum_shared_after_hits > :io_sum_shared_before_hits; + ?column? +---------- + t +(1 row) + +DROP TABLE test_io_shared; +-- Test that the follow IOCONTEXT_LOCAL IOOps are tracked in pg_stat_io: +-- - eviction of local buffers in order to reuse them +-- - reads of temporary table blocks into local buffers +-- - writes of local buffers to permanent storage +-- - extends of temporary tables +-- Set temp_buffers to its minimum so that we can trigger writes with fewer +-- inserted tuples. Do so in a new session in case temporary tables have been +-- accessed by previous tests in this session. +\c +SET temp_buffers TO 100; +CREATE TEMPORARY TABLE test_io_local(a int, b TEXT); +SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes + FROM pg_stat_io + WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_before_ +-- Insert tuples into the temporary table, generating extends in the stats. +-- Insert enough values that we need to reuse and write out dirty local +-- buffers, generating evictions and writes. +INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200); +-- Ensure the table is large enough to exceed our temp_buffers setting. +SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100; + ?column? +---------- + t +(1 row) + +SELECT sum(reads) AS io_sum_local_before_reads + FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset +-- Read in evicted buffers, generating reads. 
+SELECT COUNT(*) FROM test_io_local; + count +------- + 5000 +(1 row) + +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(evictions) AS evictions, + sum(reads) AS reads, + sum(writes) AS writes, + sum(extends) AS extends + FROM pg_stat_io + WHERE context = 'normal' AND object = 'temp relation' \gset io_sum_local_after_ +SELECT :io_sum_local_after_evictions > :io_sum_local_before_evictions, + :io_sum_local_after_reads > :io_sum_local_before_reads, + :io_sum_local_after_writes > :io_sum_local_before_writes, + :io_sum_local_after_extends > :io_sum_local_before_extends; + ?column? | ?column? | ?column? | ?column? +----------+----------+----------+---------- + t | t | t | t +(1 row) + +-- Change the tablespaces so that the temporary table is rewritten to other +-- local buffers, exercising a different codepath than standard local buffer +-- writes. +ALTER TABLE test_io_local SET TABLESPACE regress_tblspace; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(writes) AS io_sum_local_new_tblspc_writes + FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' \gset +SELECT :io_sum_local_new_tblspc_writes > :io_sum_local_after_writes; + ?column? +---------- + t +(1 row) + +RESET temp_buffers; +-- Test that reuse of strategy buffers and reads of blocks into these reused +-- buffers while VACUUMing are tracked in pg_stat_io. If there is sufficient +-- demand for shared buffers from concurrent queries, some buffers may be +-- pinned by other backends before they can be reused. In such cases, the +-- backend will evict a buffer from outside the ring and add it to the +-- ring. This is considered an eviction and not a reuse. +-- Set wal_skip_threshold smaller than the expected size of +-- test_io_vac_strategy so that, even if wal_level is minimal, VACUUM FULL will +-- fsync the newly rewritten test_io_vac_strategy instead of writing it to WAL. +-- Writing it to WAL will result in the newly written relation pages being in +-- shared buffers -- preventing us from testing BAS_VACUUM BufferAccessStrategy +-- reads. +SET wal_skip_threshold = '1 kB'; +SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions + FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_before_ +CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false'); +INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 4500)i; +-- Ensure that the next VACUUM will need to perform IO by rewriting the table +-- first with VACUUM (FULL). +VACUUM (FULL) test_io_vac_strategy; +-- Use the minimum BUFFER_USAGE_LIMIT to cause reuses or evictions with the +-- smallest table possible. +VACUUM (PARALLEL 0, BUFFER_USAGE_LIMIT 128) test_io_vac_strategy; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions + FROM pg_stat_io WHERE context = 'vacuum' \gset io_sum_vac_strategy_after_ +SELECT :io_sum_vac_strategy_after_reads > :io_sum_vac_strategy_before_reads; + ?column? +---------- + t +(1 row) + +SELECT (:io_sum_vac_strategy_after_reuses + :io_sum_vac_strategy_after_evictions) > + (:io_sum_vac_strategy_before_reuses + :io_sum_vac_strategy_before_evictions); + ?column? 
+---------- + t +(1 row) + +RESET wal_skip_threshold; +-- Test that extends done by a CTAS, which uses a BAS_BULKWRITE +-- BufferAccessStrategy, are tracked in pg_stat_io. +SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before + FROM pg_stat_io WHERE context = 'bulkwrite' \gset +CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i; +SELECT pg_stat_force_next_flush(); + pg_stat_force_next_flush +-------------------------- + +(1 row) + +SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after + FROM pg_stat_io WHERE context = 'bulkwrite' \gset +SELECT :io_sum_bulkwrite_strategy_extends_after > :io_sum_bulkwrite_strategy_extends_before; + ?column? +---------- + t +(1 row) + +-- Test IO stats reset +SELECT pg_stat_have_stats('io', 0, 0); + pg_stat_have_stats +-------------------- + t +(1 row) + +SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_pre_reset + FROM pg_stat_io \gset +SELECT pg_stat_reset_shared('io'); + pg_stat_reset_shared +---------------------- + +(1 row) + +SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_post_reset + FROM pg_stat_io \gset +SELECT :io_stats_post_reset < :io_stats_pre_reset; + ?column? +---------- + t +(1 row) + +-- test BRIN index doesn't block HOT update +CREATE TABLE brin_hot ( + id integer PRIMARY KEY, + val integer NOT NULL +) WITH (autovacuum_enabled = off, fillfactor = 70); +INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235); +CREATE INDEX val_brin ON brin_hot using brin(val); +CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$ +DECLARE + start_time timestamptz := clock_timestamp(); + updated bool; +BEGIN + -- we don't want to wait forever; loop will exit after 30 seconds + FOR i IN 1 .. 300 LOOP + SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated; + EXIT WHEN updated; + + -- wait a little + PERFORM pg_sleep_for('100 milliseconds'); + -- reset stats snapshot so we can test again + PERFORM pg_stat_clear_snapshot(); + END LOOP; + -- report time waited in postmaster log (where it won't change test output) + RAISE log 'wait_for_hot_stats delayed % seconds', + EXTRACT(epoch FROM clock_timestamp() - start_time); +END +$$ LANGUAGE plpgsql; +UPDATE brin_hot SET val = -3 WHERE id = 42; +-- We can't just call wait_for_hot_stats() at this point, because we only +-- transmit stats when the session goes idle, and we probably didn't +-- transmit the last couple of counts yet thanks to the rate-limiting logic +-- in pgstat_report_stat(). But instead of waiting for the rate limiter's +-- timeout to elapse, let's just start a new session. The old one will +-- then send its stats before dying. +\c - +SELECT wait_for_hot_stats(); + wait_for_hot_stats +-------------------- + +(1 row) + +SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid); + pg_stat_get_tuples_hot_updated +-------------------------------- + 1 +(1 row) + +DROP TABLE brin_hot; +DROP FUNCTION wait_for_hot_stats(); +-- Test handling of index predicates - updating attributes in precicates +-- should not block HOT when summarizing indexes are involved. We update +-- a row that was not indexed due to the index predicate, and becomes +-- indexable - the HOT-updated tuple is forwarded to the BRIN index. 
+CREATE TABLE brin_hot_2 (a int, b int); +INSERT INTO brin_hot_2 VALUES (1, 100); +CREATE INDEX ON brin_hot_2 USING brin (b) WHERE a = 2; +UPDATE brin_hot_2 SET a = 2; +EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; + QUERY PLAN +----------------------------------- + Seq Scan on brin_hot_2 + Filter: ((a = 2) AND (b = 100)) +(2 rows) + +SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; + count +------- + 1 +(1 row) + +SET enable_seqscan = off; +EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100; + QUERY PLAN +--------------------------------------------- + Bitmap Heap Scan on brin_hot_2 + Recheck Cond: ((b = 100) AND (a = 2)) + -> Bitmap Index Scan on brin_hot_2_b_idx + Index Cond: (b = 100) +(4 rows) + +SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100; + count +------- + 1 +(1 row) + +DROP TABLE brin_hot_2; +-- Test that updates to indexed columns are still propagated to the +-- BRIN column. +-- https://postgr.es/m/05ebcb44-f383-86e3-4f31-0a97a55634cf@enterprisedb.com +CREATE TABLE brin_hot_3 (a int, filler text) WITH (fillfactor = 10); +INSERT INTO brin_hot_3 SELECT 1, repeat(' ', 500) FROM generate_series(1, 20); +CREATE INDEX ON brin_hot_3 USING brin (a) WITH (pages_per_range = 1); +UPDATE brin_hot_3 SET a = 2; +EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_3 WHERE a = 2; + QUERY PLAN +--------------------------------------------- + Bitmap Heap Scan on brin_hot_3 + Recheck Cond: (a = 2) + -> Bitmap Index Scan on brin_hot_3_a_idx + Index Cond: (a = 2) +(4 rows) + +SELECT COUNT(*) FROM brin_hot_3 WHERE a = 2; + count +------- + 20 +(1 row) + +DROP TABLE brin_hot_3; +SET enable_seqscan = on; +-- End of Stats Test diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out new file mode 100644 index 0000000..a430153 --- /dev/null +++ b/src/test/regress/expected/stats_ext.out @@ -0,0 +1,3292 @@ +-- Generic extended statistics support +-- +-- Note: tables for which we check estimated row counts should be created +-- with autovacuum_enabled = off, so that we don't have unstable results +-- from auto-analyze happening when we didn't expect it. 
+-- +-- check the number of estimated/actual rows in the top node +create function check_estimated_rows(text) returns table (estimated int, actual int) +language plpgsql as +$$ +declare + ln text; + tmp text[]; + first_row bool := true; +begin + for ln in + execute format('explain analyze %s', $1) + loop + if first_row then + first_row := false; + tmp := regexp_match(ln, 'rows=(\d*) .* rows=(\d*)'); + return query select tmp[1]::int, tmp[2]::int; + end if; + end loop; +end; +$$; +-- Verify failures +CREATE TABLE ext_stats_test (x text, y int, z int); +CREATE STATISTICS tst; +ERROR: syntax error at or near ";" +LINE 1: CREATE STATISTICS tst; + ^ +CREATE STATISTICS tst ON a, b; +ERROR: syntax error at or near ";" +LINE 1: CREATE STATISTICS tst ON a, b; + ^ +CREATE STATISTICS tst FROM sometab; +ERROR: syntax error at or near "FROM" +LINE 1: CREATE STATISTICS tst FROM sometab; + ^ +CREATE STATISTICS tst ON a, b FROM nonexistent; +ERROR: relation "nonexistent" does not exist +CREATE STATISTICS tst ON a, b FROM ext_stats_test; +ERROR: column "a" does not exist +CREATE STATISTICS tst ON x, x, y FROM ext_stats_test; +ERROR: duplicate column name in statistics definition +CREATE STATISTICS tst ON x, x, y, x, x, y, x, x, y FROM ext_stats_test; +ERROR: cannot have more than 8 columns in statistics +CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; +ERROR: cannot have more than 8 columns in statistics +CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; +ERROR: cannot have more than 8 columns in statistics +CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test; +ERROR: duplicate expression in statistics definition +CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test; +ERROR: unrecognized statistics kind "unrecognized" +-- incorrect expressions +CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference +ERROR: extended statistics require at least 2 columns +CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses +ERROR: syntax error at or near "+" +LINE 1: CREATE STATISTICS tst ON y + z FROM ext_stats_test; + ^ +CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; -- tuple expression +ERROR: syntax error at or near "," +LINE 1: CREATE STATISTICS tst ON (x, y) FROM ext_stats_test; + ^ +DROP TABLE ext_stats_test; +-- Ensure stats are dropped sanely, and test IF NOT EXISTS while at it +CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER); +CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; +COMMENT ON STATISTICS ab1_a_b_stats IS 'new comment'; +CREATE ROLE regress_stats_ext; +SET SESSION AUTHORIZATION regress_stats_ext; +COMMENT ON STATISTICS ab1_a_b_stats IS 'changed comment'; +ERROR: must be owner of statistics object ab1_a_b_stats +DROP STATISTICS ab1_a_b_stats; +ERROR: must be owner of statistics object ab1_a_b_stats +ALTER STATISTICS ab1_a_b_stats RENAME TO ab1_a_b_stats_new; +ERROR: must be owner of statistics object ab1_a_b_stats +RESET SESSION AUTHORIZATION; +DROP ROLE regress_stats_ext; +CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; +NOTICE: statistics object "ab1_a_b_stats" already exists, skipping +DROP STATISTICS ab1_a_b_stats; +CREATE SCHEMA regress_schema_2; +CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1; +-- Let's also verify the pg_get_statisticsobjdef output looks sane. 
+SELECT pg_get_statisticsobjdef(oid) FROM pg_statistic_ext WHERE stxname = 'ab1_a_b_stats'; + pg_get_statisticsobjdef +------------------------------------------------------------------- + CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON a, b FROM ab1 +(1 row) + +DROP STATISTICS regress_schema_2.ab1_a_b_stats; +-- Ensure statistics are dropped when columns are +CREATE STATISTICS ab1_b_c_stats ON b, c FROM ab1; +CREATE STATISTICS ab1_a_b_c_stats ON a, b, c FROM ab1; +CREATE STATISTICS ab1_b_a_stats ON b, a FROM ab1; +ALTER TABLE ab1 DROP COLUMN a; +\d ab1 + Table "public.ab1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + b | integer | | | + c | integer | | | +Statistics objects: + "public.ab1_b_c_stats" ON b, c FROM ab1 + +-- Ensure statistics are dropped when table is +SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; + stxname +--------------- + ab1_b_c_stats +(1 row) + +DROP TABLE ab1; +SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; + stxname +--------- +(0 rows) + +-- Ensure things work sanely with SET STATISTICS 0 +CREATE TABLE ab1 (a INTEGER, b INTEGER); +ALTER TABLE ab1 ALTER a SET STATISTICS 0; +INSERT INTO ab1 SELECT a, a%23 FROM generate_series(1, 1000) a; +CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; +ANALYZE ab1; +WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" +ALTER TABLE ab1 ALTER a SET STATISTICS -1; +-- setting statistics target 0 skips the statistics, without printing any message, so check catalog +ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; +\d ab1 + Table "public.ab1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Statistics objects: + "public.ab1_a_b_stats" ON a, b FROM ab1; STATISTICS 0 + +ANALYZE ab1; +SELECT stxname, stxdndistinct, stxddependencies, stxdmcv, stxdinherit + FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid) + WHERE s.stxname = 'ab1_a_b_stats'; + stxname | stxdndistinct | stxddependencies | stxdmcv | stxdinherit +---------------+---------------+------------------+---------+------------- + ab1_a_b_stats | | | | +(1 row) + +ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; +\d+ ab1 + Table "public.ab1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | + b | integer | | | | plain | | +Statistics objects: + "public.ab1_a_b_stats" ON a, b FROM ab1 + +-- partial analyze doesn't build stats either +ANALYZE ab1 (a); +WARNING: statistics object "public.ab1_a_b_stats" could not be computed for relation "public.ab1" +ANALYZE ab1; +DROP TABLE ab1; +ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; +ERROR: statistics object "ab1_a_b_stats" does not exist +ALTER STATISTICS IF EXISTS ab1_a_b_stats SET STATISTICS 0; +NOTICE: statistics object "ab1_a_b_stats" does not exist, skipping +-- Ensure we can build statistics for tables with inheritance. 
+CREATE TABLE ab1 (a INTEGER, b INTEGER); +CREATE TABLE ab1c () INHERITS (ab1); +INSERT INTO ab1 VALUES (1,1); +CREATE STATISTICS ab1_a_b_stats ON a, b FROM ab1; +ANALYZE ab1; +DROP TABLE ab1 CASCADE; +NOTICE: drop cascades to table ab1c +-- Tests for stats with inheritance +CREATE TABLE stxdinh(a int, b int); +CREATE TABLE stxdinh1() INHERITS(stxdinh); +CREATE TABLE stxdinh2() INHERITS(stxdinh); +INSERT INTO stxdinh SELECT mod(a,50), mod(a,100) FROM generate_series(0, 1999) a; +INSERT INTO stxdinh1 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; +INSERT INTO stxdinh2 SELECT mod(a,100), mod(a,100) FROM generate_series(0, 999) a; +VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; +-- Ensure non-inherited stats are not applied to inherited query +-- Without stats object, it looks like this +SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); + estimated | actual +-----------+-------- + 400 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 3 | 40 +(1 row) + +CREATE STATISTICS stxdinh ON a, b FROM stxdinh; +VACUUM ANALYZE stxdinh, stxdinh1, stxdinh2; +-- See if the extended stats affect the estimates +SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* GROUP BY 1, 2'); + estimated | actual +-----------+-------- + 150 | 150 +(1 row) + +-- Dependencies are applied at individual relations (within append), so +-- this estimate changes a bit because we improve estimates for the parent +SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinh* WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 22 | 40 +(1 row) + +-- Ensure correct (non-inherited) stats are applied to inherited query +SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh GROUP BY 1, 2'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT a, b FROM ONLY stxdinh WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 20 | 20 +(1 row) + +DROP TABLE stxdinh, stxdinh1, stxdinh2; +-- Ensure inherited stats ARE applied to inherited query in partitioned table +CREATE TABLE stxdinp(i int, a int, b int) PARTITION BY RANGE (i); +CREATE TABLE stxdinp1 PARTITION OF stxdinp FOR VALUES FROM (1) TO (100); +INSERT INTO stxdinp SELECT 1, a/100, a/100 FROM generate_series(1, 999) a; +CREATE STATISTICS stxdinp ON (a + 1), a, b FROM stxdinp; +VACUUM ANALYZE stxdinp; -- partitions are processed recursively +SELECT 1 FROM pg_statistic_ext WHERE stxrelid = 'stxdinp'::regclass; + ?column? 
+---------- + 1 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT a, b FROM stxdinp GROUP BY 1, 2'); + estimated | actual +-----------+-------- + 10 | 10 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT a + 1, b FROM ONLY stxdinp GROUP BY 1, 2'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +DROP TABLE stxdinp; +-- basic test for statistics on expressions +CREATE TABLE ab1 (a INTEGER, b INTEGER, c TIMESTAMP, d TIMESTAMPTZ); +-- expression stats may be built on a single expression column +CREATE STATISTICS ab1_exprstat_1 ON (a+b) FROM ab1; +-- with a single expression, we only enable expression statistics +CREATE STATISTICS ab1_exprstat_2 ON (a+b) FROM ab1; +SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_2'; + stxkind +--------- + {e} +(1 row) + +-- adding anything to the expression builds all statistics kinds +CREATE STATISTICS ab1_exprstat_3 ON (a+b), a FROM ab1; +SELECT stxkind FROM pg_statistic_ext WHERE stxname = 'ab1_exprstat_3'; + stxkind +----------- + {d,f,m,e} +(1 row) + +-- date_trunc on timestamptz is not immutable, but that should not matter +CREATE STATISTICS ab1_exprstat_4 ON date_trunc('day', d) FROM ab1; +-- date_trunc on timestamp is immutable +CREATE STATISTICS ab1_exprstat_5 ON date_trunc('day', c) FROM ab1; +-- check use of a boolean-returning expression +CREATE STATISTICS ab1_exprstat_6 ON + (case a when 1 then true else false end), b FROM ab1; +-- insert some data and run analyze, to test that these cases build properly +INSERT INTO ab1 +SELECT x / 10, x / 3, + '2020-10-01'::timestamp + x * interval '1 day', + '2020-10-01'::timestamptz + x * interval '1 day' +FROM generate_series(1, 100) x; +ANALYZE ab1; +-- apply some stats +SELECT * FROM check_estimated_rows('SELECT * FROM ab1 WHERE (case a when 1 then true else false end) AND b=2'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +DROP TABLE ab1; +-- Verify supported object types for extended statistics +CREATE schema tststats; +CREATE TABLE tststats.t (a int, b int, c text); +CREATE INDEX ti ON tststats.t (a, b); +CREATE SEQUENCE tststats.s; +CREATE VIEW tststats.v AS SELECT * FROM tststats.t; +CREATE MATERIALIZED VIEW tststats.mv AS SELECT * FROM tststats.t; +CREATE TYPE tststats.ty AS (a int, b int, c text); +CREATE FOREIGN DATA WRAPPER extstats_dummy_fdw; +CREATE SERVER extstats_dummy_srv FOREIGN DATA WRAPPER extstats_dummy_fdw; +CREATE FOREIGN TABLE tststats.f (a int, b int, c text) SERVER extstats_dummy_srv; +CREATE TABLE tststats.pt (a int, b int, c text) PARTITION BY RANGE (a, b); +CREATE TABLE tststats.pt1 PARTITION OF tststats.pt FOR VALUES FROM (-10, -10) TO (10, 10); +CREATE STATISTICS tststats.s1 ON a, b FROM tststats.t; +CREATE STATISTICS tststats.s2 ON a, b FROM tststats.ti; +ERROR: cannot define statistics for relation "ti" +DETAIL: This operation is not supported for indexes. +CREATE STATISTICS tststats.s3 ON a, b FROM tststats.s; +ERROR: cannot define statistics for relation "s" +DETAIL: This operation is not supported for sequences. +CREATE STATISTICS tststats.s4 ON a, b FROM tststats.v; +ERROR: cannot define statistics for relation "v" +DETAIL: This operation is not supported for views. +CREATE STATISTICS tststats.s5 ON a, b FROM tststats.mv; +CREATE STATISTICS tststats.s6 ON a, b FROM tststats.ty; +ERROR: cannot define statistics for relation "ty" +DETAIL: This operation is not supported for composite types. 
+CREATE STATISTICS tststats.s7 ON a, b FROM tststats.f; +CREATE STATISTICS tststats.s8 ON a, b FROM tststats.pt; +CREATE STATISTICS tststats.s9 ON a, b FROM tststats.pt1; +DO $$ +DECLARE + relname text := reltoastrelid::regclass FROM pg_class WHERE oid = 'tststats.t'::regclass; +BEGIN + EXECUTE 'CREATE STATISTICS tststats.s10 ON a, b FROM ' || relname; +EXCEPTION WHEN wrong_object_type THEN + RAISE NOTICE 'stats on toast table not created'; +END; +$$; +NOTICE: stats on toast table not created +DROP SCHEMA tststats CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table tststats.t +drop cascades to sequence tststats.s +drop cascades to view tststats.v +drop cascades to materialized view tststats.mv +drop cascades to type tststats.ty +drop cascades to foreign table tststats.f +drop cascades to table tststats.pt +DROP FOREIGN DATA WRAPPER extstats_dummy_fdw CASCADE; +NOTICE: drop cascades to server extstats_dummy_srv +-- n-distinct tests +CREATE TABLE ndistinct ( + filler1 TEXT, + filler2 NUMERIC, + a INT, + b INT, + filler3 DATE, + c INT, + d INT +) +WITH (autovacuum_enabled = off); +-- over-estimates when using only per-column statistics +INSERT INTO ndistinct (a, b, c, filler1) + SELECT i/100, i/100, i/100, cash_words((i/100)::money) + FROM generate_series(1,1000) s(i); +ANALYZE ndistinct; +-- Group Aggregate, due to over-estimate of the number of groups +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); + estimated | actual +-----------+-------- + 200 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); + estimated | actual +-----------+-------- + 200 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); + estimated | actual +-----------+-------- + 100 | 11 +(1 row) + +-- correct command +CREATE STATISTICS s10 ON a, b, c FROM ndistinct; +ANALYZE ndistinct; +SELECT s.stxkind, d.stxdndistinct + FROM pg_statistic_ext s, pg_statistic_ext_data d + WHERE s.stxrelid = 'ndistinct'::regclass + AND d.stxoid = s.oid; + stxkind | stxdndistinct +---------+----------------------------------------------------- + {d,f,m} | {"3, 4": 11, "3, 6": 11, "4, 6": 11, "3, 4, 6": 11} +(1 row) + +-- minor improvement, make sure the ctid does not break the matching +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY ctid, a, b'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +-- Hash Aggregate, thanks to estimates improved by the statistic +SELECT * FROM 
check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +-- partial improvement (match on attributes) +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +-- expressions - no improvement +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); + estimated | actual +-----------+-------- + 11 | 11 +(1 row) + +-- last two plans keep using Group Aggregate, because 'd' is not covered +-- by the statistic and while it's NULL-only we assume 200 values for it +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); + estimated | actual +-----------+-------- + 200 | 11 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); + estimated | actual +-----------+-------- + 200 | 11 +(1 row) + +TRUNCATE TABLE ndistinct; +-- under-estimates when using only per-column statistics +INSERT INTO ndistinct (a, b, c, filler1) + SELECT mod(i,13), mod(i,17), mod(i,19), + cash_words(mod(i,23)::int::money) + FROM generate_series(1,1000) s(i); +ANALYZE ndistinct; +SELECT s.stxkind, d.stxdndistinct + FROM pg_statistic_ext s, pg_statistic_ext_data d + WHERE s.stxrelid = 'ndistinct'::regclass + AND d.stxoid = s.oid; + stxkind | stxdndistinct +---------+---------------------------------------------------------- + {d,f,m} | {"3, 4": 221, "3, 6": 247, "4, 6": 323, "3, 4, 6": 1000} +(1 row) + +-- correct estimates +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); + estimated | actual +-----------+-------- + 323 | 323 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d'); + estimated | actual +-----------+-------- + 200 | 13 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct 
GROUP BY a, (a+1), (b+100)'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +DROP STATISTICS s10; +SELECT s.stxkind, d.stxdndistinct + FROM pg_statistic_ext s, pg_statistic_ext_data d + WHERE s.stxrelid = 'ndistinct'::regclass + AND d.stxoid = s.oid; + stxkind | stxdndistinct +---------+--------------- +(0 rows) + +-- dropping the statistics results in under-estimates +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c'); + estimated | actual +-----------+-------- + 100 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d'); + estimated | actual +-----------+-------- + 200 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d'); + estimated | actual +-----------+-------- + 200 | 323 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, d'); + estimated | actual +-----------+-------- + 200 | 13 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (a+1)'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); + estimated | actual +-----------+-------- + 100 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +-- ndistinct estimates with statistics on expressions +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); + estimated | actual +-----------+-------- + 100 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +CREATE STATISTICS s10 (ndistinct) ON (a+1), (b+100), (2*c) FROM ndistinct; +ANALYZE ndistinct; +SELECT s.stxkind, d.stxdndistinct + FROM pg_statistic_ext s, pg_statistic_ext_data d + WHERE s.stxrelid = 'ndistinct'::regclass + AND d.stxoid = s.oid; + stxkind | stxdndistinct +---------+------------------------------------------------------------------- + {d,e} | {"-1, -2": 221, "-1, -3": 247, "-2, -3": 323, "-1, -2, -3": 1000} +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100)'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a+1), (b+100), (2*c)'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (a+1), (b+100)'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +DROP STATISTICS s10; +-- a mix of attributes and expressions +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 100 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT 
COUNT(*) FROM ndistinct GROUP BY a, (2*c)'); + estimated | actual +-----------+-------- + 100 | 247 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)'); + estimated | actual +-----------+-------- + 100 | 1000 +(1 row) + +CREATE STATISTICS s10 (ndistinct) ON a, b, (2*c) FROM ndistinct; +ANALYZE ndistinct; +SELECT s.stxkind, d.stxdndistinct + FROM pg_statistic_ext s, pg_statistic_ext_data d + WHERE s.stxrelid = 'ndistinct'::regclass + AND d.stxoid = s.oid; + stxkind | stxdndistinct +---------+------------------------------------------------------------- + {d,e} | {"3, 4": 221, "3, -1": 247, "4, -1": 323, "3, 4, -1": 1000} +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 221 | 221 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (2*c)'); + estimated | actual +-----------+-------- + 247 | 247 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (2*c)'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +DROP STATISTICS s10; +-- combination of multiple ndistinct statistics, with/without expressions +TRUNCATE ndistinct; +-- two mostly independent groups of columns +INSERT INTO ndistinct (a, b, c, d) + SELECT mod(i,3), mod(i,9), mod(i,5), mod(i,20) + FROM generate_series(1,1000) s(i); +ANALYZE ndistinct; +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 27 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); + estimated | actual +-----------+-------- + 27 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); + estimated | actual +-----------+-------- + 27 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); + estimated | actual +-----------+-------- + 27 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); + estimated | actual +-----------+-------- + 100 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); + estimated | actual +-----------+-------- + 100 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); + estimated | actual +-----------+-------- + 100 | 180 +(1 row) + +-- basic statistics on both attributes (no expressions) +CREATE STATISTICS s11 (ndistinct) ON a, b FROM ndistinct; +CREATE STATISTICS s12 (ndistinct) ON c, d FROM ndistinct; +ANALYZE ndistinct; +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM 
check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); + estimated | actual +-----------+-------- + 100 | 180 +(1 row) + +-- replace the second statistics by statistics on expressions +DROP STATISTICS s12; +CREATE STATISTICS s12 (ndistinct) ON (c * 10), (d - 1) FROM ndistinct; +ANALYZE ndistinct; +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); + estimated | actual +-----------+-------- + 100 | 180 +(1 row) + +-- replace the second statistics by statistics on both attributes and expressions +DROP STATISTICS s12; +CREATE STATISTICS s12 (ndistinct) ON c, d, (c * 10), (d - 1) FROM ndistinct; +ANALYZE ndistinct; +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); + estimated | actual +-----------+-------- + 100 | 180 +(1 row) + +-- replace the other statistics by statistics on both attributes and expressions +DROP STATISTICS s11; +CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct; +ANALYZE ndistinct; +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT 
COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); + estimated | actual +-----------+-------- + 100 | 180 +(1 row) + +-- replace statistics by somewhat overlapping ones (this expected to get worse estimate +-- because the first statistics shall be applied to 3 columns, and the second one can't +-- be really applied) +DROP STATISTICS s11; +DROP STATISTICS s12; +CREATE STATISTICS s11 (ndistinct) ON a, b, (a*5), (b+1) FROM ndistinct; +CREATE STATISTICS s12 (ndistinct) ON a, (b+1), (c * 10) FROM ndistinct; +ANALYZE ndistinct; +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), b'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1)'); + estimated | actual +-----------+-------- + 9 | 9 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY (a*5), (b+1), c'); + estimated | actual +-----------+-------- + 45 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, b, (c*10)'); + estimated | actual +-----------+-------- + 100 | 45 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, (b+1), c, (d - 1)'); + estimated | actual +-----------+-------- + 100 | 180 +(1 row) + +DROP STATISTICS s11; +DROP STATISTICS s12; +-- functional dependencies tests +CREATE TABLE functional_dependencies ( + filler1 TEXT, + filler2 NUMERIC, + a INT, + b TEXT, + filler3 DATE, + c INT, + d TEXT +) +WITH (autovacuum_enabled = off); +CREATE INDEX fdeps_ab_idx ON functional_dependencies (a, b); +CREATE INDEX fdeps_abc_idx ON functional_dependencies (a, b, c); +-- random data (no functional dependencies) +INSERT INTO functional_dependencies (a, b, c, filler1) + SELECT mod(i, 5), mod(i, 7), mod(i, 11), i FROM generate_series(1,1000) s(i); +ANALYZE functional_dependencies; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 29 | 29 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 3 | 3 +(1 row) + +-- create statistics +CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies; +ANALYZE functional_dependencies; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 29 | 29 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 3 | 3 +(1 row) + +-- a => b, a => c, b => c +TRUNCATE 
functional_dependencies; +DROP STATISTICS func_deps_stat; +-- now do the same thing, but with expressions +INSERT INTO functional_dependencies (a, b, c, filler1) + SELECT i, i, i, i FROM generate_series(1,5000) s(i); +ANALYZE functional_dependencies; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1'); + estimated | actual +-----------+-------- + 1 | 35 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1'); + estimated | actual +-----------+-------- + 1 | 5 +(1 row) + +-- create statistics +CREATE STATISTICS func_deps_stat (dependencies) ON (mod(a,11)), (mod(b::int, 13)), (mod(c, 7)) FROM functional_dependencies; +ANALYZE functional_dependencies; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1'); + estimated | actual +-----------+-------- + 35 | 35 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE mod(a, 11) = 1 AND mod(b::int, 13) = 1 AND mod(c, 7) = 1'); + estimated | actual +-----------+-------- + 5 | 5 +(1 row) + +-- a => b, a => c, b => c +TRUNCATE functional_dependencies; +DROP STATISTICS func_deps_stat; +INSERT INTO functional_dependencies (a, b, c, filler1) + SELECT mod(i,100), mod(i,50), mod(i,25), i FROM generate_series(1,5000) s(i); +ANALYZE functional_dependencies; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +-- IN +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1'''); + estimated | actual +-----------+-------- + 2 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 4 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1'''); + estimated | actual +-----------+-------- + 4 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)'); + estimated | actual +-----------+-------- + 3 | 400 +(1 row) + +-- OR clauses referencing the same attribute +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1'''); + estimated | actual +-----------+-------- + 2 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' 
OR b = ''2'')'); + estimated | actual +-----------+-------- + 4 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +-- OR clauses referencing different attributes +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1'''); + estimated | actual +-----------+-------- + 3 | 100 +(1 row) + +-- ANY +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1'''); + estimated | actual +-----------+-------- + 2 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 4 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])'); + estimated | actual +-----------+-------- + 3 | 400 +(1 row) + +-- ANY with inequalities should not benefit from functional dependencies +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1'''); + estimated | actual +-----------+-------- + 2472 | 2400 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1441 | 1250 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 3909 | 2550 +(1 row) + +-- ALL (should not benefit from functional dependencies) +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])'); + estimated | actual +-----------+-------- + 2 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +-- create statistics +CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_dependencies; +ANALYZE functional_dependencies; +-- print the detected dependencies +SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; + dependencies 
+------------------------------------------------------------------------------------------------------------ + {"3 => 4": 1.000000, "3 => 6": 1.000000, "4 => 6": 1.000000, "3, 4 => 6": 1.000000, "3, 6 => 4": 1.000000} +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +-- IN +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ''1'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ''1'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c = 1'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 26, 51, 76) AND b IN (''1'', ''26'') AND c IN (1)'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 26, 27, 51, 52, 76, 77) AND b IN (''1'', ''2'', ''26'', ''27'') AND c IN (1, 2)'); + estimated | actual +-----------+-------- + 400 | 400 +(1 row) + +-- OR clauses referencing the same attribute +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND b = ''1'''); + estimated | actual +-----------+-------- + 99 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 51) AND (b = ''1'' OR b = ''2'')'); + estimated | actual +-----------+-------- + 99 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR a = 2 OR a = 51 OR a = 52) AND (b = ''1'' OR b = ''2'')'); + estimated | actual +-----------+-------- + 197 | 200 +(1 row) + +-- OR clauses referencing different attributes are incompatible +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a = 1 OR b = ''1'') AND b = ''1'''); + estimated | actual +-----------+-------- + 3 | 100 +(1 row) + +-- ANY +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ''1'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 51]) AND b = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM 
functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = 1'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 26, 51, 76]) AND b = ANY (ARRAY[''1'', ''26'']) AND c = ANY (ARRAY[1])'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = ANY (ARRAY[1, 2, 26, 27, 51, 52, 76, 77]) AND b = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND c = ANY (ARRAY[1, 2])'); + estimated | actual +-----------+-------- + 400 | 400 +(1 row) + +-- ANY with inequalities should not benefit from functional dependencies +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a < ANY (ARRAY[1, 51]) AND b > ''1'''); + estimated | actual +-----------+-------- + 2472 | 2400 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a >= ANY (ARRAY[1, 51]) AND b <= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1441 | 1250 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a <= ANY (ARRAY[1, 2, 51, 52]) AND b >= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 3909 | 2550 +(1 row) + +-- ALL (should not benefit from functional dependencies) +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1''])'); + estimated | actual +-----------+-------- + 2 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 51) AND b = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a IN (1, 2, 51, 52) AND b = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +-- changing the type of column c causes all its stats to be dropped, reverting +-- to default estimates without any statistics, i.e. 
0.5% selectivity for each +-- condition +ALTER TABLE functional_dependencies ALTER COLUMN c TYPE numeric; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +ANALYZE functional_dependencies; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +DROP STATISTICS func_deps_stat; +-- now try functional dependencies with expressions +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +-- IN +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)'); + estimated | actual +-----------+-------- + 1 | 400 +(1 row) + +-- OR clauses referencing the same attribute +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +-- OR clauses referencing different attributes +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +-- ANY +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY 
(ARRAY[2, 102]) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = 2'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])'); + estimated | actual +-----------+-------- + 1 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])'); + estimated | actual +-----------+-------- + 1 | 400 +(1 row) + +-- ANY with inequalities should not benefit from functional dependencies +-- the estimates however improve thanks to having expression statistics +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1'''); + estimated | actual +-----------+-------- + 926 | 2400 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1543 | 1250 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 2229 | 2550 +(1 row) + +-- ALL (should not benefit from functional dependencies) +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +-- create statistics on expressions +CREATE STATISTICS func_deps_stat (dependencies) ON (a * 2), upper(b), (c + 1) FROM functional_dependencies; +ANALYZE functional_dependencies; +-- print the detected dependencies +SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; + dependencies +------------------------------------------------------------------------------------------------------------------------ + {"-1 => -2": 1.000000, "-1 => -3": 1.000000, "-2 => -3": 1.000000, "-1, -2 => -3": 1.000000, "-1, -3 => -2": 1.000000} +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'''); + estimated | actual 
+-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'' AND (c + 1) = 2'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +-- IN +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) IN (''1'', ''2'')'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) = 2'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 52, 102, 152) AND upper(b) IN (''1'', ''26'') AND (c + 1) IN (2)'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 52, 54, 102, 104, 152, 154) AND upper(b) IN (''1'', ''2'', ''26'', ''27'') AND (c + 1) IN (2, 3)'); + estimated | actual +-----------+-------- + 400 | 400 +(1 row) + +-- OR clauses referencing the same attribute +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 99 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 102) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); + estimated | actual +-----------+-------- + 99 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR (a * 2) = 4 OR (a * 2) = 102 OR (a * 2) = 104) AND (upper(b) = ''1'' OR upper(b) = ''2'')'); + estimated | actual +-----------+-------- + 197 | 200 +(1 row) + +-- OR clauses referencing different attributes +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE ((a * 2) = 2 OR upper(b) = ''1'') AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 3 | 100 +(1 row) + +-- ANY +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ''1'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 102]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 102, 104]) AND upper(b) = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY 
(ARRAY[''1'', ''26'']) AND (c + 1) = 2'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 52, 102, 152]) AND upper(b) = ANY (ARRAY[''1'', ''26'']) AND (c + 1) = ANY (ARRAY[2])'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = ANY (ARRAY[2, 4, 52, 54, 102, 104, 152, 154]) AND upper(b) = ANY (ARRAY[''1'', ''2'', ''26'', ''27'']) AND (c + 1) = ANY (ARRAY[2, 3])'); + estimated | actual +-----------+-------- + 400 | 400 +(1 row) + +-- ANY with inequalities should not benefit from functional dependencies +-- the estimates however improve thanks to having expression statistics +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) < ANY (ARRAY[2, 102]) AND upper(b) > ''1'''); + estimated | actual +-----------+-------- + 2472 | 2400 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) >= ANY (ARRAY[2, 102]) AND upper(b) <= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1441 | 1250 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) <= ANY (ARRAY[2, 4, 102, 104]) AND upper(b) >= ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 3909 | 2550 +(1 row) + +-- ALL (should not benefit from functional dependencies) +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1''])'); + estimated | actual +-----------+-------- + 2 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 102) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) IN (2, 4, 102, 104) AND upper(b) = ALL (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +-- check the ability to use multiple functional dependencies +CREATE TABLE functional_dependencies_multi ( + a INTEGER, + b INTEGER, + c INTEGER, + d INTEGER +) +WITH (autovacuum_enabled = off); +INSERT INTO functional_dependencies_multi (a, b, c, d) + SELECT + mod(i,7), + mod(i,7), + mod(i,11), + mod(i,11) + FROM generate_series(1,5000) s(i); +ANALYZE functional_dependencies_multi; +-- estimates without any functional dependencies +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 102 | 714 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b'); + estimated | actual +-----------+-------- + 102 | 714 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 41 | 454 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 1 | 64 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0'); + estimated | actual +-----------+-------- + 1 | 64 +(1 row) + +-- create separate functional dependencies +CREATE STATISTICS 
functional_dependencies_multi_1 (dependencies) ON a, b FROM functional_dependencies_multi; +CREATE STATISTICS functional_dependencies_multi_2 (dependencies) ON c, d FROM functional_dependencies_multi; +ANALYZE functional_dependencies_multi; +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 714 | 714 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND 0 = b'); + estimated | actual +-----------+-------- + 714 | 714 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 454 | 454 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 65 | 64 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies_multi WHERE 0 = a AND b = 0 AND 0 = c AND d = 0'); + estimated | actual +-----------+-------- + 65 | 64 +(1 row) + +DROP TABLE functional_dependencies_multi; +-- MCV lists +CREATE TABLE mcv_lists ( + filler1 TEXT, + filler2 NUMERIC, + a INT, + b VARCHAR, + filler3 DATE, + c INT, + d TEXT, + ia INT[] +) +WITH (autovacuum_enabled = off); +-- random data (no MCV list) +INSERT INTO mcv_lists (a, b, c, filler1) + SELECT mod(i,37), mod(i,41), mod(i,43), mod(i,47) FROM generate_series(1,5000) s(i); +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 3 | 4 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 1 | 1 +(1 row) + +-- create statistics +CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists; +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 3 | 4 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 1 | 1 +(1 row) + +TRUNCATE mcv_lists; +DROP STATISTICS mcv_lists_stats; +-- random data (no MCV list), but with expression +INSERT INTO mcv_lists (a, b, c, filler1) + SELECT i, i, i, i FROM generate_series(1,1000) s(i); +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1'); + estimated | actual +-----------+-------- + 1 | 13 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1'); + estimated | actual +-----------+-------- + 1 | 1 +(1 row) + +-- create statistics +CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,7)), (mod(b::int,11)), (mod(c,13)) FROM mcv_lists; +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1'); + estimated | actual +-----------+-------- + 13 | 13 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,7) = 1 AND mod(b::int,11) = 1 AND mod(c,13) = 1'); + estimated | actual +-----------+-------- + 1 | 1 +(1 row) + +-- 100 distinct combinations, all in the MCV list +TRUNCATE mcv_lists; +DROP STATISTICS mcv_lists_stats; +INSERT INTO mcv_lists (a, b, c, ia, filler1) + SELECT mod(i,100), mod(i,50), mod(i,25), 
array[mod(i,25)], i + FROM generate_series(1,5000) s(i); +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1'); + estimated | actual +-----------+-------- + 343 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 343 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])'); + estimated | actual +-----------+-------- + 8 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')'); + estimated | actual +-----------+-------- + 26 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')'); + estimated | actual +-----------+-------- + 26 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 10 | 100 +(1 row) + +SELECT * FROM 
check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])'); + estimated | actual +-----------+-------- + 10 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)'); + estimated | actual +-----------+-------- + 4 | 50 +(1 row) + +-- create statistics +CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c, ia FROM mcv_lists; +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = a AND ''1'' = b'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 1 AND b < ''1'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > a AND ''1'' > b'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 0 AND b <= ''0'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 0 >= a AND ''0'' >= b'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'' AND c = 1'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND b < ''1'' AND c < 5'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < 5 AND ''1'' > b AND 5 > c'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= 4 AND b <= ''0'' AND c <= 4'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 4 >= a AND ''0'' >= b AND 4 >= c'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''1'' OR c = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52) AND b IN ( ''1'', ''2'')'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (1, 2, 51, 52, NULL) AND b IN ( ''1'', ''2'', NULL)'); + estimated | actual +-----------+-------- + 200 | 200 +(1 
row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2''])'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[NULL, 1, 2, 51, 52]) AND b = ANY (ARRAY[''1'', ''2'', NULL])'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, 2, 3]) AND b IN (''1'', ''2'', ''3'')'); + estimated | actual +-----------+-------- + 150 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a <= ANY (ARRAY[1, NULL, 2, 3]) AND b IN (''1'', ''2'', NULL, ''3'')'); + estimated | actual +-----------+-------- + 150 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND c > ANY (ARRAY[1, 2, 3, NULL])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', ''3'') AND c > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a < ALL (ARRAY[4, 5]) AND b IN (''1'', ''2'', NULL, ''3'') AND c > ANY (ARRAY[1, 2, NULL, 3])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = ANY (ARRAY[4,5]) AND 4 = ANY(ia)'); + estimated | actual +-----------+-------- + 4 | 50 +(1 row) + +-- check change of unrelated column type does not reset the MCV statistics +ALTER TABLE mcv_lists ALTER COLUMN d TYPE VARCHAR(64); +SELECT d.stxdmcv IS NOT NULL + FROM pg_statistic_ext s, pg_statistic_ext_data d + WHERE s.stxname = 'mcv_lists_stats' + AND d.stxoid = s.oid; + ?column? 
+---------- + t +(1 row) + +-- check change of column type resets the MCV statistics +ALTER TABLE mcv_lists ALTER COLUMN c TYPE numeric; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 AND b = ''1'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +-- 100 distinct combinations, all in the MCV list, but with expressions +TRUNCATE mcv_lists; +DROP STATISTICS mcv_lists_stats; +INSERT INTO mcv_lists (a, b, c, filler1) + SELECT i, i, i, i FROM generate_series(1,1000) s(i); +ANALYZE mcv_lists; +-- without any stats on the expressions, we have to use default selectivities, which +-- is why the estimates here are different from the pre-computed case above +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); + estimated | actual +-----------+-------- + 111 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); + estimated | actual +-----------+-------- + 111 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 15 | 120 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); + estimated | actual +-----------+-------- + 11 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +-- create statistics with expressions only (we create three separate stats, in order not to build more complex extended stats) +CREATE STATISTICS mcv_lists_stats_1 ON (mod(a,20)) FROM mcv_lists; +CREATE STATISTICS mcv_lists_stats_2 ON (mod(b::int,10)) FROM mcv_lists; +CREATE STATISTICS mcv_lists_stats_3 ON (mod(c,5)) FROM mcv_lists; +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); + estimated | actual +-----------+-------- + 5 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); + estimated | actual +-----------+-------- + 5 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM 
mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); + estimated | actual +-----------+-------- + 5 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); + estimated | actual +-----------+-------- + 5 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 149 | 120 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); + estimated | actual +-----------+-------- + 20 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); + estimated | actual +-----------+-------- + 20 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); + estimated | actual +-----------+-------- + 116 | 150 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 12 | 100 +(1 row) + +DROP STATISTICS mcv_lists_stats_1; +DROP STATISTICS mcv_lists_stats_2; +DROP STATISTICS mcv_lists_stats_3; +-- create statistics with both MCV and expressions +CREATE STATISTICS mcv_lists_stats (mcv) ON (mod(a,20)), (mod(b::int,10)), (mod(c,5)) FROM mcv_lists; +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 = mod(a,20) AND 1 = mod(b::int,10)'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < 1 AND mod(b::int,10) < 1'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE 1 > mod(a,20) AND 1 > mod(b::int,10)'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 AND mod(b::int,10) = 1 AND mod(c,5) = 1'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,25) = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 105 | 120 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) IN (1, 2, 51, 52, NULL) AND mod(b::int,10) IN ( 1, 2, NULL)'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = ANY (ARRAY[1, 2, 51, 52]) AND mod(b::int,10) = ANY (ARRAY[1, 2])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) <= ANY (ARRAY[1, NULL, 2, 3]) AND mod(b::int,10) IN (1, 2, NULL, 3)'); + estimated | actual +-----------+-------- + 150 | 150 +(1 row) + +SELECT * FROM 
check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) < ALL (ARRAY[4, 5]) AND mod(b::int,10) IN (1, 2, 3) AND mod(c,5) > ANY (ARRAY[1, 2, 3])'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +-- we can't use the statistic for OR clauses that are not fully covered (missing 'd' attribute) +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE mod(a,20) = 1 OR mod(b::int,10) = 1 OR mod(c,5) = 1 OR d IS NOT NULL'); + estimated | actual +-----------+-------- + 200 | 200 +(1 row) + +-- 100 distinct combinations with NULL values, all in the MCV list +TRUNCATE mcv_lists; +DROP STATISTICS mcv_lists_stats; +INSERT INTO mcv_lists (a, b, c, filler1) + SELECT + (CASE WHEN mod(i,100) = 1 THEN NULL ELSE mod(i,100) END), + (CASE WHEN mod(i,50) = 1 THEN NULL ELSE mod(i,50) END), + (CASE WHEN mod(i,25) = 1 THEN NULL ELSE mod(i,25) END), + i + FROM generate_series(1,5000) s(i); +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL'); + estimated | actual +-----------+-------- + 49 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL'); + estimated | actual +-----------+-------- + 95 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')'); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +-- create statistics +CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, c FROM mcv_lists; +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NULL AND c IS NULL'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND b IS NOT NULL'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NOT NULL AND b IS NULL AND c IS NOT NULL'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IN (0, 1) AND b IN (''0'', ''1'')'); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +-- test pg_mcv_list_items with a very simple (single item) MCV list +TRUNCATE mcv_lists; +INSERT INTO mcv_lists (a, b, c) SELECT 1, 2, 3 FROM generate_series(1,1000) s(i); +ANALYZE mcv_lists; +SELECT m.* + FROM pg_statistic_ext s, pg_statistic_ext_data d, + pg_mcv_list_items(d.stxdmcv) m + WHERE s.stxname = 'mcv_lists_stats' + AND d.stxoid = s.oid; + index | values | nulls | frequency | base_frequency +-------+---------+---------+-----------+---------------- + 0 | {1,2,3} | {f,f,f} | 1 | 1 +(1 row) + +-- 2 distinct combinations with NULL values, all in the MCV list +TRUNCATE mcv_lists; +DROP STATISTICS mcv_lists_stats; +INSERT INTO mcv_lists (a, b, c, d) + SELECT + NULL, -- always NULL + (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 'x' END), + (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 0 END), + (CASE WHEN mod(i,2) = 0 THEN NULL ELSE 
'x' END) + FROM generate_series(1,5000) s(i); +ANALYZE mcv_lists; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x'''); + estimated | actual +-----------+-------- + 3750 | 2500 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x'''); + estimated | actual +-----------+-------- + 3750 | 2500 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')'); + estimated | actual +-----------+-------- + 3750 | 2500 +(1 row) + +-- create statistics +CREATE STATISTICS mcv_lists_stats (mcv) ON a, b, d FROM mcv_lists; +ANALYZE mcv_lists; +-- test pg_mcv_list_items with MCV list containing variable-length data and NULLs +SELECT m.* + FROM pg_statistic_ext s, pg_statistic_ext_data d, + pg_mcv_list_items(d.stxdmcv) m + WHERE s.stxname = 'mcv_lists_stats' + AND d.stxoid = s.oid; + index | values | nulls | frequency | base_frequency +-------+------------------+---------+-----------+---------------- + 0 | {NULL,x,x} | {t,f,f} | 0.5 | 0.25 + 1 | {NULL,NULL,NULL} | {t,t,t} | 0.5 | 0.25 +(2 rows) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE b = ''x'' OR d = ''x'''); + estimated | actual +-----------+-------- + 2500 | 2500 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a = 1 OR b = ''x'' OR d = ''x'''); + estimated | actual +-----------+-------- + 2500 | 2500 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists WHERE a IS NULL AND (b = ''x'' OR d = ''x'')'); + estimated | actual +-----------+-------- + 2500 | 2500 +(1 row) + +-- mcv with pass-by-ref fixlen types, e.g. uuid +CREATE TABLE mcv_lists_uuid ( + a UUID, + b UUID, + c UUID +) +WITH (autovacuum_enabled = off); +INSERT INTO mcv_lists_uuid (a, b, c) + SELECT + fipshash(mod(i,100)::text)::uuid, + fipshash(mod(i,50)::text)::uuid, + fipshash(mod(i,25)::text)::uuid + FROM generate_series(1,5000) s(i); +ANALYZE mcv_lists_uuid; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); + estimated | actual +-----------+-------- + 1 | 50 +(1 row) + +CREATE STATISTICS mcv_lists_uuid_stats (mcv) ON a, b, c + FROM mcv_lists_uuid; +ANALYZE mcv_lists_uuid; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_uuid WHERE a = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND b = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'' AND c = ''e7f6c011-776e-8db7-cd33-0b54174fd76f'''); + estimated | actual +-----------+-------- + 50 | 50 +(1 row) + +DROP TABLE mcv_lists_uuid; +-- mcv with arrays +CREATE TABLE mcv_lists_arrays ( + a TEXT[], + b NUMERIC[], + c INT[] +) +WITH (autovacuum_enabled = off); +INSERT INTO mcv_lists_arrays (a, b, c) + SELECT + ARRAY[fipshash((i/100)::text), fipshash((i/100-1)::text), fipshash((i/100+1)::text)], + ARRAY[(i/100-1)::numeric/1000, (i/100)::numeric/1000, (i/100+1)::numeric/1000], + ARRAY[(i/100-1), i/100, 
(i/100+1)] + FROM generate_series(1,5000) s(i); +CREATE STATISTICS mcv_lists_arrays_stats (mcv) ON a, b, c + FROM mcv_lists_arrays; +ANALYZE mcv_lists_arrays; +-- mcv with bool +CREATE TABLE mcv_lists_bool ( + a BOOL, + b BOOL, + c BOOL +) +WITH (autovacuum_enabled = off); +INSERT INTO mcv_lists_bool (a, b, c) + SELECT + (mod(i,2) = 0), (mod(i,4) = 0), (mod(i,8) = 0) + FROM generate_series(1,10000) s(i); +ANALYZE mcv_lists_bool; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c'); + estimated | actual +-----------+-------- + 156 | 1250 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c'); + estimated | actual +-----------+-------- + 156 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c'); + estimated | actual +-----------+-------- + 469 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c'); + estimated | actual +-----------+-------- + 1094 | 0 +(1 row) + +CREATE STATISTICS mcv_lists_bool_stats (mcv) ON a, b, c + FROM mcv_lists_bool; +ANALYZE mcv_lists_bool; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE a AND b AND c'); + estimated | actual +-----------+-------- + 1250 | 1250 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND c'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND NOT b AND c'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_bool WHERE NOT a AND b AND NOT c'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +-- mcv covering just a small fraction of data +CREATE TABLE mcv_lists_partial ( + a INT, + b INT, + c INT +); +-- 10 frequent groups, each with 100 elements +INSERT INTO mcv_lists_partial (a, b, c) + SELECT + mod(i,10), + mod(i,10), + mod(i,10) + FROM generate_series(0,999) s(i); +-- 100 groups that will make it to the MCV list (includes the 10 frequent ones) +INSERT INTO mcv_lists_partial (a, b, c) + SELECT + i, + i, + i + FROM generate_series(0,99) s(i); +-- 4000 groups in total, most of which won't make it (just a single item) +INSERT INTO mcv_lists_partial (a, b, c) + SELECT + i, + i, + i + FROM generate_series(0,3999) s(i); +ANALYZE mcv_lists_partial; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0'); + estimated | actual +-----------+-------- + 1 | 102 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0'); + estimated | actual +-----------+-------- + 300 | 102 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10'); + estimated | actual +-----------+-------- + 1 | 2 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10'); + estimated | actual +-----------+-------- + 6 | 2 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10'); + estimated | actual +-----------+-------- + 204 | 104 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE 
(a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)'); + estimated | actual +-----------+-------- + 1 | 306 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)'); + estimated | actual +-----------+-------- + 6 | 102 +(1 row) + +CREATE STATISTICS mcv_lists_partial_stats (mcv) ON a, b, c + FROM mcv_lists_partial; +ANALYZE mcv_lists_partial; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 0'); + estimated | actual +-----------+-------- + 102 | 102 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 0'); + estimated | actual +-----------+-------- + 96 | 102 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 AND b = 10 AND c = 10'); + estimated | actual +-----------+-------- + 2 | 2 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 10 OR b = 10 OR c = 10'); + estimated | actual +-----------+-------- + 2 | 2 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 AND b = 0 AND c = 10'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE a = 0 OR b = 0 OR c = 10'); + estimated | actual +-----------+-------- + 102 | 104 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0 AND c = 0) OR (a = 1 AND b = 1 AND c = 1) OR (a = 2 AND b = 2 AND c = 2)'); + estimated | actual +-----------+-------- + 306 | 306 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_partial WHERE (a = 0 AND b = 0) OR (a = 0 AND c = 0) OR (b = 0 AND c = 0)'); + estimated | actual +-----------+-------- + 108 | 102 +(1 row) + +DROP TABLE mcv_lists_partial; +-- check the ability to use multiple MCV lists +CREATE TABLE mcv_lists_multi ( + a INTEGER, + b INTEGER, + c INTEGER, + d INTEGER +) +WITH (autovacuum_enabled = off); +INSERT INTO mcv_lists_multi (a, b, c, d) + SELECT + mod(i,5), + mod(i,5), + mod(i,7), + mod(i,7) + FROM generate_series(1,5000) s(i); +ANALYZE mcv_lists_multi; +-- estimates without any mcv statistics +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 200 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 102 | 714 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0'); + estimated | actual +-----------+-------- + 143 | 142 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0'); + estimated | actual +-----------+-------- + 1571 | 1572 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 4 | 142 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)'); + estimated | actual +-----------+-------- + 298 | 1572 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0'); + estimated | actual +-----------+-------- + 2649 | 1572 +(1 row) + +-- create separate MCV statistics +CREATE STATISTICS 
mcv_lists_multi_1 (mcv) ON a, b FROM mcv_lists_multi; +CREATE STATISTICS mcv_lists_multi_2 (mcv) ON c, d FROM mcv_lists_multi; +ANALYZE mcv_lists_multi; +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0'); + estimated | actual +-----------+-------- + 1000 | 1000 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 714 | 714 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 AND c = 0'); + estimated | actual +-----------+-------- + 143 | 142 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE b = 0 OR c = 0'); + estimated | actual +-----------+-------- + 1571 | 1572 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 AND b = 0 AND c = 0 AND d = 0'); + estimated | actual +-----------+-------- + 143 | 142 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE (a = 0 AND b = 0) OR (c = 0 AND d = 0)'); + estimated | actual +-----------+-------- + 1571 | 1572 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM mcv_lists_multi WHERE a = 0 OR b = 0 OR c = 0 OR d = 0'); + estimated | actual +-----------+-------- + 1571 | 1572 +(1 row) + +DROP TABLE mcv_lists_multi; +-- statistics on integer expressions +CREATE TABLE expr_stats (a int, b int, c int); +INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i); +ANALYZE expr_stats; +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +CREATE STATISTICS expr_stats_1 (mcv) ON (a+b), (a-b), (2*a), (3*b) FROM expr_stats; +ANALYZE expr_stats; +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (2*a) = 0 AND (3*b) = 0'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE (a+b) = 0 AND (a-b) = 0'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +DROP STATISTICS expr_stats_1; +DROP TABLE expr_stats; +-- statistics on a mix columns and expressions +CREATE TABLE expr_stats (a int, b int, c int); +INSERT INTO expr_stats SELECT mod(i,10), mod(i,10), mod(i,10) FROM generate_series(1,1000) s(i); +ANALYZE expr_stats; +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0'); + estimated | actual +-----------+-------- + 1 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (2*a), (3*b), (a+b), (a-b) FROM expr_stats; +ANALYZE expr_stats; +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (2*a) = 0 AND (3*b) = 0'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 3 AND b = 3 AND (a-b) = 0'); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +SELECT * FROM 
check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND b = 1 AND (a-b) = 0'); + estimated | actual +-----------+-------- + 1 | 0 +(1 row) + +DROP TABLE expr_stats; +-- statistics on expressions with different data types +CREATE TABLE expr_stats (a int, b name, c text); +INSERT INTO expr_stats SELECT mod(i,10), fipshash(mod(i,10)::text), fipshash(mod(i,10)::text) FROM generate_series(1,1000) s(i); +ANALYZE expr_stats; +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); + estimated | actual +-----------+-------- + 11 | 100 +(1 row) + +CREATE STATISTICS expr_stats_1 (mcv) ON a, b, (b || c), (c || b) FROM expr_stats; +ANALYZE expr_stats; +SELECT * FROM check_estimated_rows('SELECT * FROM expr_stats WHERE a = 0 AND (b || c) <= ''z'' AND (c || b) >= ''0'''); + estimated | actual +-----------+-------- + 100 | 100 +(1 row) + +DROP TABLE expr_stats; +-- test handling of a mix of compatible and incompatible expressions +CREATE TABLE expr_stats_incompatible_test ( + c0 double precision, + c1 boolean NOT NULL +); +CREATE STATISTICS expr_stat_comp_1 ON c0, c1 FROM expr_stats_incompatible_test; +INSERT INTO expr_stats_incompatible_test VALUES (1234,false), (5678,true); +ANALYZE expr_stats_incompatible_test; +SELECT c0 FROM ONLY expr_stats_incompatible_test WHERE +( + upper('x') LIKE ('x'||('[0,1]'::int4range)) + AND + (c0 IN (0, 1) OR c1) +); + c0 +---- +(0 rows) + +DROP TABLE expr_stats_incompatible_test; +-- Permission tests. Users should not be able to see specific data values in +-- the extended statistics, if they lack permission to see those values in +-- the underlying table. +-- +-- Currently this is only relevant for MCV stats. +CREATE SCHEMA tststats; +CREATE TABLE tststats.priv_test_tbl ( + a int, + b int +); +INSERT INTO tststats.priv_test_tbl + SELECT mod(i,5), mod(i,10) FROM generate_series(1,100) s(i); +CREATE STATISTICS tststats.priv_test_stats (mcv) ON a, b + FROM tststats.priv_test_tbl; +ANALYZE tststats.priv_test_tbl; +-- Check printing info about extended statistics by \dX +create table stts_t1 (a int, b int); +create statistics (ndistinct) on a, b from stts_t1; +create statistics (ndistinct, dependencies) on a, b from stts_t1; +create statistics (ndistinct, dependencies, mcv) on a, b from stts_t1; +create table stts_t2 (a int, b int, c int); +create statistics on b, c from stts_t2; +create table stts_t3 (col1 int, col2 int, col3 int); +create statistics stts_hoge on col1, col2, col3 from stts_t3; +create schema stts_s1; +create schema stts_s2; +create statistics stts_s1.stts_foo on col1, col2 from stts_t3; +create statistics stts_s2.stts_yama (dependencies, mcv) on col1, col3 from stts_t3; +insert into stts_t1 select i,i from generate_series(1,100) i; +analyze stts_t1; +set search_path to public, stts_s1, stts_s2, tststats; +\dX + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- + public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | + public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined + public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined + public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined + public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined + public | stts_t1_a_b_stat | a, b FROM 
stts_t1 | defined | | + public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | + public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined + public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined + stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined + stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined + tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined +(12 rows) + +\dX stts_t* + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+-------------------+-------------------+-----------+--------------+--------- + public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | + public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | + public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined + public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined +(4 rows) + +\dX *stts_hoge + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+-----------+-------------------------------+-----------+--------------+--------- + public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined +(1 row) + +\dX+ + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +----------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- + public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | + public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined + public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined + public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined + public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined + public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | + public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | + public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined + public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined + stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined + stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined + tststats | priv_test_stats | a, b FROM priv_test_tbl | | | defined +(12 rows) + +\dX+ stts_t* + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+-------------------+-------------------+-----------+--------------+--------- + public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | + public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | + public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined + public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined +(4 rows) + +\dX+ *stts_hoge + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+-----------+-------------------------------+-----------+--------------+--------- + public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined +(1 row) + +\dX+ stts_s2.stts_yama + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +---------+-----------+-------------------------+-----------+--------------+--------- + stts_s2 | stts_yama | col1, col3 FROM stts_t3 | | defined | defined +(1 row) + +create statistics (mcv) ON a, b, (a+b), (a-b) 
FROM stts_t1; +create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1; +create statistics (mcv) ON (a+b), (a-b) FROM stts_t1; +\dX stts_t*expr* + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+-----------------------------+-------------------------------------+-----------+--------------+--------- + public | stts_t1_a_b_expr_expr_stat | a, b, (a + b), (a - b) FROM stts_t1 | | | defined + public | stts_t1_a_b_expr_expr_stat1 | a, b, (a + b), (a - b) FROM stts_t1 | | | defined + public | stts_t1_expr_expr_stat | (a + b), (a - b) FROM stts_t1 | | | defined +(3 rows) + +drop statistics stts_t1_a_b_expr_expr_stat; +drop statistics stts_t1_a_b_expr_expr_stat1; +drop statistics stts_t1_expr_expr_stat; +set search_path to public, stts_s1; +\dX + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +---------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- + public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | + public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined + public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined + public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined + public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined + public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | + public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | + public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined + public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined + stts_s1 | stts_foo | col1, col2 FROM stts_t3 | defined | defined | defined +(10 rows) + +create role regress_stats_ext nosuperuser; +set role regress_stats_ext; +\dX + List of extended statistics + Schema | Name | Definition | Ndistinct | Dependencies | MCV +--------+------------------------+------------------------------------------------------------------+-----------+--------------+--------- + public | func_deps_stat | (a * 2), upper(b), (c + 1::numeric) FROM functional_dependencies | | defined | + public | mcv_lists_arrays_stats | a, b, c FROM mcv_lists_arrays | | | defined + public | mcv_lists_bool_stats | a, b, c FROM mcv_lists_bool | | | defined + public | mcv_lists_stats | a, b, d FROM mcv_lists | | | defined + public | stts_hoge | col1, col2, col3 FROM stts_t3 | defined | defined | defined + public | stts_t1_a_b_stat | a, b FROM stts_t1 | defined | | + public | stts_t1_a_b_stat1 | a, b FROM stts_t1 | defined | defined | + public | stts_t1_a_b_stat2 | a, b FROM stts_t1 | defined | defined | defined + public | stts_t2_b_c_stat | b, c FROM stts_t2 | defined | defined | defined +(9 rows) + +reset role; +drop table stts_t1, stts_t2, stts_t3; +drop schema stts_s1, stts_s2 cascade; +drop user regress_stats_ext; +reset search_path; +-- User with no access +CREATE USER regress_stats_user1; +GRANT USAGE ON SCHEMA tststats TO regress_stats_user1; +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_tbl; -- Permission denied +ERROR: permission denied for table priv_test_tbl +-- Check individual columns if we don't have table privilege +SELECT * FROM tststats.priv_test_tbl + WHERE a = 1 and tststats.priv_test_tbl.* > (1, 1) is not null; +ERROR: permission denied for table priv_test_tbl +-- Attempt to gain access using a leaky operator +CREATE FUNCTION 
op_leak(int, int) RETURNS bool + AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' + LANGUAGE plpgsql; +CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, + restrict = scalarltsel); +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied +ERROR: permission denied for table priv_test_tbl +DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied +ERROR: permission denied for table priv_test_tbl +-- Grant access via a security barrier view, but hide all data +RESET SESSION AUTHORIZATION; +CREATE VIEW tststats.priv_test_view WITH (security_barrier=true) + AS SELECT * FROM tststats.priv_test_tbl WHERE false; +GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1; +-- Should now have access via the view, but see nothing and leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak + a | b +---+--- +(0 rows) + +DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- Grant table access, but hide all data with RLS +RESET SESSION AUTHORIZATION; +ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; +GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; +-- Should now have direct table access, but see nothing and leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak + a | b +---+--- +(0 rows) + +DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- Tidy up +DROP OPERATOR <<< (int, int); +DROP FUNCTION op_leak(int, int); +RESET SESSION AUTHORIZATION; +DROP SCHEMA tststats CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table tststats.priv_test_tbl +drop cascades to view tststats.priv_test_view +DROP USER regress_stats_user1; diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out new file mode 100644 index 0000000..6269856 --- /dev/null +++ b/src/test/regress/expected/strings.out @@ -0,0 +1,2605 @@ +-- +-- STRINGS +-- Test various data entry syntaxes. +-- +-- SQL string continuation syntax +-- E021-03 character string literals +SELECT 'first line' +' - next line' + ' - third line' + AS "Three lines to one"; + Three lines to one +------------------------------------- + first line - next line - third line +(1 row) + +-- illegal string continuation syntax +SELECT 'first line' +' - next line' /* this comment is not allowed here */ +' - third line' + AS "Illegal comment within continuation"; +ERROR: syntax error at or near "' - third line'" +LINE 3: ' - third line' + ^ +-- Unicode escapes +SET standard_conforming_strings TO on; +SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061"; + data +------ + data +(1 row) + +SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*'; + dat\+000061 +------------- + dat\+000061 +(1 row) + +SELECT U&'a\\b' AS "a\b"; + a\b +----- + a\b +(1 row) + +SELECT U&' \' UESCAPE '!' AS "tricky"; + tricky +-------- + \ +(1 row) + +SELECT 'tricky' AS U&"\" UESCAPE '!'; + \ +-------- + tricky +(1 row) + +SELECT U&'wrong: \061'; +ERROR: invalid Unicode escape +LINE 1: SELECT U&'wrong: \061'; + ^ +HINT: Unicode escapes must be \XXXX or \+XXXXXX. +SELECT U&'wrong: \+0061'; +ERROR: invalid Unicode escape +LINE 1: SELECT U&'wrong: \+0061'; + ^ +HINT: Unicode escapes must be \XXXX or \+XXXXXX. 
+SELECT U&'wrong: +0061' UESCAPE +; +ERROR: UESCAPE must be followed by a simple string literal at or near "+" +LINE 1: SELECT U&'wrong: +0061' UESCAPE +; + ^ +SELECT U&'wrong: +0061' UESCAPE '+'; +ERROR: invalid Unicode escape character at or near "'+'" +LINE 1: SELECT U&'wrong: +0061' UESCAPE '+'; + ^ +SELECT U&'wrong: \db99'; +ERROR: invalid Unicode surrogate pair +LINE 1: SELECT U&'wrong: \db99'; + ^ +SELECT U&'wrong: \db99xy'; +ERROR: invalid Unicode surrogate pair +LINE 1: SELECT U&'wrong: \db99xy'; + ^ +SELECT U&'wrong: \db99\\'; +ERROR: invalid Unicode surrogate pair +LINE 1: SELECT U&'wrong: \db99\\'; + ^ +SELECT U&'wrong: \db99\0061'; +ERROR: invalid Unicode surrogate pair +LINE 1: SELECT U&'wrong: \db99\0061'; + ^ +SELECT U&'wrong: \+00db99\+000061'; +ERROR: invalid Unicode surrogate pair +LINE 1: SELECT U&'wrong: \+00db99\+000061'; + ^ +SELECT U&'wrong: \+2FFFFF'; +ERROR: invalid Unicode escape value +LINE 1: SELECT U&'wrong: \+2FFFFF'; + ^ +-- while we're here, check the same cases in E-style literals +SELECT E'd\u0061t\U00000061' AS "data"; + data +------ + data +(1 row) + +SELECT E'a\\b' AS "a\b"; + a\b +----- + a\b +(1 row) + +SELECT E'wrong: \u061'; +ERROR: invalid Unicode escape +LINE 1: SELECT E'wrong: \u061'; + ^ +HINT: Unicode escapes must be \uXXXX or \UXXXXXXXX. +SELECT E'wrong: \U0061'; +ERROR: invalid Unicode escape +LINE 1: SELECT E'wrong: \U0061'; + ^ +HINT: Unicode escapes must be \uXXXX or \UXXXXXXXX. +SELECT E'wrong: \udb99'; +ERROR: invalid Unicode surrogate pair at or near "'" +LINE 1: SELECT E'wrong: \udb99'; + ^ +SELECT E'wrong: \udb99xy'; +ERROR: invalid Unicode surrogate pair at or near "x" +LINE 1: SELECT E'wrong: \udb99xy'; + ^ +SELECT E'wrong: \udb99\\'; +ERROR: invalid Unicode surrogate pair at or near "\" +LINE 1: SELECT E'wrong: \udb99\\'; + ^ +SELECT E'wrong: \udb99\u0061'; +ERROR: invalid Unicode surrogate pair at or near "\u0061" +LINE 1: SELECT E'wrong: \udb99\u0061'; + ^ +SELECT E'wrong: \U0000db99\U00000061'; +ERROR: invalid Unicode surrogate pair at or near "\U00000061" +LINE 1: SELECT E'wrong: \U0000db99\U00000061'; + ^ +SELECT E'wrong: \U002FFFFF'; +ERROR: invalid Unicode escape value at or near "\U002FFFFF" +LINE 1: SELECT E'wrong: \U002FFFFF'; + ^ +SET standard_conforming_strings TO off; +SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061"; +ERROR: unsafe use of string constant with Unicode escapes +LINE 1: SELECT U&'d\0061t\+000061' AS U&"d\0061t\+000061"; + ^ +DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061" UESCAPE '*'; +ERROR: unsafe use of string constant with Unicode escapes +LINE 1: SELECT U&'d!0061t\+000061' UESCAPE '!' AS U&"d*0061t\+000061... + ^ +DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +SELECT U&' \' UESCAPE '!' AS "tricky"; +ERROR: unsafe use of string constant with Unicode escapes +LINE 1: SELECT U&' \' UESCAPE '!' AS "tricky"; + ^ +DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +SELECT 'tricky' AS U&"\" UESCAPE '!'; + \ +-------- + tricky +(1 row) + +SELECT U&'wrong: \061'; +ERROR: unsafe use of string constant with Unicode escapes +LINE 1: SELECT U&'wrong: \061'; + ^ +DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. 
+SELECT U&'wrong: \+0061'; +ERROR: unsafe use of string constant with Unicode escapes +LINE 1: SELECT U&'wrong: \+0061'; + ^ +DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +SELECT U&'wrong: +0061' UESCAPE '+'; +ERROR: unsafe use of string constant with Unicode escapes +LINE 1: SELECT U&'wrong: +0061' UESCAPE '+'; + ^ +DETAIL: String constants with Unicode escapes cannot be used when standard_conforming_strings is off. +RESET standard_conforming_strings; +-- bytea +SET bytea_output TO hex; +SELECT E'\\xDeAdBeEf'::bytea; + bytea +------------ + \xdeadbeef +(1 row) + +SELECT E'\\x De Ad Be Ef '::bytea; + bytea +------------ + \xdeadbeef +(1 row) + +SELECT E'\\xDeAdBeE'::bytea; +ERROR: invalid hexadecimal data: odd number of digits +LINE 1: SELECT E'\\xDeAdBeE'::bytea; + ^ +SELECT E'\\xDeAdBeEx'::bytea; +ERROR: invalid hexadecimal digit: "x" +LINE 1: SELECT E'\\xDeAdBeEx'::bytea; + ^ +SELECT E'\\xDe00BeEf'::bytea; + bytea +------------ + \xde00beef +(1 row) + +SELECT E'DeAdBeEf'::bytea; + bytea +-------------------- + \x4465416442654566 +(1 row) + +SELECT E'De\\000dBeEf'::bytea; + bytea +-------------------- + \x4465006442654566 +(1 row) + +SELECT E'De\123dBeEf'::bytea; + bytea +-------------------- + \x4465536442654566 +(1 row) + +SELECT E'De\\123dBeEf'::bytea; + bytea +-------------------- + \x4465536442654566 +(1 row) + +SELECT E'De\\678dBeEf'::bytea; +ERROR: invalid input syntax for type bytea +LINE 1: SELECT E'De\\678dBeEf'::bytea; + ^ +SET bytea_output TO escape; +SELECT E'\\xDeAdBeEf'::bytea; + bytea +------------------ + \336\255\276\357 +(1 row) + +SELECT E'\\x De Ad Be Ef '::bytea; + bytea +------------------ + \336\255\276\357 +(1 row) + +SELECT E'\\xDe00BeEf'::bytea; + bytea +------------------ + \336\000\276\357 +(1 row) + +SELECT E'DeAdBeEf'::bytea; + bytea +---------- + DeAdBeEf +(1 row) + +SELECT E'De\\000dBeEf'::bytea; + bytea +------------- + De\000dBeEf +(1 row) + +SELECT E'De\\123dBeEf'::bytea; + bytea +---------- + DeSdBeEf +(1 row) + +-- Test non-error-throwing API too +SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info(E'\\xDeAdBeE', 'bytea'); + message | detail | hint | sql_error_code +------------------------------------------------+--------+------+---------------- + invalid hexadecimal data: odd number of digits | | | 22023 +(1 row) + +SELECT * FROM pg_input_error_info(E'\\xDeAdBeEx', 'bytea'); + message | detail | hint | sql_error_code +--------------------------------+--------+------+---------------- + invalid hexadecimal digit: "x" | | | 22023 +(1 row) + +SELECT * FROM pg_input_error_info(E'foo\\99bar', 'bytea'); + message | detail | hint | sql_error_code +-------------------------------------+--------+------+---------------- + invalid input syntax for type bytea | | | 22P02 +(1 row) + +-- +-- test conversions between various string types +-- E021-10 implicit casting among the character data types +-- +SELECT CAST(f1 AS text) AS "text(char)" FROM CHAR_TBL; + text(char) +------------ + a + ab + abcd + abcd +(4 rows) + +SELECT CAST(f1 AS text) AS "text(varchar)" FROM VARCHAR_TBL; + text(varchar) +--------------- + a + ab + abcd + abcd +(4 rows) + +SELECT CAST(name 'namefield' AS text) AS "text(name)"; + text(name) +------------ + namefield +(1 row) + +-- since this is an explicit cast, it should truncate w/o error: +SELECT CAST(f1 AS char(10)) AS "char(text)" FROM TEXT_TBL; + char(text) +------------ + doh! 
+ hi de ho n +(2 rows) + +-- note: implicit-cast case is tested in char.sql +SELECT CAST(f1 AS char(20)) AS "char(text)" FROM TEXT_TBL; + char(text) +---------------------- + doh! + hi de ho neighbor +(2 rows) + +SELECT CAST(f1 AS char(10)) AS "char(varchar)" FROM VARCHAR_TBL; + char(varchar) +--------------- + a + ab + abcd + abcd +(4 rows) + +SELECT CAST(name 'namefield' AS char(10)) AS "char(name)"; + char(name) +------------ + namefield +(1 row) + +SELECT CAST(f1 AS varchar) AS "varchar(text)" FROM TEXT_TBL; + varchar(text) +------------------- + doh! + hi de ho neighbor +(2 rows) + +SELECT CAST(f1 AS varchar) AS "varchar(char)" FROM CHAR_TBL; + varchar(char) +--------------- + a + ab + abcd + abcd +(4 rows) + +SELECT CAST(name 'namefield' AS varchar) AS "varchar(name)"; + varchar(name) +--------------- + namefield +(1 row) + +-- +-- test SQL string functions +-- E### and T### are feature reference numbers from SQL99 +-- +-- E021-09 trim function +SELECT TRIM(BOTH FROM ' bunch o blanks ') = 'bunch o blanks' AS "bunch o blanks"; + bunch o blanks +---------------- + t +(1 row) + +SELECT TRIM(LEADING FROM ' bunch o blanks ') = 'bunch o blanks ' AS "bunch o blanks "; + bunch o blanks +------------------ + t +(1 row) + +SELECT TRIM(TRAILING FROM ' bunch o blanks ') = ' bunch o blanks' AS " bunch o blanks"; + bunch o blanks +------------------ + t +(1 row) + +SELECT TRIM(BOTH 'x' FROM 'xxxxxsome Xsxxxxx') = 'some Xs' AS "some Xs"; + some Xs +--------- + t +(1 row) + +-- E021-06 substring expression +SELECT SUBSTRING('1234567890' FROM 3) = '34567890' AS "34567890"; + 34567890 +---------- + t +(1 row) + +SELECT SUBSTRING('1234567890' FROM 4 FOR 3) = '456' AS "456"; + 456 +----- + t +(1 row) + +-- test overflow cases +SELECT SUBSTRING('string' FROM 2 FOR 2147483646) AS "tring"; + tring +------- + tring +(1 row) + +SELECT SUBSTRING('string' FROM -10 FOR 2147483646) AS "string"; + string +-------- + string +(1 row) + +SELECT SUBSTRING('string' FROM -10 FOR -2147483646) AS "error"; +ERROR: negative substring length not allowed +-- T581 regular expression substring (with SQL's bizarre regexp syntax) +SELECT SUBSTRING('abcdefg' SIMILAR 'a#"(b_d)#"%' ESCAPE '#') AS "bcd"; + bcd +----- + bcd +(1 row) + +-- obsolete SQL99 syntax +SELECT SUBSTRING('abcdefg' FROM 'a#"(b_d)#"%' FOR '#') AS "bcd"; + bcd +----- + bcd +(1 row) + +-- No match should return NULL +SELECT SUBSTRING('abcdefg' SIMILAR '#"(b_d)#"%' ESCAPE '#') IS NULL AS "True"; + True +------ + t +(1 row) + +-- Null inputs should return NULL +SELECT SUBSTRING('abcdefg' SIMILAR '%' ESCAPE NULL) IS NULL AS "True"; + True +------ + t +(1 row) + +SELECT SUBSTRING(NULL SIMILAR '%' ESCAPE '#') IS NULL AS "True"; + True +------ + t +(1 row) + +SELECT SUBSTRING('abcdefg' SIMILAR NULL ESCAPE '#') IS NULL AS "True"; + True +------ + t +(1 row) + +-- The first and last parts should act non-greedy +SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%#"g' ESCAPE '#') AS "bcdef"; + bcdef +------- + bcdef +(1 row) + +SELECT SUBSTRING('abcdefg' SIMILAR 'a*#"%#"g*' ESCAPE '#') AS "abcdefg"; + abcdefg +--------- + abcdefg +(1 row) + +-- Vertical bar in any part affects only that part +SELECT SUBSTRING('abcdefg' SIMILAR 'a|b#"%#"g' ESCAPE '#') AS "bcdef"; + bcdef +------- + bcdef +(1 row) + +SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%#"x|g' ESCAPE '#') AS "bcdef"; + bcdef +------- + bcdef +(1 row) + +SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%|ab#"g' ESCAPE '#') AS "bcdef"; + bcdef +------- + bcdef +(1 row) + +-- Can't have more than two part separators +SELECT SUBSTRING('abcdefg' 
SIMILAR 'a*#"%#"g*#"x' ESCAPE '#') AS "error"; +ERROR: SQL regular expression may not contain more than two escape-double-quote separators +CONTEXT: SQL function "substring" statement 1 +-- Postgres extension: with 0 or 1 separator, assume parts 1 and 3 are empty +SELECT SUBSTRING('abcdefg' SIMILAR 'a#"%g' ESCAPE '#') AS "bcdefg"; + bcdefg +-------- + bcdefg +(1 row) + +SELECT SUBSTRING('abcdefg' SIMILAR 'a%g' ESCAPE '#') AS "abcdefg"; + abcdefg +--------- + abcdefg +(1 row) + +-- substring() with just two arguments is not allowed by SQL spec; +-- we accept it, but we interpret the pattern as a POSIX regexp not SQL +SELECT SUBSTRING('abcdefg' FROM 'c.e') AS "cde"; + cde +----- + cde +(1 row) + +-- With a parenthesized subexpression, return only what matches the subexpr +SELECT SUBSTRING('abcdefg' FROM 'b(.*)f') AS "cde"; + cde +----- + cde +(1 row) + +-- Check case where we have a match, but not a subexpression match +SELECT SUBSTRING('foo' FROM 'foo(bar)?') IS NULL AS t; + t +--- + t +(1 row) + +-- Check behavior of SIMILAR TO, which uses largely the same regexp variant +SELECT 'abcdefg' SIMILAR TO '_bcd%' AS true; + true +------ + t +(1 row) + +SELECT 'abcdefg' SIMILAR TO 'bcd%' AS false; + false +------- + f +(1 row) + +SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '#' AS false; + false +------- + f +(1 row) + +SELECT 'abcd%' SIMILAR TO '_bcd#%' ESCAPE '#' AS true; + true +------ + t +(1 row) + +-- Postgres uses '\' as the default escape character, which is not per spec +SELECT 'abcdefg' SIMILAR TO '_bcd\%' AS false; + false +------- + f +(1 row) + +-- and an empty string to mean "no escape", which is also not per spec +SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true; + true +------ + t +(1 row) + +-- these behaviors are per spec, though: +SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null; + null +------ + +(1 row) + +SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error; +ERROR: invalid escape string +HINT: Escape string must be empty or one character. 
+-- Test backslash escapes in regexp_replace's replacement string +SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3'); + regexp_replace +---------------- + (111) 222-3333 +(1 row) + +SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\&Y', 'g'); + regexp_replace +------------------- + fXooYbaXrrYbaXzzY +(1 row) + +SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\\\Y', 'g'); + regexp_replace +---------------- + fX\YbaX\YbaX\Y +(1 row) + +-- not an error, though perhaps it should be: +SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\Y\\1Z\\'); + regexp_replace +----------------- + fX\YoZ\barrbazz +(1 row) + +SELECT regexp_replace('AAA BBB CCC ', E'\\s+', ' ', 'g'); + regexp_replace +---------------- + AAA BBB CCC +(1 row) + +SELECT regexp_replace('AAA', '^|$', 'Z', 'g'); + regexp_replace +---------------- + ZAAAZ +(1 row) + +SELECT regexp_replace('AAA aaa', 'A+', 'Z', 'gi'); + regexp_replace +---------------- + Z Z +(1 row) + +-- invalid regexp option +SELECT regexp_replace('AAA aaa', 'A+', 'Z', 'z'); +ERROR: invalid regular expression option: "z" +-- extended regexp_replace tests +SELECT regexp_replace('A PostgreSQL function', 'A|e|i|o|u', 'X', 1); + regexp_replace +----------------------- + X PostgreSQL function +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'A|e|i|o|u', 'X', 1, 2); + regexp_replace +----------------------- + A PXstgreSQL function +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 0, 'i'); + regexp_replace +----------------------- + X PXstgrXSQL fXnctXXn +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 1, 'i'); + regexp_replace +----------------------- + X PostgreSQL function +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 2, 'i'); + regexp_replace +----------------------- + A PXstgreSQL function +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 3, 'i'); + regexp_replace +----------------------- + A PostgrXSQL function +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 9, 'i'); + regexp_replace +----------------------- + A PostgreSQL function +(1 row) + +SELECT regexp_replace('A PostgreSQL function', 'A|e|i|o|u', 'X', 7, 0, 'i'); + regexp_replace +----------------------- + A PostgrXSQL fXnctXXn +(1 row) + +-- 'g' flag should be ignored when N is specified +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, 1, 'g'); + regexp_replace +----------------------- + A PXstgreSQL function +(1 row) + +-- errors +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', -1, 0, 'i'); +ERROR: invalid value for parameter "start": -1 +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', 1, -1, 'i'); +ERROR: invalid value for parameter "n": -1 +-- erroneous invocation of non-extended form +SELECT regexp_replace('A PostgreSQL function', 'a|e|i|o|u', 'X', '1'); +ERROR: invalid regular expression option: "1" +HINT: If you meant to use regexp_replace() with a start parameter, cast the fourth argument to integer explicitly. 
+-- regexp_count tests +SELECT regexp_count('123123123123123', '(12)3'); + regexp_count +-------------- + 5 +(1 row) + +SELECT regexp_count('123123123123', '123', 1); + regexp_count +-------------- + 4 +(1 row) + +SELECT regexp_count('123123123123', '123', 3); + regexp_count +-------------- + 3 +(1 row) + +SELECT regexp_count('123123123123', '123', 33); + regexp_count +-------------- + 0 +(1 row) + +SELECT regexp_count('ABCABCABCABC', 'Abc', 1, ''); + regexp_count +-------------- + 0 +(1 row) + +SELECT regexp_count('ABCABCABCABC', 'Abc', 1, 'i'); + regexp_count +-------------- + 4 +(1 row) + +-- errors +SELECT regexp_count('123123123123', '123', 0); +ERROR: invalid value for parameter "start": 0 +SELECT regexp_count('123123123123', '123', -3); +ERROR: invalid value for parameter "start": -3 +-- regexp_like tests +SELECT regexp_like('Steven', '^Ste(v|ph)en$'); + regexp_like +------------- + t +(1 row) + +SELECT regexp_like('a'||CHR(10)||'d', 'a.d', 'n'); + regexp_like +------------- + f +(1 row) + +SELECT regexp_like('a'||CHR(10)||'d', 'a.d', 's'); + regexp_like +------------- + t +(1 row) + +SELECT regexp_like('abc', ' a . c ', 'x'); + regexp_like +------------- + t +(1 row) + +SELECT regexp_like('abc', 'a.c', 'g'); -- error +ERROR: regexp_like() does not support the "global" option +-- regexp_instr tests +SELECT regexp_instr('abcdefghi', 'd.f'); + regexp_instr +-------------- + 4 +(1 row) + +SELECT regexp_instr('abcdefghi', 'd.q'); + regexp_instr +-------------- + 0 +(1 row) + +SELECT regexp_instr('abcabcabc', 'a.c'); + regexp_instr +-------------- + 1 +(1 row) + +SELECT regexp_instr('abcabcabc', 'a.c', 2); + regexp_instr +-------------- + 4 +(1 row) + +SELECT regexp_instr('abcabcabc', 'a.c', 1, 3); + regexp_instr +-------------- + 7 +(1 row) + +SELECT regexp_instr('abcabcabc', 'a.c', 1, 4); + regexp_instr +-------------- + 0 +(1 row) + +SELECT regexp_instr('abcabcabc', 'A.C', 1, 2, 0, 'i'); + regexp_instr +-------------- + 4 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 0); + regexp_instr +-------------- + 1 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 1); + regexp_instr +-------------- + 1 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 2); + regexp_instr +-------------- + 4 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 3); + regexp_instr +-------------- + 5 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 4); + regexp_instr +-------------- + 7 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 5); + regexp_instr +-------------- + 0 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 1, 'i', 0); + regexp_instr +-------------- + 9 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 1, 'i', 1); + regexp_instr +-------------- + 4 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 1, 'i', 2); + regexp_instr +-------------- + 9 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 1, 'i', 3); + regexp_instr +-------------- + 7 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 1, 'i', 4); + regexp_instr +-------------- + 9 +(1 row) + +SELECT regexp_instr('1234567890', '(123)(4(56)(78))', 1, 1, 1, 'i', 5); + regexp_instr +-------------- + 0 +(1 row) + +-- Check case where we have a match, but not a subexpression match +SELECT regexp_instr('foo', 'foo(bar)?', 1, 1, 0, '', 1); + regexp_instr 
+-------------- + 0 +(1 row) + +-- errors +SELECT regexp_instr('abcabcabc', 'a.c', 0, 1); +ERROR: invalid value for parameter "start": 0 +SELECT regexp_instr('abcabcabc', 'a.c', 1, 0); +ERROR: invalid value for parameter "n": 0 +SELECT regexp_instr('abcabcabc', 'a.c', 1, 1, -1); +ERROR: invalid value for parameter "endoption": -1 +SELECT regexp_instr('abcabcabc', 'a.c', 1, 1, 2); +ERROR: invalid value for parameter "endoption": 2 +SELECT regexp_instr('abcabcabc', 'a.c', 1, 1, 0, 'g'); +ERROR: regexp_instr() does not support the "global" option +SELECT regexp_instr('abcabcabc', 'a.c', 1, 1, 0, '', -1); +ERROR: invalid value for parameter "subexpr": -1 +-- regexp_substr tests +SELECT regexp_substr('abcdefghi', 'd.f'); + regexp_substr +--------------- + def +(1 row) + +SELECT regexp_substr('abcdefghi', 'd.q') IS NULL AS t; + t +--- + t +(1 row) + +SELECT regexp_substr('abcabcabc', 'a.c'); + regexp_substr +--------------- + abc +(1 row) + +SELECT regexp_substr('abcabcabc', 'a.c', 2); + regexp_substr +--------------- + abc +(1 row) + +SELECT regexp_substr('abcabcabc', 'a.c', 1, 3); + regexp_substr +--------------- + abc +(1 row) + +SELECT regexp_substr('abcabcabc', 'a.c', 1, 4) IS NULL AS t; + t +--- + t +(1 row) + +SELECT regexp_substr('abcabcabc', 'A.C', 1, 2, 'i'); + regexp_substr +--------------- + abc +(1 row) + +SELECT regexp_substr('1234567890', '(123)(4(56)(78))', 1, 1, 'i', 0); + regexp_substr +--------------- + 12345678 +(1 row) + +SELECT regexp_substr('1234567890', '(123)(4(56)(78))', 1, 1, 'i', 1); + regexp_substr +--------------- + 123 +(1 row) + +SELECT regexp_substr('1234567890', '(123)(4(56)(78))', 1, 1, 'i', 2); + regexp_substr +--------------- + 45678 +(1 row) + +SELECT regexp_substr('1234567890', '(123)(4(56)(78))', 1, 1, 'i', 3); + regexp_substr +--------------- + 56 +(1 row) + +SELECT regexp_substr('1234567890', '(123)(4(56)(78))', 1, 1, 'i', 4); + regexp_substr +--------------- + 78 +(1 row) + +SELECT regexp_substr('1234567890', '(123)(4(56)(78))', 1, 1, 'i', 5) IS NULL AS t; + t +--- + t +(1 row) + +-- Check case where we have a match, but not a subexpression match +SELECT regexp_substr('foo', 'foo(bar)?', 1, 1, '', 1) IS NULL AS t; + t +--- + t +(1 row) + +-- errors +SELECT regexp_substr('abcabcabc', 'a.c', 0, 1); +ERROR: invalid value for parameter "start": 0 +SELECT regexp_substr('abcabcabc', 'a.c', 1, 0); +ERROR: invalid value for parameter "n": 0 +SELECT regexp_substr('abcabcabc', 'a.c', 1, 1, 'g'); +ERROR: regexp_substr() does not support the "global" option +SELECT regexp_substr('abcabcabc', 'a.c', 1, 1, '', -1); +ERROR: invalid value for parameter "subexpr": -1 +-- set so we can tell NULL from empty string +\pset null '\\N' +-- return all matches from regexp +SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$); + regexp_matches +---------------- + {bar,beque} +(1 row) + +-- test case insensitive +SELECT regexp_matches('foObARbEqUEbAz', $re$(bar)(beque)$re$, 'i'); + regexp_matches +---------------- + {bAR,bEqUE} +(1 row) + +-- global option - more than one match +SELECT regexp_matches('foobarbequebazilbarfbonk', $re$(b[^b]+)(b[^b]+)$re$, 'g'); + regexp_matches +---------------- + {bar,beque} + {bazil,barf} +(2 rows) + +-- empty capture group (matched empty string) +SELECT regexp_matches('foobarbequebaz', $re$(bar)(.*)(beque)$re$); + regexp_matches +---------------- + {bar,"",beque} +(1 row) + +-- no match +SELECT regexp_matches('foobarbequebaz', $re$(bar)(.+)(beque)$re$); + regexp_matches +---------------- +(0 rows) + +-- optional capture group did not 
match, null entry in array +SELECT regexp_matches('foobarbequebaz', $re$(bar)(.+)?(beque)$re$); + regexp_matches +------------------ + {bar,NULL,beque} +(1 row) + +-- no capture groups +SELECT regexp_matches('foobarbequebaz', $re$barbeque$re$); + regexp_matches +---------------- + {barbeque} +(1 row) + +-- start/end-of-line matches are of zero length +SELECT regexp_matches('foo' || chr(10) || 'bar' || chr(10) || 'bequq' || chr(10) || 'baz', '^', 'mg'); + regexp_matches +---------------- + {""} + {""} + {""} + {""} +(4 rows) + +SELECT regexp_matches('foo' || chr(10) || 'bar' || chr(10) || 'bequq' || chr(10) || 'baz', '$', 'mg'); + regexp_matches +---------------- + {""} + {""} + {""} + {""} +(4 rows) + +SELECT regexp_matches('1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4' || chr(10), '^.?', 'mg'); + regexp_matches +---------------- + {1} + {2} + {3} + {4} + {""} +(5 rows) + +SELECT regexp_matches(chr(10) || '1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4' || chr(10), '.?$', 'mg'); + regexp_matches +---------------- + {""} + {1} + {""} + {2} + {""} + {3} + {""} + {4} + {""} + {""} +(10 rows) + +SELECT regexp_matches(chr(10) || '1' || chr(10) || '2' || chr(10) || '3' || chr(10) || '4', '.?$', 'mg'); + regexp_matches +---------------- + {""} + {1} + {""} + {2} + {""} + {3} + {""} + {4} + {""} +(9 rows) + +-- give me errors +SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$, 'gz'); +ERROR: invalid regular expression option: "z" +SELECT regexp_matches('foobarbequebaz', $re$(barbeque$re$); +ERROR: invalid regular expression: parentheses () not balanced +SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque){2,1}$re$); +ERROR: invalid regular expression: invalid repetition count(s) +-- split string on regexp +SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', $re$\s+$re$) AS foo; + foo | length +-------+-------- + the | 3 + quick | 5 + brown | 5 + fox | 3 + jumps | 5 + over | 4 + the | 3 + lazy | 4 + dog | 3 +(9 rows) + +SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', $re$\s+$re$); + regexp_split_to_array +----------------------------------------------- + {the,quick,brown,fox,jumps,over,the,lazy,dog} +(1 row) + +SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', $re$\s*$re$) AS foo; + foo | length +-----+-------- + t | 1 + h | 1 + e | 1 + q | 1 + u | 1 + i | 1 + c | 1 + k | 1 + b | 1 + r | 1 + o | 1 + w | 1 + n | 1 + f | 1 + o | 1 + x | 1 + j | 1 + u | 1 + m | 1 + p | 1 + s | 1 + o | 1 + v | 1 + e | 1 + r | 1 + t | 1 + h | 1 + e | 1 + l | 1 + a | 1 + z | 1 + y | 1 + d | 1 + o | 1 + g | 1 +(35 rows) + +SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', $re$\s*$re$); + regexp_split_to_array +------------------------------------------------------------------------- + {t,h,e,q,u,i,c,k,b,r,o,w,n,f,o,x,j,u,m,p,s,o,v,e,r,t,h,e,l,a,z,y,d,o,g} +(1 row) + +SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', '') AS foo; + foo | length +-----+-------- + t | 1 + h | 1 + e | 1 + | 1 + q | 1 + u | 1 + i | 1 + c | 1 + k | 1 + | 1 + b | 1 + r | 1 + o | 1 + w | 1 + n | 1 + | 1 + f | 1 + o | 1 + x | 1 + | 1 + j | 1 + u | 1 + m | 1 + p | 1 + s | 1 + | 1 + o | 1 + v | 1 + e | 1 + r | 1 + | 1 + t | 1 + h | 1 + e | 1 + | 1 + l | 1 + a | 1 + z | 1 + y | 1 + | 1 + d | 1 + o | 1 + g | 1 +(43 rows) + +SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', ''); + 
regexp_split_to_array +--------------------------------------------------------------------------------------------------------- + {t,h,e," ",q,u,i,c,k," ",b,r,o,w,n," ",f,o,x," ",j,u,m,p,s," ",o,v,e,r," ",t,h,e," ",l,a,z,y," ",d,o,g} +(1 row) + +-- case insensitive +SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'i') AS foo; + foo | length +---------------------------+-------- + th | 2 + QUick bROWn FOx jUMPs ov | 25 + r Th | 4 + lazy dOG | 9 +(4 rows) + +SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'i'); + regexp_split_to_array +----------------------------------------------------- + {th," QUick bROWn FOx jUMPs ov","r Th"," lazy dOG"} +(1 row) + +-- no match of pattern +SELECT foo, length(foo) FROM regexp_split_to_table('the quick brown fox jumps over the lazy dog', 'nomatch') AS foo; + foo | length +---------------------------------------------+-------- + the quick brown fox jumps over the lazy dog | 43 +(1 row) + +SELECT regexp_split_to_array('the quick brown fox jumps over the lazy dog', 'nomatch'); + regexp_split_to_array +------------------------------------------------- + {"the quick brown fox jumps over the lazy dog"} +(1 row) + +-- some corner cases +SELECT regexp_split_to_array('123456','1'); + regexp_split_to_array +----------------------- + {"",23456} +(1 row) + +SELECT regexp_split_to_array('123456','6'); + regexp_split_to_array +----------------------- + {12345,""} +(1 row) + +SELECT regexp_split_to_array('123456','.'); + regexp_split_to_array +------------------------ + {"","","","","","",""} +(1 row) + +SELECT regexp_split_to_array('123456',''); + regexp_split_to_array +----------------------- + {1,2,3,4,5,6} +(1 row) + +SELECT regexp_split_to_array('123456','(?:)'); + regexp_split_to_array +----------------------- + {1,2,3,4,5,6} +(1 row) + +SELECT regexp_split_to_array('1',''); + regexp_split_to_array +----------------------- + {1} +(1 row) + +-- errors +SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'zippy') AS foo; +ERROR: invalid regular expression option: "z" +SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'iz'); +ERROR: invalid regular expression option: "z" +-- global option meaningless for regexp_split +SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g') AS foo; +ERROR: regexp_split_to_table() does not support the "global" option +SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g'); +ERROR: regexp_split_to_array() does not support the "global" option +-- change NULL-display back +\pset null '' +-- E021-11 position expression +SELECT POSITION('4' IN '1234567890') = '4' AS "4"; + 4 +--- + t +(1 row) + +SELECT POSITION('5' IN '1234567890') = '5' AS "5"; + 5 +--- + t +(1 row) + +-- T312 character overlay function +SELECT OVERLAY('abcdef' PLACING '45' FROM 4) AS "abc45f"; + abc45f +-------- + abc45f +(1 row) + +SELECT OVERLAY('yabadoo' PLACING 'daba' FROM 5) AS "yabadaba"; + yabadaba +---------- + yabadaba +(1 row) + +SELECT OVERLAY('yabadoo' PLACING 'daba' FROM 5 FOR 0) AS "yabadabadoo"; + yabadabadoo +------------- + yabadabadoo +(1 row) + +SELECT OVERLAY('babosa' PLACING 'ubb' FROM 2 FOR 4) AS "bubba"; + bubba +------- + bubba +(1 row) + +-- +-- test LIKE +-- Be sure to form every test as a LIKE/NOT LIKE pair. 
+-- +-- simplest examples +-- E061-04 like predicate +SELECT 'hawkeye' LIKE 'h%' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' NOT LIKE 'h%' AS "false"; + false +------- + f +(1 row) + +SELECT 'hawkeye' LIKE 'H%' AS "false"; + false +------- + f +(1 row) + +SELECT 'hawkeye' NOT LIKE 'H%' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' LIKE 'indio%' AS "false"; + false +------- + f +(1 row) + +SELECT 'hawkeye' NOT LIKE 'indio%' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' LIKE 'h%eye' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' NOT LIKE 'h%eye' AS "false"; + false +------- + f +(1 row) + +SELECT 'indio' LIKE '_ndio' AS "true"; + true +------ + t +(1 row) + +SELECT 'indio' NOT LIKE '_ndio' AS "false"; + false +------- + f +(1 row) + +SELECT 'indio' LIKE 'in__o' AS "true"; + true +------ + t +(1 row) + +SELECT 'indio' NOT LIKE 'in__o' AS "false"; + false +------- + f +(1 row) + +SELECT 'indio' LIKE 'in_o' AS "false"; + false +------- + f +(1 row) + +SELECT 'indio' NOT LIKE 'in_o' AS "true"; + true +------ + t +(1 row) + +SELECT 'abc'::name LIKE '_b_' AS "true"; + true +------ + t +(1 row) + +SELECT 'abc'::name NOT LIKE '_b_' AS "false"; + false +------- + f +(1 row) + +SELECT 'abc'::bytea LIKE '_b_'::bytea AS "true"; + true +------ + t +(1 row) + +SELECT 'abc'::bytea NOT LIKE '_b_'::bytea AS "false"; + false +------- + f +(1 row) + +-- unused escape character +SELECT 'hawkeye' LIKE 'h%' ESCAPE '#' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' NOT LIKE 'h%' ESCAPE '#' AS "false"; + false +------- + f +(1 row) + +SELECT 'indio' LIKE 'ind_o' ESCAPE '$' AS "true"; + true +------ + t +(1 row) + +SELECT 'indio' NOT LIKE 'ind_o' ESCAPE '$' AS "false"; + false +------- + f +(1 row) + +-- escape character +-- E061-05 like predicate with escape clause +SELECT 'h%' LIKE 'h#%' ESCAPE '#' AS "true"; + true +------ + t +(1 row) + +SELECT 'h%' NOT LIKE 'h#%' ESCAPE '#' AS "false"; + false +------- + f +(1 row) + +SELECT 'h%wkeye' LIKE 'h#%' ESCAPE '#' AS "false"; + false +------- + f +(1 row) + +SELECT 'h%wkeye' NOT LIKE 'h#%' ESCAPE '#' AS "true"; + true +------ + t +(1 row) + +SELECT 'h%wkeye' LIKE 'h#%%' ESCAPE '#' AS "true"; + true +------ + t +(1 row) + +SELECT 'h%wkeye' NOT LIKE 'h#%%' ESCAPE '#' AS "false"; + false +------- + f +(1 row) + +SELECT 'h%awkeye' LIKE 'h#%a%k%e' ESCAPE '#' AS "true"; + true +------ + t +(1 row) + +SELECT 'h%awkeye' NOT LIKE 'h#%a%k%e' ESCAPE '#' AS "false"; + false +------- + f +(1 row) + +SELECT 'indio' LIKE '_ndio' ESCAPE '$' AS "true"; + true +------ + t +(1 row) + +SELECT 'indio' NOT LIKE '_ndio' ESCAPE '$' AS "false"; + false +------- + f +(1 row) + +SELECT 'i_dio' LIKE 'i$_d_o' ESCAPE '$' AS "true"; + true +------ + t +(1 row) + +SELECT 'i_dio' NOT LIKE 'i$_d_o' ESCAPE '$' AS "false"; + false +------- + f +(1 row) + +SELECT 'i_dio' LIKE 'i$_nd_o' ESCAPE '$' AS "false"; + false +------- + f +(1 row) + +SELECT 'i_dio' NOT LIKE 'i$_nd_o' ESCAPE '$' AS "true"; + true +------ + t +(1 row) + +SELECT 'i_dio' LIKE 'i$_d%o' ESCAPE '$' AS "true"; + true +------ + t +(1 row) + +SELECT 'i_dio' NOT LIKE 'i$_d%o' ESCAPE '$' AS "false"; + false +------- + f +(1 row) + +SELECT 'a_c'::bytea LIKE 'a$__'::bytea ESCAPE '$'::bytea AS "true"; + true +------ + t +(1 row) + +SELECT 'a_c'::bytea NOT LIKE 'a$__'::bytea ESCAPE '$'::bytea AS "false"; + false +------- + f +(1 row) + +-- escape character same as pattern character +SELECT 'maca' LIKE 'm%aca' ESCAPE '%' AS "true"; + true +------ + t +(1 row) + +SELECT 'maca' NOT LIKE 
'm%aca' ESCAPE '%' AS "false"; + false +------- + f +(1 row) + +SELECT 'ma%a' LIKE 'm%a%%a' ESCAPE '%' AS "true"; + true +------ + t +(1 row) + +SELECT 'ma%a' NOT LIKE 'm%a%%a' ESCAPE '%' AS "false"; + false +------- + f +(1 row) + +SELECT 'bear' LIKE 'b_ear' ESCAPE '_' AS "true"; + true +------ + t +(1 row) + +SELECT 'bear' NOT LIKE 'b_ear' ESCAPE '_' AS "false"; + false +------- + f +(1 row) + +SELECT 'be_r' LIKE 'b_e__r' ESCAPE '_' AS "true"; + true +------ + t +(1 row) + +SELECT 'be_r' NOT LIKE 'b_e__r' ESCAPE '_' AS "false"; + false +------- + f +(1 row) + +SELECT 'be_r' LIKE '__e__r' ESCAPE '_' AS "false"; + false +------- + f +(1 row) + +SELECT 'be_r' NOT LIKE '__e__r' ESCAPE '_' AS "true"; + true +------ + t +(1 row) + +-- +-- test ILIKE (case-insensitive LIKE) +-- Be sure to form every test as an ILIKE/NOT ILIKE pair. +-- +SELECT 'hawkeye' ILIKE 'h%' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' NOT ILIKE 'h%' AS "false"; + false +------- + f +(1 row) + +SELECT 'hawkeye' ILIKE 'H%' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' NOT ILIKE 'H%' AS "false"; + false +------- + f +(1 row) + +SELECT 'hawkeye' ILIKE 'H%Eye' AS "true"; + true +------ + t +(1 row) + +SELECT 'hawkeye' NOT ILIKE 'H%Eye' AS "false"; + false +------- + f +(1 row) + +SELECT 'Hawkeye' ILIKE 'h%' AS "true"; + true +------ + t +(1 row) + +SELECT 'Hawkeye' NOT ILIKE 'h%' AS "false"; + false +------- + f +(1 row) + +SELECT 'ABC'::name ILIKE '_b_' AS "true"; + true +------ + t +(1 row) + +SELECT 'ABC'::name NOT ILIKE '_b_' AS "false"; + false +------- + f +(1 row) + +-- +-- test %/_ combination cases, cf bugs #4821 and #5478 +-- +SELECT 'foo' LIKE '_%' as t, 'f' LIKE '_%' as t, '' LIKE '_%' as f; + t | t | f +---+---+--- + t | t | f +(1 row) + +SELECT 'foo' LIKE '%_' as t, 'f' LIKE '%_' as t, '' LIKE '%_' as f; + t | t | f +---+---+--- + t | t | f +(1 row) + +SELECT 'foo' LIKE '__%' as t, 'foo' LIKE '___%' as t, 'foo' LIKE '____%' as f; + t | t | f +---+---+--- + t | t | f +(1 row) + +SELECT 'foo' LIKE '%__' as t, 'foo' LIKE '%___' as t, 'foo' LIKE '%____' as f; + t | t | f +---+---+--- + t | t | f +(1 row) + +SELECT 'jack' LIKE '%____%' AS t; + t +--- + t +(1 row) + +-- +-- basic tests of LIKE with indexes +-- +CREATE TABLE texttest (a text PRIMARY KEY, b int); +SELECT * FROM texttest WHERE a LIKE '%1%'; + a | b +---+--- +(0 rows) + +CREATE TABLE byteatest (a bytea PRIMARY KEY, b int); +SELECT * FROM byteatest WHERE a LIKE '%1%'; + a | b +---+--- +(0 rows) + +DROP TABLE texttest, byteatest; +-- +-- test implicit type conversion +-- +-- E021-07 character concatenation +SELECT 'unknown' || ' and unknown' AS "Concat unknown types"; + Concat unknown types +---------------------- + unknown and unknown +(1 row) + +SELECT text 'text' || ' and unknown' AS "Concat text to unknown type"; + Concat text to unknown type +----------------------------- + text and unknown +(1 row) + +SELECT char(20) 'characters' || ' and text' AS "Concat char to unknown type"; + Concat char to unknown type +----------------------------- + characters and text +(1 row) + +SELECT text 'text' || char(20) ' and characters' AS "Concat text to char"; + Concat text to char +--------------------- + text and characters +(1 row) + +SELECT text 'text' || varchar ' and varchar' AS "Concat text to varchar"; + Concat text to varchar +------------------------ + text and varchar +(1 row) + +-- +-- test substr with toasted text values +-- +CREATE TABLE toasttest(f1 text); +insert into toasttest values(repeat('1234567890',10000)); +insert 
into toasttest values(repeat('1234567890',10000)); +-- +-- Ensure that some values are uncompressed, to test the faster substring +-- operation used in that case +-- +alter table toasttest alter column f1 set storage external; +insert into toasttest values(repeat('1234567890',10000)); +insert into toasttest values(repeat('1234567890',10000)); +-- If the starting position is zero or less, then return from the start of the string +-- adjusting the length to be consistent with the "negative start" per SQL. +SELECT substr(f1, -1, 5) from toasttest; + substr +-------- + 123 + 123 + 123 + 123 +(4 rows) + +-- If the length is less than zero, an ERROR is thrown. +SELECT substr(f1, 5, -1) from toasttest; +ERROR: negative substring length not allowed +-- If no third argument (length) is provided, the length to the end of the +-- string is assumed. +SELECT substr(f1, 99995) from toasttest; + substr +-------- + 567890 + 567890 + 567890 + 567890 +(4 rows) + +-- If start plus length is > string length, the result is truncated to +-- string length +SELECT substr(f1, 99995, 10) from toasttest; + substr +-------- + 567890 + 567890 + 567890 + 567890 +(4 rows) + +TRUNCATE TABLE toasttest; +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +-- expect >0 blocks +SELECT pg_relation_size(reltoastrelid) = 0 AS is_empty + FROM pg_class where relname = 'toasttest'; + is_empty +---------- + f +(1 row) + +TRUNCATE TABLE toasttest; +ALTER TABLE toasttest set (toast_tuple_target = 4080); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +INSERT INTO toasttest values (repeat('1234567890',300)); +-- expect 0 blocks +SELECT pg_relation_size(reltoastrelid) = 0 AS is_empty + FROM pg_class where relname = 'toasttest'; + is_empty +---------- + t +(1 row) + +DROP TABLE toasttest; +-- +-- test substr with toasted bytea values +-- +CREATE TABLE toasttest(f1 bytea); +insert into toasttest values(decode(repeat('1234567890',10000),'escape')); +insert into toasttest values(decode(repeat('1234567890',10000),'escape')); +-- +-- Ensure that some values are uncompressed, to test the faster substring +-- operation used in that case +-- +alter table toasttest alter column f1 set storage external; +insert into toasttest values(decode(repeat('1234567890',10000),'escape')); +insert into toasttest values(decode(repeat('1234567890',10000),'escape')); +-- If the starting position is zero or less, then return from the start of the string +-- adjusting the length to be consistent with the "negative start" per SQL. +SELECT substr(f1, -1, 5) from toasttest; + substr +-------- + 123 + 123 + 123 + 123 +(4 rows) + +-- If the length is less than zero, an ERROR is thrown. +SELECT substr(f1, 5, -1) from toasttest; +ERROR: negative substring length not allowed +-- If no third argument (length) is provided, the length to the end of the +-- string is assumed. 
+SELECT substr(f1, 99995) from toasttest; + substr +-------- + 567890 + 567890 + 567890 + 567890 +(4 rows) + +-- If start plus length is > string length, the result is truncated to +-- string length +SELECT substr(f1, 99995, 10) from toasttest; + substr +-------- + 567890 + 567890 + 567890 + 567890 +(4 rows) + +DROP TABLE toasttest; +-- test internally compressing datums +-- this tests compressing a datum to a very small size which exercises a +-- corner case in packed-varlena handling: even though small, the compressed +-- datum must be given a 4-byte header because there are no bits to indicate +-- compression in a 1-byte header +CREATE TABLE toasttest (c char(4096)); +INSERT INTO toasttest VALUES('x'); +SELECT length(c), c::text FROM toasttest; + length | c +--------+--- + 1 | x +(1 row) + +SELECT c FROM toasttest; + c +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + x +(1 row) + +DROP TABLE toasttest; +-- +-- test length +-- +SELECT length('abcdef') AS "length_6"; + length_6 +---------- + 6 +(1 row) + +-- +-- test strpos +-- +SELECT strpos('abcdef', 'cd') AS "pos_3"; + pos_3 +------- + 3 +(1 row) + +SELECT strpos('abcdef', 'xy') AS "pos_0"; + pos_0 +------- + 0 +(1 row) + +SELECT strpos('abcdef', '') AS "pos_1"; + pos_1 +------- + 1 +(1 row) + +SELECT strpos('', 'xy') AS "pos_0"; + pos_0 +------- + 0 +(1 row) + +SELECT strpos('', '') AS "pos_1"; + pos_1 +------- + 1 +(1 row) + +-- +-- test replace +-- +SELECT replace('abcdef', 'de', '45') AS "abc45f"; + abc45f +-------- + abc45f +(1 row) + +SELECT replace('yabadabadoo', 'ba', '123') AS "ya123da123doo"; + ya123da123doo +--------------- + ya123da123doo +(1 row) + +SELECT replace('yabadoo', 'bad', '') AS "yaoo"; + yaoo +------ + yaoo +(1 row) + +-- +-- test split_part +-- +select split_part('','@',1) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('','@',-1) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('joeuser@mydatabase','',1) AS "joeuser@mydatabase"; + joeuser@mydatabase +-------------------- + joeuser@mydatabase +(1 row) + +select split_part('joeuser@mydatabase','',2) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('joeuser@mydatabase','',-1) AS "joeuser@mydatabase"; + joeuser@mydatabase +-------------------- + joeuser@mydatabase +(1 row) + +select split_part('joeuser@mydatabase','',-2) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('joeuser@mydatabase','@',0) AS "an error"; +ERROR: field position must not be zero +select split_part('joeuser@mydatabase','@@',1) AS "joeuser@mydatabase"; + joeuser@mydatabase +-------------------- + joeuser@mydatabase +(1 row) + +select split_part('joeuser@mydatabase','@@',2) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('joeuser@mydatabase','@',1) AS "joeuser"; + joeuser +--------- + joeuser +(1 row) + +select split_part('joeuser@mydatabase','@',2) AS "mydatabase"; + mydatabase +------------ + mydatabase +(1 row) + +select 
split_part('joeuser@mydatabase','@',3) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('@joeuser@mydatabase@','@',2) AS "joeuser"; + joeuser +--------- + joeuser +(1 row) + +select split_part('joeuser@mydatabase','@',-1) AS "mydatabase"; + mydatabase +------------ + mydatabase +(1 row) + +select split_part('joeuser@mydatabase','@',-2) AS "joeuser"; + joeuser +--------- + joeuser +(1 row) + +select split_part('joeuser@mydatabase','@',-3) AS "empty string"; + empty string +-------------- + +(1 row) + +select split_part('@joeuser@mydatabase@','@',-2) AS "mydatabase"; + mydatabase +------------ + mydatabase +(1 row) + +-- +-- test to_hex +-- +select to_hex(256*256*256 - 1) AS "ffffff"; + ffffff +-------- + ffffff +(1 row) + +select to_hex(256::bigint*256::bigint*256::bigint*256::bigint - 1) AS "ffffffff"; + ffffffff +---------- + ffffffff +(1 row) + +-- +-- SHA-2 +-- +SET bytea_output TO hex; +SELECT sha224(''); + sha224 +------------------------------------------------------------ + \xd14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f +(1 row) + +SELECT sha224('The quick brown fox jumps over the lazy dog.'); + sha224 +------------------------------------------------------------ + \x619cba8e8e05826e9b8c519c0a5c68f4fb653e8a3d8aa04bb2c8cd4c +(1 row) + +SELECT sha256(''); + sha256 +-------------------------------------------------------------------- + \xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +(1 row) + +SELECT sha256('The quick brown fox jumps over the lazy dog.'); + sha256 +-------------------------------------------------------------------- + \xef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c +(1 row) + +SELECT sha384(''); + sha384 +---------------------------------------------------------------------------------------------------- + \x38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b +(1 row) + +SELECT sha384('The quick brown fox jumps over the lazy dog.'); + sha384 +---------------------------------------------------------------------------------------------------- + \xed892481d8272ca6df370bf706e4d7bc1b5739fa2177aae6c50e946678718fc67a7af2819a021c2fc34e91bdb63409d7 +(1 row) + +SELECT sha512(''); + sha512 +------------------------------------------------------------------------------------------------------------------------------------ + \xcf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e +(1 row) + +SELECT sha512('The quick brown fox jumps over the lazy dog.'); + sha512 +------------------------------------------------------------------------------------------------------------------------------------ + \x91ea1245f20d46ae9a037a989f54f1f790f0a47607eeb8a14d12890cea77a1bbc6c7ed9cf205e67b7f2b8fd4c7dfd3a7a8617e45f3c463d481c7e586c39ac1ed +(1 row) + +-- +-- encode/decode +-- +SELECT encode('\x1234567890abcdef00', 'hex'); + encode +-------------------- + 1234567890abcdef00 +(1 row) + +SELECT decode('1234567890abcdef00', 'hex'); + decode +---------------------- + \x1234567890abcdef00 +(1 row) + +SELECT encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea, 'base64'); + encode +------------------------------------------------------------------------------ + EjRWeJCrze8AARI0VniQq83vAAESNFZ4kKvN7wABEjRWeJCrze8AARI0VniQq83vAAESNFZ4kKvN+ + 7wABEjRWeJCrze8AAQ== +(1 row) + +SELECT decode(encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea, + 'base64'), 'base64'); + decode 
+------------------------------------------------------------------------------------------------------------------------------------------------ + \x1234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef00011234567890abcdef0001 +(1 row) + +SELECT encode('\x1234567890abcdef00', 'escape'); + encode +----------------------------- + \x124Vx\220\253\315\357\000 +(1 row) + +SELECT decode(encode('\x1234567890abcdef00', 'escape'), 'escape'); + decode +---------------------- + \x1234567890abcdef00 +(1 row) + +-- +-- get_bit/set_bit etc +-- +SELECT get_bit('\x1234567890abcdef00'::bytea, 43); + get_bit +--------- + 1 +(1 row) + +SELECT get_bit('\x1234567890abcdef00'::bytea, 99); -- error +ERROR: index 99 out of valid range, 0..71 +SELECT set_bit('\x1234567890abcdef00'::bytea, 43, 0); + set_bit +---------------------- + \x1234567890a3cdef00 +(1 row) + +SELECT set_bit('\x1234567890abcdef00'::bytea, 99, 0); -- error +ERROR: index 99 out of valid range, 0..71 +SELECT get_byte('\x1234567890abcdef00'::bytea, 3); + get_byte +---------- + 120 +(1 row) + +SELECT get_byte('\x1234567890abcdef00'::bytea, 99); -- error +ERROR: index 99 out of valid range, 0..8 +SELECT set_byte('\x1234567890abcdef00'::bytea, 7, 11); + set_byte +---------------------- + \x1234567890abcd0b00 +(1 row) + +SELECT set_byte('\x1234567890abcdef00'::bytea, 99, 11); -- error +ERROR: index 99 out of valid range, 0..8 +-- +-- test behavior of escape_string_warning and standard_conforming_strings options +-- +set escape_string_warning = off; +set standard_conforming_strings = off; +show escape_string_warning; + escape_string_warning +----------------------- + off +(1 row) + +show standard_conforming_strings; + standard_conforming_strings +----------------------------- + off +(1 row) + +set escape_string_warning = on; +set standard_conforming_strings = on; +show escape_string_warning; + escape_string_warning +----------------------- + on +(1 row) + +show standard_conforming_strings; + standard_conforming_strings +----------------------------- + on +(1 row) + +select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6; + f1 | f2 | f3 | f4 | f5 | f6 +-------+--------+---------+-------+--------+---- + a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\ +(1 row) + +set standard_conforming_strings = off; +select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6; +WARNING: nonstandard use of \\ in a string literal +LINE 1: select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3,... + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +WARNING: nonstandard use of \\ in a string literal +LINE 1: select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3,... + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +WARNING: nonstandard use of \\ in a string literal +LINE 1: select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3,... + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +WARNING: nonstandard use of \\ in a string literal +LINE 1: ...bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' ... + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. +WARNING: nonstandard use of \\ in a string literal +LINE 1: ...'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd'... + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. 
+WARNING: nonstandard use of \\ in a string literal +LINE 1: ...'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as ... + ^ +HINT: Use the escape string syntax for backslashes, e.g., E'\\'. + f1 | f2 | f3 | f4 | f5 | f6 +-------+--------+---------+-------+--------+---- + a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\ +(1 row) + +set escape_string_warning = off; +set standard_conforming_strings = on; +select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6; + f1 | f2 | f3 | f4 | f5 | f6 +-------+--------+---------+-------+--------+---- + a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\ +(1 row) + +set standard_conforming_strings = off; +select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6; + f1 | f2 | f3 | f4 | f5 | f6 +-------+--------+---------+-------+--------+---- + a\bcd | a\b'cd | a\b''cd | abcd\ | ab\'cd | \\ +(1 row) + +reset standard_conforming_strings; +-- +-- Additional string functions +-- +SET bytea_output TO escape; +SELECT initcap('hi THOMAS'); + initcap +----------- + Hi Thomas +(1 row) + +SELECT lpad('hi', 5, 'xy'); + lpad +------- + xyxhi +(1 row) + +SELECT lpad('hi', 5); + lpad +------- + hi +(1 row) + +SELECT lpad('hi', -5, 'xy'); + lpad +------ + +(1 row) + +SELECT lpad('hello', 2); + lpad +------ + he +(1 row) + +SELECT lpad('hi', 5, ''); + lpad +------ + hi +(1 row) + +SELECT rpad('hi', 5, 'xy'); + rpad +------- + hixyx +(1 row) + +SELECT rpad('hi', 5); + rpad +------- + hi +(1 row) + +SELECT rpad('hi', -5, 'xy'); + rpad +------ + +(1 row) + +SELECT rpad('hello', 2); + rpad +------ + he +(1 row) + +SELECT rpad('hi', 5, ''); + rpad +------ + hi +(1 row) + +SELECT ltrim('zzzytrim', 'xyz'); + ltrim +------- + trim +(1 row) + +SELECT translate('', '14', 'ax'); + translate +----------- + +(1 row) + +SELECT translate('12345', '14', 'ax'); + translate +----------- + a23x5 +(1 row) + +SELECT translate('12345', '134', 'a'); + translate +----------- + a25 +(1 row) + +SELECT ascii('x'); + ascii +------- + 120 +(1 row) + +SELECT ascii(''); + ascii +------- + 0 +(1 row) + +SELECT chr(65); + chr +----- + A +(1 row) + +SELECT chr(0); +ERROR: null character not permitted +SELECT repeat('Pg', 4); + repeat +---------- + PgPgPgPg +(1 row) + +SELECT repeat('Pg', -4); + repeat +-------- + +(1 row) + +SELECT SUBSTRING('1234567890'::bytea FROM 3) "34567890"; + 34567890 +---------- + 34567890 +(1 row) + +SELECT SUBSTRING('1234567890'::bytea FROM 4 FOR 3) AS "456"; + 456 +----- + 456 +(1 row) + +SELECT SUBSTRING('string'::bytea FROM 2 FOR 2147483646) AS "tring"; + tring +------- + tring +(1 row) + +SELECT SUBSTRING('string'::bytea FROM -10 FOR 2147483646) AS "string"; + string +-------- + string +(1 row) + +SELECT SUBSTRING('string'::bytea FROM -10 FOR -2147483646) AS "error"; +ERROR: negative substring length not allowed +SELECT trim(E'\\000'::bytea from E'\\000Tom\\000'::bytea); + btrim +------- + Tom +(1 row) + +SELECT trim(leading E'\\000'::bytea from E'\\000Tom\\000'::bytea); + ltrim +--------- + Tom\000 +(1 row) + +SELECT trim(trailing E'\\000'::bytea from E'\\000Tom\\000'::bytea); + rtrim +--------- + \000Tom +(1 row) + +SELECT btrim(E'\\000trim\\000'::bytea, E'\\000'::bytea); + btrim +------- + trim +(1 row) + +SELECT btrim(''::bytea, E'\\000'::bytea); + btrim +------- + +(1 row) + +SELECT btrim(E'\\000trim\\000'::bytea, ''::bytea); + btrim +-------------- + \000trim\000 +(1 row) + +SELECT encode(overlay(E'Th\\000omas'::bytea placing E'Th\\001omas'::bytea from 2),'escape'); + encode 
+------------- + TTh\x01omas +(1 row) + +SELECT encode(overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 8),'escape'); + encode +-------------------- + Th\000omas\x02\x03 +(1 row) + +SELECT encode(overlay(E'Th\\000omas'::bytea placing E'\\002\\003'::bytea from 5 for 3),'escape'); + encode +----------------- + Th\000o\x02\x03 +(1 row) + +SELECT bit_count('\x1234567890'::bytea); + bit_count +----------- + 15 +(1 row) + +SELECT unistr('\0064at\+0000610'); + unistr +-------- + data0 +(1 row) + +SELECT unistr('d\u0061t\U000000610'); + unistr +-------- + data0 +(1 row) + +SELECT unistr('a\\b'); + unistr +-------- + a\b +(1 row) + +-- errors: +SELECT unistr('wrong: \db99'); +ERROR: invalid Unicode surrogate pair +SELECT unistr('wrong: \db99\0061'); +ERROR: invalid Unicode surrogate pair +SELECT unistr('wrong: \+00db99\+000061'); +ERROR: invalid Unicode surrogate pair +SELECT unistr('wrong: \+2FFFFF'); +ERROR: invalid Unicode code point: 2FFFFF +SELECT unistr('wrong: \udb99\u0061'); +ERROR: invalid Unicode surrogate pair +SELECT unistr('wrong: \U0000db99\U00000061'); +ERROR: invalid Unicode surrogate pair +SELECT unistr('wrong: \U002FFFFF'); +ERROR: invalid Unicode code point: 2FFFFF +SELECT unistr('wrong: \xyz'); +ERROR: invalid Unicode escape +HINT: Unicode escapes must be \XXXX, \+XXXXXX, \uXXXX, or \UXXXXXXXX. diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/expected/subscription.out new file mode 100644 index 0000000..b15eddb --- /dev/null +++ b/src/test/regress/expected/subscription.out @@ -0,0 +1,480 @@ +-- +-- SUBSCRIPTION +-- +CREATE ROLE regress_subscription_user LOGIN SUPERUSER; +CREATE ROLE regress_subscription_user2; +CREATE ROLE regress_subscription_user3 IN ROLE pg_create_subscription; +CREATE ROLE regress_subscription_user_dummy LOGIN NOSUPERUSER; +SET SESSION AUTHORIZATION 'regress_subscription_user'; +-- fail - no publications +CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo'; +ERROR: syntax error at or near ";" +LINE 1: CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo'; + ^ +-- fail - no connection +CREATE SUBSCRIPTION regress_testsub PUBLICATION foo; +ERROR: syntax error at or near "PUBLICATION" +LINE 1: CREATE SUBSCRIPTION regress_testsub PUBLICATION foo; + ^ +-- fail - cannot do CREATE SUBSCRIPTION CREATE SLOT inside transaction block +BEGIN; +CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub WITH (create_slot); +ERROR: CREATE SUBSCRIPTION ... WITH (create_slot = true) cannot run inside a transaction block +COMMIT; +-- fail - invalid connection string +CREATE SUBSCRIPTION regress_testsub CONNECTION 'testconn' PUBLICATION testpub; +ERROR: invalid connection string syntax: missing "=" after "testconn" in connection info string + +-- fail - duplicate publications +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo, testpub, foo WITH (connect = false); +ERROR: publication name "foo" used more than once +-- ok +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. 
+COMMENT ON SUBSCRIPTION regress_testsub IS 'test subscription'; +SELECT obj_description(s.oid, 'pg_subscription') FROM pg_subscription s; + obj_description +------------------- + test subscription +(1 row) + +-- Check if the subscription stats are created and stats_reset is updated +-- by pg_stat_reset_subscription_stats(). +SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub'; + subname | stats_reset_is_null +-----------------+--------------------- + regress_testsub | t +(1 row) + +SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub'; + pg_stat_reset_subscription_stats +---------------------------------- + +(1 row) + +SELECT subname, stats_reset IS NULL stats_reset_is_null FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub'; + subname | stats_reset_is_null +-----------------+--------------------- + regress_testsub | f +(1 row) + +-- Reset the stats again and check if the new reset_stats is updated. +SELECT stats_reset as prev_stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub' \gset +SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub'; + pg_stat_reset_subscription_stats +---------------------------------- + +(1 row) + +SELECT :'prev_stats_reset' < stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub'; + ?column? +---------- + t +(1 row) + +-- fail - name already exists +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false); +ERROR: subscription "regress_testsub" already exists +-- fail - must be superuser +SET SESSION AUTHORIZATION 'regress_subscription_user2'; +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION foo WITH (connect = false); +ERROR: permission denied to create subscription +DETAIL: Only roles with privileges of the "pg_create_subscription" role may create subscriptions. 
+SET SESSION AUTHORIZATION 'regress_subscription_user'; +-- fail - invalid option combinations +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, copy_data = true); +ERROR: connect = false and copy_data = true are mutually exclusive options +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, enabled = true); +ERROR: connect = false and enabled = true are mutually exclusive options +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, create_slot = true); +ERROR: connect = false and create_slot = true are mutually exclusive options +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = true); +ERROR: slot_name = NONE and enabled = true are mutually exclusive options +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false, create_slot = true); +ERROR: slot_name = NONE and create_slot = true are mutually exclusive options +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE); +ERROR: subscription with slot_name = NONE must also set enabled = false +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, enabled = false); +ERROR: subscription with slot_name = NONE must also set create_slot = false +CREATE SUBSCRIPTION regress_testsub2 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, create_slot = false); +ERROR: subscription with slot_name = NONE must also set enabled = false +-- ok - with slot_name = NONE +CREATE SUBSCRIPTION regress_testsub3 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +-- fail +ALTER SUBSCRIPTION regress_testsub3 ENABLE; +ERROR: cannot enable subscription that does not have a slot name +ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION; +ERROR: ALTER SUBSCRIPTION ... REFRESH is not allowed for disabled subscriptions +-- fail - origin must be either none or any +CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = foo); +ERROR: unrecognized origin value: "foo" +-- now it works +CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = none); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +\dRs+ regress_testsub4 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Synchronous commit | Conninfo | Skip LSN +------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub4 | regress_subscription_user | f | {testpub} | f | off | d | f | none | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub4 SET (origin = any); +\dRs+ regress_testsub4 + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub4 | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +DROP SUBSCRIPTION regress_testsub3; +DROP SUBSCRIPTION regress_testsub4; +-- fail, connection string does not parse +CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'i_dont_exist=param' PUBLICATION testpub; +ERROR: invalid connection string syntax: invalid connection option "i_dont_exist" + +-- fail, connection string parses, but doesn't work (and does so without +-- connecting, so this is reliable and safe) +CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub; +ERROR: could not connect to the publisher: invalid port number: "-1" +-- fail - invalid connection string during ALTER +ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar'; +ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string + +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false); +ALTER SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist2'; +ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname'); +ALTER SUBSCRIPTION regress_testsub SET (password_required = false); +ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+------------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | f | t | off | dbname=regress_doesnotexist2 | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (password_required = true); +ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = false); +-- fail +ALTER SUBSCRIPTION regress_testsub SET (slot_name = ''); +ERROR: replication slot name "" is too short +-- fail +ALTER SUBSCRIPTION regress_doesnotexist CONNECTION 'dbname=regress_doesnotexist2'; +ERROR: subscription "regress_doesnotexist" does not exist +ALTER SUBSCRIPTION regress_testsub SET (create_slot = false); +ERROR: unrecognized subscription parameter: "create_slot" +-- ok +ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345'); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+------------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist2 | 0/12345 +(1 row) + +-- ok - with lsn = NONE +ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE); +-- fail +ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0'); +ERROR: invalid WAL location (LSN): 0/0 +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+------------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist2 | 0/0 +(1 row) + +BEGIN; +ALTER SUBSCRIPTION regress_testsub ENABLE; +\dRs + List of subscriptions + Name | Owner | Enabled | Publication +-----------------+---------------------------+---------+--------------------- + regress_testsub | regress_subscription_user | t | {testpub2,testpub3} +(1 row) + +ALTER SUBSCRIPTION regress_testsub DISABLE; +\dRs + List of subscriptions + Name | Owner | Enabled | Publication +-----------------+---------------------------+---------+--------------------- + regress_testsub | regress_subscription_user | f | {testpub2,testpub3} +(1 row) + +COMMIT; +-- fail - must be owner of subscription +SET ROLE regress_subscription_user_dummy; +ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_dummy; +ERROR: must be owner of subscription regress_testsub +RESET ROLE; +ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_foo; +ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = local); +ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar); +ERROR: invalid value for parameter "synchronous_commit": "foobar" +HINT: Available values: local, remote_write, remote_apply, on, off. +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +---------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+------------------------------+---------- + regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | off | d | f | any | t | f | local | dbname=regress_doesnotexist2 | 0/0 +(1 row) + +-- rename back to keep the rest simple +ALTER SUBSCRIPTION regress_testsub_foo RENAME TO regress_testsub; +-- ok, we're a superuser +ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user2; +-- fail - cannot do DROP SUBSCRIPTION inside transaction block with slot name +BEGIN; +DROP SUBSCRIPTION regress_testsub; +ERROR: DROP SUBSCRIPTION cannot run inside a transaction block +COMMIT; +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +-- now it works +BEGIN; +DROP SUBSCRIPTION regress_testsub; +COMMIT; +DROP SUBSCRIPTION IF EXISTS regress_testsub; +NOTICE: subscription "regress_testsub" does not exist, skipping +DROP SUBSCRIPTION regress_testsub; -- fail +ERROR: subscription "regress_testsub" does not exist +-- fail - binary must be boolean +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = foo); +ERROR: binary requires a Boolean value +-- now it works +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = true); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. 
+\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | t | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (binary = false); +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +DROP SUBSCRIPTION regress_testsub; +-- fail - streaming must be boolean or 'parallel' +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = foo); +ERROR: streaming requires a Boolean value or "parallel" +-- now it works +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (streaming = false); +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? 
| Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +-- fail - publication already exists +ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub WITH (refresh = false); +ERROR: publication "testpub" is already in subscription "regress_testsub" +-- fail - publication used more than once +ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub1 WITH (refresh = false); +ERROR: publication name "testpub1" used more than once +-- ok - add two publications into subscription +ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false); +-- fail - publications already exist +ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false); +ERROR: publication "testpub1" is already in subscription "regress_testsub" +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +-- fail - publication used more than once +ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub1 WITH (refresh = false); +ERROR: publication name "testpub1" used more than once +-- fail - all publications are deleted +ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub, testpub1, testpub2 WITH (refresh = false); +ERROR: cannot drop all the publications from a subscription +-- fail - publication does not exist in subscription +ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub3 WITH (refresh = false); +ERROR: publication "testpub3" is not in subscription "regress_testsub" +-- ok - delete publications +ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +DROP SUBSCRIPTION regress_testsub; +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION mypub + WITH (connect = false, create_slot = false, copy_data = false); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. 
+ALTER SUBSCRIPTION regress_testsub ENABLE; +-- fail - ALTER SUBSCRIPTION with refresh is not allowed in a transaction +-- block or function +BEGIN; +ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true); +ERROR: ALTER SUBSCRIPTION with refresh cannot run inside a transaction block +END; +BEGIN; +ALTER SUBSCRIPTION regress_testsub REFRESH PUBLICATION; +ERROR: ALTER SUBSCRIPTION ... REFRESH cannot run inside a transaction block +END; +CREATE FUNCTION func() RETURNS VOID AS +$$ ALTER SUBSCRIPTION regress_testsub SET PUBLICATION mypub WITH (refresh = true) $$ LANGUAGE SQL; +SELECT func(); +ERROR: ALTER SUBSCRIPTION with refresh cannot be executed from a function +CONTEXT: SQL function "func" statement 1 +ALTER SUBSCRIPTION regress_testsub DISABLE; +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; +DROP FUNCTION func; +-- fail - two_phase must be boolean +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = foo); +ERROR: two_phase requires a Boolean value +-- now it works +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = true); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | p | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +--fail - alter of two_phase option not supported. +ALTER SUBSCRIPTION regress_testsub SET (two_phase = false); +ERROR: unrecognized subscription parameter: "two_phase" +-- but can alter streaming when two_phase enabled +ALTER SUBSCRIPTION regress_testsub SET (streaming = true); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; +-- two_phase and streaming are compatible. +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true, two_phase = true); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. 
+\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; +-- fail - disable_on_error must be boolean +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = foo); +ERROR: disable_on_error requires a Boolean value +-- now it works +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = false); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true); +\dRs+ + List of subscriptions + Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Synchronous commit | Conninfo | Skip LSN +-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+--------------------+-----------------------------+---------- + regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | t | any | t | f | off | dbname=regress_doesnotexist | 0/0 +(1 row) + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; +-- let's do some tests with pg_create_subscription rather than superuser +SET SESSION AUTHORIZATION regress_subscription_user3; +-- fail, not enough privileges +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false); +ERROR: permission denied for database regression +-- fail, must specify password +RESET SESSION AUTHORIZATION; +GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3; +SET SESSION AUTHORIZATION regress_subscription_user3; +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false); +ERROR: password is required +DETAIL: Non-superusers must provide a password in the connection string. 
+-- fail, can't set password_required=false +RESET SESSION AUTHORIZATION; +GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3; +SET SESSION AUTHORIZATION regress_subscription_user3; +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, password_required = false); +ERROR: password_required=false is superuser-only +HINT: Subscriptions with the password_required option set to false may only be created or modified by the superuser. +-- ok +RESET SESSION AUTHORIZATION; +GRANT CREATE ON DATABASE REGRESSION TO regress_subscription_user3; +SET SESSION AUTHORIZATION regress_subscription_user3; +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist password=regress_fakepassword' PUBLICATION testpub WITH (connect = false); +WARNING: subscription was created, but is not connected +HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription. +-- we cannot give the subscription away to some random user +ALTER SUBSCRIPTION regress_testsub OWNER TO regress_subscription_user; +ERROR: must be able to SET ROLE "regress_subscription_user" +-- but we can rename the subscription we just created +ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2; +-- ok, even after losing pg_create_subscription we can still rename it +RESET SESSION AUTHORIZATION; +REVOKE pg_create_subscription FROM regress_subscription_user3; +SET SESSION AUTHORIZATION regress_subscription_user3; +ALTER SUBSCRIPTION regress_testsub2 RENAME TO regress_testsub; +-- fail, after losing CREATE on the database we can't rename it any more +RESET SESSION AUTHORIZATION; +REVOKE CREATE ON DATABASE REGRESSION FROM regress_subscription_user3; +SET SESSION AUTHORIZATION regress_subscription_user3; +ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub2; +ERROR: permission denied for database regression +-- ok, owning it is enough for this stuff +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; +RESET SESSION AUTHORIZATION; +DROP ROLE regress_subscription_user; +DROP ROLE regress_subscription_user2; +DROP ROLE regress_subscription_user3; +DROP ROLE regress_subscription_user_dummy; diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out new file mode 100644 index 0000000..22af8fa --- /dev/null +++ b/src/test/regress/expected/subselect.out @@ -0,0 +1,1928 @@ +-- +-- SUBSELECT +-- +SELECT 1 AS one WHERE 1 IN (SELECT 1); + one +----- + 1 +(1 row) + +SELECT 1 AS zero WHERE 1 NOT IN (SELECT 1); + zero +------ +(0 rows) + +SELECT 1 AS zero WHERE 1 IN (SELECT 2); + zero +------ +(0 rows) + +-- Check grammar's handling of extra parens in assorted contexts +SELECT * FROM (SELECT 1 AS x) ss; + x +--- + 1 +(1 row) + +SELECT * FROM ((SELECT 1 AS x)) ss; + x +--- + 1 +(1 row) + +SELECT * FROM ((SELECT 1 AS x)), ((SELECT * FROM ((SELECT 2 AS y)))); + x | y +---+--- + 1 | 2 +(1 row) + +(SELECT 2) UNION SELECT 2; + ?column? +---------- + 2 +(1 row) + +((SELECT 2)) UNION SELECT 2; + ?column? +---------- + 2 +(1 row) + +SELECT ((SELECT 2) UNION SELECT 2); + ?column? +---------- + 2 +(1 row) + +SELECT (((SELECT 2)) UNION SELECT 2); + ?column? 
+---------- + 2 +(1 row) + +SELECT (SELECT ARRAY[1,2,3])[1]; + array +------- + 1 +(1 row) + +SELECT ((SELECT ARRAY[1,2,3]))[2]; + array +------- + 2 +(1 row) + +SELECT (((SELECT ARRAY[1,2,3])))[3]; + array +------- + 3 +(1 row) + +-- Set up some simple test tables +CREATE TABLE SUBSELECT_TBL ( + f1 integer, + f2 integer, + f3 float +); +INSERT INTO SUBSELECT_TBL VALUES (1, 2, 3); +INSERT INTO SUBSELECT_TBL VALUES (2, 3, 4); +INSERT INTO SUBSELECT_TBL VALUES (3, 4, 5); +INSERT INTO SUBSELECT_TBL VALUES (1, 1, 1); +INSERT INTO SUBSELECT_TBL VALUES (2, 2, 2); +INSERT INTO SUBSELECT_TBL VALUES (3, 3, 3); +INSERT INTO SUBSELECT_TBL VALUES (6, 7, 8); +INSERT INTO SUBSELECT_TBL VALUES (8, 9, NULL); +SELECT * FROM SUBSELECT_TBL; + f1 | f2 | f3 +----+----+---- + 1 | 2 | 3 + 2 | 3 | 4 + 3 | 4 | 5 + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 + 6 | 7 | 8 + 8 | 9 | +(8 rows) + +-- Uncorrelated subselects +SELECT f1 AS "Constant Select" FROM SUBSELECT_TBL + WHERE f1 IN (SELECT 1); + Constant Select +----------------- + 1 + 1 +(2 rows) + +SELECT f1 AS "Uncorrelated Field" FROM SUBSELECT_TBL + WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL); + Uncorrelated Field +-------------------- + 1 + 2 + 3 + 1 + 2 + 3 +(6 rows) + +SELECT f1 AS "Uncorrelated Field" FROM SUBSELECT_TBL + WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL WHERE + f2 IN (SELECT f1 FROM SUBSELECT_TBL)); + Uncorrelated Field +-------------------- + 1 + 2 + 3 + 1 + 2 + 3 +(6 rows) + +SELECT f1, f2 + FROM SUBSELECT_TBL + WHERE (f1, f2) NOT IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL + WHERE f3 IS NOT NULL); + f1 | f2 +----+---- + 1 | 2 + 6 | 7 + 8 | 9 +(3 rows) + +-- Correlated subselects +SELECT f1 AS "Correlated Field", f2 AS "Second Field" + FROM SUBSELECT_TBL upper + WHERE f1 IN (SELECT f2 FROM SUBSELECT_TBL WHERE f1 = upper.f1); + Correlated Field | Second Field +------------------+-------------- + 1 | 2 + 2 | 3 + 3 | 4 + 1 | 1 + 2 | 2 + 3 | 3 +(6 rows) + +SELECT f1 AS "Correlated Field", f3 AS "Second Field" + FROM SUBSELECT_TBL upper + WHERE f1 IN + (SELECT f2 FROM SUBSELECT_TBL WHERE CAST(upper.f2 AS float) = f3); + Correlated Field | Second Field +------------------+-------------- + 2 | 4 + 3 | 5 + 1 | 1 + 2 | 2 + 3 | 3 +(5 rows) + +SELECT f1 AS "Correlated Field", f3 AS "Second Field" + FROM SUBSELECT_TBL upper + WHERE f3 IN (SELECT upper.f1 + f2 FROM SUBSELECT_TBL + WHERE f2 = CAST(f3 AS integer)); + Correlated Field | Second Field +------------------+-------------- + 1 | 3 + 2 | 4 + 3 | 5 + 6 | 8 +(4 rows) + +SELECT f1 AS "Correlated Field" + FROM SUBSELECT_TBL + WHERE (f1, f2) IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL + WHERE f3 IS NOT NULL); + Correlated Field +------------------ + 2 + 3 + 1 + 2 + 3 +(5 rows) + +-- Subselects without aliases +SELECT count FROM (SELECT COUNT(DISTINCT name) FROM road); + count +------- + 2911 +(1 row) + +SELECT COUNT(*) FROM (SELECT DISTINCT name FROM road); + count +------- + 2911 +(1 row) + +SELECT * FROM (SELECT * FROM int4_tbl), (VALUES (123456)) WHERE f1 = column1; + f1 | column1 +--------+--------- + 123456 | 123456 +(1 row) + +CREATE VIEW view_unnamed_ss AS +SELECT * FROM (SELECT * FROM (SELECT abs(f1) AS a1 FROM int4_tbl)), + (SELECT * FROM int8_tbl) + WHERE a1 < 10 AND q1 > a1 ORDER BY q1, q2; +SELECT * FROM view_unnamed_ss; + a1 | q1 | q2 +----+------------------+------------------- + 0 | 123 | 456 + 0 | 123 | 4567890123456789 + 0 | 4567890123456789 | -4567890123456789 + 0 | 4567890123456789 | 123 + 0 | 4567890123456789 | 4567890123456789 +(5 rows) + +\sv view_unnamed_ss +CREATE OR REPLACE 
VIEW public.view_unnamed_ss AS + SELECT unnamed_subquery.a1, + unnamed_subquery_1.q1, + unnamed_subquery_1.q2 + FROM ( SELECT unnamed_subquery_2.a1 + FROM ( SELECT abs(int4_tbl.f1) AS a1 + FROM int4_tbl) unnamed_subquery_2) unnamed_subquery, + ( SELECT int8_tbl.q1, + int8_tbl.q2 + FROM int8_tbl) unnamed_subquery_1 + WHERE unnamed_subquery.a1 < 10 AND unnamed_subquery_1.q1 > unnamed_subquery.a1 + ORDER BY unnamed_subquery_1.q1, unnamed_subquery_1.q2 +DROP VIEW view_unnamed_ss; +-- Test matching of locking clause to correct alias +CREATE VIEW view_unnamed_ss_locking AS +SELECT * FROM (SELECT * FROM int4_tbl), int8_tbl AS unnamed_subquery + WHERE f1 = q1 + FOR UPDATE OF unnamed_subquery; +\sv view_unnamed_ss_locking +CREATE OR REPLACE VIEW public.view_unnamed_ss_locking AS + SELECT unnamed_subquery.f1, + unnamed_subquery_1.q1, + unnamed_subquery_1.q2 + FROM ( SELECT int4_tbl.f1 + FROM int4_tbl) unnamed_subquery, + int8_tbl unnamed_subquery_1 + WHERE unnamed_subquery.f1 = unnamed_subquery_1.q1 + FOR UPDATE OF unnamed_subquery_1 +DROP VIEW view_unnamed_ss_locking; +-- +-- Use some existing tables in the regression test +-- +SELECT ss.f1 AS "Correlated Field", ss.f3 AS "Second Field" + FROM SUBSELECT_TBL ss + WHERE f1 NOT IN (SELECT f1+1 FROM INT4_TBL + WHERE f1 != ss.f1 AND f1 < 2147483647); + Correlated Field | Second Field +------------------+-------------- + 2 | 4 + 3 | 5 + 2 | 2 + 3 | 3 + 6 | 8 + 8 | +(6 rows) + +select q1, float8(count(*)) / (select count(*) from int8_tbl) +from int8_tbl group by q1 order by q1; + q1 | ?column? +------------------+---------- + 123 | 0.4 + 4567890123456789 | 0.6 +(2 rows) + +-- Unspecified-type literals in output columns should resolve as text +SELECT *, pg_typeof(f1) FROM + (SELECT 'foo' AS f1 FROM generate_series(1,3)) ss ORDER BY 1; + f1 | pg_typeof +-----+----------- + foo | text + foo | text + foo | text +(3 rows) + +-- ... unless there's context to suggest differently +explain (verbose, costs off) select '42' union all select '43'; + QUERY PLAN +---------------------------- + Append + -> Result + Output: '42'::text + -> Result + Output: '43'::text +(5 rows) + +explain (verbose, costs off) select '42' union all select 43; + QUERY PLAN +-------------------- + Append + -> Result + Output: 42 + -> Result + Output: 43 +(5 rows) + +-- check materialization of an initplan reference (bug #14524) +explain (verbose, costs off) +select 1 = all (select (select 1)); + QUERY PLAN +----------------------------------- + Result + Output: (SubPlan 2) + SubPlan 2 + -> Materialize + Output: ($0) + InitPlan 1 (returns $0) + -> Result + Output: 1 + -> Result + Output: $0 +(10 rows) + +select 1 = all (select (select 1)); + ?column? 
+---------- + t +(1 row) + +-- +-- Check EXISTS simplification with LIMIT +-- +explain (costs off) +select * from int4_tbl o where exists + (select 1 from int4_tbl i where i.f1=o.f1 limit null); + QUERY PLAN +------------------------------------ + Hash Semi Join + Hash Cond: (o.f1 = i.f1) + -> Seq Scan on int4_tbl o + -> Hash + -> Seq Scan on int4_tbl i +(5 rows) + +explain (costs off) +select * from int4_tbl o where not exists + (select 1 from int4_tbl i where i.f1=o.f1 limit 1); + QUERY PLAN +------------------------------------ + Hash Anti Join + Hash Cond: (o.f1 = i.f1) + -> Seq Scan on int4_tbl o + -> Hash + -> Seq Scan on int4_tbl i +(5 rows) + +explain (costs off) +select * from int4_tbl o where exists + (select 1 from int4_tbl i where i.f1=o.f1 limit 0); + QUERY PLAN +-------------------------------------- + Seq Scan on int4_tbl o + Filter: (SubPlan 1) + SubPlan 1 + -> Limit + -> Seq Scan on int4_tbl i + Filter: (f1 = o.f1) +(6 rows) + +-- +-- Test cases to catch unpleasant interactions between IN-join processing +-- and subquery pullup. +-- +select count(*) from + (select 1 from tenk1 a + where unique1 IN (select hundred from tenk1 b)) ss; + count +------- + 100 +(1 row) + +select count(distinct ss.ten) from + (select ten from tenk1 a + where unique1 IN (select hundred from tenk1 b)) ss; + count +------- + 10 +(1 row) + +select count(*) from + (select 1 from tenk1 a + where unique1 IN (select distinct hundred from tenk1 b)) ss; + count +------- + 100 +(1 row) + +select count(distinct ss.ten) from + (select ten from tenk1 a + where unique1 IN (select distinct hundred from tenk1 b)) ss; + count +------- + 10 +(1 row) + +-- +-- Test cases to check for overenthusiastic optimization of +-- "IN (SELECT DISTINCT ...)" and related cases. Per example from +-- Luca Pireddu and Michael Fuhr. +-- +CREATE TEMP TABLE foo (id integer); +CREATE TEMP TABLE bar (id1 integer, id2 integer); +INSERT INTO foo VALUES (1); +INSERT INTO bar VALUES (1, 1); +INSERT INTO bar VALUES (2, 2); +INSERT INTO bar VALUES (3, 1); +-- These cases require an extra level of distinct-ing above subquery s +SELECT * FROM foo WHERE id IN + (SELECT id2 FROM (SELECT DISTINCT id1, id2 FROM bar) AS s); + id +---- + 1 +(1 row) + +SELECT * FROM foo WHERE id IN + (SELECT id2 FROM (SELECT id1,id2 FROM bar GROUP BY id1,id2) AS s); + id +---- + 1 +(1 row) + +SELECT * FROM foo WHERE id IN + (SELECT id2 FROM (SELECT id1, id2 FROM bar UNION + SELECT id1, id2 FROM bar) AS s); + id +---- + 1 +(1 row) + +-- These cases do not +SELECT * FROM foo WHERE id IN + (SELECT id2 FROM (SELECT DISTINCT ON (id2) id1, id2 FROM bar) AS s); + id +---- + 1 +(1 row) + +SELECT * FROM foo WHERE id IN + (SELECT id2 FROM (SELECT id2 FROM bar GROUP BY id2) AS s); + id +---- + 1 +(1 row) + +SELECT * FROM foo WHERE id IN + (SELECT id2 FROM (SELECT id2 FROM bar UNION + SELECT id2 FROM bar) AS s); + id +---- + 1 +(1 row) + +-- +-- Test case to catch problems with multiply nested sub-SELECTs not getting +-- recalculated properly. Per bug report from Didier Moens. 
+-- +CREATE TABLE orderstest ( + approver_ref integer, + po_ref integer, + ordercanceled boolean +); +INSERT INTO orderstest VALUES (1, 1, false); +INSERT INTO orderstest VALUES (66, 5, false); +INSERT INTO orderstest VALUES (66, 6, false); +INSERT INTO orderstest VALUES (66, 7, false); +INSERT INTO orderstest VALUES (66, 1, true); +INSERT INTO orderstest VALUES (66, 8, false); +INSERT INTO orderstest VALUES (66, 1, false); +INSERT INTO orderstest VALUES (77, 1, false); +INSERT INTO orderstest VALUES (1, 1, false); +INSERT INTO orderstest VALUES (66, 1, false); +INSERT INTO orderstest VALUES (1, 1, false); +CREATE VIEW orders_view AS +SELECT *, +(SELECT CASE + WHEN ord.approver_ref=1 THEN '---' ELSE 'Approved' + END) AS "Approved", +(SELECT CASE + WHEN ord.ordercanceled + THEN 'Canceled' + ELSE + (SELECT CASE + WHEN ord.po_ref=1 + THEN + (SELECT CASE + WHEN ord.approver_ref=1 + THEN '---' + ELSE 'Approved' + END) + ELSE 'PO' + END) +END) AS "Status", +(CASE + WHEN ord.ordercanceled + THEN 'Canceled' + ELSE + (CASE + WHEN ord.po_ref=1 + THEN + (CASE + WHEN ord.approver_ref=1 + THEN '---' + ELSE 'Approved' + END) + ELSE 'PO' + END) +END) AS "Status_OK" +FROM orderstest ord; +SELECT * FROM orders_view; + approver_ref | po_ref | ordercanceled | Approved | Status | Status_OK +--------------+--------+---------------+----------+----------+----------- + 1 | 1 | f | --- | --- | --- + 66 | 5 | f | Approved | PO | PO + 66 | 6 | f | Approved | PO | PO + 66 | 7 | f | Approved | PO | PO + 66 | 1 | t | Approved | Canceled | Canceled + 66 | 8 | f | Approved | PO | PO + 66 | 1 | f | Approved | Approved | Approved + 77 | 1 | f | Approved | Approved | Approved + 1 | 1 | f | --- | --- | --- + 66 | 1 | f | Approved | Approved | Approved + 1 | 1 | f | --- | --- | --- +(11 rows) + +DROP TABLE orderstest cascade; +NOTICE: drop cascades to view orders_view +-- +-- Test cases to catch situations where rule rewriter fails to propagate +-- hasSubLinks flag correctly. Per example from Kyle Bateman. +-- +create temp table parts ( + partnum text, + cost float8 +); +create temp table shipped ( + ttype char(2), + ordnum int4, + partnum text, + value float8 +); +create temp view shipped_view as + select * from shipped where ttype = 'wt'; +create rule shipped_view_insert as on insert to shipped_view do instead + insert into shipped values('wt', new.ordnum, new.partnum, new.value); +insert into parts (partnum, cost) values (1, 1234.56); +insert into shipped_view (ordnum, partnum, value) + values (0, 1, (select cost from parts where partnum = '1')); +select * from shipped_view; + ttype | ordnum | partnum | value +-------+--------+---------+--------- + wt | 0 | 1 | 1234.56 +(1 row) + +create rule shipped_view_update as on update to shipped_view do instead + update shipped set partnum = new.partnum, value = new.value + where ttype = new.ttype and ordnum = new.ordnum; +update shipped_view set value = 11 + from int4_tbl a join int4_tbl b + on (a.f1 = (select f1 from int4_tbl c where c.f1=b.f1)) + where ordnum = a.f1; +select * from shipped_view; + ttype | ordnum | partnum | value +-------+--------+---------+------- + wt | 0 | 1 | 11 +(1 row) + +select f1, ss1 as relabel from + (select *, (select sum(f1) from int4_tbl b where f1 >= a.f1) as ss1 + from int4_tbl a) ss; + f1 | relabel +-------------+------------ + 0 | 2147607103 + 123456 | 2147607103 + -123456 | 2147483647 + 2147483647 | 2147483647 + -2147483647 | 0 +(5 rows) + +-- +-- Test cases involving PARAM_EXEC parameters and min/max index optimizations. 
+-- Per bug report from David Sanchez i Gregori. +-- +select * from ( + select max(unique1) from tenk1 as a + where exists (select 1 from tenk1 as b where b.thousand = a.unique2) +) ss; + max +------ + 9997 +(1 row) + +select * from ( + select min(unique1) from tenk1 as a + where not exists (select 1 from tenk1 as b where b.unique2 = 10000) +) ss; + min +----- + 0 +(1 row) + +-- +-- Test that an IN implemented using a UniquePath does unique-ification +-- with the right semantics, as per bug #4113. (Unfortunately we have +-- no simple way to ensure that this test case actually chooses that type +-- of plan, but it does in releases 7.4-8.3. Note that an ordering difference +-- here might mean that some other plan type is being used, rendering the test +-- pointless.) +-- +create temp table numeric_table (num_col numeric); +insert into numeric_table values (1), (1.000000000000000000001), (2), (3); +create temp table float_table (float_col float8); +insert into float_table values (1), (2), (3); +select * from float_table + where float_col in (select num_col from numeric_table); + float_col +----------- + 1 + 2 + 3 +(3 rows) + +select * from numeric_table + where num_col in (select float_col from float_table); + num_col +------------------------- + 1 + 1.000000000000000000001 + 2 + 3 +(4 rows) + +-- +-- Test case for bug #4290: bogus calculation of subplan param sets +-- +create temp table ta (id int primary key, val int); +insert into ta values(1,1); +insert into ta values(2,2); +create temp table tb (id int primary key, aval int); +insert into tb values(1,1); +insert into tb values(2,1); +insert into tb values(3,2); +insert into tb values(4,2); +create temp table tc (id int primary key, aid int); +insert into tc values(1,1); +insert into tc values(2,2); +select + ( select min(tb.id) from tb + where tb.aval = (select ta.val from ta where ta.id = tc.aid) ) as min_tb_id +from tc; + min_tb_id +----------- + 1 + 3 +(2 rows) + +-- +-- Test case for 8.3 "failed to locate grouping columns" bug +-- +create temp table t1 (f1 numeric(14,0), f2 varchar(30)); +select * from + (select distinct f1, f2, (select f2 from t1 x where x.f1 = up.f1) as fs + from t1 up) ss +group by f1,f2,fs; + f1 | f2 | fs +----+----+---- +(0 rows) + +-- +-- Test case for bug #5514 (mishandling of whole-row Vars in subselects) +-- +create temp table table_a(id integer); +insert into table_a values (42); +create temp view view_a as select * from table_a; +select view_a from view_a; + view_a +-------- + (42) +(1 row) + +select (select view_a) from view_a; + view_a +-------- + (42) +(1 row) + +select (select (select view_a)) from view_a; + view_a +-------- + (42) +(1 row) + +select (select (a.*)::text) from view_a a; + a +------ + (42) +(1 row) + +-- +-- Check that whole-row Vars reading the result of a subselect don't include +-- any junk columns therein +-- +select q from (select max(f1) from int4_tbl group by f1 order by f1) q; + q +--------------- + (-2147483647) + (-123456) + (0) + (123456) + (2147483647) +(5 rows) + +with q as (select max(f1) from int4_tbl group by f1 order by f1) + select q from q; + q +--------------- + (-2147483647) + (-123456) + (0) + (123456) + (2147483647) +(5 rows) + +-- +-- Test case for sublinks pulled up into joinaliasvars lists in an +-- inherited update/delete query +-- +begin; -- this shouldn't delete anything, but be safe +delete from road +where exists ( + select 1 + from + int4_tbl cross join + ( select f1, array(select q1 from int8_tbl) as arr + from text_tbl ) ss + where road.name = ss.f1 ); 
+rollback; +-- +-- Test case for sublinks pushed down into subselects via join alias expansion +-- +select + (select sq1) as qq1 +from + (select exists(select 1 from int4_tbl where f1 = q2) as sq1, 42 as dummy + from int8_tbl) sq0 + join + int4_tbl i4 on dummy = i4.f1; + qq1 +----- +(0 rows) + +-- +-- Test case for subselect within UPDATE of INSERT...ON CONFLICT DO UPDATE +-- +create temp table upsert(key int4 primary key, val text); +insert into upsert values(1, 'val') on conflict (key) do update set val = 'not seen'; +insert into upsert values(1, 'val') on conflict (key) do update set val = 'seen with subselect ' || (select f1 from int4_tbl where f1 != 0 limit 1)::text; +select * from upsert; + key | val +-----+---------------------------- + 1 | seen with subselect 123456 +(1 row) + +with aa as (select 'int4_tbl' u from int4_tbl limit 1) +insert into upsert values (1, 'x'), (999, 'y') +on conflict (key) do update set val = (select u from aa) +returning *; + key | val +-----+---------- + 1 | int4_tbl + 999 | y +(2 rows) + +-- +-- Test case for cross-type partial matching in hashed subplan (bug #7597) +-- +create temp table outer_7597 (f1 int4, f2 int4); +insert into outer_7597 values (0, 0); +insert into outer_7597 values (1, 0); +insert into outer_7597 values (0, null); +insert into outer_7597 values (1, null); +create temp table inner_7597(c1 int8, c2 int8); +insert into inner_7597 values(0, null); +select * from outer_7597 where (f1, f2) not in (select * from inner_7597); + f1 | f2 +----+---- + 1 | 0 + 1 | +(2 rows) + +-- +-- Similar test case using text that verifies that collation +-- information is passed through by execTuplesEqual() in nodeSubplan.c +-- (otherwise it would error in texteq()) +-- +create temp table outer_text (f1 text, f2 text); +insert into outer_text values ('a', 'a'); +insert into outer_text values ('b', 'a'); +insert into outer_text values ('a', null); +insert into outer_text values ('b', null); +create temp table inner_text (c1 text, c2 text); +insert into inner_text values ('a', null); +insert into inner_text values ('123', '456'); +select * from outer_text where (f1, f2) not in (select * from inner_text); + f1 | f2 +----+---- + b | a + b | +(2 rows) + +-- +-- Another test case for cross-type hashed subplans: comparison of +-- inner-side values must be done with appropriate operator +-- +explain (verbose, costs off) +select 'foo'::text in (select 'bar'::name union all select 'bar'::name); + QUERY PLAN +------------------------------------- + Result + Output: (hashed SubPlan 1) + SubPlan 1 + -> Append + -> Result + Output: 'bar'::name + -> Result + Output: 'bar'::name +(8 rows) + +select 'foo'::text in (select 'bar'::name union all select 'bar'::name); + ?column? +---------- + f +(1 row) + +-- +-- Test that we don't try to hash nested records (bug #17363) +-- (Hashing could be supported, but for now we don't) +-- +explain (verbose, costs off) +select row(row(row(1))) = any (select row(row(1))); + QUERY PLAN +------------------------------------------- + Result + Output: (SubPlan 1) + SubPlan 1 + -> Materialize + Output: '("(1)")'::record + -> Result + Output: '("(1)")'::record +(7 rows) + +select row(row(row(1))) = any (select row(row(1))); + ?column? +---------- + t +(1 row) + +-- +-- Test case for premature memory release during hashing of subplan output +-- +select '1'::text in (select '1'::name union all select '1'::name); + ?column? 
+---------- + t +(1 row) + +-- +-- Test that we don't try to use a hashed subplan if the simplified +-- testexpr isn't of the right shape +-- +-- this fails by default, of course +select * from int8_tbl where q1 in (select c1 from inner_text); +ERROR: operator does not exist: bigint = text +LINE 1: select * from int8_tbl where q1 in (select c1 from inner_tex... + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +begin; +-- make an operator to allow it to succeed +create function bogus_int8_text_eq(int8, text) returns boolean +language sql as 'select $1::text = $2'; +create operator = (procedure=bogus_int8_text_eq, leftarg=int8, rightarg=text); +explain (costs off) +select * from int8_tbl where q1 in (select c1 from inner_text); + QUERY PLAN +-------------------------------- + Seq Scan on int8_tbl + Filter: (hashed SubPlan 1) + SubPlan 1 + -> Seq Scan on inner_text +(4 rows) + +select * from int8_tbl where q1 in (select c1 from inner_text); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +-- inlining of this function results in unusual number of hash clauses, +-- which we can still cope with +create or replace function bogus_int8_text_eq(int8, text) returns boolean +language sql as 'select $1::text = $2 and $1::text = $2'; +explain (costs off) +select * from int8_tbl where q1 in (select c1 from inner_text); + QUERY PLAN +-------------------------------- + Seq Scan on int8_tbl + Filter: (hashed SubPlan 1) + SubPlan 1 + -> Seq Scan on inner_text +(4 rows) + +select * from int8_tbl where q1 in (select c1 from inner_text); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +-- inlining of this function causes LHS and RHS to be switched, +-- which we can't cope with, so hashing should be abandoned +create or replace function bogus_int8_text_eq(int8, text) returns boolean +language sql as 'select $2 = $1::text'; +explain (costs off) +select * from int8_tbl where q1 in (select c1 from inner_text); + QUERY PLAN +-------------------------------------- + Seq Scan on int8_tbl + Filter: (SubPlan 1) + SubPlan 1 + -> Materialize + -> Seq Scan on inner_text +(5 rows) + +select * from int8_tbl where q1 in (select c1 from inner_text); + q1 | q2 +-----+------------------ + 123 | 456 + 123 | 4567890123456789 +(2 rows) + +rollback; -- to get rid of the bogus operator +-- +-- Test resolution of hashed vs non-hashed implementation of EXISTS subplan +-- +explain (costs off) +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0); + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Seq Scan on tenk1 t + Filter: ((hashed SubPlan 2) OR (ten < 0)) + SubPlan 2 + -> Index Only Scan using tenk1_unique1 on tenk1 k +(5 rows) + +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0); + count +------- + 10000 +(1 row) + +explain (costs off) +select count(*) from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0) + and thousand = 1; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on tenk1 t + Recheck Cond: (thousand = 1) + Filter: ((SubPlan 1) OR (ten < 0)) + -> Bitmap Index Scan on tenk1_thous_tenthous + Index Cond: (thousand = 1) + SubPlan 1 + -> Index Only Scan using tenk1_unique1 on tenk1 k + Index Cond: (unique1 = t.unique2) +(9 rows) + +select count(*) 
from tenk1 t +where (exists(select 1 from tenk1 k where k.unique1 = t.unique2) or ten < 0) + and thousand = 1; + count +------- + 10 +(1 row) + +-- It's possible for the same EXISTS to get resolved both ways +create temp table exists_tbl (c1 int, c2 int, c3 int) partition by list (c1); +create temp table exists_tbl_null partition of exists_tbl for values in (null); +create temp table exists_tbl_def partition of exists_tbl default; +insert into exists_tbl select x, x/2, x+1 from generate_series(0,10) x; +analyze exists_tbl; +explain (costs off) +select * from exists_tbl t1 + where (exists(select 1 from exists_tbl t2 where t1.c1 = t2.c2) or c3 < 0); + QUERY PLAN +------------------------------------------------------ + Append + -> Seq Scan on exists_tbl_null t1_1 + Filter: ((SubPlan 1) OR (c3 < 0)) + SubPlan 1 + -> Append + -> Seq Scan on exists_tbl_null t2_1 + Filter: (t1_1.c1 = c2) + -> Seq Scan on exists_tbl_def t2_2 + Filter: (t1_1.c1 = c2) + -> Seq Scan on exists_tbl_def t1_2 + Filter: ((hashed SubPlan 2) OR (c3 < 0)) + SubPlan 2 + -> Append + -> Seq Scan on exists_tbl_null t2_4 + -> Seq Scan on exists_tbl_def t2_5 +(15 rows) + +select * from exists_tbl t1 + where (exists(select 1 from exists_tbl t2 where t1.c1 = t2.c2) or c3 < 0); + c1 | c2 | c3 +----+----+---- + 0 | 0 | 1 + 1 | 0 | 2 + 2 | 1 | 3 + 3 | 1 | 4 + 4 | 2 | 5 + 5 | 2 | 6 +(6 rows) + +-- +-- Test case for planner bug with nested EXISTS handling +-- +select a.thousand from tenk1 a, tenk1 b +where a.thousand = b.thousand + and exists ( select 1 from tenk1 c where b.hundred = c.hundred + and not exists ( select 1 from tenk1 d + where a.thousand = d.thousand ) ); + thousand +---------- +(0 rows) + +-- +-- Check that nested sub-selects are not pulled up if they contain volatiles +-- +explain (verbose, costs off) + select x, x from + (select (select now()) as x from (values(1),(2)) v(y)) ss; + QUERY PLAN +--------------------------- + Values Scan on "*VALUES*" + Output: $0, $1 + InitPlan 1 (returns $0) + -> Result + Output: now() + InitPlan 2 (returns $1) + -> Result + Output: now() +(8 rows) + +explain (verbose, costs off) + select x, x from + (select (select random()) as x from (values(1),(2)) v(y)) ss; + QUERY PLAN +---------------------------------- + Subquery Scan on ss + Output: ss.x, ss.x + -> Values Scan on "*VALUES*" + Output: $0 + InitPlan 1 (returns $0) + -> Result + Output: random() +(7 rows) + +explain (verbose, costs off) + select x, x from + (select (select now() where y=y) as x from (values(1),(2)) v(y)) ss; + QUERY PLAN +---------------------------------------------------------------------- + Values Scan on "*VALUES*" + Output: (SubPlan 1), (SubPlan 2) + SubPlan 1 + -> Result + Output: now() + One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1) + SubPlan 2 + -> Result + Output: now() + One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1) +(10 rows) + +explain (verbose, costs off) + select x, x from + (select (select random() where y=y) as x from (values(1),(2)) v(y)) ss; + QUERY PLAN +---------------------------------------------------------------------------- + Subquery Scan on ss + Output: ss.x, ss.x + -> Values Scan on "*VALUES*" + Output: (SubPlan 1) + SubPlan 1 + -> Result + Output: random() + One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1) +(8 rows) + +-- +-- Test rescan of a hashed subplan (the use of random() is to prevent the +-- sub-select from being pulled up, which would result in not hashing) +-- +explain (verbose, costs off) +select sum(ss.tst::int) from + onek o cross join 
lateral ( + select i.ten in (select f1 from int4_tbl where f1 <= o.hundred) as tst, + random() as r + from onek i where i.unique1 = o.unique1 ) ss +where o.ten = 0; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: sum((((hashed SubPlan 1)))::integer) + -> Nested Loop + Output: ((hashed SubPlan 1)) + -> Seq Scan on public.onek o + Output: o.unique1, o.unique2, o.two, o.four, o.ten, o.twenty, o.hundred, o.thousand, o.twothousand, o.fivethous, o.tenthous, o.odd, o.even, o.stringu1, o.stringu2, o.string4 + Filter: (o.ten = 0) + -> Index Scan using onek_unique1 on public.onek i + Output: (hashed SubPlan 1), random() + Index Cond: (i.unique1 = o.unique1) + SubPlan 1 + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 + Filter: (int4_tbl.f1 <= o.hundred) +(14 rows) + +select sum(ss.tst::int) from + onek o cross join lateral ( + select i.ten in (select f1 from int4_tbl where f1 <= o.hundred) as tst, + random() as r + from onek i where i.unique1 = o.unique1 ) ss +where o.ten = 0; + sum +----- + 100 +(1 row) + +-- +-- Test rescan of a SetOp node +-- +explain (costs off) +select count(*) from + onek o cross join lateral ( + select * from onek i1 where i1.unique1 = o.unique1 + except + select * from onek i2 where i2.unique1 = o.unique2 + ) ss +where o.ten = 1; + QUERY PLAN +------------------------------------------------------------------------------ + Aggregate + -> Nested Loop + -> Seq Scan on onek o + Filter: (ten = 1) + -> Subquery Scan on ss + -> HashSetOp Except + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Index Scan using onek_unique1 on onek i1 + Index Cond: (unique1 = o.unique1) + -> Subquery Scan on "*SELECT* 2" + -> Index Scan using onek_unique1 on onek i2 + Index Cond: (unique1 = o.unique2) +(13 rows) + +select count(*) from + onek o cross join lateral ( + select * from onek i1 where i1.unique1 = o.unique1 + except + select * from onek i2 where i2.unique1 = o.unique2 + ) ss +where o.ten = 1; + count +------- + 100 +(1 row) + +-- +-- Test rescan of a RecursiveUnion node +-- +explain (costs off) +select sum(o.four), sum(ss.a) from + onek o cross join lateral ( + with recursive x(a) as + (select o.four as a + union + select a + 1 from x + where a < 10) + select * from x + ) ss +where o.ten = 1; + QUERY PLAN +--------------------------------------------------------- + Aggregate + -> Nested Loop + -> Seq Scan on onek o + Filter: (ten = 1) + -> Memoize + Cache Key: o.four + Cache Mode: binary + -> CTE Scan on x + CTE x + -> Recursive Union + -> Result + -> WorkTable Scan on x x_1 + Filter: (a < 10) +(13 rows) + +select sum(o.four), sum(ss.a) from + onek o cross join lateral ( + with recursive x(a) as + (select o.four as a + union + select a + 1 from x + where a < 10) + select * from x + ) ss +where o.ten = 1; + sum | sum +------+------ + 1700 | 5350 +(1 row) + +-- +-- Check we don't misoptimize a NOT IN where the subquery returns no rows. 
+-- +create temp table notinouter (a int); +create temp table notininner (b int not null); +insert into notinouter values (null), (1); +select * from notinouter where a not in (select b from notininner); + a +--- + + 1 +(2 rows) + +-- +-- Check we behave sanely in corner case of empty SELECT list (bug #8648) +-- +create temp table nocolumns(); +select exists(select * from nocolumns); + exists +-------- + f +(1 row) + +-- +-- Check behavior with a SubPlan in VALUES (bug #14924) +-- +select val.x + from generate_series(1,10) as s(i), + lateral ( + values ((select s.i + 1)), (s.i + 101) + ) as val(x) +where s.i < 10 and (select val.x) < 110; + x +----- + 2 + 102 + 3 + 103 + 4 + 104 + 5 + 105 + 6 + 106 + 7 + 107 + 8 + 108 + 9 + 109 + 10 +(17 rows) + +-- another variant of that (bug #16213) +explain (verbose, costs off) +select * from +(values + (3 not in (select * from (values (1), (2)) ss1)), + (false) +) ss; + QUERY PLAN +---------------------------------------- + Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + SubPlan 1 + -> Values Scan on "*VALUES*_1" + Output: "*VALUES*_1".column1 +(5 rows) + +select * from +(values + (3 not in (select * from (values (1), (2)) ss1)), + (false) +) ss; + column1 +--------- + t + f +(2 rows) + +-- +-- Check sane behavior with nested IN SubLinks +-- +explain (verbose, costs off) +select * from int4_tbl where + (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in + (select ten from tenk1 b); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop Semi Join + Output: int4_tbl.f1 + Join Filter: (CASE WHEN (hashed SubPlan 1) THEN int4_tbl.f1 ELSE NULL::integer END = b.ten) + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 + -> Seq Scan on public.tenk1 b + Output: b.unique1, b.unique2, b.two, b.four, b.ten, b.twenty, b.hundred, b.thousand, b.twothousand, b.fivethous, b.tenthous, b.odd, b.even, b.stringu1, b.stringu2, b.string4 + SubPlan 1 + -> Index Only Scan using tenk1_unique1 on public.tenk1 a + Output: a.unique1 +(10 rows) + +select * from int4_tbl where + (case when f1 in (select unique1 from tenk1 a) then f1 else null end) in + (select ten from tenk1 b); + f1 +---- + 0 +(1 row) + +-- +-- Check for incorrect optimization when IN subquery contains a SRF +-- +explain (verbose, costs off) +select * from int4_tbl o where (f1, f1) in + (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1); + QUERY PLAN +------------------------------------------------------------------- + Nested Loop Semi Join + Output: o.f1 + Join Filter: (o.f1 = "ANY_subquery".f1) + -> Seq Scan on public.int4_tbl o + Output: o.f1 + -> Materialize + Output: "ANY_subquery".f1, "ANY_subquery".g + -> Subquery Scan on "ANY_subquery" + Output: "ANY_subquery".f1, "ANY_subquery".g + Filter: ("ANY_subquery".f1 = "ANY_subquery".g) + -> Result + Output: i.f1, ((generate_series(1, 50)) / 10) + -> ProjectSet + Output: generate_series(1, 50), i.f1 + -> HashAggregate + Output: i.f1 + Group Key: i.f1 + -> Seq Scan on public.int4_tbl i + Output: i.f1 +(19 rows) + +select * from int4_tbl o where (f1, f1) in + (select f1, generate_series(1,50) / 10 g from int4_tbl i group by f1); + f1 +---- + 0 +(1 row) + +-- +-- check for over-optimization of whole-row Var referencing an Append plan +-- +select (select q from + (select 1,2,3 where f1 > 0 + union all + select 4,5,6.0 where f1 <= 0 + ) q ) +from int4_tbl; + q 
+----------- + (4,5,6.0) + (1,2,3) + (4,5,6.0) + (1,2,3) + (4,5,6.0) +(5 rows) + +-- +-- Check for sane handling of a lateral reference in a subquery's quals +-- (most of the complication here is to prevent the test case from being +-- flattened too much) +-- +explain (verbose, costs off) +select * from + int4_tbl i4, + lateral ( + select i4.f1 > 1 as b, 1 as id + from (select random() order by 1) as t1 + union all + select true as b, 2 as id + ) as t2 +where b and f1 >= 0; + QUERY PLAN +-------------------------------------------- + Nested Loop + Output: i4.f1, ((i4.f1 > 1)), (1) + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 + Filter: (i4.f1 >= 0) + -> Append + -> Subquery Scan on t1 + Output: (i4.f1 > 1), 1 + Filter: (i4.f1 > 1) + -> Sort + Output: (random()) + Sort Key: (random()) + -> Result + Output: random() + -> Result + Output: true, 2 +(16 rows) + +select * from + int4_tbl i4, + lateral ( + select i4.f1 > 1 as b, 1 as id + from (select random() order by 1) as t1 + union all + select true as b, 2 as id + ) as t2 +where b and f1 >= 0; + f1 | b | id +------------+---+---- + 0 | t | 2 + 123456 | t | 1 + 123456 | t | 2 + 2147483647 | t | 1 + 2147483647 | t | 2 +(5 rows) + +-- +-- Check that volatile quals aren't pushed down past a DISTINCT: +-- nextval() should not be called more than the nominal number of times +-- +create temp sequence ts1; +select * from + (select distinct ten from tenk1) ss + where ten < 10 + nextval('ts1') + order by 1; + ten +----- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + +select nextval('ts1'); + nextval +--------- + 11 +(1 row) + +-- +-- Check that volatile quals aren't pushed down past a set-returning function; +-- while a nonvolatile qual can be, if it doesn't reference the SRF. +-- +create function tattle(x int, y int) returns bool +volatile language plpgsql as $$ +begin + raise notice 'x = %, y = %', x, y; + return x > y; +end$$; +explain (verbose, costs off) +select * from + (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss + where tattle(x, 8); + QUERY PLAN +---------------------------------------------------------- + Subquery Scan on ss + Output: ss.x, ss.u + Filter: tattle(ss.x, 8) + -> ProjectSet + Output: 9, unnest('{1,2,3,11,12,13}'::integer[]) + -> Result +(6 rows) + +select * from + (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss + where tattle(x, 8); +NOTICE: x = 9, y = 8 +NOTICE: x = 9, y = 8 +NOTICE: x = 9, y = 8 +NOTICE: x = 9, y = 8 +NOTICE: x = 9, y = 8 +NOTICE: x = 9, y = 8 + x | u +---+---- + 9 | 1 + 9 | 2 + 9 | 3 + 9 | 11 + 9 | 12 + 9 | 13 +(6 rows) + +-- if we pretend it's stable, we get different results: +alter function tattle(x int, y int) stable; +explain (verbose, costs off) +select * from + (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss + where tattle(x, 8); + QUERY PLAN +---------------------------------------------------- + ProjectSet + Output: 9, unnest('{1,2,3,11,12,13}'::integer[]) + -> Result + One-Time Filter: tattle(9, 8) +(4 rows) + +select * from + (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss + where tattle(x, 8); +NOTICE: x = 9, y = 8 + x | u +---+---- + 9 | 1 + 9 | 2 + 9 | 3 + 9 | 11 + 9 | 12 + 9 | 13 +(6 rows) + +-- although even a stable qual should not be pushed down if it references SRF +explain (verbose, costs off) +select * from + (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss + where tattle(x, u); + QUERY PLAN +---------------------------------------------------------- + Subquery Scan on ss + Output: ss.x, ss.u + Filter: tattle(ss.x, ss.u) + -> ProjectSet + 
Output: 9, unnest('{1,2,3,11,12,13}'::integer[]) + -> Result +(6 rows) + +select * from + (select 9 as x, unnest(array[1,2,3,11,12,13]) as u) ss + where tattle(x, u); +NOTICE: x = 9, y = 1 +NOTICE: x = 9, y = 2 +NOTICE: x = 9, y = 3 +NOTICE: x = 9, y = 11 +NOTICE: x = 9, y = 12 +NOTICE: x = 9, y = 13 + x | u +---+--- + 9 | 1 + 9 | 2 + 9 | 3 +(3 rows) + +drop function tattle(x int, y int); +-- +-- Test that LIMIT can be pushed to SORT through a subquery that just projects +-- columns. We check for that having happened by looking to see if EXPLAIN +-- ANALYZE shows that a top-N sort was used. We must suppress or filter away +-- all the non-invariant parts of the EXPLAIN ANALYZE output. +-- +create table sq_limit (pk int primary key, c1 int, c2 int); +insert into sq_limit values + (1, 1, 1), + (2, 2, 2), + (3, 3, 3), + (4, 4, 4), + (5, 1, 1), + (6, 2, 2), + (7, 3, 3), + (8, 4, 4); +create function explain_sq_limit() returns setof text language plpgsql as +$$ +declare ln text; +begin + for ln in + explain (analyze, summary off, timing off, costs off) + select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3 + loop + ln := regexp_replace(ln, 'Memory: \S*', 'Memory: xxx'); + return next ln; + end loop; +end; +$$; +select * from explain_sq_limit(); + explain_sq_limit +---------------------------------------------------------------- + Limit (actual rows=3 loops=1) + -> Subquery Scan on x (actual rows=3 loops=1) + -> Sort (actual rows=3 loops=1) + Sort Key: sq_limit.c1, sq_limit.pk + Sort Method: top-N heapsort Memory: xxx + -> Seq Scan on sq_limit (actual rows=8 loops=1) +(6 rows) + +select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3; + pk | c2 +----+---- + 1 | 1 + 5 | 1 + 2 | 2 +(3 rows) + +drop function explain_sq_limit(); +drop table sq_limit; +-- +-- Ensure that backward scan direction isn't propagated into +-- expression subqueries (bug #15336) +-- +begin; +declare c1 scroll cursor for + select * from generate_series(1,4) i + where i <> all (values (2),(3)); +move forward all in c1; +fetch backward all in c1; + i +--- + 4 + 1 +(2 rows) + +commit; +-- +-- Tests for CTE inlining behavior +-- +-- Basic subquery that can be inlined +explain (verbose, costs off) +with x as (select * from (select f1 from subselect_tbl) ss) +select * from x where f1 = 1; + QUERY PLAN +---------------------------------- + Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1 + Filter: (subselect_tbl.f1 = 1) +(3 rows) + +-- Explicitly request materialization +explain (verbose, costs off) +with x as materialized (select * from (select f1 from subselect_tbl) ss) +select * from x where f1 = 1; + QUERY PLAN +------------------------------------------ + CTE Scan on x + Output: x.f1 + Filter: (x.f1 = 1) + CTE x + -> Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1 +(6 rows) + +-- Stable functions are safe to inline +explain (verbose, costs off) +with x as (select * from (select f1, now() from subselect_tbl) ss) +select * from x where f1 = 1; + QUERY PLAN +----------------------------------- + Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1, now() + Filter: (subselect_tbl.f1 = 1) +(3 rows) + +-- Volatile functions prevent inlining +explain (verbose, costs off) +with x as (select * from (select f1, random() from subselect_tbl) ss) +select * from x where f1 = 1; + QUERY PLAN +---------------------------------------------- + CTE Scan on x + Output: x.f1, x.random + Filter: (x.f1 = 1) + CTE x + -> Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1, random() 
+(6 rows) + +-- SELECT FOR UPDATE cannot be inlined +explain (verbose, costs off) +with x as (select * from (select f1 from subselect_tbl for update) ss) +select * from x where f1 = 1; + QUERY PLAN +-------------------------------------------------------------------- + CTE Scan on x + Output: x.f1 + Filter: (x.f1 = 1) + CTE x + -> Subquery Scan on ss + Output: ss.f1 + -> LockRows + Output: subselect_tbl.f1, subselect_tbl.ctid + -> Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1, subselect_tbl.ctid +(10 rows) + +-- Multiply-referenced CTEs are inlined only when requested +explain (verbose, costs off) +with x as (select * from (select f1, now() as n from subselect_tbl) ss) +select * from x, x x2 where x.n = x2.n; + QUERY PLAN +------------------------------------------- + Merge Join + Output: x.f1, x.n, x2.f1, x2.n + Merge Cond: (x.n = x2.n) + CTE x + -> Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1, now() + -> Sort + Output: x.f1, x.n + Sort Key: x.n + -> CTE Scan on x + Output: x.f1, x.n + -> Sort + Output: x2.f1, x2.n + Sort Key: x2.n + -> CTE Scan on x x2 + Output: x2.f1, x2.n +(16 rows) + +explain (verbose, costs off) +with x as not materialized (select * from (select f1, now() as n from subselect_tbl) ss) +select * from x, x x2 where x.n = x2.n; + QUERY PLAN +---------------------------------------------------------------------------- + Result + Output: subselect_tbl.f1, now(), subselect_tbl_1.f1, now() + One-Time Filter: (now() = now()) + -> Nested Loop + Output: subselect_tbl.f1, subselect_tbl_1.f1 + -> Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1, subselect_tbl.f2, subselect_tbl.f3 + -> Materialize + Output: subselect_tbl_1.f1 + -> Seq Scan on public.subselect_tbl subselect_tbl_1 + Output: subselect_tbl_1.f1 +(11 rows) + +-- Multiply-referenced CTEs can't be inlined if they contain outer self-refs +explain (verbose, costs off) +with recursive x(a) as + ((values ('a'), ('b')) + union all + (with z as not materialized (select * from x) + select z.a || z1.a as a from z cross join z as z1 + where length(z.a || z1.a) < 5)) +select * from x; + QUERY PLAN +---------------------------------------------------------- + CTE Scan on x + Output: x.a + CTE x + -> Recursive Union + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + -> Nested Loop + Output: (z.a || z1.a) + Join Filter: (length((z.a || z1.a)) < 5) + CTE z + -> WorkTable Scan on x x_1 + Output: x_1.a + -> CTE Scan on z + Output: z.a + -> CTE Scan on z z1 + Output: z1.a +(16 rows) + +with recursive x(a) as + ((values ('a'), ('b')) + union all + (with z as not materialized (select * from x) + select z.a || z1.a as a from z cross join z as z1 + where length(z.a || z1.a) < 5)) +select * from x; + a +------ + a + b + aa + ab + ba + bb + aaaa + aaab + aaba + aabb + abaa + abab + abba + abbb + baaa + baab + baba + babb + bbaa + bbab + bbba + bbbb +(22 rows) + +explain (verbose, costs off) +with recursive x(a) as + ((values ('a'), ('b')) + union all + (with z as not materialized (select * from x) + select z.a || z.a as a from z + where length(z.a || z.a) < 5)) +select * from x; + QUERY PLAN +-------------------------------------------------------- + CTE Scan on x + Output: x.a + CTE x + -> Recursive Union + -> Values Scan on "*VALUES*" + Output: "*VALUES*".column1 + -> WorkTable Scan on x x_1 + Output: (x_1.a || x_1.a) + Filter: (length((x_1.a || x_1.a)) < 5) +(9 rows) + +with recursive x(a) as + ((values ('a'), ('b')) + union all + (with z as not materialized (select * from x) + select z.a 
|| z.a as a from z + where length(z.a || z.a) < 5)) +select * from x; + a +------ + a + b + aa + bb + aaaa + bbbb +(6 rows) + +-- Check handling of outer references +explain (verbose, costs off) +with x as (select * from int4_tbl) +select * from (with y as (select * from x) select * from y) ss; + QUERY PLAN +----------------------------- + Seq Scan on public.int4_tbl + Output: int4_tbl.f1 +(2 rows) + +explain (verbose, costs off) +with x as materialized (select * from int4_tbl) +select * from (with y as (select * from x) select * from y) ss; + QUERY PLAN +------------------------------------- + CTE Scan on x + Output: x.f1 + CTE x + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 +(5 rows) + +-- Ensure that we inline the currect CTE when there are +-- multiple CTEs with the same name +explain (verbose, costs off) +with x as (select 1 as y) +select * from (with x as (select 2 as y) select * from x) ss; + QUERY PLAN +------------- + Result + Output: 2 +(2 rows) + +-- Row marks are not pushed into CTEs +explain (verbose, costs off) +with x as (select * from subselect_tbl) +select * from x for update; + QUERY PLAN +---------------------------------------------------------------- + Seq Scan on public.subselect_tbl + Output: subselect_tbl.f1, subselect_tbl.f2, subselect_tbl.f3 +(2 rows) + diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out new file mode 100644 index 0000000..001c6e7 --- /dev/null +++ b/src/test/regress/expected/sysviews.out @@ -0,0 +1,169 @@ +-- +-- Test assorted system views +-- +-- This test is mainly meant to provide some code coverage for the +-- set-returning functions that underlie certain system views. +-- The output of most of these functions is very environment-dependent, +-- so our ability to test with fixed expected output is pretty limited; +-- but even a trivial check of count(*) will exercise the normal code path +-- through the SRF. +select count(*) >= 0 as ok from pg_available_extension_versions; + ok +---- + t +(1 row) + +select count(*) >= 0 as ok from pg_available_extensions; + ok +---- + t +(1 row) + +-- The entire output of pg_backend_memory_contexts is not stable, +-- we test only the existence and basic condition of TopMemoryContext. +select name, ident, parent, level, total_bytes >= free_bytes + from pg_backend_memory_contexts where level = 0; + name | ident | parent | level | ?column? +------------------+-------+--------+-------+---------- + TopMemoryContext | | | 0 | t +(1 row) + +-- At introduction, pg_config had 23 entries; it may grow +select count(*) > 20 as ok from pg_config; + ok +---- + t +(1 row) + +-- We expect no cursors in this test; see also portals.sql +select count(*) = 0 as ok from pg_cursors; + ok +---- + t +(1 row) + +select count(*) >= 0 as ok from pg_file_settings; + ok +---- + t +(1 row) + +-- There will surely be at least one rule, with no errors. +select count(*) > 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err + from pg_hba_file_rules; + ok | no_err +----+-------- + t | t +(1 row) + +-- There may be no rules, and there should be no errors. 
+select count(*) >= 0 as ok, count(*) FILTER (WHERE error IS NOT NULL) = 0 AS no_err + from pg_ident_file_mappings; + ok | no_err +----+-------- + t | t +(1 row) + +-- There will surely be at least one active lock +select count(*) > 0 as ok from pg_locks; + ok +---- + t +(1 row) + +-- We expect no prepared statements in this test; see also prepare.sql +select count(*) = 0 as ok from pg_prepared_statements; + ok +---- + t +(1 row) + +-- See also prepared_xacts.sql +select count(*) >= 0 as ok from pg_prepared_xacts; + ok +---- + t +(1 row) + +-- There will surely be at least one SLRU cache +select count(*) > 0 as ok from pg_stat_slru; + ok +---- + t +(1 row) + +-- There must be only one record +select count(*) = 1 as ok from pg_stat_wal; + ok +---- + t +(1 row) + +-- We expect no walreceiver running in this test +select count(*) = 0 as ok from pg_stat_wal_receiver; + ok +---- + t +(1 row) + +-- This is to record the prevailing planner enable_foo settings during +-- a regression test run. +select name, setting from pg_settings where name like 'enable%'; + name | setting +--------------------------------+--------- + enable_async_append | on + enable_bitmapscan | on + enable_gathermerge | on + enable_hashagg | on + enable_hashjoin | on + enable_incremental_sort | on + enable_indexonlyscan | on + enable_indexscan | on + enable_material | on + enable_memoize | on + enable_mergejoin | on + enable_nestloop | on + enable_parallel_append | on + enable_parallel_hash | on + enable_partition_pruning | on + enable_partitionwise_aggregate | off + enable_partitionwise_join | off + enable_presorted_aggregate | on + enable_seqscan | on + enable_sort | on + enable_tidscan | on +(21 rows) + +-- Test that the pg_timezone_names and pg_timezone_abbrevs views are +-- more-or-less working. We can't test their contents in any great detail +-- without the outputs changing anytime IANA updates the underlying data, +-- but it seems reasonable to expect at least one entry per major meridian. +-- (At the time of writing, the actual counts are around 38 because of +-- zones using fractional GMT offsets, so this is a pretty loose test.) 
+select count(distinct utc_offset) >= 24 as ok from pg_timezone_names; + ok +---- + t +(1 row) + +select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; + ok +---- + t +(1 row) + +-- Let's check the non-default timezone abbreviation sets, too +set timezone_abbreviations = 'Australia'; +select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; + ok +---- + t +(1 row) + +set timezone_abbreviations = 'India'; +select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; + ok +---- + t +(1 row) + diff --git a/src/test/regress/expected/tablesample.out b/src/test/regress/expected/tablesample.out new file mode 100644 index 0000000..9ff4611 --- /dev/null +++ b/src/test/regress/expected/tablesample.out @@ -0,0 +1,331 @@ +CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); +-- use fillfactor so we don't have to load too much data to get multiple pages +INSERT INTO test_tablesample + SELECT i, repeat(i::text, 200) FROM generate_series(0, 9) s(i); +SELECT t.id FROM test_tablesample AS t TABLESAMPLE SYSTEM (50) REPEATABLE (0); + id +---- + 3 + 4 + 5 + 6 + 7 + 8 +(6 rows) + +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (100.0/11) REPEATABLE (0); + id +---- +(0 rows) + +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); + id +---- + 3 + 4 + 5 + 6 + 7 + 8 +(6 rows) + +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (0); + id +---- + 4 + 5 + 6 + 7 + 8 +(5 rows) + +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (5.5) REPEATABLE (0); + id +---- + 7 +(1 row) + +-- 100% should give repeatable count results (ie, all rows) in any case +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100); + count +------- + 10 +(1 row) + +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100) REPEATABLE (1+2); + count +------- + 10 +(1 row) + +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100) REPEATABLE (0.4); + count +------- + 10 +(1 row) + +CREATE VIEW test_tablesample_v1 AS + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (10*2) REPEATABLE (2); +CREATE VIEW test_tablesample_v2 AS + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (99); +\d+ test_tablesample_v1 + View "public.test_tablesample_v1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + id | integer | | | | plain | +View definition: + SELECT id + FROM test_tablesample TABLESAMPLE system ((10 * 2)) REPEATABLE (2); + +\d+ test_tablesample_v2 + View "public.test_tablesample_v2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + id | integer | | | | plain | +View definition: + SELECT id + FROM test_tablesample TABLESAMPLE system (99); + +-- check a sampled query doesn't affect cursor in progress +BEGIN; +DECLARE tablesample_cur SCROLL CURSOR FOR + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); +FETCH FIRST FROM tablesample_cur; + id +---- + 3 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 4 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 5 +(1 row) + +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); + id +---- + 3 + 4 + 5 + 6 + 7 + 8 +(6 rows) + +FETCH NEXT FROM tablesample_cur; + id +---- + 6 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 7 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 8 +(1 row) + +FETCH FIRST FROM 
tablesample_cur; + id +---- + 3 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 4 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 5 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 6 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 7 +(1 row) + +FETCH NEXT FROM tablesample_cur; + id +---- + 8 +(1 row) + +CLOSE tablesample_cur; +END; +EXPLAIN (COSTS OFF) + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (2); + QUERY PLAN +-------------------------------------------------------------------- + Sample Scan on test_tablesample + Sampling: system ('50'::real) REPEATABLE ('2'::double precision) +(2 rows) + +EXPLAIN (COSTS OFF) + SELECT * FROM test_tablesample_v1; + QUERY PLAN +-------------------------------------------------------------------- + Sample Scan on test_tablesample + Sampling: system ('20'::real) REPEATABLE ('2'::double precision) +(2 rows) + +-- check inheritance behavior +explain (costs off) + select count(*) from person tablesample bernoulli (100); + QUERY PLAN +------------------------------------------------- + Aggregate + -> Append + -> Sample Scan on person person_1 + Sampling: bernoulli ('100'::real) + -> Sample Scan on emp person_2 + Sampling: bernoulli ('100'::real) + -> Sample Scan on student person_3 + Sampling: bernoulli ('100'::real) + -> Sample Scan on stud_emp person_4 + Sampling: bernoulli ('100'::real) +(10 rows) + +select count(*) from person tablesample bernoulli (100); + count +------- + 58 +(1 row) + +select count(*) from person; + count +------- + 58 +(1 row) + +-- check that collations get assigned within the tablesample arguments +SELECT count(*) FROM test_tablesample TABLESAMPLE bernoulli (('1'::text < '0'::text)::int); + count +------- + 0 +(1 row) + +-- check behavior during rescans, as well as correct handling of min/max pct +select * from + (values (0),(100)) v(pct), + lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss; + pct | count +-----+------- + 0 | 0 + 100 | 10000 +(2 rows) + +select * from + (values (0),(100)) v(pct), + lateral (select count(*) from tenk1 tablesample system (pct)) ss; + pct | count +-----+------- + 0 | 0 + 100 | 10000 +(2 rows) + +explain (costs off) +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample bernoulli (pct)) ss + group by pct; + QUERY PLAN +-------------------------------------------------------- + HashAggregate + Group Key: "*VALUES*".column1 + -> Nested Loop + -> Values Scan on "*VALUES*" + -> Sample Scan on tenk1 + Sampling: bernoulli ("*VALUES*".column1) +(6 rows) + +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample bernoulli (pct)) ss + group by pct; + pct | count +-----+------- + 100 | 10000 +(1 row) + +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample system (pct)) ss + group by pct; + pct | count +-----+------- + 100 | 10000 +(1 row) + +-- errors +SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1); +ERROR: tablesample method foobar does not exist +LINE 1: SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1); + ^ +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (NULL); +ERROR: TABLESAMPLE parameter cannot be null +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (NULL); +ERROR: TABLESAMPLE REPEATABLE parameter cannot be null +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (-1); +ERROR: sample percentage must be between 0 and 100 +SELECT id FROM 
test_tablesample TABLESAMPLE BERNOULLI (200); +ERROR: sample percentage must be between 0 and 100 +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (-1); +ERROR: sample percentage must be between 0 and 100 +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (200); +ERROR: sample percentage must be between 0 and 100 +SELECT id FROM test_tablesample_v1 TABLESAMPLE BERNOULLI (1); +ERROR: TABLESAMPLE clause can only be applied to tables and materialized views +LINE 1: SELECT id FROM test_tablesample_v1 TABLESAMPLE BERNOULLI (1)... + ^ +INSERT INTO test_tablesample_v1 VALUES(1); +ERROR: cannot insert into view "test_tablesample_v1" +DETAIL: Views containing TABLESAMPLE are not automatically updatable. +HINT: To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule. +WITH query_select AS (SELECT * FROM test_tablesample) +SELECT * FROM query_select TABLESAMPLE BERNOULLI (5.5) REPEATABLE (1); +ERROR: TABLESAMPLE clause can only be applied to tables and materialized views +LINE 2: SELECT * FROM query_select TABLESAMPLE BERNOULLI (5.5) REPEA... + ^ +SELECT q.* FROM (SELECT * FROM test_tablesample) as q TABLESAMPLE BERNOULLI (5); +ERROR: syntax error at or near "TABLESAMPLE" +LINE 1: ...CT q.* FROM (SELECT * FROM test_tablesample) as q TABLESAMPL... + ^ +-- check partitioned tables support tablesample +create table parted_sample (a int) partition by list (a); +create table parted_sample_1 partition of parted_sample for values in (1); +create table parted_sample_2 partition of parted_sample for values in (2); +explain (costs off) + select * from parted_sample tablesample bernoulli (100); + QUERY PLAN +------------------------------------------- + Append + -> Sample Scan on parted_sample_1 + Sampling: bernoulli ('100'::real) + -> Sample Scan on parted_sample_2 + Sampling: bernoulli ('100'::real) +(5 rows) + +drop table parted_sample, parted_sample_1, parted_sample_2; diff --git a/src/test/regress/expected/tablespace.out b/src/test/regress/expected/tablespace.out new file mode 100644 index 0000000..9aabb85 --- /dev/null +++ b/src/test/regress/expected/tablespace.out @@ -0,0 +1,968 @@ +-- relative tablespace locations are not allowed +CREATE TABLESPACE regress_tblspace LOCATION 'relative'; -- fail +ERROR: tablespace location must be an absolute path +-- empty tablespace locations are not usually allowed +CREATE TABLESPACE regress_tblspace LOCATION ''; -- fail +ERROR: tablespace location must be an absolute path +-- as a special developer-only option to allow us to use tablespaces +-- with streaming replication on the same server, an empty location +-- can be allowed as a way to say that the tablespace should be created +-- as a directory in pg_tblspc, rather than being a symlink +SET allow_in_place_tablespaces = true; +-- create a tablespace using WITH clause +CREATE TABLESPACE regress_tblspacewith LOCATION '' WITH (some_nonexistent_parameter = true); -- fail +ERROR: unrecognized parameter "some_nonexistent_parameter" +CREATE TABLESPACE regress_tblspacewith LOCATION '' WITH (random_page_cost = 3.0); -- ok +-- check to see the parameter was used +SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith'; + spcoptions +------------------------ + {random_page_cost=3.0} +(1 row) + +-- drop the tablespace so we can re-use the location +DROP TABLESPACE regress_tblspacewith; +-- This returns a relative path as of an effect of allow_in_place_tablespaces, +-- masking the tablespace OID used in the path name. 
+SELECT regexp_replace(pg_tablespace_location(oid), '(pg_tblspc)/(\d+)', '\1/NNN') + FROM pg_tablespace WHERE spcname = 'regress_tblspace'; + regexp_replace +---------------- + pg_tblspc/NNN +(1 row) + +-- try setting and resetting some properties for the new tablespace +ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); +ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail +ERROR: unrecognized parameter "some_nonexistent_parameter" +ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail +ERROR: RESET must not include values for parameters +ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok +-- REINDEX (TABLESPACE) +-- catalogs and system tablespaces +-- system catalog, fail +REINDEX (TABLESPACE regress_tblspace) TABLE pg_am; +ERROR: cannot move system relation "pg_am_name_index" +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am; +ERROR: cannot reindex system catalogs concurrently +-- shared catalog, fail +REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid; +ERROR: cannot move system relation "pg_authid_rolname_index" +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid; +ERROR: cannot reindex system catalogs concurrently +-- toast relations, fail +REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1260_index; +ERROR: cannot move system relation "pg_toast_1260_index" +REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; +ERROR: cannot reindex system catalogs concurrently +REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1260; +ERROR: cannot move system relation "pg_toast_1260_index" +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_toast.pg_toast_1260; +ERROR: cannot reindex system catalogs concurrently +-- system catalog, fail +REINDEX (TABLESPACE pg_global) TABLE pg_authid; +ERROR: cannot move system relation "pg_authid_rolname_index" +REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid; +ERROR: cannot reindex system catalogs concurrently +-- table with toast relation +CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text); +INSERT INTO regress_tblspace_test_tbl (num1, num2, t) + SELECT round(random()*100), random(), 'text' + FROM generate_series(1, 10) s(i); +CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1); +-- move to global tablespace, fail +REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx; +ERROR: only shared relations can be placed in pg_global tablespace +REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY regress_tblspace_test_tbl_idx; +ERROR: cannot move non-shared relation to tablespace "pg_global" +-- check transactional behavior of REINDEX (TABLESPACE) +BEGIN; +REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx; +REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; +ROLLBACK; +-- no relation moved to the new tablespace +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'; + relname +--------- +(0 rows) + +-- check that all indexes are moved to a new tablespace with different +-- relfilenode. +-- Save first the existing relfilenode for the toast and main relations. 
+SELECT relfilenode as main_filenode FROM pg_class + WHERE relname = 'regress_tblspace_test_tbl_idx' \gset +SELECT relfilenode as toast_filenode FROM pg_class + WHERE oid = + (SELECT i.indexrelid + FROM pg_class c, + pg_index i + WHERE i.indrelid = c.reltoastrelid AND + c.relname = 'regress_tblspace_test_tbl') \gset +REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +------------------------------- + regress_tblspace_test_tbl_idx +(1 row) + +ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace; +ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +------------------------------- + regress_tblspace_test_tbl_idx +(1 row) + +-- Move back to the default tablespace. +ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +--------- +(0 rows) + +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +------------------------------- + regress_tblspace_test_tbl_idx +(1 row) + +SELECT relfilenode = :main_filenode AS main_same FROM pg_class + WHERE relname = 'regress_tblspace_test_tbl_idx'; + main_same +----------- + f +(1 row) + +SELECT relfilenode = :toast_filenode as toast_same FROM pg_class + WHERE oid = + (SELECT i.indexrelid + FROM pg_class c, + pg_index i + WHERE i.indrelid = c.reltoastrelid AND + c.relname = 'regress_tblspace_test_tbl'); + toast_same +------------ + f +(1 row) + +DROP TABLE regress_tblspace_test_tbl; +-- REINDEX (TABLESPACE) with partitions +-- Create a partition tree and check the set of relations reindexed +-- with their new tablespace. +CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); +CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part + FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); +CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0 + FOR VALUES IN (1); +CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0 + FOR VALUES IN (2); +-- This partitioned table will have no partitions. +CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part + FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); +-- Create some partitioned indexes +CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1); +CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1); +ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0; +-- This partitioned index will have no partitions. 
+CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1); +ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10; +CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1); +ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1; +CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1); +ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2; +SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index') + ORDER BY relid, level; + relid | parentrelid | level +--------------------------------+------------------------------+------- + tbspace_reindex_part_index | | 0 + tbspace_reindex_part_index_0 | tbspace_reindex_part_index | 1 + tbspace_reindex_part_index_10 | tbspace_reindex_part_index | 1 + tbspace_reindex_part_index_0_1 | tbspace_reindex_part_index_0 | 2 + tbspace_reindex_part_index_0_2 | tbspace_reindex_part_index_0 | 2 +(5 rows) + +-- Track the original tablespace, relfilenode and OID of each index +-- in the tree. +CREATE TEMP TABLE reindex_temp_before AS + SELECT oid, relname, relfilenode, reltablespace + FROM pg_class + WHERE relname ~ 'tbspace_reindex_part_index'; +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part; +-- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check +-- based on the relation name below. +SELECT b.relname, + CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' + ELSE 'relfilenode has changed' END AS filenode, + CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged' + ELSE 'reltablespace has changed' END AS tbspace + FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname + ORDER BY 1; + relname | filenode | tbspace +--------------------------------+--------------------------+---------------------------- + tbspace_reindex_part_index | relfilenode is unchanged | reltablespace is unchanged + tbspace_reindex_part_index_0 | relfilenode is unchanged | reltablespace is unchanged + tbspace_reindex_part_index_0_1 | relfilenode has changed | reltablespace has changed + tbspace_reindex_part_index_0_2 | relfilenode has changed | reltablespace has changed + tbspace_reindex_part_index_10 | relfilenode is unchanged | reltablespace is unchanged +(5 rows) + +DROP TABLE tbspace_reindex_part; +-- create a schema we can use +CREATE SCHEMA testschema; +-- try a table +CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'foo'; + relname | spcname +---------+------------------ + foo | regress_tblspace +(1 row) + +INSERT INTO testschema.foo VALUES(1); +INSERT INTO testschema.foo VALUES(2); +-- tables from dynamic sources +CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'asselect'; + relname | spcname +----------+------------------ + asselect | regress_tblspace +(1 row) + +PREPARE selectsource(int) AS SELECT $1; +CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace + AS EXECUTE selectsource(2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'asexecute'; + relname | spcname +-----------+------------------ + 
asexecute | regress_tblspace +(1 row) + +-- index +CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'foo_idx'; + relname | spcname +---------+------------------ + foo_idx | regress_tblspace +(1 row) + +-- check \d output +\d testschema.foo + Table "testschema.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + i | integer | | | +Indexes: + "foo_idx" btree (i), tablespace "regress_tblspace" +Tablespace: "regress_tblspace" + +\d testschema.foo_idx + Index "testschema.foo_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + i | integer | yes | i +btree, for table "testschema.foo" +Tablespace: "regress_tblspace" + +-- +-- partitioned table +-- +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +SET default_tablespace TO pg_global; +CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); +ERROR: only shared relations can be placed in pg_global tablespace +RESET default_tablespace; +CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); +SET default_tablespace TO regress_tblspace; +CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2); +SET default_tablespace TO pg_global; +CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); +ERROR: only shared relations can be placed in pg_global tablespace +ALTER TABLE testschema.part SET TABLESPACE regress_tblspace; +CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); +CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4) + TABLESPACE pg_default; +CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6) + PARTITION BY LIST (a); +ALTER TABLE testschema.part SET TABLESPACE pg_default; +CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) + PARTITION BY LIST (a); +ERROR: only shared relations can be placed in pg_global tablespace +CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10) + PARTITION BY LIST (a) TABLESPACE regress_tblspace; +RESET default_tablespace; +CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) + PARTITION BY LIST (a); +SELECT relname, spcname FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) + LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid + where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname; + relname | spcname +----------+------------------ + part | + part_1 | + part_2 | regress_tblspace + part_3 | regress_tblspace + part_4 | + part_56 | regress_tblspace + part_78 | + part_910 | regress_tblspace +(8 rows) + +RESET default_tablespace; +DROP TABLE testschema.part; +-- partitioned index +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); +CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; +CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx' ORDER BY relname; + relname | spcname +-------------+------------------ + part1_a_idx | regress_tblspace + part2_a_idx | regress_tblspace + part_a_idx 
| regress_tblspace +(3 rows) + +\d testschema.part + Partitioned table "testschema.part" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition key: LIST (a) +Indexes: + "part_a_idx" btree (a), tablespace "regress_tblspace" +Number of partitions: 2 (Use \d+ to list them.) + +\d+ testschema.part + Partitioned table "testschema.part" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition key: LIST (a) +Indexes: + "part_a_idx" btree (a), tablespace "regress_tblspace" +Partitions: testschema.part1 FOR VALUES IN (1), + testschema.part2 FOR VALUES IN (2) + +\d testschema.part1 + Table "testschema.part1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: testschema.part FOR VALUES IN (1) +Indexes: + "part1_a_idx" btree (a), tablespace "regress_tblspace" + +\d+ testschema.part1 + Table "testschema.part1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition of: testschema.part FOR VALUES IN (1) +Partition constraint: ((a IS NOT NULL) AND (a = 1)) +Indexes: + "part1_a_idx" btree (a), tablespace "regress_tblspace" + +\d testschema.part_a_idx +Partitioned index "testschema.part_a_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +btree, for table "testschema.part" +Number of partitions: 2 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d+ testschema.part_a_idx + Partitioned index "testschema.part_a_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+---------+-------------- + a | integer | yes | a | plain | +btree, for table "testschema.part" +Partitions: testschema.part1_a_idx, + testschema.part2_a_idx +Tablespace: "regress_tblspace" + +-- partitioned rels cannot specify the default tablespace. 
These fail: +CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default; +ERROR: cannot specify default tablespace for partitioned relations +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a); +ERROR: cannot specify default tablespace for partitioned relations +SET default_tablespace TO 'pg_default'; +CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace; +ERROR: cannot specify default tablespace for partitioned relations +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a); +ERROR: cannot specify default tablespace for partitioned relations +-- but these work: +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace; +SET default_tablespace TO ''; +CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a); +DROP TABLE testschema.dflt, testschema.dflt2; +-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds +CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; +INSERT INTO testschema.test_default_tab VALUES (1); +CREATE INDEX test_index1 on testschema.test_default_tab (id); +CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace; +ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id); +ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +-- use a custom tablespace for default_tablespace +SET default_tablespace TO regress_tblspace; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? 
| Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab; + id +---- + 1 +(1 row) + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab; + id +---- + 1 +(1 row) + +-- now use the default tablespace for default_tablespace +SET default_tablespace TO ''; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? 
| Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +DROP TABLE testschema.test_default_tab; +-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds +-- (this time with a partitioned table) +CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint) + PARTITION BY LIST (id) TABLESPACE regress_tblspace; +CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p + FOR VALUES IN (1); +INSERT INTO testschema.test_default_tab_p VALUES (1); +CREATE INDEX test_index1 on testschema.test_default_tab_p (val); +CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace; +ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id); +ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +-- use a custom tablespace for default_tablespace +SET default_tablespace TO regress_tblspace; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) 
+Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab_p; + id | val +----+----- + 1 | +(1 row) + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab_p; + id | val +----+----- + 1 | +(1 row) + +-- now use the default tablespace for default_tablespace +SET default_tablespace TO ''; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? 
| Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +DROP TABLE testschema.test_default_tab_p; +-- check that default_tablespace affects index additions in ALTER TABLE +CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace; +INSERT INTO testschema.test_tab VALUES (1); +SET default_tablespace TO regress_tblspace; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); +SET default_tablespace TO ''; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); +\d testschema.test_tab_unique + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +unique, btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_pkey + Index "testschema.test_tab_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "testschema.test_tab" + +SELECT * FROM testschema.test_tab; + id +---- + 1 +(1 row) + +DROP TABLE testschema.test_tab; +-- check that default_tablespace is handled correctly by multi-command +-- ALTER TABLE that includes a tablespace-preserving rewrite +CREATE TABLE testschema.test_tab(a int, b int, c int); +SET default_tablespace TO regress_tblspace; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); +CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); +SET default_tablespace TO ''; +CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); +\d testschema.test_tab_unique + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +unique, btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_a_idx + Index "testschema.test_tab_a_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_b_idx + Index "testschema.test_tab_b_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + b | integer | yes | b +btree, for table "testschema.test_tab" + +ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); +\d testschema.test_tab_unique + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +unique, btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_a_idx + Index "testschema.test_tab_a_idx" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + a | integer | yes | a +btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_b_idx + Index "testschema.test_tab_b_idx" + Column | Type | Key? | Definition +--------+--------+------+------------ + b | bigint | yes | b +btree, for table "testschema.test_tab" + +DROP TABLE testschema.test_tab; +-- let's try moving a table from one place to another +CREATE TABLE testschema.atable AS VALUES (1), (2); +CREATE UNIQUE INDEX anindex ON testschema.atable(column1); +ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global; +ERROR: only shared relations can be placed in pg_global tablespace +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; +INSERT INTO testschema.atable VALUES(3); -- ok +INSERT INTO testschema.atable VALUES(1); -- fail (checks index) +ERROR: duplicate key value violates unique constraint "anindex" +DETAIL: Key (column1)=(1) already exists. +SELECT COUNT(*) FROM testschema.atable; -- checks heap + count +------- + 3 +(1 row) + +-- let's try moving a materialized view from one place to another +CREATE MATERIALIZED VIEW testschema.amv AS SELECT * FROM testschema.atable; +ALTER MATERIALIZED VIEW testschema.amv SET TABLESPACE regress_tblspace; +REFRESH MATERIALIZED VIEW testschema.amv; +SELECT COUNT(*) FROM testschema.amv; + count +------- + 3 +(1 row) + +-- Will fail with bad path +CREATE TABLESPACE regress_badspace LOCATION '/no/such/location'; +ERROR: directory "/no/such/location" does not exist +-- No such tablespace +CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace; +ERROR: tablespace "regress_nosuchspace" does not exist +-- Fail, in use for some partitioned object +DROP TABLESPACE regress_tblspace; +ERROR: tablespace "regress_tblspace" cannot be dropped because some objects depend on it +DETAIL: tablespace for index testschema.part_a_idx +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +-- Fail, not empty +DROP TABLESPACE regress_tblspace; +ERROR: tablespace "regress_tblspace" is not empty +CREATE ROLE regress_tablespace_user1 login; +CREATE ROLE regress_tablespace_user2 login; +GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2; +ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1; +CREATE TABLE testschema.tablespace_acl (c int); +-- new owner lacks permission to create this index from scratch +CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace; +ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2; +SET SESSION ROLE regress_tablespace_user2; +CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail +ERROR: permission denied for tablespace regress_tblspace +ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint; +REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail +ERROR: permission denied for tablespace regress_tblspace +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail +ERROR: permission denied for tablespace regress_tblspace +RESET ROLE; +ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed; +ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +ALTER MATERIALIZED VIEW ALL IN TABLESPACE 
regress_tblspace_renamed SET TABLESPACE pg_default; +-- Should show notice that nothing was done +ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found +ALTER MATERIALIZED VIEW ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found +-- Should succeed +DROP TABLESPACE regress_tblspace_renamed; +DROP SCHEMA testschema CASCADE; +NOTICE: drop cascades to 7 other objects +DETAIL: drop cascades to table testschema.foo +drop cascades to table testschema.asselect +drop cascades to table testschema.asexecute +drop cascades to table testschema.part +drop cascades to table testschema.atable +drop cascades to materialized view testschema.amv +drop cascades to table testschema.tablespace_acl +DROP ROLE regress_tablespace_user1; +DROP ROLE regress_tablespace_user2; diff --git a/src/test/regress/expected/temp.out b/src/test/regress/expected/temp.out new file mode 100644 index 0000000..2a246a7 --- /dev/null +++ b/src/test/regress/expected/temp.out @@ -0,0 +1,412 @@ +-- +-- TEMP +-- Test temp relations and indexes +-- +-- test temp table/index masking +CREATE TABLE temptest(col int); +CREATE INDEX i_temptest ON temptest(col); +CREATE TEMP TABLE temptest(tcol int); +CREATE INDEX i_temptest ON temptest(tcol); +SELECT * FROM temptest; + tcol +------ +(0 rows) + +DROP INDEX i_temptest; +DROP TABLE temptest; +SELECT * FROM temptest; + col +----- +(0 rows) + +DROP INDEX i_temptest; +DROP TABLE temptest; +-- test temp table selects +CREATE TABLE temptest(col int); +INSERT INTO temptest VALUES (1); +CREATE TEMP TABLE temptest(tcol float); +INSERT INTO temptest VALUES (2.1); +SELECT * FROM temptest; + tcol +------ + 2.1 +(1 row) + +DROP TABLE temptest; +SELECT * FROM temptest; + col +----- + 1 +(1 row) + +DROP TABLE temptest; +-- test temp table deletion +CREATE TEMP TABLE temptest(col int); +\c +SELECT * FROM temptest; +ERROR: relation "temptest" does not exist +LINE 1: SELECT * FROM temptest; + ^ +-- Test ON COMMIT DELETE ROWS +CREATE TEMP TABLE temptest(col int) ON COMMIT DELETE ROWS; +-- while we're here, verify successful truncation of index with SQL function +CREATE INDEX ON temptest(bit_length('')); +BEGIN; +INSERT INTO temptest VALUES (1); +INSERT INTO temptest VALUES (2); +SELECT * FROM temptest; + col +----- + 1 + 2 +(2 rows) + +COMMIT; +SELECT * FROM temptest; + col +----- +(0 rows) + +DROP TABLE temptest; +BEGIN; +CREATE TEMP TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1; +SELECT * FROM temptest; + col +----- + 1 +(1 row) + +COMMIT; +SELECT * FROM temptest; + col +----- +(0 rows) + +DROP TABLE temptest; +-- Test ON COMMIT DROP +BEGIN; +CREATE TEMP TABLE temptest(col int) ON COMMIT DROP; +INSERT INTO temptest VALUES (1); +INSERT INTO temptest VALUES (2); +SELECT * FROM temptest; + col +----- + 1 + 2 +(2 rows) + +COMMIT; +SELECT * FROM temptest; +ERROR: relation "temptest" does not exist +LINE 1: SELECT * FROM temptest; + ^ +BEGIN; +CREATE TEMP TABLE temptest(col) ON COMMIT DROP AS SELECT 1; +SELECT * FROM temptest; + col +----- + 1 +(1 row) + +COMMIT; +SELECT * FROM temptest; +ERROR: relation "temptest" does not exist +LINE 1: SELECT * FROM temptest; + ^ +-- Test it with a CHECK condition that produces a toasted pg_constraint entry +BEGIN; +do $$ +begin + execute format($cmd$ + CREATE TEMP TABLE temptest (col text CHECK (col < %L)) ON COMMIT DROP + $cmd$, + (SELECT string_agg(g.i::text || ':' || 
random()::text, '|') + FROM generate_series(1, 100) g(i))); +end$$; +SELECT * FROM temptest; + col +----- +(0 rows) + +COMMIT; +SELECT * FROM temptest; +ERROR: relation "temptest" does not exist +LINE 1: SELECT * FROM temptest; + ^ +-- ON COMMIT is only allowed for TEMP +CREATE TABLE temptest(col int) ON COMMIT DELETE ROWS; +ERROR: ON COMMIT can only be used on temporary tables +CREATE TABLE temptest(col) ON COMMIT DELETE ROWS AS SELECT 1; +ERROR: ON COMMIT can only be used on temporary tables +-- Test foreign keys +BEGIN; +CREATE TEMP TABLE temptest1(col int PRIMARY KEY); +CREATE TEMP TABLE temptest2(col int REFERENCES temptest1) + ON COMMIT DELETE ROWS; +INSERT INTO temptest1 VALUES (1); +INSERT INTO temptest2 VALUES (1); +COMMIT; +SELECT * FROM temptest1; + col +----- + 1 +(1 row) + +SELECT * FROM temptest2; + col +----- +(0 rows) + +BEGIN; +CREATE TEMP TABLE temptest3(col int PRIMARY KEY) ON COMMIT DELETE ROWS; +CREATE TEMP TABLE temptest4(col int REFERENCES temptest3); +COMMIT; +ERROR: unsupported ON COMMIT and foreign key combination +DETAIL: Table "temptest4" references "temptest3", but they do not have the same ON COMMIT setting. +-- Test manipulation of temp schema's placement in search path +create table public.whereami (f1 text); +insert into public.whereami values ('public'); +create temp table whereami (f1 text); +insert into whereami values ('temp'); +create function public.whoami() returns text + as $$select 'public'::text$$ language sql; +create function pg_temp.whoami() returns text + as $$select 'temp'::text$$ language sql; +-- default should have pg_temp implicitly first, but only for tables +select * from whereami; + f1 +------ + temp +(1 row) + +select whoami(); + whoami +-------- + public +(1 row) + +-- can list temp first explicitly, but it still doesn't affect functions +set search_path = pg_temp, public; +select * from whereami; + f1 +------ + temp +(1 row) + +select whoami(); + whoami +-------- + public +(1 row) + +-- or put it last for security +set search_path = public, pg_temp; +select * from whereami; + f1 +-------- + public +(1 row) + +select whoami(); + whoami +-------- + public +(1 row) + +-- you can invoke a temp function explicitly, though +select pg_temp.whoami(); + whoami +-------- + temp +(1 row) + +drop table public.whereami; +-- types in temp schema +set search_path = pg_temp, public; +create domain pg_temp.nonempty as text check (value <> ''); +-- function-syntax invocation of types matches rules for functions +select nonempty(''); +ERROR: function nonempty(unknown) does not exist +LINE 1: select nonempty(''); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +select pg_temp.nonempty(''); +ERROR: value for domain nonempty violates check constraint "nonempty_check" +-- other syntax matches rules for tables +select ''::nonempty; +ERROR: value for domain nonempty violates check constraint "nonempty_check" +reset search_path; +-- For partitioned temp tables, ON COMMIT actions ignore storage-less +-- partitioned tables. 
+begin; +create temp table temp_parted_oncommit (a int) + partition by list (a) on commit delete rows; +create temp table temp_parted_oncommit_1 + partition of temp_parted_oncommit + for values in (1) on commit delete rows; +insert into temp_parted_oncommit values (1); +commit; +-- partitions are emptied by the previous commit +select * from temp_parted_oncommit; + a +--- +(0 rows) + +drop table temp_parted_oncommit; +-- Check dependencies between ON COMMIT actions with a partitioned +-- table and its partitions. Using ON COMMIT DROP on a parent removes +-- the whole set. +begin; +create temp table temp_parted_oncommit_test (a int) + partition by list (a) on commit drop; +create temp table temp_parted_oncommit_test1 + partition of temp_parted_oncommit_test + for values in (1) on commit delete rows; +create temp table temp_parted_oncommit_test2 + partition of temp_parted_oncommit_test + for values in (2) on commit drop; +insert into temp_parted_oncommit_test values (1), (2); +commit; +-- no relations remain in this case. +select relname from pg_class where relname ~ '^temp_parted_oncommit_test'; + relname +--------- +(0 rows) + +-- Using ON COMMIT DELETE on a partitioned table does not remove +-- all rows if partitions preserve their data. +begin; +create temp table temp_parted_oncommit_test (a int) + partition by list (a) on commit delete rows; +create temp table temp_parted_oncommit_test1 + partition of temp_parted_oncommit_test + for values in (1) on commit preserve rows; +create temp table temp_parted_oncommit_test2 + partition of temp_parted_oncommit_test + for values in (2) on commit drop; +insert into temp_parted_oncommit_test values (1), (2); +commit; +-- Data from the remaining partition is still here as its rows are +-- preserved. +select * from temp_parted_oncommit_test; + a +--- + 1 +(1 row) + +-- two relations remain in this case. +select relname from pg_class where relname ~ '^temp_parted_oncommit_test' + order by relname; + relname +---------------------------- + temp_parted_oncommit_test + temp_parted_oncommit_test1 +(2 rows) + +drop table temp_parted_oncommit_test; +-- Check dependencies between ON COMMIT actions with inheritance trees. +-- Using ON COMMIT DROP on a parent removes the whole set. +begin; +create temp table temp_inh_oncommit_test (a int) on commit drop; +create temp table temp_inh_oncommit_test1 () + inherits(temp_inh_oncommit_test) on commit delete rows; +insert into temp_inh_oncommit_test1 values (1); +commit; +-- no relations remain in this case +select relname from pg_class where relname ~ '^temp_inh_oncommit_test'; + relname +--------- +(0 rows) + +-- Data on the parent is removed, and the child goes away. +begin; +create temp table temp_inh_oncommit_test (a int) on commit delete rows; +create temp table temp_inh_oncommit_test1 () + inherits(temp_inh_oncommit_test) on commit drop; +insert into temp_inh_oncommit_test1 values (1); +insert into temp_inh_oncommit_test values (1); +commit; +select * from temp_inh_oncommit_test; + a +--- +(0 rows) + +-- one relation remains +select relname from pg_class where relname ~ '^temp_inh_oncommit_test'; + relname +------------------------ + temp_inh_oncommit_test +(1 row) + +drop table temp_inh_oncommit_test; +-- Tests with two-phase commit +-- Transactions creating objects in a temporary namespace cannot be used +-- with two-phase commit. +-- These cases generate errors about temporary namespace. 
+-- Function creation +begin; +create function pg_temp.twophase_func() returns void as + $$ select '2pc_func'::text $$ language sql; +prepare transaction 'twophase_func'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +-- Function drop +create function pg_temp.twophase_func() returns void as + $$ select '2pc_func'::text $$ language sql; +begin; +drop function pg_temp.twophase_func(); +prepare transaction 'twophase_func'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +-- Operator creation +begin; +create operator pg_temp.@@ (leftarg = int4, rightarg = int4, procedure = int4mi); +prepare transaction 'twophase_operator'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +-- These generate errors about temporary tables. +begin; +create type pg_temp.twophase_type as (a int); +prepare transaction 'twophase_type'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +begin; +create view pg_temp.twophase_view as select 1; +prepare transaction 'twophase_view'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +begin; +create sequence pg_temp.twophase_seq; +prepare transaction 'twophase_sequence'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +-- Temporary tables cannot be used with two-phase commit. +create temp table twophase_tab (a int); +begin; +select a from twophase_tab; + a +--- +(0 rows) + +prepare transaction 'twophase_tab'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +begin; +insert into twophase_tab values (1); +prepare transaction 'twophase_tab'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +begin; +lock twophase_tab in access exclusive mode; +prepare transaction 'twophase_tab'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +begin; +drop table twophase_tab; +prepare transaction 'twophase_tab'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects +-- Corner case: current_schema may create a temporary schema if namespace +-- creation is pending, so check after that. First reset the connection +-- to remove the temporary namespace. +\c - +SET search_path TO 'pg_temp'; +BEGIN; +SELECT current_schema() ~ 'pg_temp' AS is_temp_schema; + is_temp_schema +---------------- + t +(1 row) + +PREPARE TRANSACTION 'twophase_search'; +ERROR: cannot PREPARE a transaction that has operated on temporary objects diff --git a/src/test/regress/expected/test_setup.out b/src/test/regress/expected/test_setup.out new file mode 100644 index 0000000..5d9e6bf --- /dev/null +++ b/src/test/regress/expected/test_setup.out @@ -0,0 +1,245 @@ +-- +-- TEST_SETUP --- prepare environment expected by regression test scripts +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +-- +-- synchronous_commit=off delays when hint bits may be set. Some plans change +-- depending on the number of all-visible pages, which in turn can be +-- influenced by the delayed hint bits. Force synchronous_commit=on to avoid +-- that source of variability. +-- +SET synchronous_commit = on; +-- +-- Postgres formerly made the public schema read/write by default, +-- and most of the core regression tests still expect that. +-- +GRANT ALL ON SCHEMA public TO public; +-- Create a tablespace we can use in tests. 
+SET allow_in_place_tablespaces = true; +CREATE TABLESPACE regress_tblspace LOCATION ''; +-- +-- These tables have traditionally been referenced by many tests, +-- so create and populate them. Insert only non-error values here. +-- (Some subsequent tests try to insert erroneous values. That's okay +-- because the table won't actually change. Do not change the contents +-- of these tables in later tests, as it may affect other tests.) +-- +CREATE TABLE CHAR_TBL(f1 char(4)); +INSERT INTO CHAR_TBL (f1) VALUES + ('a'), + ('ab'), + ('abcd'), + ('abcd '); +VACUUM CHAR_TBL; +CREATE TABLE FLOAT8_TBL(f1 float8); +INSERT INTO FLOAT8_TBL(f1) VALUES + ('0.0'), + ('-34.84'), + ('-1004.30'), + ('-1.2345678901234e+200'), + ('-1.2345678901234e-200'); +VACUUM FLOAT8_TBL; +CREATE TABLE INT2_TBL(f1 int2); +INSERT INTO INT2_TBL(f1) VALUES + ('0 '), + (' 1234 '), + (' -1234'), + ('32767'), -- largest and smallest values + ('-32767'); +VACUUM INT2_TBL; +CREATE TABLE INT4_TBL(f1 int4); +INSERT INTO INT4_TBL(f1) VALUES + (' 0 '), + ('123456 '), + (' -123456'), + ('2147483647'), -- largest and smallest values + ('-2147483647'); +VACUUM INT4_TBL; +CREATE TABLE INT8_TBL(q1 int8, q2 int8); +INSERT INTO INT8_TBL VALUES + (' 123 ',' 456'), + ('123 ','4567890123456789'), + ('4567890123456789','123'), + (+4567890123456789,'4567890123456789'), + ('+4567890123456789','-4567890123456789'); +VACUUM INT8_TBL; +CREATE TABLE POINT_TBL(f1 point); +INSERT INTO POINT_TBL(f1) VALUES + ('(0.0,0.0)'), + ('(-10.0,0.0)'), + ('(-3.0,4.0)'), + ('(5.1, 34.5)'), + ('(-5.0,-12.0)'), + ('(1e-300,-1e-300)'), -- To underflow + ('(1e+300,Inf)'), -- To overflow + ('(Inf,1e+300)'), -- Transposed + (' ( Nan , NaN ) '), + ('10.0,10.0'); +-- We intentionally don't vacuum point_tbl here; geometry depends on that +CREATE TABLE TEXT_TBL (f1 text); +INSERT INTO TEXT_TBL VALUES + ('doh!'), + ('hi de ho neighbor'); +VACUUM TEXT_TBL; +CREATE TABLE VARCHAR_TBL(f1 varchar(4)); +INSERT INTO VARCHAR_TBL (f1) VALUES + ('a'), + ('ab'), + ('abcd'), + ('abcd '); +VACUUM VARCHAR_TBL; +CREATE TABLE onek ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +); +\set filename :abs_srcdir '/data/onek.data' +COPY onek FROM :'filename'; +VACUUM ANALYZE onek; +CREATE TABLE onek2 AS SELECT * FROM onek; +VACUUM ANALYZE onek2; +CREATE TABLE tenk1 ( + unique1 int4, + unique2 int4, + two int4, + four int4, + ten int4, + twenty int4, + hundred int4, + thousand int4, + twothousand int4, + fivethous int4, + tenthous int4, + odd int4, + even int4, + stringu1 name, + stringu2 name, + string4 name +); +\set filename :abs_srcdir '/data/tenk.data' +COPY tenk1 FROM :'filename'; +VACUUM ANALYZE tenk1; +CREATE TABLE tenk2 AS SELECT * FROM tenk1; +VACUUM ANALYZE tenk2; +CREATE TABLE person ( + name text, + age int4, + location point +); +\set filename :abs_srcdir '/data/person.data' +COPY person FROM :'filename'; +VACUUM ANALYZE person; +CREATE TABLE emp ( + salary int4, + manager name +) INHERITS (person); +\set filename :abs_srcdir '/data/emp.data' +COPY emp FROM :'filename'; +VACUUM ANALYZE emp; +CREATE TABLE student ( + gpa float8 +) INHERITS (person); +\set filename :abs_srcdir '/data/student.data' +COPY student FROM :'filename'; +VACUUM ANALYZE student; +CREATE TABLE stud_emp ( + percent int4 +) INHERITS (emp, student); +NOTICE: merging multiple inherited definitions of column "name" 
+NOTICE: merging multiple inherited definitions of column "age" +NOTICE: merging multiple inherited definitions of column "location" +\set filename :abs_srcdir '/data/stud_emp.data' +COPY stud_emp FROM :'filename'; +VACUUM ANALYZE stud_emp; +CREATE TABLE road ( + name text, + thepath path +); +\set filename :abs_srcdir '/data/streets.data' +COPY road FROM :'filename'; +VACUUM ANALYZE road; +CREATE TABLE ihighway () INHERITS (road); +INSERT INTO ihighway + SELECT * + FROM ONLY road + WHERE name ~ 'I- .*'; +VACUUM ANALYZE ihighway; +CREATE TABLE shighway ( + surface text +) INHERITS (road); +INSERT INTO shighway + SELECT *, 'asphalt' + FROM ONLY road + WHERE name ~ 'State Hwy.*'; +VACUUM ANALYZE shighway; +-- +-- We must have some enum type in the database for opr_sanity and type_sanity. +-- +create type stoplight as enum ('red', 'yellow', 'green'); +-- +-- Also create some non-built-in range types. +-- +create type float8range as range (subtype = float8, subtype_diff = float8mi); +create type textrange as range (subtype = text, collation = "C"); +-- +-- Create some C functions that will be used by various tests. +-- +CREATE FUNCTION binary_coercible(oid, oid) + RETURNS bool + AS :'regresslib', 'binary_coercible' + LANGUAGE C STRICT STABLE PARALLEL SAFE; +CREATE FUNCTION ttdummy () + RETURNS trigger + AS :'regresslib' + LANGUAGE C; +CREATE FUNCTION get_columns_length(oid[]) + RETURNS int + AS :'regresslib' + LANGUAGE C STRICT STABLE PARALLEL SAFE; +-- Use hand-rolled hash functions and operator classes to get predictable +-- result on different machines. The hash function for int4 simply returns +-- the sum of the values passed to it and the one for text returns the length +-- of the non-empty string value passed to it or 0. +create function part_hashint4_noop(value int4, seed int8) + returns int8 as $$ + select value + seed; + $$ language sql strict immutable parallel safe; +create operator class part_test_int4_ops for type int4 using hash as + operator 1 =, + function 2 part_hashint4_noop(int4, int8); +create function part_hashtext_length(value text, seed int8) + returns int8 as $$ + select length(coalesce(value, ''))::int8 + $$ language sql strict immutable parallel safe; +create operator class part_test_text_ops for type text using hash as + operator 1 =, + function 2 part_hashtext_length(text, int8); +-- +-- These functions are used in tests that used to use md5(), which we now +-- mostly avoid so that the tests will pass in FIPS mode. +-- +create function fipshash(bytea) + returns text + strict immutable parallel safe leakproof + return substr(encode(sha256($1), 'hex'), 1, 32); +create function fipshash(text) + returns text + strict immutable parallel safe leakproof + return substr(encode(sha256($1::bytea), 'hex'), 1, 32); diff --git a/src/test/regress/expected/text.out b/src/test/regress/expected/text.out new file mode 100644 index 0000000..4c65b23 --- /dev/null +++ b/src/test/regress/expected/text.out @@ -0,0 +1,438 @@ +-- +-- TEXT +-- +SELECT text 'this is a text string' = text 'this is a text string' AS true; + true +------ + t +(1 row) + +SELECT text 'this is a text string' = text 'this is a text strin' AS false; + false +------- + f +(1 row) + +-- text_tbl was already created and filled in test_setup.sql. +SELECT * FROM TEXT_TBL; + f1 +------------------- + doh! 
+ hi de ho neighbor +(2 rows) + +-- As of 8.3 we have removed most implicit casts to text, so that for example +-- this no longer works: +select length(42); +ERROR: function length(integer) does not exist +LINE 1: select length(42); + ^ +HINT: No function matches the given name and argument types. You might need to add explicit type casts. +-- But as a special exception for usability's sake, we still allow implicit +-- casting to text in concatenations, so long as the other input is text or +-- an unknown literal. So these work: +select 'four: '::text || 2+2; + ?column? +---------- + four: 4 +(1 row) + +select 'four: ' || 2+2; + ?column? +---------- + four: 4 +(1 row) + +-- but not this: +select 3 || 4.0; +ERROR: operator does not exist: integer || numeric +LINE 1: select 3 || 4.0; + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +/* + * various string functions + */ +select concat('one'); + concat +-------- + one +(1 row) + +select concat(1,2,3,'hello',true, false, to_date('20100309','YYYYMMDD')); + concat +---------------------- + 123hellotf03-09-2010 +(1 row) + +select concat_ws('#','one'); + concat_ws +----------- + one +(1 row) + +select concat_ws('#',1,2,3,'hello',true, false, to_date('20100309','YYYYMMDD')); + concat_ws +---------------------------- + 1#2#3#hello#t#f#03-09-2010 +(1 row) + +select concat_ws(',',10,20,null,30); + concat_ws +----------- + 10,20,30 +(1 row) + +select concat_ws('',10,20,null,30); + concat_ws +----------- + 102030 +(1 row) + +select concat_ws(NULL,10,20,null,30) is null; + ?column? +---------- + t +(1 row) + +select reverse('abcde'); + reverse +--------- + edcba +(1 row) + +select i, left('ahoj', i), right('ahoj', i) from generate_series(-5, 5) t(i) order by i; + i | left | right +----+------+------- + -5 | | + -4 | | + -3 | a | j + -2 | ah | oj + -1 | aho | hoj + 0 | | + 1 | a | j + 2 | ah | oj + 3 | aho | hoj + 4 | ahoj | ahoj + 5 | ahoj | ahoj +(11 rows) + +select quote_literal(''); + quote_literal +--------------- + '' +(1 row) + +select quote_literal('abc'''); + quote_literal +--------------- + 'abc''' +(1 row) + +select quote_literal(e'\\'); + quote_literal +--------------- + E'\\' +(1 row) + +-- check variadic labeled argument +select concat(variadic array[1,2,3]); + concat +-------- + 123 +(1 row) + +select concat_ws(',', variadic array[1,2,3]); + concat_ws +----------- + 1,2,3 +(1 row) + +select concat_ws(',', variadic NULL::int[]); + concat_ws +----------- + +(1 row) + +select concat(variadic NULL::int[]) is NULL; + ?column? +---------- + t +(1 row) + +select concat(variadic '{}'::int[]) = ''; + ?column? +---------- + t +(1 row) + +--should fail +select concat_ws(',', variadic 10); +ERROR: VARIADIC argument must be an array +LINE 1: select concat_ws(',', variadic 10); + ^ +/* + * format + */ +select format(NULL); + format +-------- + +(1 row) + +select format('Hello'); + format +-------- + Hello +(1 row) + +select format('Hello %s', 'World'); + format +------------- + Hello World +(1 row) + +select format('Hello %%'); + format +--------- + Hello % +(1 row) + +select format('Hello %%%%'); + format +---------- + Hello %% +(1 row) + +-- should fail +select format('Hello %s %s', 'World'); +ERROR: too few arguments for format() +select format('Hello %s'); +ERROR: too few arguments for format() +select format('Hello %x', 20); +ERROR: unrecognized format() type specifier "x" +HINT: For a single "%" use "%%". 
+-- check literal and sql identifiers +select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', 10, 'Hello'); + format +---------------------------------------- + INSERT INTO mytab VALUES('10','Hello') +(1 row) + +select format('%s%s%s','Hello', NULL,'World'); + format +------------ + HelloWorld +(1 row) + +select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', 10, NULL); + format +------------------------------------- + INSERT INTO mytab VALUES('10',NULL) +(1 row) + +select format('INSERT INTO %I VALUES(%L,%L)', 'mytab', NULL, 'Hello'); + format +---------------------------------------- + INSERT INTO mytab VALUES(NULL,'Hello') +(1 row) + +-- should fail, sql identifier cannot be NULL +select format('INSERT INTO %I VALUES(%L,%L)', NULL, 10, 'Hello'); +ERROR: null values cannot be formatted as an SQL identifier +-- check positional placeholders +select format('%1$s %3$s', 1, 2, 3); + format +-------- + 1 3 +(1 row) + +select format('%1$s %12$s', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12); + format +-------- + 1 12 +(1 row) + +-- should fail +select format('%1$s %4$s', 1, 2, 3); +ERROR: too few arguments for format() +select format('%1$s %13$s', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12); +ERROR: too few arguments for format() +select format('%0$s', 'Hello'); +ERROR: format specifies argument 0, but arguments are numbered from 1 +select format('%*0$s', 'Hello'); +ERROR: format specifies argument 0, but arguments are numbered from 1 +select format('%1$', 1); +ERROR: unterminated format() type specifier +HINT: For a single "%" use "%%". +select format('%1$1', 1); +ERROR: unterminated format() type specifier +HINT: For a single "%" use "%%". +-- check mix of positional and ordered placeholders +select format('Hello %s %1$s %s', 'World', 'Hello again'); + format +------------------------------- + Hello World World Hello again +(1 row) + +select format('Hello %s %s, %2$s %2$s', 'World', 'Hello again'); + format +-------------------------------------------------- + Hello World Hello again, Hello again Hello again +(1 row) + +-- check variadic labeled arguments +select format('%s, %s', variadic array['Hello','World']); + format +-------------- + Hello, World +(1 row) + +select format('%s, %s', variadic array[1, 2]); + format +-------- + 1, 2 +(1 row) + +select format('%s, %s', variadic array[true, false]); + format +-------- + t, f +(1 row) + +select format('%s, %s', variadic array[true, false]::text[]); + format +------------- + true, false +(1 row) + +-- check variadic with positional placeholders +select format('%2$s, %1$s', variadic array['first', 'second']); + format +--------------- + second, first +(1 row) + +select format('%2$s, %1$s', variadic array[1, 2]); + format +-------- + 2, 1 +(1 row) + +-- variadic argument can be array type NULL, but should not be referenced +select format('Hello', variadic NULL::int[]); + format +-------- + Hello +(1 row) + +-- variadic argument allows simulating more than FUNC_MAX_ARGS parameters +select format(string_agg('%s',','), variadic array_agg(i)) +from generate_series(1,200) g(i); + format 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200 +(1 row) + +-- check field widths and left, right alignment +select format('>>%10s<<', 'Hello'); + format +---------------- + >> Hello<< +(1 row) + +select format('>>%10s<<', NULL); + format +---------------- + >> << +(1 row) + +select format('>>%10s<<', ''); + format +---------------- + >> << +(1 row) + +select format('>>%-10s<<', ''); + format +---------------- + >> << +(1 row) + +select format('>>%-10s<<', 'Hello'); + format +---------------- + >>Hello << +(1 row) + +select format('>>%-10s<<', NULL); + format +---------------- + >> << +(1 row) + +select format('>>%1$10s<<', 'Hello'); + format +---------------- + >> Hello<< +(1 row) + +select format('>>%1$-10I<<', 'Hello'); + format +---------------- + >>"Hello" << +(1 row) + +select format('>>%2$*1$L<<', 10, 'Hello'); + format +---------------- + >> 'Hello'<< +(1 row) + +select format('>>%2$*1$L<<', 10, NULL); + format +---------------- + >> NULL<< +(1 row) + +select format('>>%2$*1$L<<', -10, NULL); + format +---------------- + >>NULL << +(1 row) + +select format('>>%*s<<', 10, 'Hello'); + format +---------------- + >> Hello<< +(1 row) + +select format('>>%*1$s<<', 10, 'Hello'); + format +---------------- + >> Hello<< +(1 row) + +select format('>>%-s<<', 'Hello'); + format +----------- + >>Hello<< +(1 row) + +select format('>>%10L<<', NULL); + format +---------------- + >> NULL<< +(1 row) + +select format('>>%2$*1$L<<', NULL, 'Hello'); + format +------------- + >>'Hello'<< +(1 row) + +select format('>>%2$*1$L<<', 0, 'Hello'); + format +------------- + >>'Hello'<< +(1 row) + diff --git a/src/test/regress/expected/tid.out b/src/test/regress/expected/tid.out new file mode 100644 index 0000000..083c83a --- /dev/null +++ b/src/test/regress/expected/tid.out @@ -0,0 +1,121 @@ +-- basic tests for the TID data type +SELECT + '(0,0)'::tid as tid00, + '(0,1)'::tid as tid01, + '(-1,0)'::tid as tidm10, + '(4294967295,65535)'::tid as tidmax; + tid00 | tid01 | tidm10 | tidmax +-------+-------+----------------+-------------------- + (0,0) | (0,1) | (4294967295,0) | (4294967295,65535) +(1 row) + +SELECT '(4294967296,1)'::tid; -- error +ERROR: invalid input syntax for type tid: 
"(4294967296,1)" +LINE 1: SELECT '(4294967296,1)'::tid; + ^ +SELECT '(1,65536)'::tid; -- error +ERROR: invalid input syntax for type tid: "(1,65536)" +LINE 1: SELECT '(1,65536)'::tid; + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('(0)', 'tid'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(0)', 'tid'); + message | detail | hint | sql_error_code +------------------------------------------+--------+------+---------------- + invalid input syntax for type tid: "(0)" | | | 22P02 +(1 row) + +SELECT pg_input_is_valid('(0,-1)', 'tid'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('(0,-1)', 'tid'); + message | detail | hint | sql_error_code +---------------------------------------------+--------+------+---------------- + invalid input syntax for type tid: "(0,-1)" | | | 22P02 +(1 row) + +-- tests for functions related to TID handling +CREATE TABLE tid_tab (a int); +-- min() and max() for TIDs +INSERT INTO tid_tab VALUES (1), (2); +SELECT min(ctid) FROM tid_tab; + min +------- + (0,1) +(1 row) + +SELECT max(ctid) FROM tid_tab; + max +------- + (0,2) +(1 row) + +TRUNCATE tid_tab; +-- Tests for currtid2() with various relation kinds +-- Materialized view +CREATE MATERIALIZED VIEW tid_matview AS SELECT a FROM tid_tab; +SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- fails +ERROR: tid (0, 1) is not valid for relation "tid_matview" +INSERT INTO tid_tab VALUES (1); +REFRESH MATERIALIZED VIEW tid_matview; +SELECT currtid2('tid_matview'::text, '(0,1)'::tid); -- ok + currtid2 +---------- + (0,1) +(1 row) + +DROP MATERIALIZED VIEW tid_matview; +TRUNCATE tid_tab; +-- Sequence +CREATE SEQUENCE tid_seq; +SELECT currtid2('tid_seq'::text, '(0,1)'::tid); -- ok + currtid2 +---------- + (0,1) +(1 row) + +DROP SEQUENCE tid_seq; +-- Index, fails with incorrect relation type +CREATE INDEX tid_ind ON tid_tab(a); +SELECT currtid2('tid_ind'::text, '(0,1)'::tid); -- fails +ERROR: cannot open relation "tid_ind" +DETAIL: This operation is not supported for indexes. +DROP INDEX tid_ind; +-- Partitioned table, no storage +CREATE TABLE tid_part (a int) PARTITION BY RANGE (a); +SELECT currtid2('tid_part'::text, '(0,1)'::tid); -- fails +ERROR: cannot look at latest visible tid for relation "public.tid_part" +DROP TABLE tid_part; +-- Views +-- ctid not defined in the view +CREATE VIEW tid_view_no_ctid AS SELECT a FROM tid_tab; +SELECT currtid2('tid_view_no_ctid'::text, '(0,1)'::tid); -- fails +ERROR: currtid cannot handle views with no CTID +DROP VIEW tid_view_no_ctid; +-- ctid fetched directly from the source table. 
+CREATE VIEW tid_view_with_ctid AS SELECT ctid, a FROM tid_tab; +SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- fails +ERROR: tid (0, 1) is not valid for relation "tid_tab" +INSERT INTO tid_tab VALUES (1); +SELECT currtid2('tid_view_with_ctid'::text, '(0,1)'::tid); -- ok + currtid2 +---------- + (0,1) +(1 row) + +DROP VIEW tid_view_with_ctid; +TRUNCATE tid_tab; +-- ctid attribute with incorrect data type +CREATE VIEW tid_view_fake_ctid AS SELECT 1 AS ctid, 2 AS a; +SELECT currtid2('tid_view_fake_ctid'::text, '(0,1)'::tid); -- fails +ERROR: ctid isn't of type TID +DROP VIEW tid_view_fake_ctid; +DROP TABLE tid_tab CASCADE; diff --git a/src/test/regress/expected/tidrangescan.out b/src/test/regress/expected/tidrangescan.out new file mode 100644 index 0000000..721f3b9 --- /dev/null +++ b/src/test/regress/expected/tidrangescan.out @@ -0,0 +1,300 @@ +-- tests for tidrangescans +SET enable_seqscan TO off; +CREATE TABLE tidrangescan(id integer, data text); +-- empty table +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)'; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid < '(1,0)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid < '(1, 0)'; + ctid +------ +(0 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)'; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid > '(9,0)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid > '(9, 0)'; + ctid +------ +(0 rows) + +-- insert enough tuples to fill at least two pages +INSERT INTO tidrangescan SELECT i,repeat('x', 100) FROM generate_series(1,200) AS s(i); +-- remove all tuples after the 10th tuple on each page. Trying to ensure +-- we get the same layout with all CPU architectures and smaller than standard +-- page sizes. 
+DELETE FROM tidrangescan +WHERE substring(ctid::text FROM ',(\d+)\)')::integer > 10 OR substring(ctid::text FROM '\((\d+),')::integer > 2; +VACUUM tidrangescan; +-- range scans with upper bound +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid < '(1,0)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; + ctid +-------- + (0,1) + (0,2) + (0,3) + (0,4) + (0,5) + (0,6) + (0,7) + (0,8) + (0,9) + (0,10) +(10 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)'; + QUERY PLAN +------------------------------------ + Tid Range Scan on tidrangescan + TID Cond: (ctid <= '(1,5)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid <= '(1,5)'; + ctid +-------- + (0,1) + (0,2) + (0,3) + (0,4) + (0,5) + (0,6) + (0,7) + (0,8) + (0,9) + (0,10) + (1,1) + (1,2) + (1,3) + (1,4) + (1,5) +(15 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid < '(0,0)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; + ctid +------ +(0 rows) + +-- range scans with lower bound +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)'; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid > '(2,8)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid > '(2,8)'; + ctid +-------- + (2,9) + (2,10) +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: ('(2,8)'::tid < ctid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE '(2,8)' < ctid; + ctid +-------- + (2,9) + (2,10) +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)'; + QUERY PLAN +------------------------------------ + Tid Range Scan on tidrangescan + TID Cond: (ctid >= '(2,8)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid >= '(2,8)'; + ctid +-------- + (2,8) + (2,9) + (2,10) +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)'; + QUERY PLAN +-------------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid >= '(100,0)'::tid) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid >= '(100,0)'; + ctid +------ +(0 rows) + +-- range scans with both bounds +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid; + QUERY PLAN +---------------------------------------------------------------- + Tid Range Scan on tidrangescan + TID Cond: ((ctid > '(1,4)'::tid) AND ('(1,7)'::tid >= ctid)) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE ctid > '(1,4)' AND '(1,7)' >= ctid; + ctid +------- + (1,5) + (1,6) + (1,7) +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)'; + QUERY PLAN +---------------------------------------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (('(1,7)'::tid >= ctid) AND (ctid > '(1,4)'::tid)) +(2 rows) + +SELECT ctid FROM tidrangescan WHERE '(1,7)' >= ctid AND ctid > '(1,4)'; + ctid +------- + (1,5) + (1,6) + (1,7) +(3 rows) + +-- extreme offsets +SELECT ctid FROM tidrangescan WHERE ctid > '(0,65535)' AND ctid < '(1,0)' LIMIT 1; + ctid +------ +(0 rows) + +SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)' LIMIT 1; + ctid +------ 
+(0 rows) + +SELECT ctid FROM tidrangescan WHERE ctid > '(4294967295,65535)'; + ctid +------ +(0 rows) + +SELECT ctid FROM tidrangescan WHERE ctid < '(0,0)'; + ctid +------ +(0 rows) + +-- NULLs in the range cannot return tuples +SELECT ctid FROM tidrangescan WHERE ctid >= (SELECT NULL::tid); + ctid +------ +(0 rows) + +-- rescans +EXPLAIN (COSTS OFF) +SELECT t.ctid,t2.c FROM tidrangescan t, +LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 +WHERE t.ctid < '(1,0)'; + QUERY PLAN +----------------------------------------------- + Nested Loop + -> Tid Range Scan on tidrangescan t + TID Cond: (ctid < '(1,0)'::tid) + -> Aggregate + -> Tid Range Scan on tidrangescan t2 + TID Cond: (ctid <= t.ctid) +(6 rows) + +SELECT t.ctid,t2.c FROM tidrangescan t, +LATERAL (SELECT count(*) c FROM tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 +WHERE t.ctid < '(1,0)'; + ctid | c +--------+---- + (0,1) | 1 + (0,2) | 2 + (0,3) | 3 + (0,4) | 4 + (0,5) | 5 + (0,6) | 6 + (0,7) | 7 + (0,8) | 8 + (0,9) | 9 + (0,10) | 10 +(10 rows) + +-- cursors +-- Ensure we get a TID Range scan without a Materialize node. +EXPLAIN (COSTS OFF) +DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; + QUERY PLAN +----------------------------------- + Tid Range Scan on tidrangescan + TID Cond: (ctid < '(1,0)'::tid) +(2 rows) + +BEGIN; +DECLARE c SCROLL CURSOR FOR SELECT ctid FROM tidrangescan WHERE ctid < '(1,0)'; +FETCH NEXT c; + ctid +------- + (0,1) +(1 row) + +FETCH NEXT c; + ctid +------- + (0,2) +(1 row) + +FETCH PRIOR c; + ctid +------- + (0,1) +(1 row) + +FETCH FIRST c; + ctid +------- + (0,1) +(1 row) + +FETCH LAST c; + ctid +-------- + (0,10) +(1 row) + +COMMIT; +DROP TABLE tidrangescan; +RESET enable_seqscan; diff --git a/src/test/regress/expected/tidscan.out b/src/test/regress/expected/tidscan.out new file mode 100644 index 0000000..f133b5a --- /dev/null +++ b/src/test/regress/expected/tidscan.out @@ -0,0 +1,296 @@ +-- tests for tidscans +CREATE TABLE tidscan(id integer); +-- only insert a few rows, we don't want to spill onto a second table page +INSERT INTO tidscan VALUES (1), (2), (3); +-- show ctids +SELECT ctid, * FROM tidscan; + ctid | id +-------+---- + (0,1) | 1 + (0,2) | 2 + (0,3) | 3 +(3 rows) + +-- ctid equality - implemented as tidscan +EXPLAIN (COSTS OFF) +SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; + QUERY PLAN +----------------------------------- + Tid Scan on tidscan + TID Cond: (ctid = '(0,1)'::tid) +(2 rows) + +SELECT ctid, * FROM tidscan WHERE ctid = '(0,1)'; + ctid | id +-------+---- + (0,1) | 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; + QUERY PLAN +----------------------------------- + Tid Scan on tidscan + TID Cond: ('(0,1)'::tid = ctid) +(2 rows) + +SELECT ctid, * FROM tidscan WHERE '(0,1)' = ctid; + ctid | id +-------+---- + (0,1) | 1 +(1 row) + +-- OR'd clauses +EXPLAIN (COSTS OFF) +SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; + QUERY PLAN +-------------------------------------------------------------- + Tid Scan on tidscan + TID Cond: ((ctid = '(0,2)'::tid) OR ('(0,1)'::tid = ctid)) +(2 rows) + +SELECT ctid, * FROM tidscan WHERE ctid = '(0,2)' OR '(0,1)' = ctid; + ctid | id +-------+---- + (0,1) | 1 + (0,2) | 2 +(2 rows) + +-- ctid = ScalarArrayOp - implemented as tidscan +EXPLAIN (COSTS OFF) +SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); + QUERY PLAN +------------------------------------------------------- + Tid Scan on tidscan + TID Cond: (ctid = 
ANY ('{"(0,1)","(0,2)"}'::tid[])) +(2 rows) + +SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); + ctid | id +-------+---- + (0,1) | 1 + (0,2) | 2 +(2 rows) + +-- ctid != ScalarArrayOp - can't be implemented as tidscan +EXPLAIN (COSTS OFF) +SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); + QUERY PLAN +------------------------------------------------------ + Seq Scan on tidscan + Filter: (ctid <> ANY ('{"(0,1)","(0,2)"}'::tid[])) +(2 rows) + +SELECT ctid, * FROM tidscan WHERE ctid != ANY(ARRAY['(0,1)', '(0,2)']::tid[]); + ctid | id +-------+---- + (0,1) | 1 + (0,2) | 2 + (0,3) | 3 +(3 rows) + +-- tid equality extracted from sub-AND clauses +EXPLAIN (COSTS OFF) +SELECT ctid, * FROM tidscan +WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Tid Scan on tidscan + TID Cond: ((ctid = ANY ('{"(0,2)","(0,3)"}'::tid[])) OR (ctid = '(0,1)'::tid)) + Filter: (((id = 3) AND (ctid = ANY ('{"(0,2)","(0,3)"}'::tid[]))) OR ((ctid = '(0,1)'::tid) AND (id = 1))) +(3 rows) + +SELECT ctid, * FROM tidscan +WHERE (id = 3 AND ctid IN ('(0,2)', '(0,3)')) OR (ctid = '(0,1)' AND id = 1); + ctid | id +-------+---- + (0,1) | 1 + (0,3) | 3 +(2 rows) + +-- nestloop-with-inner-tidscan joins on tid +SET enable_hashjoin TO off; -- otherwise hash join might win +EXPLAIN (COSTS OFF) +SELECT t1.ctid, t1.*, t2.ctid, t2.* +FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; + QUERY PLAN +------------------------------------ + Nested Loop + -> Seq Scan on tidscan t1 + Filter: (id = 1) + -> Tid Scan on tidscan t2 + TID Cond: (t1.ctid = ctid) +(5 rows) + +SELECT t1.ctid, t1.*, t2.ctid, t2.* +FROM tidscan t1 JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; + ctid | id | ctid | id +-------+----+-------+---- + (0,1) | 1 | (0,1) | 1 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT t1.ctid, t1.*, t2.ctid, t2.* +FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; + QUERY PLAN +------------------------------------ + Nested Loop Left Join + -> Seq Scan on tidscan t1 + Filter: (id = 1) + -> Tid Scan on tidscan t2 + TID Cond: (t1.ctid = ctid) +(5 rows) + +SELECT t1.ctid, t1.*, t2.ctid, t2.* +FROM tidscan t1 LEFT JOIN tidscan t2 ON t1.ctid = t2.ctid WHERE t1.id = 1; + ctid | id | ctid | id +-------+----+-------+---- + (0,1) | 1 | (0,1) | 1 +(1 row) + +RESET enable_hashjoin; +-- exercise backward scan and rewind +BEGIN; +DECLARE c CURSOR FOR +SELECT ctid, * FROM tidscan WHERE ctid = ANY(ARRAY['(0,1)', '(0,2)']::tid[]); +FETCH ALL FROM c; + ctid | id +-------+---- + (0,1) | 1 + (0,2) | 2 +(2 rows) + +FETCH BACKWARD 1 FROM c; + ctid | id +-------+---- + (0,2) | 2 +(1 row) + +FETCH FIRST FROM c; + ctid | id +-------+---- + (0,1) | 1 +(1 row) + +ROLLBACK; +-- tidscan via CURRENT OF +BEGIN; +DECLARE c CURSOR FOR SELECT ctid, * FROM tidscan; +FETCH NEXT FROM c; -- skip one row + ctid | id +-------+---- + (0,1) | 1 +(1 row) + +FETCH NEXT FROM c; + ctid | id +-------+---- + (0,2) | 2 +(1 row) + +-- perform update +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; + QUERY PLAN +--------------------------------------------------- + Update on tidscan (actual rows=1 loops=1) + -> Tid Scan on tidscan (actual rows=1 loops=1) + TID Cond: CURRENT OF c +(3 rows) + +FETCH NEXT FROM c; + ctid | id +-------+---- + (0,3) | 3 +(1 row) + +-- perform 
update +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; + QUERY PLAN +--------------------------------------------------- + Update on tidscan (actual rows=1 loops=1) + -> Tid Scan on tidscan (actual rows=1 loops=1) + TID Cond: CURRENT OF c +(3 rows) + +SELECT * FROM tidscan; + id +---- + 1 + -2 + -3 +(3 rows) + +-- position cursor past any rows +FETCH NEXT FROM c; + ctid | id +------+---- +(0 rows) + +-- should error out +EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) +UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *; +ERROR: cursor "c" is not positioned on a row +ROLLBACK; +-- bulk joins on CTID +-- (these plans don't use TID scans, but this still seems like an +-- appropriate place for these tests) +EXPLAIN (COSTS OFF) +SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; + QUERY PLAN +---------------------------------------- + Aggregate + -> Hash Join + Hash Cond: (t1.ctid = t2.ctid) + -> Seq Scan on tenk1 t1 + -> Hash + -> Seq Scan on tenk1 t2 +(6 rows) + +SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; + count +------- + 10000 +(1 row) + +SET enable_hashjoin TO off; +EXPLAIN (COSTS OFF) +SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; + QUERY PLAN +----------------------------------------- + Aggregate + -> Merge Join + Merge Cond: (t1.ctid = t2.ctid) + -> Sort + Sort Key: t1.ctid + -> Seq Scan on tenk1 t1 + -> Sort + Sort Key: t2.ctid + -> Seq Scan on tenk1 t2 +(9 rows) + +SELECT count(*) FROM tenk1 t1 JOIN tenk1 t2 ON t1.ctid = t2.ctid; + count +------- + 10000 +(1 row) + +RESET enable_hashjoin; +-- check predicate lock on CTID +BEGIN ISOLATION LEVEL SERIALIZABLE; +SELECT * FROM tidscan WHERE ctid = '(0,1)'; + id +---- + 1 +(1 row) + +-- locktype should be 'tuple' +SELECT locktype, mode FROM pg_locks WHERE pid = pg_backend_pid() AND mode = 'SIReadLock'; + locktype | mode +----------+------------ + tuple | SIReadLock +(1 row) + +ROLLBACK; +DROP TABLE tidscan; diff --git a/src/test/regress/expected/time.out b/src/test/regress/expected/time.out new file mode 100644 index 0000000..4247fae --- /dev/null +++ b/src/test/regress/expected/time.out @@ -0,0 +1,231 @@ +-- +-- TIME +-- +CREATE TABLE TIME_TBL (f1 time(2)); +INSERT INTO TIME_TBL VALUES ('00:00'); +INSERT INTO TIME_TBL VALUES ('01:00'); +-- as of 7.4, timezone spec should be accepted and ignored +INSERT INTO TIME_TBL VALUES ('02:03 PST'); +INSERT INTO TIME_TBL VALUES ('11:59 EDT'); +INSERT INTO TIME_TBL VALUES ('12:00'); +INSERT INTO TIME_TBL VALUES ('12:01'); +INSERT INTO TIME_TBL VALUES ('23:59'); +INSERT INTO TIME_TBL VALUES ('11:59:59.99 PM'); +INSERT INTO TIME_TBL VALUES ('2003-03-07 15:36:39 America/New_York'); +INSERT INTO TIME_TBL VALUES ('2003-07-07 15:36:39 America/New_York'); +-- this should fail (the timezone offset is not known) +INSERT INTO TIME_TBL VALUES ('15:36:39 America/New_York'); +ERROR: invalid input syntax for type time: "15:36:39 America/New_York" +LINE 1: INSERT INTO TIME_TBL VALUES ('15:36:39 America/New_York'); + ^ +SELECT f1 AS "Time" FROM TIME_TBL; + Time +------------- + 00:00:00 + 01:00:00 + 02:03:00 + 11:59:00 + 12:00:00 + 12:01:00 + 23:59:00 + 23:59:59.99 + 15:36:39 + 15:36:39 +(10 rows) + +SELECT f1 AS "Three" FROM TIME_TBL WHERE f1 < '05:06:07'; + Three +---------- + 00:00:00 + 01:00:00 + 02:03:00 +(3 rows) + +SELECT f1 AS "Five" FROM TIME_TBL WHERE f1 > '05:06:07'; + Five +------------- + 11:59:00 + 12:00:00 + 12:01:00 + 23:59:00 + 23:59:59.99 + 15:36:39 + 15:36:39 
+(7 rows) + +SELECT f1 AS "None" FROM TIME_TBL WHERE f1 < '00:00'; + None +------ +(0 rows) + +SELECT f1 AS "Eight" FROM TIME_TBL WHERE f1 >= '00:00'; + Eight +------------- + 00:00:00 + 01:00:00 + 02:03:00 + 11:59:00 + 12:00:00 + 12:01:00 + 23:59:00 + 23:59:59.99 + 15:36:39 + 15:36:39 +(10 rows) + +-- Check edge cases +SELECT '23:59:59.999999'::time; + time +----------------- + 23:59:59.999999 +(1 row) + +SELECT '23:59:59.9999999'::time; -- rounds up + time +---------- + 24:00:00 +(1 row) + +SELECT '23:59:60'::time; -- rounds up + time +---------- + 24:00:00 +(1 row) + +SELECT '24:00:00'::time; -- allowed + time +---------- + 24:00:00 +(1 row) + +SELECT '24:00:00.01'::time; -- not allowed +ERROR: date/time field value out of range: "24:00:00.01" +LINE 1: SELECT '24:00:00.01'::time; + ^ +SELECT '23:59:60.01'::time; -- not allowed +ERROR: date/time field value out of range: "23:59:60.01" +LINE 1: SELECT '23:59:60.01'::time; + ^ +SELECT '24:01:00'::time; -- not allowed +ERROR: date/time field value out of range: "24:01:00" +LINE 1: SELECT '24:01:00'::time; + ^ +SELECT '25:00:00'::time; -- not allowed +ERROR: date/time field value out of range: "25:00:00" +LINE 1: SELECT '25:00:00'::time; + ^ +-- Test non-error-throwing API +SELECT pg_input_is_valid('12:00:00', 'time'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('25:00:00', 'time'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('15:36:39 America/New_York', 'time'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('25:00:00', 'time'); + message | detail | hint | sql_error_code +------------------------------------------------+--------+------+---------------- + date/time field value out of range: "25:00:00" | | | 22008 +(1 row) + +SELECT * FROM pg_input_error_info('15:36:39 America/New_York', 'time'); + message | detail | hint | sql_error_code +-----------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type time: "15:36:39 America/New_York" | | | 22007 +(1 row) + +-- +-- TIME simple math +-- +-- We now make a distinction between time and intervals, +-- and adding two times together makes no sense at all. +-- Leave in one query to show that it is rejected, +-- and do the rest of the testing in horology.sql +-- where we do mixed-type arithmetic. - thomas 2000-12-02 +SELECT f1 + time '00:01' AS "Illegal" FROM TIME_TBL; +ERROR: operator is not unique: time without time zone + time without time zone +LINE 1: SELECT f1 + time '00:01' AS "Illegal" FROM TIME_TBL; + ^ +HINT: Could not choose a best candidate operator. You might need to add explicit type casts. 
+-- +-- test EXTRACT +-- +SELECT EXTRACT(MICROSECOND FROM TIME '2020-05-26 13:30:25.575401'); + extract +---------- + 25575401 +(1 row) + +SELECT EXTRACT(MILLISECOND FROM TIME '2020-05-26 13:30:25.575401'); + extract +----------- + 25575.401 +(1 row) + +SELECT EXTRACT(SECOND FROM TIME '2020-05-26 13:30:25.575401'); + extract +----------- + 25.575401 +(1 row) + +SELECT EXTRACT(MINUTE FROM TIME '2020-05-26 13:30:25.575401'); + extract +--------- + 30 +(1 row) + +SELECT EXTRACT(HOUR FROM TIME '2020-05-26 13:30:25.575401'); + extract +--------- + 13 +(1 row) + +SELECT EXTRACT(DAY FROM TIME '2020-05-26 13:30:25.575401'); -- error +ERROR: unit "day" not supported for type time without time zone +SELECT EXTRACT(FORTNIGHT FROM TIME '2020-05-26 13:30:25.575401'); -- error +ERROR: unit "fortnight" not recognized for type time without time zone +SELECT EXTRACT(TIMEZONE FROM TIME '2020-05-26 13:30:25.575401'); -- error +ERROR: unit "timezone" not supported for type time without time zone +SELECT EXTRACT(EPOCH FROM TIME '2020-05-26 13:30:25.575401'); + extract +-------------- + 48625.575401 +(1 row) + +-- date_part implementation is mostly the same as extract, so only +-- test a few cases for additional coverage. +SELECT date_part('microsecond', TIME '2020-05-26 13:30:25.575401'); + date_part +----------- + 25575401 +(1 row) + +SELECT date_part('millisecond', TIME '2020-05-26 13:30:25.575401'); + date_part +----------- + 25575.401 +(1 row) + +SELECT date_part('second', TIME '2020-05-26 13:30:25.575401'); + date_part +----------- + 25.575401 +(1 row) + +SELECT date_part('epoch', TIME '2020-05-26 13:30:25.575401'); + date_part +-------------- + 48625.575401 +(1 row) + diff --git a/src/test/regress/expected/timestamp.out b/src/test/regress/expected/timestamp.out new file mode 100644 index 0000000..c64bcb7 --- /dev/null +++ b/src/test/regress/expected/timestamp.out @@ -0,0 +1,2127 @@ +-- +-- TIMESTAMP +-- +CREATE TABLE TIMESTAMP_TBL (d1 timestamp(2) without time zone); +-- Test shorthand input values +-- We can't just "select" the results since they aren't constants; test for +-- equality instead. We can do that by running the test inside a transaction +-- block, within which the value of 'now' shouldn't change, and so these +-- related values shouldn't either. 
+BEGIN; +INSERT INTO TIMESTAMP_TBL VALUES ('today'); +INSERT INTO TIMESTAMP_TBL VALUES ('yesterday'); +INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow'); +-- time zone should be ignored by this data type +INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow EST'); +INSERT INTO TIMESTAMP_TBL VALUES ('tomorrow zulu'); +SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'today'; + one +----- + 1 +(1 row) + +SELECT count(*) AS Three FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'tomorrow'; + three +------- + 3 +(1 row) + +SELECT count(*) AS One FROM TIMESTAMP_TBL WHERE d1 = timestamp without time zone 'yesterday'; + one +----- + 1 +(1 row) + +COMMIT; +DELETE FROM TIMESTAMP_TBL; +-- Verify that 'now' *does* change over a reasonable interval such as 100 msec, +-- and that it doesn't change over the same interval within a transaction block +INSERT INTO TIMESTAMP_TBL VALUES ('now'); +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +BEGIN; +INSERT INTO TIMESTAMP_TBL VALUES ('now'); +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +INSERT INTO TIMESTAMP_TBL VALUES ('now'); +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +SELECT count(*) AS two FROM TIMESTAMP_TBL WHERE d1 = timestamp(2) without time zone 'now'; + two +----- + 2 +(1 row) + +SELECT count(d1) AS three, count(DISTINCT d1) AS two FROM TIMESTAMP_TBL; + three | two +-------+----- + 3 | 2 +(1 row) + +COMMIT; +TRUNCATE TIMESTAMP_TBL; +-- Special values +INSERT INTO TIMESTAMP_TBL VALUES ('-infinity'); +INSERT INTO TIMESTAMP_TBL VALUES ('infinity'); +INSERT INTO TIMESTAMP_TBL VALUES ('epoch'); +SELECT timestamp 'infinity' = timestamp '+infinity' AS t; + t +--- + t +(1 row) + +-- Postgres v6.0 standard output format +INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST'); +-- Variations on Postgres v6.1 standard output format +INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST'); +-- ISO 8601 format +INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997-01-02 03:04:05'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-08'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01-0800'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 -08:00'); +INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 -0800'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 17:32:01 -07:00'); +INSERT INTO TIMESTAMP_TBL VALUES ('2001-09-22T18:19:20'); +-- POSIX format (note that the timezone abbrev is just decoration here) +INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 08:14:01 GMT+8'); +INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 13:14:02 GMT-1'); +INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 12:14:03 GMT-2'); +INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 03:14:04 PST+8'); +INSERT INTO TIMESTAMP_TBL VALUES ('2000-03-15 02:14:05 MST+7:00'); +-- Variations for acceptable input formats +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997 -0800'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 5:32PM 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997/02/10 17:32:01-0800'); +INSERT INTO TIMESTAMP_TBL VALUES ('1997-02-10 17:32:01 PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb-10-1997 17:32:01 
PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('02-10-1997 17:32:01 PST'); +INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 PST'); +set datestyle to ymd; +INSERT INTO TIMESTAMP_TBL VALUES ('97FEB10 5:32:01PM UTC'); +INSERT INTO TIMESTAMP_TBL VALUES ('97/02/10 17:32:01 UTC'); +reset datestyle; +INSERT INTO TIMESTAMP_TBL VALUES ('1997.041 17:32:01 UTC'); +INSERT INTO TIMESTAMP_TBL VALUES ('19970210 173201 America/New_York'); +-- this fails (even though TZ is a no-op, we still look it up) +INSERT INTO TIMESTAMP_TBL VALUES ('19970710 173201 America/Does_not_exist'); +ERROR: time zone "america/does_not_exist" not recognized +LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('19970710 173201 America/D... + ^ +-- Test non-error-throwing API +SELECT pg_input_is_valid('now', 'timestamp'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('garbage', 'timestamp'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('2001-01-01 00:00 Nehwon/Lankhmar', 'timestamp'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('garbage', 'timestamp'); + message | detail | hint | sql_error_code +----------------------------------------------------+--------+------+---------------- + invalid input syntax for type timestamp: "garbage" | | | 22007 +(1 row) + +SELECT * FROM pg_input_error_info('2001-01-01 00:00 Nehwon/Lankhmar', 'timestamp'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + time zone "nehwon/lankhmar" not recognized | | | 22023 +(1 row) + +-- Check date conversion and date arithmetic +INSERT INTO TIMESTAMP_TBL VALUES ('1997-06-10 18:32:01 PDT'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 10 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 11 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 12 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 13 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 14 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 15 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097 BC'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0097'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 0597'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1097'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1697'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1797'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1897'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 2097'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1996'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1996'); +INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1996'); +INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1996'); +INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1996'); +INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 28 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1997'); +ERROR: date/time field value out of range: "Feb 29 17:32:01 1997" +LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 29 17:32:01 1997'); + ^ +INSERT INTO TIMESTAMP_TBL VALUES ('Mar 01 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Dec 30 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1997'); +INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 1999'); 
+INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2000'); +INSERT INTO TIMESTAMP_TBL VALUES ('Dec 31 17:32:01 2000'); +INSERT INTO TIMESTAMP_TBL VALUES ('Jan 01 17:32:01 2001'); +-- Currently unsupported syntax and ranges +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097'); +ERROR: time zone displacement out of range: "Feb 16 17:32:01 -0097" +LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 -0097'); + ^ +INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC'); +ERROR: timestamp out of range: "Feb 16 17:32:01 5097 BC" +LINE 1: INSERT INTO TIMESTAMP_TBL VALUES ('Feb 16 17:32:01 5097 BC')... + ^ +SELECT d1 FROM TIMESTAMP_TBL; + d1 +----------------------------- + -infinity + infinity + Thu Jan 01 00:00:00 1970 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:02 1997 + Mon Feb 10 17:32:01.4 1997 + Mon Feb 10 17:32:01.5 1997 + Mon Feb 10 17:32:01.6 1997 + Thu Jan 02 00:00:00 1997 + Thu Jan 02 03:04:05 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 17:32:01 1997 + Sat Sep 22 18:19:20 2001 + Wed Mar 15 08:14:01 2000 + Wed Mar 15 13:14:02 2000 + Wed Mar 15 12:14:03 2000 + Wed Mar 15 03:14:04 2000 + Wed Mar 15 02:14:05 2000 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:00 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 18:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Feb 11 17:32:01 1997 + Wed Feb 12 17:32:01 1997 + Thu Feb 13 17:32:01 1997 + Fri Feb 14 17:32:01 1997 + Sat Feb 15 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Tue Feb 16 17:32:01 0097 BC + Sat Feb 16 17:32:01 0097 + Thu Feb 16 17:32:01 0597 + Tue Feb 16 17:32:01 1097 + Sat Feb 16 17:32:01 1697 + Thu Feb 16 17:32:01 1797 + Tue Feb 16 17:32:01 1897 + Sun Feb 16 17:32:01 1997 + Sat Feb 16 17:32:01 2097 + Wed Feb 28 17:32:01 1996 + Thu Feb 29 17:32:01 1996 + Fri Mar 01 17:32:01 1996 + Mon Dec 30 17:32:01 1996 + Tue Dec 31 17:32:01 1996 + Wed Jan 01 17:32:01 1997 + Fri Feb 28 17:32:01 1997 + Sat Mar 01 17:32:01 1997 + Tue Dec 30 17:32:01 1997 + Wed Dec 31 17:32:01 1997 + Fri Dec 31 17:32:01 1999 + Sat Jan 01 17:32:01 2000 + Sun Dec 31 17:32:01 2000 + Mon Jan 01 17:32:01 2001 +(65 rows) + +-- Check behavior at the boundaries of the timestamp range +SELECT '4714-11-24 00:00:00 BC'::timestamp; + timestamp +----------------------------- + Mon Nov 24 00:00:00 4714 BC +(1 row) + +SELECT '4714-11-23 23:59:59 BC'::timestamp; -- out of range +ERROR: timestamp out of range: "4714-11-23 23:59:59 BC" +LINE 1: SELECT '4714-11-23 23:59:59 BC'::timestamp; + ^ +SELECT '294276-12-31 23:59:59'::timestamp; + timestamp +---------------------------- + Sun Dec 31 23:59:59 294276 +(1 row) + +SELECT '294277-01-01 00:00:00'::timestamp; -- out of range +ERROR: timestamp out of range: "294277-01-01 00:00:00" +LINE 1: SELECT '294277-01-01 00:00:00'::timestamp; + ^ +-- Demonstrate functions and operators +SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 > timestamp without time zone '1997-01-02'; + d1 +---------------------------- + infinity + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:02 1997 + Mon Feb 10 17:32:01.4 1997 + Mon Feb 10 17:32:01.5 1997 + Mon Feb 10 17:32:01.6 1997 + Thu Jan 02 03:04:05 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon 
Feb 10 17:32:01 1997 + Tue Jun 10 17:32:01 1997 + Sat Sep 22 18:19:20 2001 + Wed Mar 15 08:14:01 2000 + Wed Mar 15 13:14:02 2000 + Wed Mar 15 12:14:03 2000 + Wed Mar 15 03:14:04 2000 + Wed Mar 15 02:14:05 2000 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:00 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 18:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Feb 11 17:32:01 1997 + Wed Feb 12 17:32:01 1997 + Thu Feb 13 17:32:01 1997 + Fri Feb 14 17:32:01 1997 + Sat Feb 15 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Sat Feb 16 17:32:01 2097 + Fri Feb 28 17:32:01 1997 + Sat Mar 01 17:32:01 1997 + Tue Dec 30 17:32:01 1997 + Wed Dec 31 17:32:01 1997 + Fri Dec 31 17:32:01 1999 + Sat Jan 01 17:32:01 2000 + Sun Dec 31 17:32:01 2000 + Mon Jan 01 17:32:01 2001 +(49 rows) + +SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 < timestamp without time zone '1997-01-02'; + d1 +----------------------------- + -infinity + Thu Jan 01 00:00:00 1970 + Tue Feb 16 17:32:01 0097 BC + Sat Feb 16 17:32:01 0097 + Thu Feb 16 17:32:01 0597 + Tue Feb 16 17:32:01 1097 + Sat Feb 16 17:32:01 1697 + Thu Feb 16 17:32:01 1797 + Tue Feb 16 17:32:01 1897 + Wed Feb 28 17:32:01 1996 + Thu Feb 29 17:32:01 1996 + Fri Mar 01 17:32:01 1996 + Mon Dec 30 17:32:01 1996 + Tue Dec 31 17:32:01 1996 + Wed Jan 01 17:32:01 1997 +(15 rows) + +SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 = timestamp without time zone '1997-01-02'; + d1 +-------------------------- + Thu Jan 02 00:00:00 1997 +(1 row) + +SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 != timestamp without time zone '1997-01-02'; + d1 +----------------------------- + -infinity + infinity + Thu Jan 01 00:00:00 1970 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:02 1997 + Mon Feb 10 17:32:01.4 1997 + Mon Feb 10 17:32:01.5 1997 + Mon Feb 10 17:32:01.6 1997 + Thu Jan 02 03:04:05 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 17:32:01 1997 + Sat Sep 22 18:19:20 2001 + Wed Mar 15 08:14:01 2000 + Wed Mar 15 13:14:02 2000 + Wed Mar 15 12:14:03 2000 + Wed Mar 15 03:14:04 2000 + Wed Mar 15 02:14:05 2000 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:00 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 18:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Feb 11 17:32:01 1997 + Wed Feb 12 17:32:01 1997 + Thu Feb 13 17:32:01 1997 + Fri Feb 14 17:32:01 1997 + Sat Feb 15 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Tue Feb 16 17:32:01 0097 BC + Sat Feb 16 17:32:01 0097 + Thu Feb 16 17:32:01 0597 + Tue Feb 16 17:32:01 1097 + Sat Feb 16 17:32:01 1697 + Thu Feb 16 17:32:01 1797 + Tue Feb 16 17:32:01 1897 + Sun Feb 16 17:32:01 1997 + Sat Feb 16 17:32:01 2097 + Wed Feb 28 17:32:01 1996 + Thu Feb 29 17:32:01 1996 + Fri Mar 01 17:32:01 1996 + Mon Dec 30 17:32:01 1996 + Tue Dec 31 17:32:01 1996 + Wed Jan 01 17:32:01 1997 + Fri Feb 28 17:32:01 1997 + Sat Mar 01 17:32:01 1997 + Tue Dec 30 17:32:01 1997 + Wed Dec 31 17:32:01 1997 + Fri Dec 31 17:32:01 1999 + Sat Jan 01 17:32:01 2000 + Sun Dec 31 17:32:01 2000 + Mon Jan 01 17:32:01 2001 +(64 rows) 
+ +SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 <= timestamp without time zone '1997-01-02'; + d1 +----------------------------- + -infinity + Thu Jan 01 00:00:00 1970 + Thu Jan 02 00:00:00 1997 + Tue Feb 16 17:32:01 0097 BC + Sat Feb 16 17:32:01 0097 + Thu Feb 16 17:32:01 0597 + Tue Feb 16 17:32:01 1097 + Sat Feb 16 17:32:01 1697 + Thu Feb 16 17:32:01 1797 + Tue Feb 16 17:32:01 1897 + Wed Feb 28 17:32:01 1996 + Thu Feb 29 17:32:01 1996 + Fri Mar 01 17:32:01 1996 + Mon Dec 30 17:32:01 1996 + Tue Dec 31 17:32:01 1996 + Wed Jan 01 17:32:01 1997 +(16 rows) + +SELECT d1 FROM TIMESTAMP_TBL + WHERE d1 >= timestamp without time zone '1997-01-02'; + d1 +---------------------------- + infinity + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:02 1997 + Mon Feb 10 17:32:01.4 1997 + Mon Feb 10 17:32:01.5 1997 + Mon Feb 10 17:32:01.6 1997 + Thu Jan 02 00:00:00 1997 + Thu Jan 02 03:04:05 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 17:32:01 1997 + Sat Sep 22 18:19:20 2001 + Wed Mar 15 08:14:01 2000 + Wed Mar 15 13:14:02 2000 + Wed Mar 15 12:14:03 2000 + Wed Mar 15 03:14:04 2000 + Wed Mar 15 02:14:05 2000 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:00 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Jun 10 18:32:01 1997 + Mon Feb 10 17:32:01 1997 + Tue Feb 11 17:32:01 1997 + Wed Feb 12 17:32:01 1997 + Thu Feb 13 17:32:01 1997 + Fri Feb 14 17:32:01 1997 + Sat Feb 15 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Sun Feb 16 17:32:01 1997 + Sat Feb 16 17:32:01 2097 + Fri Feb 28 17:32:01 1997 + Sat Mar 01 17:32:01 1997 + Tue Dec 30 17:32:01 1997 + Wed Dec 31 17:32:01 1997 + Fri Dec 31 17:32:01 1999 + Sat Jan 01 17:32:01 2000 + Sun Dec 31 17:32:01 2000 + Mon Jan 01 17:32:01 2001 +(50 rows) + +SELECT d1 - timestamp without time zone '1997-01-02' AS diff + FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; + diff +---------------------------------------- + @ 9863 days ago + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 2 secs + @ 39 days 17 hours 32 mins 1.4 secs + @ 39 days 17 hours 32 mins 1.5 secs + @ 39 days 17 hours 32 mins 1.6 secs + @ 0 + @ 3 hours 4 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 159 days 17 hours 32 mins 1 sec + @ 1724 days 18 hours 19 mins 20 secs + @ 1168 days 8 hours 14 mins 1 sec + @ 1168 days 13 hours 14 mins 2 secs + @ 1168 days 12 hours 14 mins 3 secs + @ 1168 days 3 hours 14 mins 4 secs + @ 1168 days 2 hours 14 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 159 days 18 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 40 days 17 hours 32 mins 1 sec + @ 41 days 17 hours 32 mins 1 sec + @ 42 days 17 hours 32 mins 1 sec + @ 43 days 17 hours 32 mins 1 sec + @ 44 days 17 hours 32 mins 1 sec + @ 45 days 
17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 308 days 6 hours 27 mins 59 secs ago + @ 307 days 6 hours 27 mins 59 secs ago + @ 306 days 6 hours 27 mins 59 secs ago + @ 2 days 6 hours 27 mins 59 secs ago + @ 1 day 6 hours 27 mins 59 secs ago + @ 6 hours 27 mins 59 secs ago + @ 57 days 17 hours 32 mins 1 sec + @ 58 days 17 hours 32 mins 1 sec + @ 362 days 17 hours 32 mins 1 sec + @ 363 days 17 hours 32 mins 1 sec + @ 1093 days 17 hours 32 mins 1 sec + @ 1094 days 17 hours 32 mins 1 sec + @ 1459 days 17 hours 32 mins 1 sec + @ 1460 days 17 hours 32 mins 1 sec +(55 rows) + +SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc; + week_trunc +-------------------------- + Mon Feb 23 00:00:00 2004 +(1 row) + +-- verify date_bin behaves the same as date_trunc for relevant intervals +-- case 1: AD dates, origin < input +SELECT + str, + interval, + date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2001-01-01') AS equal +FROM ( + VALUES + ('week', '7 d'), + ('day', '1 d'), + ('hour', '1 h'), + ('minute', '1 m'), + ('second', '1 s'), + ('millisecond', '1 ms'), + ('microsecond', '1 us') +) intervals (str, interval), +(VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts); + str | interval | equal +-------------+----------+------- + week | 7 d | t + day | 1 d | t + hour | 1 h | t + minute | 1 m | t + second | 1 s | t + millisecond | 1 ms | t + microsecond | 1 us | t +(7 rows) + +-- case 2: BC dates, origin < input +SELECT + str, + interval, + date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2000-01-01 BC') AS equal +FROM ( + VALUES + ('week', '7 d'), + ('day', '1 d'), + ('hour', '1 h'), + ('minute', '1 m'), + ('second', '1 s'), + ('millisecond', '1 ms'), + ('microsecond', '1 us') +) intervals (str, interval), +(VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts); + str | interval | equal +-------------+----------+------- + week | 7 d | t + day | 1 d | t + hour | 1 h | t + minute | 1 m | t + second | 1 s | t + millisecond | 1 ms | t + microsecond | 1 us | t +(7 rows) + +-- case 3: AD dates, origin > input +SELECT + str, + interval, + date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '2020-03-02') AS equal +FROM ( + VALUES + ('week', '7 d'), + ('day', '1 d'), + ('hour', '1 h'), + ('minute', '1 m'), + ('second', '1 s'), + ('millisecond', '1 ms'), + ('microsecond', '1 us') +) intervals (str, interval), +(VALUES (timestamp '2020-02-29 15:44:17.71393')) ts (ts); + str | interval | equal +-------------+----------+------- + week | 7 d | t + day | 1 d | t + hour | 1 h | t + minute | 1 m | t + second | 1 s | t + millisecond | 1 ms | t + microsecond | 1 us | t +(7 rows) + +-- case 4: BC dates, origin > input +SELECT + str, + interval, + date_trunc(str, ts) = date_bin(interval::interval, ts, timestamp '0055-06-17 BC') AS equal +FROM ( + VALUES + ('week', '7 d'), + ('day', '1 d'), + ('hour', '1 h'), + ('minute', '1 m'), + ('second', '1 s'), + ('millisecond', '1 ms'), + ('microsecond', '1 us') +) intervals (str, interval), +(VALUES (timestamp '0055-6-10 15:44:17.71393 BC')) ts (ts); + str | interval | equal +-------------+----------+------- + week | 7 d | t + day | 1 d | t + hour | 1 h | t + minute | 1 m | t + second | 1 s | t + millisecond | 1 ms | t + microsecond | 1 us | t +(7 rows) + +-- bin timestamps into arbitrary intervals +SELECT + interval, + ts, + origin, + date_bin(interval::interval, ts, origin) +FROM ( + VALUES + ('15 days'), + ('2 hours'), + ('1 hour 30 minutes'), + ('15 minutes'), + ('10 seconds'), + 
('100 milliseconds'), + ('250 microseconds') +) intervals (interval), +(VALUES (timestamp '2020-02-11 15:44:17.71393')) ts (ts), +(VALUES (timestamp '2001-01-01')) origin (origin); + interval | ts | origin | date_bin +-------------------+--------------------------------+--------------------------+-------------------------------- + 15 days | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Thu Feb 06 00:00:00 2020 + 2 hours | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 14:00:00 2020 + 1 hour 30 minutes | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:00:00 2020 + 15 minutes | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:30:00 2020 + 10 seconds | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:44:10 2020 + 100 milliseconds | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:44:17.7 2020 + 250 microseconds | Tue Feb 11 15:44:17.71393 2020 | Mon Jan 01 00:00:00 2001 | Tue Feb 11 15:44:17.71375 2020 +(7 rows) + +-- shift bins using the origin parameter: +SELECT date_bin('5 min'::interval, timestamp '2020-02-01 01:01:01', timestamp '2020-02-01 00:02:30'); + date_bin +-------------------------- + Sat Feb 01 00:57:30 2020 +(1 row) + +-- disallow intervals with months or years +SELECT date_bin('5 months'::interval, timestamp '2020-02-01 01:01:01', timestamp '2001-01-01'); +ERROR: timestamps cannot be binned into intervals containing months or years +SELECT date_bin('5 years'::interval, timestamp '2020-02-01 01:01:01', timestamp '2001-01-01'); +ERROR: timestamps cannot be binned into intervals containing months or years +-- disallow zero intervals +SELECT date_bin('0 days'::interval, timestamp '1970-01-01 01:00:00' , timestamp '1970-01-01 00:00:00'); +ERROR: stride must be greater than zero +-- disallow negative intervals +SELECT date_bin('-2 days'::interval, timestamp '1970-01-01 01:00:00' , timestamp '1970-01-01 00:00:00'); +ERROR: stride must be greater than zero +-- Test casting within a BETWEEN qualifier +SELECT d1 - timestamp without time zone '1997-01-02' AS diff + FROM TIMESTAMP_TBL + WHERE d1 BETWEEN timestamp without time zone '1902-01-01' + AND timestamp without time zone '2038-01-01'; + diff +---------------------------------------- + @ 9863 days ago + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 2 secs + @ 39 days 17 hours 32 mins 1.4 secs + @ 39 days 17 hours 32 mins 1.5 secs + @ 39 days 17 hours 32 mins 1.6 secs + @ 0 + @ 3 hours 4 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 159 days 17 hours 32 mins 1 sec + @ 1724 days 18 hours 19 mins 20 secs + @ 1168 days 8 hours 14 mins 1 sec + @ 1168 days 13 hours 14 mins 2 secs + @ 1168 days 12 hours 14 mins 3 secs + @ 1168 days 3 hours 14 mins 4 secs + @ 1168 days 2 hours 14 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 159 days 18 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 40 days 17 hours 32 mins 1 sec + @ 41 days 17 hours 32 mins 1 sec + @ 42 days 
17 hours 32 mins 1 sec + @ 43 days 17 hours 32 mins 1 sec + @ 44 days 17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 308 days 6 hours 27 mins 59 secs ago + @ 307 days 6 hours 27 mins 59 secs ago + @ 306 days 6 hours 27 mins 59 secs ago + @ 2 days 6 hours 27 mins 59 secs ago + @ 1 day 6 hours 27 mins 59 secs ago + @ 6 hours 27 mins 59 secs ago + @ 57 days 17 hours 32 mins 1 sec + @ 58 days 17 hours 32 mins 1 sec + @ 362 days 17 hours 32 mins 1 sec + @ 363 days 17 hours 32 mins 1 sec + @ 1093 days 17 hours 32 mins 1 sec + @ 1094 days 17 hours 32 mins 1 sec + @ 1459 days 17 hours 32 mins 1 sec + @ 1460 days 17 hours 32 mins 1 sec +(55 rows) + +-- DATE_PART (timestamp_part) +SELECT d1 as "timestamp", + date_part( 'year', d1) AS year, date_part( 'month', d1) AS month, + date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour, + date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second + FROM TIMESTAMP_TBL; + timestamp | year | month | day | hour | minute | second +-----------------------------+-----------+-------+-----+------+--------+-------- + -infinity | -Infinity | | | | | + infinity | Infinity | | | | | + Thu Jan 01 00:00:00 1970 | 1970 | 1 | 1 | 0 | 0 | 0 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:02 1997 | 1997 | 2 | 10 | 17 | 32 | 2 + Mon Feb 10 17:32:01.4 1997 | 1997 | 2 | 10 | 17 | 32 | 1.4 + Mon Feb 10 17:32:01.5 1997 | 1997 | 2 | 10 | 17 | 32 | 1.5 + Mon Feb 10 17:32:01.6 1997 | 1997 | 2 | 10 | 17 | 32 | 1.6 + Thu Jan 02 00:00:00 1997 | 1997 | 1 | 2 | 0 | 0 | 0 + Thu Jan 02 03:04:05 1997 | 1997 | 1 | 2 | 3 | 4 | 5 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Tue Jun 10 17:32:01 1997 | 1997 | 6 | 10 | 17 | 32 | 1 + Sat Sep 22 18:19:20 2001 | 2001 | 9 | 22 | 18 | 19 | 20 + Wed Mar 15 08:14:01 2000 | 2000 | 3 | 15 | 8 | 14 | 1 + Wed Mar 15 13:14:02 2000 | 2000 | 3 | 15 | 13 | 14 | 2 + Wed Mar 15 12:14:03 2000 | 2000 | 3 | 15 | 12 | 14 | 3 + Wed Mar 15 03:14:04 2000 | 2000 | 3 | 15 | 3 | 14 | 4 + Wed Mar 15 02:14:05 2000 | 2000 | 3 | 15 | 2 | 14 | 5 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:00 1997 | 1997 | 2 | 10 | 17 | 32 | 0 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Tue Jun 10 18:32:01 1997 | 1997 | 6 | 10 | 18 | 32 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 2 | 10 | 17 | 32 | 1 + Tue Feb 11 17:32:01 1997 | 1997 | 2 | 11 | 17 | 32 | 1 + Wed Feb 12 17:32:01 1997 | 1997 | 2 | 12 | 17 | 32 | 1 + Thu Feb 13 17:32:01 1997 | 1997 | 2 | 13 | 17 | 32 | 1 + Fri Feb 14 17:32:01 1997 | 1997 | 2 | 14 | 17 | 32 | 1 + Sat Feb 15 17:32:01 1997 | 1997 | 2 | 15 | 17 | 32 | 1 + Sun Feb 16 17:32:01 1997 | 1997 | 2 | 16 | 17 | 32 | 1 + Tue Feb 16 17:32:01 0097 BC | -97 | 2 | 16 | 17 | 32 | 1 + Sat Feb 16 17:32:01 
0097 | 97 | 2 | 16 | 17 | 32 | 1 + Thu Feb 16 17:32:01 0597 | 597 | 2 | 16 | 17 | 32 | 1 + Tue Feb 16 17:32:01 1097 | 1097 | 2 | 16 | 17 | 32 | 1 + Sat Feb 16 17:32:01 1697 | 1697 | 2 | 16 | 17 | 32 | 1 + Thu Feb 16 17:32:01 1797 | 1797 | 2 | 16 | 17 | 32 | 1 + Tue Feb 16 17:32:01 1897 | 1897 | 2 | 16 | 17 | 32 | 1 + Sun Feb 16 17:32:01 1997 | 1997 | 2 | 16 | 17 | 32 | 1 + Sat Feb 16 17:32:01 2097 | 2097 | 2 | 16 | 17 | 32 | 1 + Wed Feb 28 17:32:01 1996 | 1996 | 2 | 28 | 17 | 32 | 1 + Thu Feb 29 17:32:01 1996 | 1996 | 2 | 29 | 17 | 32 | 1 + Fri Mar 01 17:32:01 1996 | 1996 | 3 | 1 | 17 | 32 | 1 + Mon Dec 30 17:32:01 1996 | 1996 | 12 | 30 | 17 | 32 | 1 + Tue Dec 31 17:32:01 1996 | 1996 | 12 | 31 | 17 | 32 | 1 + Wed Jan 01 17:32:01 1997 | 1997 | 1 | 1 | 17 | 32 | 1 + Fri Feb 28 17:32:01 1997 | 1997 | 2 | 28 | 17 | 32 | 1 + Sat Mar 01 17:32:01 1997 | 1997 | 3 | 1 | 17 | 32 | 1 + Tue Dec 30 17:32:01 1997 | 1997 | 12 | 30 | 17 | 32 | 1 + Wed Dec 31 17:32:01 1997 | 1997 | 12 | 31 | 17 | 32 | 1 + Fri Dec 31 17:32:01 1999 | 1999 | 12 | 31 | 17 | 32 | 1 + Sat Jan 01 17:32:01 2000 | 2000 | 1 | 1 | 17 | 32 | 1 + Sun Dec 31 17:32:01 2000 | 2000 | 12 | 31 | 17 | 32 | 1 + Mon Jan 01 17:32:01 2001 | 2001 | 1 | 1 | 17 | 32 | 1 +(65 rows) + +SELECT d1 as "timestamp", + date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec, + date_part( 'usec', d1) AS usec + FROM TIMESTAMP_TBL; + timestamp | quarter | msec | usec +-----------------------------+---------+-------+---------- + -infinity | | | + infinity | | | + Thu Jan 01 00:00:00 1970 | 1 | 0 | 0 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:02 1997 | 1 | 2000 | 2000000 + Mon Feb 10 17:32:01.4 1997 | 1 | 1400 | 1400000 + Mon Feb 10 17:32:01.5 1997 | 1 | 1500 | 1500000 + Mon Feb 10 17:32:01.6 1997 | 1 | 1600 | 1600000 + Thu Jan 02 00:00:00 1997 | 1 | 0 | 0 + Thu Jan 02 03:04:05 1997 | 1 | 5000 | 5000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Tue Jun 10 17:32:01 1997 | 2 | 1000 | 1000000 + Sat Sep 22 18:19:20 2001 | 3 | 20000 | 20000000 + Wed Mar 15 08:14:01 2000 | 1 | 1000 | 1000000 + Wed Mar 15 13:14:02 2000 | 1 | 2000 | 2000000 + Wed Mar 15 12:14:03 2000 | 1 | 3000 | 3000000 + Wed Mar 15 03:14:04 2000 | 1 | 4000 | 4000000 + Wed Mar 15 02:14:05 2000 | 1 | 5000 | 5000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:00 1997 | 1 | 0 | 0 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Tue Jun 10 18:32:01 1997 | 2 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 | 1 | 1000 | 1000000 + Tue Feb 11 17:32:01 1997 | 1 | 1000 | 1000000 + Wed Feb 12 17:32:01 1997 | 1 | 1000 | 1000000 + Thu Feb 13 17:32:01 1997 | 1 | 1000 | 1000000 + Fri Feb 14 17:32:01 1997 | 1 | 1000 | 1000000 + Sat Feb 15 17:32:01 1997 | 1 | 1000 | 1000000 + Sun Feb 16 17:32:01 1997 | 1 | 1000 | 1000000 + Tue Feb 16 17:32:01 0097 BC | 1 | 1000 | 1000000 + Sat Feb 16 17:32:01 0097 | 1 | 1000 | 1000000 + Thu Feb 16 17:32:01 0597 
| 1 | 1000 | 1000000 + Tue Feb 16 17:32:01 1097 | 1 | 1000 | 1000000 + Sat Feb 16 17:32:01 1697 | 1 | 1000 | 1000000 + Thu Feb 16 17:32:01 1797 | 1 | 1000 | 1000000 + Tue Feb 16 17:32:01 1897 | 1 | 1000 | 1000000 + Sun Feb 16 17:32:01 1997 | 1 | 1000 | 1000000 + Sat Feb 16 17:32:01 2097 | 1 | 1000 | 1000000 + Wed Feb 28 17:32:01 1996 | 1 | 1000 | 1000000 + Thu Feb 29 17:32:01 1996 | 1 | 1000 | 1000000 + Fri Mar 01 17:32:01 1996 | 1 | 1000 | 1000000 + Mon Dec 30 17:32:01 1996 | 4 | 1000 | 1000000 + Tue Dec 31 17:32:01 1996 | 4 | 1000 | 1000000 + Wed Jan 01 17:32:01 1997 | 1 | 1000 | 1000000 + Fri Feb 28 17:32:01 1997 | 1 | 1000 | 1000000 + Sat Mar 01 17:32:01 1997 | 1 | 1000 | 1000000 + Tue Dec 30 17:32:01 1997 | 4 | 1000 | 1000000 + Wed Dec 31 17:32:01 1997 | 4 | 1000 | 1000000 + Fri Dec 31 17:32:01 1999 | 4 | 1000 | 1000000 + Sat Jan 01 17:32:01 2000 | 1 | 1000 | 1000000 + Sun Dec 31 17:32:01 2000 | 4 | 1000 | 1000000 + Mon Jan 01 17:32:01 2001 | 1 | 1000 | 1000000 +(65 rows) + +SELECT d1 as "timestamp", + date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS week, + date_part( 'isodow', d1) AS isodow, date_part( 'dow', d1) AS dow, + date_part( 'doy', d1) AS doy + FROM TIMESTAMP_TBL; + timestamp | isoyear | week | isodow | dow | doy +-----------------------------+-----------+------+--------+-----+----- + -infinity | -Infinity | | | | + infinity | Infinity | | | | + Thu Jan 01 00:00:00 1970 | 1970 | 1 | 4 | 4 | 1 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:02 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01.4 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01.5 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01.6 1997 | 1997 | 7 | 1 | 1 | 41 + Thu Jan 02 00:00:00 1997 | 1997 | 1 | 4 | 4 | 2 + Thu Jan 02 03:04:05 1997 | 1997 | 1 | 4 | 4 | 2 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Tue Jun 10 17:32:01 1997 | 1997 | 24 | 2 | 2 | 161 + Sat Sep 22 18:19:20 2001 | 2001 | 38 | 6 | 6 | 265 + Wed Mar 15 08:14:01 2000 | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 13:14:02 2000 | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 12:14:03 2000 | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 03:14:04 2000 | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 02:14:05 2000 | 2000 | 11 | 3 | 3 | 75 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:00 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Tue Jun 10 18:32:01 1997 | 1997 | 24 | 2 | 2 | 161 + Mon Feb 10 17:32:01 1997 | 1997 | 7 | 1 | 1 | 41 + Tue Feb 11 17:32:01 1997 | 1997 | 7 | 2 | 2 | 42 + Wed Feb 12 17:32:01 1997 | 1997 | 7 | 3 | 3 | 43 + Thu Feb 13 17:32:01 1997 | 1997 | 7 | 4 | 4 | 44 + Fri Feb 14 17:32:01 1997 | 1997 | 7 | 5 | 5 | 45 + Sat Feb 15 17:32:01 1997 | 1997 | 7 | 6 | 6 | 46 + Sun Feb 16 17:32:01 1997 | 1997 | 7 | 7 | 0 | 47 + Tue Feb 16 17:32:01 0097 BC | -97 | 7 | 2 | 2 | 47 + Sat Feb 16 17:32:01 0097 | 97 | 7 | 6 | 6 | 47 + 
Thu Feb 16 17:32:01 0597 | 597 | 7 | 4 | 4 | 47 + Tue Feb 16 17:32:01 1097 | 1097 | 7 | 2 | 2 | 47 + Sat Feb 16 17:32:01 1697 | 1697 | 7 | 6 | 6 | 47 + Thu Feb 16 17:32:01 1797 | 1797 | 7 | 4 | 4 | 47 + Tue Feb 16 17:32:01 1897 | 1897 | 7 | 2 | 2 | 47 + Sun Feb 16 17:32:01 1997 | 1997 | 7 | 7 | 0 | 47 + Sat Feb 16 17:32:01 2097 | 2097 | 7 | 6 | 6 | 47 + Wed Feb 28 17:32:01 1996 | 1996 | 9 | 3 | 3 | 59 + Thu Feb 29 17:32:01 1996 | 1996 | 9 | 4 | 4 | 60 + Fri Mar 01 17:32:01 1996 | 1996 | 9 | 5 | 5 | 61 + Mon Dec 30 17:32:01 1996 | 1997 | 1 | 1 | 1 | 365 + Tue Dec 31 17:32:01 1996 | 1997 | 1 | 2 | 2 | 366 + Wed Jan 01 17:32:01 1997 | 1997 | 1 | 3 | 3 | 1 + Fri Feb 28 17:32:01 1997 | 1997 | 9 | 5 | 5 | 59 + Sat Mar 01 17:32:01 1997 | 1997 | 9 | 6 | 6 | 60 + Tue Dec 30 17:32:01 1997 | 1998 | 1 | 2 | 2 | 364 + Wed Dec 31 17:32:01 1997 | 1998 | 1 | 3 | 3 | 365 + Fri Dec 31 17:32:01 1999 | 1999 | 52 | 5 | 5 | 365 + Sat Jan 01 17:32:01 2000 | 1999 | 52 | 6 | 6 | 1 + Sun Dec 31 17:32:01 2000 | 2000 | 52 | 7 | 0 | 366 + Mon Jan 01 17:32:01 2001 | 2001 | 1 | 1 | 1 | 1 +(65 rows) + +SELECT d1 as "timestamp", + date_part( 'decade', d1) AS decade, + date_part( 'century', d1) AS century, + date_part( 'millennium', d1) AS millennium, + round(date_part( 'julian', d1)) AS julian, + date_part( 'epoch', d1) AS epoch + FROM TIMESTAMP_TBL; + timestamp | decade | century | millennium | julian | epoch +-----------------------------+-----------+-----------+------------+-----------+-------------- + -infinity | -Infinity | -Infinity | -Infinity | -Infinity | -Infinity + infinity | Infinity | Infinity | Infinity | Infinity | Infinity + Thu Jan 01 00:00:00 1970 | 197 | 20 | 2 | 2440588 | 0 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:02 1997 | 199 | 20 | 2 | 2450491 | 855595922 + Mon Feb 10 17:32:01.4 1997 | 199 | 20 | 2 | 2450491 | 855595921.4 + Mon Feb 10 17:32:01.5 1997 | 199 | 20 | 2 | 2450491 | 855595921.5 + Mon Feb 10 17:32:01.6 1997 | 199 | 20 | 2 | 2450491 | 855595921.6 + Thu Jan 02 00:00:00 1997 | 199 | 20 | 2 | 2450451 | 852163200 + Thu Jan 02 03:04:05 1997 | 199 | 20 | 2 | 2450451 | 852174245 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Tue Jun 10 17:32:01 1997 | 199 | 20 | 2 | 2450611 | 865963921 + Sat Sep 22 18:19:20 2001 | 200 | 21 | 3 | 2452176 | 1001182760 + Wed Mar 15 08:14:01 2000 | 200 | 20 | 2 | 2451619 | 953108041 + Wed Mar 15 13:14:02 2000 | 200 | 20 | 2 | 2451620 | 953126042 + Wed Mar 15 12:14:03 2000 | 200 | 20 | 2 | 2451620 | 953122443 + Wed Mar 15 03:14:04 2000 | 200 | 20 | 2 | 2451619 | 953090044 + Wed Mar 15 02:14:05 2000 | 200 | 20 | 2 | 2451619 | 953086445 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:00 1997 | 199 | 20 | 2 | 2450491 | 855595920 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 
855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Tue Jun 10 18:32:01 1997 | 199 | 20 | 2 | 2450611 | 865967521 + Mon Feb 10 17:32:01 1997 | 199 | 20 | 2 | 2450491 | 855595921 + Tue Feb 11 17:32:01 1997 | 199 | 20 | 2 | 2450492 | 855682321 + Wed Feb 12 17:32:01 1997 | 199 | 20 | 2 | 2450493 | 855768721 + Thu Feb 13 17:32:01 1997 | 199 | 20 | 2 | 2450494 | 855855121 + Fri Feb 14 17:32:01 1997 | 199 | 20 | 2 | 2450495 | 855941521 + Sat Feb 15 17:32:01 1997 | 199 | 20 | 2 | 2450496 | 856027921 + Sun Feb 16 17:32:01 1997 | 199 | 20 | 2 | 2450497 | 856114321 + Tue Feb 16 17:32:01 0097 BC | -10 | -1 | -1 | 1686043 | -65192711279 + Sat Feb 16 17:32:01 0097 | 9 | 1 | 1 | 1756537 | -59102029679 + Thu Feb 16 17:32:01 0597 | 59 | 6 | 1 | 1939158 | -43323575279 + Tue Feb 16 17:32:01 1097 | 109 | 11 | 2 | 2121779 | -27545120879 + Sat Feb 16 17:32:01 1697 | 169 | 17 | 2 | 2340925 | -8610906479 + Thu Feb 16 17:32:01 1797 | 179 | 18 | 2 | 2377449 | -5455232879 + Tue Feb 16 17:32:01 1897 | 189 | 19 | 2 | 2413973 | -2299559279 + Sun Feb 16 17:32:01 1997 | 199 | 20 | 2 | 2450497 | 856114321 + Sat Feb 16 17:32:01 2097 | 209 | 21 | 3 | 2487022 | 4011874321 + Wed Feb 28 17:32:01 1996 | 199 | 20 | 2 | 2450143 | 825528721 + Thu Feb 29 17:32:01 1996 | 199 | 20 | 2 | 2450144 | 825615121 + Fri Mar 01 17:32:01 1996 | 199 | 20 | 2 | 2450145 | 825701521 + Mon Dec 30 17:32:01 1996 | 199 | 20 | 2 | 2450449 | 851967121 + Tue Dec 31 17:32:01 1996 | 199 | 20 | 2 | 2450450 | 852053521 + Wed Jan 01 17:32:01 1997 | 199 | 20 | 2 | 2450451 | 852139921 + Fri Feb 28 17:32:01 1997 | 199 | 20 | 2 | 2450509 | 857151121 + Sat Mar 01 17:32:01 1997 | 199 | 20 | 2 | 2450510 | 857237521 + Tue Dec 30 17:32:01 1997 | 199 | 20 | 2 | 2450814 | 883503121 + Wed Dec 31 17:32:01 1997 | 199 | 20 | 2 | 2450815 | 883589521 + Fri Dec 31 17:32:01 1999 | 199 | 20 | 2 | 2451545 | 946661521 + Sat Jan 01 17:32:01 2000 | 200 | 20 | 2 | 2451546 | 946747921 + Sun Dec 31 17:32:01 2000 | 200 | 20 | 2 | 2451911 | 978283921 + Mon Jan 01 17:32:01 2001 | 200 | 21 | 3 | 2451912 | 978370321 +(65 rows) + +-- extract implementation is mostly the same as date_part, so only +-- test a few cases for additional coverage. 
+SELECT d1 as "timestamp", + extract(microseconds from d1) AS microseconds, + extract(milliseconds from d1) AS milliseconds, + extract(seconds from d1) AS seconds, + round(extract(julian from d1)) AS julian, + extract(epoch from d1) AS epoch + FROM TIMESTAMP_TBL; + timestamp | microseconds | milliseconds | seconds | julian | epoch +-----------------------------+--------------+--------------+-----------+-----------+--------------------- + -infinity | | | | -Infinity | -Infinity + infinity | | | | Infinity | Infinity + Thu Jan 01 00:00:00 1970 | 0 | 0.000 | 0.000000 | 2440588 | 0.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:02 1997 | 2000000 | 2000.000 | 2.000000 | 2450491 | 855595922.000000 + Mon Feb 10 17:32:01.4 1997 | 1400000 | 1400.000 | 1.400000 | 2450491 | 855595921.400000 + Mon Feb 10 17:32:01.5 1997 | 1500000 | 1500.000 | 1.500000 | 2450491 | 855595921.500000 + Mon Feb 10 17:32:01.6 1997 | 1600000 | 1600.000 | 1.600000 | 2450491 | 855595921.600000 + Thu Jan 02 00:00:00 1997 | 0 | 0.000 | 0.000000 | 2450451 | 852163200.000000 + Thu Jan 02 03:04:05 1997 | 5000000 | 5000.000 | 5.000000 | 2450451 | 852174245.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Tue Jun 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450611 | 865963921.000000 + Sat Sep 22 18:19:20 2001 | 20000000 | 20000.000 | 20.000000 | 2452176 | 1001182760.000000 + Wed Mar 15 08:14:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451619 | 953108041.000000 + Wed Mar 15 13:14:02 2000 | 2000000 | 2000.000 | 2.000000 | 2451620 | 953126042.000000 + Wed Mar 15 12:14:03 2000 | 3000000 | 3000.000 | 3.000000 | 2451620 | 953122443.000000 + Wed Mar 15 03:14:04 2000 | 4000000 | 4000.000 | 4.000000 | 2451619 | 953090044.000000 + Wed Mar 15 02:14:05 2000 | 5000000 | 5000.000 | 5.000000 | 2451619 | 953086445.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:00 1997 | 0 | 0.000 | 0.000000 | 2450491 | 855595920.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Tue Jun 10 18:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450611 | 865967521.000000 + Mon Feb 10 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450491 | 855595921.000000 + Tue Feb 11 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 
2450492 | 855682321.000000 + Wed Feb 12 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450493 | 855768721.000000 + Thu Feb 13 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450494 | 855855121.000000 + Fri Feb 14 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450495 | 855941521.000000 + Sat Feb 15 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450496 | 856027921.000000 + Sun Feb 16 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450497 | 856114321.000000 + Tue Feb 16 17:32:01 0097 BC | 1000000 | 1000.000 | 1.000000 | 1686043 | -65192711279.000000 + Sat Feb 16 17:32:01 0097 | 1000000 | 1000.000 | 1.000000 | 1756537 | -59102029679.000000 + Thu Feb 16 17:32:01 0597 | 1000000 | 1000.000 | 1.000000 | 1939158 | -43323575279.000000 + Tue Feb 16 17:32:01 1097 | 1000000 | 1000.000 | 1.000000 | 2121779 | -27545120879.000000 + Sat Feb 16 17:32:01 1697 | 1000000 | 1000.000 | 1.000000 | 2340925 | -8610906479.000000 + Thu Feb 16 17:32:01 1797 | 1000000 | 1000.000 | 1.000000 | 2377449 | -5455232879.000000 + Tue Feb 16 17:32:01 1897 | 1000000 | 1000.000 | 1.000000 | 2413973 | -2299559279.000000 + Sun Feb 16 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450497 | 856114321.000000 + Sat Feb 16 17:32:01 2097 | 1000000 | 1000.000 | 1.000000 | 2487022 | 4011874321.000000 + Wed Feb 28 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450143 | 825528721.000000 + Thu Feb 29 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450144 | 825615121.000000 + Fri Mar 01 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450145 | 825701521.000000 + Mon Dec 30 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450449 | 851967121.000000 + Tue Dec 31 17:32:01 1996 | 1000000 | 1000.000 | 1.000000 | 2450450 | 852053521.000000 + Wed Jan 01 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450451 | 852139921.000000 + Fri Feb 28 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450509 | 857151121.000000 + Sat Mar 01 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450510 | 857237521.000000 + Tue Dec 30 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450814 | 883503121.000000 + Wed Dec 31 17:32:01 1997 | 1000000 | 1000.000 | 1.000000 | 2450815 | 883589521.000000 + Fri Dec 31 17:32:01 1999 | 1000000 | 1000.000 | 1.000000 | 2451545 | 946661521.000000 + Sat Jan 01 17:32:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451546 | 946747921.000000 + Sun Dec 31 17:32:01 2000 | 1000000 | 1000.000 | 1.000000 | 2451911 | 978283921.000000 + Mon Jan 01 17:32:01 2001 | 1000000 | 1000.000 | 1.000000 | 2451912 | 978370321.000000 +(65 rows) + +-- value near upper bound uses special case in code +SELECT date_part('epoch', '294270-01-01 00:00:00'::timestamp); + date_part +--------------- + 9224097091200 +(1 row) + +SELECT extract(epoch from '294270-01-01 00:00:00'::timestamp); + extract +---------------------- + 9224097091200.000000 +(1 row) + +-- another internal overflow test case +SELECT extract(epoch from '5000-01-01 00:00:00'::timestamp); + extract +-------------------- + 95617584000.000000 +(1 row) + +-- test edge-case overflow in timestamp subtraction +SELECT timestamp '294276-12-31 23:59:59' - timestamp '1999-12-23 19:59:04.224193' AS ok; + ok +----------------------------------------- + @ 106751991 days 4 hours 54.775807 secs +(1 row) + +SELECT timestamp '294276-12-31 23:59:59' - timestamp '1999-12-23 19:59:04.224192' AS overflows; +ERROR: interval out of range +-- TO_CHAR() +SELECT to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') + FROM TIMESTAMP_TBL; + to_char 
+------------------------------------------------------------------------------------------ + + + THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan + THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun + SATURDAY Saturday saturday SAT Sat sat SEPTEMBER September september IX SEP Sep sep + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + SATURDAY Saturday 
saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + FRIDAY Friday friday FRI Fri fri MARCH March march III MAR Mar mar + MONDAY Monday monday MON Mon mon DECEMBER December december XII DEC Dec dec + TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec + WEDNESDAY Wednesday wednesday WED Wed wed JANUARY January january I JAN Jan jan + FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat MARCH March march III MAR Mar mar + TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec + WEDNESDAY Wednesday wednesday WED Wed wed DECEMBER December december XII DEC Dec dec + FRIDAY Friday friday FRI Fri fri DECEMBER December december XII DEC Dec dec + SATURDAY Saturday saturday SAT Sat sat JANUARY January january I JAN Jan jan + SUNDAY Sunday sunday SUN Sun sun DECEMBER December december XII DEC Dec dec + MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan +(65 rows) + +SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') + FROM TIMESTAMP_TBL; + to_char +-------------------------------------------------------------- + + + THURSDAY Thursday thursday JANUARY January january I + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + THURSDAY Thursday thursday JANUARY January january I + THURSDAY Thursday thursday JANUARY January january I + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + TUESDAY Tuesday tuesday JUNE June june VI + SATURDAY Saturday saturday SEPTEMBER September september IX + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + TUESDAY Tuesday tuesday JUNE June june VI + MONDAY Monday monday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + WEDNESDAY 
Wednesday wednesday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + FRIDAY Friday friday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + SUNDAY Sunday sunday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + SUNDAY Sunday sunday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + WEDNESDAY Wednesday wednesday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + FRIDAY Friday friday MARCH March march III + MONDAY Monday monday DECEMBER December december XII + TUESDAY Tuesday tuesday DECEMBER December december XII + WEDNESDAY Wednesday wednesday JANUARY January january I + FRIDAY Friday friday FEBRUARY February february II + SATURDAY Saturday saturday MARCH March march III + TUESDAY Tuesday tuesday DECEMBER December december XII + WEDNESDAY Wednesday wednesday DECEMBER December december XII + FRIDAY Friday friday DECEMBER December december XII + SATURDAY Saturday saturday JANUARY January january I + SUNDAY Sunday sunday DECEMBER December december XII + MONDAY Monday monday JANUARY January january I +(65 rows) + +SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') + FROM TIMESTAMP_TBL; + to_char +-------------------------------------------------- + + + 1,970 1970 970 70 0 20 1 01 01 001 01 5 2440588 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 + 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610 + 2,001 2001 001 01 1 21 3 09 38 265 22 7 2452175 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 042 11 3 2450491 + 1,997 1997 997 97 7 20 1 02 07 043 12 4 2450492 + 1,997 1997 997 97 7 20 1 
02 07 044 13 5 2450493 + 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494 + 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495 + 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496 + 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042 + 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536 + 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157 + 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778 + 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924 + 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448 + 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972 + 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496 + 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021 + 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142 + 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143 + 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144 + 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448 + 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449 + 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450 + 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508 + 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509 + 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813 + 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814 + 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544 + 2,000 2000 000 00 0 20 1 01 01 001 01 7 2451545 + 2,000 2000 000 00 0 20 4 12 53 366 31 1 2451910 + 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911 +(65 rows) + +SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') + FROM TIMESTAMP_TBL; + to_char +------------------------------------------------- + + + 1,970 1970 970 70 0 20 1 1 1 1 1 5 2440588 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 + 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610 + 2,001 2001 1 1 1 21 3 9 38 265 22 7 2452175 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 42 11 3 2450491 + 1,997 1997 997 97 7 20 1 2 7 43 12 4 2450492 + 1,997 1997 997 97 7 20 1 2 7 44 13 5 2450493 + 1,997 1997 997 97 7 20 1 2 7 45 14 6 2450494 + 1,997 1997 997 97 7 20 1 2 7 46 15 7 2450495 + 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496 + 0,097 97 97 97 7 -1 1 2 7 47 16 3 1686042 + 0,097 97 97 97 7 1 1 2 7 47 16 7 1756536 + 0,597 597 597 97 7 6 1 2 7 47 16 5 1939157 + 1,097 1097 97 97 7 11 1 2 7 47 16 3 
2121778 + 1,697 1697 697 97 7 17 1 2 7 47 16 7 2340924 + 1,797 1797 797 97 7 18 1 2 7 47 16 5 2377448 + 1,897 1897 897 97 7 19 1 2 7 47 16 3 2413972 + 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496 + 2,097 2097 97 97 7 21 1 2 7 47 16 7 2487021 + 1,996 1996 996 96 6 20 1 2 9 59 28 4 2450142 + 1,996 1996 996 96 6 20 1 2 9 60 29 5 2450143 + 1,996 1996 996 96 6 20 1 3 9 61 1 6 2450144 + 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448 + 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449 + 1,997 1997 997 97 7 20 1 1 1 1 1 4 2450450 + 1,997 1997 997 97 7 20 1 2 9 59 28 6 2450508 + 1,997 1997 997 97 7 20 1 3 9 60 1 7 2450509 + 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813 + 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814 + 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544 + 2,000 2000 0 0 0 20 1 1 1 1 1 7 2451545 + 2,000 2000 0 0 0 20 4 12 53 366 31 1 2451910 + 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911 +(65 rows) + +SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS') + FROM TIMESTAMP_TBL; + to_char +---------------------- + + + 12 12 00 00 00 0 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 02 63122 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 12 12 00 00 00 0 + 03 03 03 04 05 11045 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 06 06 18 19 20 65960 + 08 08 08 14 01 29641 + 01 01 13 14 02 47642 + 12 12 12 14 03 44043 + 03 03 03 14 04 11644 + 02 02 02 14 05 8045 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 00 63120 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 06 06 18 32 01 66721 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 +(65 rows) + +SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') + FROM TIMESTAMP_TBL; + to_char +------------------------------------------------- + + + HH:MI:SS is 12:00:00 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:02 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 12:00:00 "text between quote marks" + HH:MI:SS is 03:04:05 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 06:19:20 "text between quote marks" + HH:MI:SS is 08:14:01 "text between quote marks" + HH:MI:SS is 01:14:02 "text between quote marks" + HH:MI:SS is 12:14:03 "text between quote marks" + HH:MI:SS is 
03:14:04 "text between quote marks" + HH:MI:SS is 02:14:05 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:00 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 06:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" +(65 rows) + +SELECT to_char(d1, 'HH24--text--MI--text--SS') + FROM TIMESTAMP_TBL; + to_char +------------------------ + + + 00--text--00--text--00 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--02 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 00--text--00--text--00 + 03--text--04--text--05 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 18--text--19--text--20 + 08--text--14--text--01 + 13--text--14--text--02 + 12--text--14--text--03 + 03--text--14--text--04 + 02--text--14--text--05 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--00 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 18--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 
17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 +(65 rows) + +SELECT to_char(d1, 'YYYYTH YYYYth Jth') + FROM TIMESTAMP_TBL; + to_char +------------------------- + + + 1970TH 1970th 2440588th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450451st + 1997TH 1997th 2450451st + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450610th + 2001ST 2001st 2452175th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450610th + 1997TH 1997th 2450490th + 1997TH 1997th 2450491st + 1997TH 1997th 2450492nd + 1997TH 1997th 2450493rd + 1997TH 1997th 2450494th + 1997TH 1997th 2450495th + 1997TH 1997th 2450496th + 0097TH 0097th 1686042nd + 0097TH 0097th 1756536th + 0597TH 0597th 1939157th + 1097TH 1097th 2121778th + 1697TH 1697th 2340924th + 1797TH 1797th 2377448th + 1897TH 1897th 2413972nd + 1997TH 1997th 2450496th + 2097TH 2097th 2487021st + 1996TH 1996th 2450142nd + 1996TH 1996th 2450143rd + 1996TH 1996th 2450144th + 1996TH 1996th 2450448th + 1996TH 1996th 2450449th + 1997TH 1997th 2450450th + 1997TH 1997th 2450508th + 1997TH 1997th 2450509th + 1997TH 1997th 2450813th + 1997TH 1997th 2450814th + 1999TH 1999th 2451544th + 2000TH 2000th 2451545th + 2000TH 2000th 2451910th + 2001ST 2001st 2451911th +(65 rows) + +SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') + FROM TIMESTAMP_TBL; + to_char +--------------------------------------------------------------------- + + + 1970 A.D. 1970 a.d. 1970 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:02 P.M. 05:32:02 p.m. 05:32:02 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am + 1997 A.D. 1997 a.d. 1997 ad 03:04:05 A.M. 03:04:05 a.m. 03:04:05 am + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2001 A.D. 2001 a.d. 2001 ad 06:19:20 P.M. 06:19:20 p.m. 06:19:20 pm + 2000 A.D. 2000 a.d. 2000 ad 08:14:01 A.M. 08:14:01 a.m. 08:14:01 am + 2000 A.D. 2000 a.d. 2000 ad 01:14:02 P.M. 01:14:02 p.m. 
01:14:02 pm + 2000 A.D. 2000 a.d. 2000 ad 12:14:03 P.M. 12:14:03 p.m. 12:14:03 pm + 2000 A.D. 2000 a.d. 2000 ad 03:14:04 A.M. 03:14:04 a.m. 03:14:04 am + 2000 A.D. 2000 a.d. 2000 ad 02:14:05 A.M. 02:14:05 a.m. 02:14:05 am + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:00 P.M. 05:32:00 p.m. 05:32:00 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 06:32:01 P.M. 06:32:01 p.m. 06:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 0097 B.C. 0097 b.c. 0097 bc 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 0097 A.D. 0097 a.d. 0097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 0597 A.D. 0597 a.d. 0597 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1097 A.D. 1097 a.d. 1097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1697 A.D. 1697 a.d. 1697 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1797 A.D. 1797 a.d. 1797 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1897 A.D. 1897 a.d. 1897 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2097 A.D. 2097 a.d. 2097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1999 A.D. 1999 a.d. 1999 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2001 A.D. 2001 a.d. 2001 ad 05:32:01 P.M. 05:32:01 p.m. 
05:32:01 pm +(65 rows) + +SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID') + FROM TIMESTAMP_TBL; + to_char +------------------------ + + + 1970 970 70 0 01 004 4 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 01 004 4 + 1997 997 97 7 01 004 4 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 24 163 2 + 2001 001 01 1 38 265 6 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 24 163 2 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 044 2 + 1997 997 97 7 07 045 3 + 1997 997 97 7 07 046 4 + 1997 997 97 7 07 047 5 + 1997 997 97 7 07 048 6 + 1997 997 97 7 07 049 7 + 0097 097 97 7 07 044 2 + 0097 097 97 7 07 048 6 + 0597 597 97 7 07 046 4 + 1097 097 97 7 07 044 2 + 1697 697 97 7 07 048 6 + 1797 797 97 7 07 046 4 + 1897 897 97 7 07 044 2 + 1997 997 97 7 07 049 7 + 2097 097 97 7 07 048 6 + 1996 996 96 6 09 059 3 + 1996 996 96 6 09 060 4 + 1996 996 96 6 09 061 5 + 1997 997 97 7 01 001 1 + 1997 997 97 7 01 002 2 + 1997 997 97 7 01 003 3 + 1997 997 97 7 09 061 5 + 1997 997 97 7 09 062 6 + 1998 998 98 8 01 002 2 + 1998 998 98 8 01 003 3 + 1999 999 99 9 52 362 5 + 1999 999 99 9 52 363 6 + 2000 000 00 0 52 364 7 + 2001 001 01 1 01 001 1 +(65 rows) + +SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID') + FROM TIMESTAMP_TBL; + to_char +------------------------ + + + 1970 970 70 0 1 4 4 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 1 4 4 + 1997 997 97 7 1 4 4 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 24 163 2 + 2001 1 1 1 38 265 6 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 24 163 2 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 44 2 + 1997 997 97 7 7 45 3 + 1997 997 97 7 7 46 4 + 1997 997 97 7 7 47 5 + 1997 997 97 7 7 48 6 + 1997 997 97 7 7 49 7 + 97 97 97 7 7 44 2 + 97 97 97 7 7 48 6 + 597 597 97 7 7 46 4 + 1097 97 97 7 7 44 2 + 1697 697 97 7 7 48 6 + 1797 797 97 7 7 46 4 + 1897 897 97 7 7 44 2 + 1997 997 97 7 7 49 7 + 2097 97 97 7 7 48 6 + 1996 996 96 6 9 59 3 + 1996 996 96 6 9 60 4 + 1996 996 96 6 9 61 5 + 1997 997 97 7 1 1 1 + 1997 997 97 7 1 2 2 + 1997 997 97 7 1 3 3 + 1997 997 97 7 9 61 5 + 1997 997 97 7 9 62 6 + 1998 998 98 8 1 2 2 + 1998 998 98 8 1 3 3 + 1999 999 99 9 52 362 5 + 1999 999 99 9 52 363 6 + 2000 0 0 0 52 364 7 + 2001 1 1 1 1 1 1 +(65 rows) + +SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US') + FROM (VALUES + ('2018-11-02 12:34:56'::timestamp), + ('2018-11-02 12:34:56.78'), + ('2018-11-02 12:34:56.78901'), + ('2018-11-02 12:34:56.78901234') + ) d(d); + to_char 
+-------------------------------------------------------------------- + 0 00 000 0000 00000 000000 0 00 000 0000 00000 000000 000 000000 + 7 78 780 7800 78000 780000 7 78 780 7800 78000 780000 780 780000 + 7 78 789 7890 78901 789010 7 78 789 7890 78901 789010 789 789010 + 7 78 789 7890 78901 789012 7 78 789 7890 78901 789012 789 789012 +(4 rows) + +-- Roman months, with upper and lower case. +SELECT i, + to_char(i * interval '1mon', 'rm'), + to_char(i * interval '1mon', 'RM') + FROM generate_series(-13, 13) i; + i | to_char | to_char +-----+---------+--------- + -13 | xii | XII + -12 | i | I + -11 | ii | II + -10 | iii | III + -9 | iv | IV + -8 | v | V + -7 | vi | VI + -6 | vii | VII + -5 | viii | VIII + -4 | ix | IX + -3 | x | X + -2 | xi | XI + -1 | xii | XII + 0 | | + 1 | i | I + 2 | ii | II + 3 | iii | III + 4 | iv | IV + 5 | v | V + 6 | vi | VI + 7 | vii | VII + 8 | viii | VIII + 9 | ix | IX + 10 | x | X + 11 | xi | XI + 12 | xii | XII + 13 | i | I +(27 rows) + +-- timestamp numeric fields constructor +SELECT make_timestamp(2014, 12, 28, 6, 30, 45.887); + make_timestamp +------------------------------ + Sun Dec 28 06:30:45.887 2014 +(1 row) + +SELECT make_timestamp(-44, 3, 15, 12, 30, 15); + make_timestamp +----------------------------- + Fri Mar 15 12:30:15 0044 BC +(1 row) + +-- should fail +select make_timestamp(0, 7, 15, 12, 30, 15); +ERROR: date field value out of range: 0-07-15 +-- generate_series for timestamp +select * from generate_series('2020-01-01 00:00'::timestamp, + '2020-01-02 03:00'::timestamp, + '1 hour'::interval); + generate_series +-------------------------- + Wed Jan 01 00:00:00 2020 + Wed Jan 01 01:00:00 2020 + Wed Jan 01 02:00:00 2020 + Wed Jan 01 03:00:00 2020 + Wed Jan 01 04:00:00 2020 + Wed Jan 01 05:00:00 2020 + Wed Jan 01 06:00:00 2020 + Wed Jan 01 07:00:00 2020 + Wed Jan 01 08:00:00 2020 + Wed Jan 01 09:00:00 2020 + Wed Jan 01 10:00:00 2020 + Wed Jan 01 11:00:00 2020 + Wed Jan 01 12:00:00 2020 + Wed Jan 01 13:00:00 2020 + Wed Jan 01 14:00:00 2020 + Wed Jan 01 15:00:00 2020 + Wed Jan 01 16:00:00 2020 + Wed Jan 01 17:00:00 2020 + Wed Jan 01 18:00:00 2020 + Wed Jan 01 19:00:00 2020 + Wed Jan 01 20:00:00 2020 + Wed Jan 01 21:00:00 2020 + Wed Jan 01 22:00:00 2020 + Wed Jan 01 23:00:00 2020 + Thu Jan 02 00:00:00 2020 + Thu Jan 02 01:00:00 2020 + Thu Jan 02 02:00:00 2020 + Thu Jan 02 03:00:00 2020 +(28 rows) + +-- the LIMIT should allow this to terminate in a reasonable amount of time +-- (but that unfortunately doesn't work yet for SELECT * FROM ...) 
+select generate_series('2022-01-01 00:00'::timestamp, + 'infinity'::timestamp, + '1 month'::interval) limit 10; + generate_series +-------------------------- + Sat Jan 01 00:00:00 2022 + Tue Feb 01 00:00:00 2022 + Tue Mar 01 00:00:00 2022 + Fri Apr 01 00:00:00 2022 + Sun May 01 00:00:00 2022 + Wed Jun 01 00:00:00 2022 + Fri Jul 01 00:00:00 2022 + Mon Aug 01 00:00:00 2022 + Thu Sep 01 00:00:00 2022 + Sat Oct 01 00:00:00 2022 +(10 rows) + +-- errors +select * from generate_series('2020-01-01 00:00'::timestamp, + '2020-01-02 03:00'::timestamp, + '0 hour'::interval); +ERROR: step size cannot equal zero diff --git a/src/test/regress/expected/timestamptz.out b/src/test/regress/expected/timestamptz.out new file mode 100644 index 0000000..0dd2fe2 --- /dev/null +++ b/src/test/regress/expected/timestamptz.out @@ -0,0 +1,3156 @@ +-- +-- TIMESTAMPTZ +-- +CREATE TABLE TIMESTAMPTZ_TBL (d1 timestamp(2) with time zone); +-- Test shorthand input values +-- We can't just "select" the results since they aren't constants; test for +-- equality instead. We can do that by running the test inside a transaction +-- block, within which the value of 'now' shouldn't change, and so these +-- related values shouldn't either. +BEGIN; +INSERT INTO TIMESTAMPTZ_TBL VALUES ('today'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('yesterday'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow EST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('tomorrow zulu'); +SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'today'; + one +----- + 1 +(1 row) + +SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow'; + one +----- + 1 +(1 row) + +SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'yesterday'; + one +----- + 1 +(1 row) + +SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow EST'; + one +----- + 1 +(1 row) + +SELECT count(*) AS One FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp with time zone 'tomorrow zulu'; + one +----- + 1 +(1 row) + +COMMIT; +DELETE FROM TIMESTAMPTZ_TBL; +-- Verify that 'now' *does* change over a reasonable interval such as 100 msec, +-- and that it doesn't change over the same interval within a transaction block +INSERT INTO TIMESTAMPTZ_TBL VALUES ('now'); +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +BEGIN; +INSERT INTO TIMESTAMPTZ_TBL VALUES ('now'); +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +INSERT INTO TIMESTAMPTZ_TBL VALUES ('now'); +SELECT pg_sleep(0.1); + pg_sleep +---------- + +(1 row) + +SELECT count(*) AS two FROM TIMESTAMPTZ_TBL WHERE d1 = timestamp(2) with time zone 'now'; + two +----- + 2 +(1 row) + +SELECT count(d1) AS three, count(DISTINCT d1) AS two FROM TIMESTAMPTZ_TBL; + three | two +-------+----- + 3 | 2 +(1 row) + +COMMIT; +TRUNCATE TIMESTAMPTZ_TBL; +-- Special values +INSERT INTO TIMESTAMPTZ_TBL VALUES ('-infinity'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('infinity'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('epoch'); +SELECT timestamptz 'infinity' = timestamptz '+infinity' AS t; + t +--- + t +(1 row) + +-- Postgres v6.0 standard output format +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01 1997 PST'); +-- Variations on Postgres v6.1 standard output format +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.000001 1997 PST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.999999 1997 PST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.4 1997 PST'); +INSERT INTO 
TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.5 1997 PST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mon Feb 10 17:32:01.6 1997 PST'); +-- ISO 8601 format +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-01-02 03:04:05'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-08'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01-0800'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 -08:00'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 -0800'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 17:32:01 -07:00'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('2001-09-22T18:19:20'); +-- POSIX format (note that the timezone abbrev is just decoration here) +INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 08:14:01 GMT+8'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 13:14:02 GMT-1'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 12:14:03 GMT-2'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 03:14:04 PST+8'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('2000-03-15 02:14:05 MST+7:00'); +-- Variations for acceptable input formats +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997 -0800'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 5:32PM 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997/02/10 17:32:01-0800'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-02-10 17:32:01 PST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb-10-1997 17:32:01 PST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('02-10-1997 17:32:01 PST'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 PST'); +set datestyle to ymd; +INSERT INTO TIMESTAMPTZ_TBL VALUES ('97FEB10 5:32:01PM UTC'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('97/02/10 17:32:01 UTC'); +reset datestyle; +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997.041 17:32:01 UTC'); +-- timestamps at different timezones +INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970210 173201 America/New_York'); +SELECT '19970210 173201' AT TIME ZONE 'America/New_York'; + timezone +-------------------------- + Mon Feb 10 20:32:01 1997 +(1 row) + +INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/New_York'); +SELECT '19970710 173201' AT TIME ZONE 'America/New_York'; + timezone +-------------------------- + Thu Jul 10 20:32:01 1997 +(1 row) + +INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America/Does_not_exist'); +ERROR: time zone "america/does_not_exist" not recognized +LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('19970710 173201 America... + ^ +SELECT '19970710 173201' AT TIME ZONE 'America/Does_not_exist'; +ERROR: time zone "America/Does_not_exist" not recognized +-- Daylight saving time for timestamps beyond 32-bit time_t range. 
+SELECT '20500710 173201 Europe/Helsinki'::timestamptz; -- DST + timestamptz +------------------------------ + Sun Jul 10 07:32:01 2050 PDT +(1 row) + +SELECT '20500110 173201 Europe/Helsinki'::timestamptz; -- non-DST + timestamptz +------------------------------ + Mon Jan 10 07:32:01 2050 PST +(1 row) + +SELECT '205000-07-10 17:32:01 Europe/Helsinki'::timestamptz; -- DST + timestamptz +-------------------------------- + Thu Jul 10 07:32:01 205000 PDT +(1 row) + +SELECT '205000-01-10 17:32:01 Europe/Helsinki'::timestamptz; -- non-DST + timestamptz +-------------------------------- + Fri Jan 10 07:32:01 205000 PST +(1 row) + +-- Test non-error-throwing API +SELECT pg_input_is_valid('now', 'timestamptz'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('garbage', 'timestamptz'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('2001-01-01 00:00 Nehwon/Lankhmar', 'timestamptz'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('garbage', 'timestamptz'); + message | detail | hint | sql_error_code +-------------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type timestamp with time zone: "garbage" | | | 22007 +(1 row) + +SELECT * FROM pg_input_error_info('2001-01-01 00:00 Nehwon/Lankhmar', 'timestamptz'); + message | detail | hint | sql_error_code +--------------------------------------------+--------+------+---------------- + time zone "nehwon/lankhmar" not recognized | | | 22023 +(1 row) + +-- Check date conversion and date arithmetic +INSERT INTO TIMESTAMPTZ_TBL VALUES ('1997-06-10 18:32:01 PDT'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 10 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 11 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 12 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 13 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 14 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 15 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097 BC'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0097'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 0597'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1097'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1697'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1797'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1897'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 2097'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1996'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1996'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1996'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1996'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1996'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 28 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1997'); +ERROR: date/time field value out of range: "Feb 29 17:32:01 1997" +LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 29 17:32:01 1997'); + ^ +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Mar 01 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 30 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 1997'); +INSERT INTO TIMESTAMPTZ_TBL VALUES 
('Dec 31 17:32:01 1999'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2000'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Dec 31 17:32:01 2000'); +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Jan 01 17:32:01 2001'); +-- Currently unsupported syntax and ranges +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 -0097'); +ERROR: time zone displacement out of range: "Feb 16 17:32:01 -0097" +LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 -0097')... + ^ +INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 5097 BC'); +ERROR: timestamp out of range: "Feb 16 17:32:01 5097 BC" +LINE 1: INSERT INTO TIMESTAMPTZ_TBL VALUES ('Feb 16 17:32:01 5097 BC... + ^ +-- Alternative field order that we've historically supported (sort of) +-- with regular and POSIXy timezone specs +SELECT 'Wed Jul 11 10:51:14 America/New_York 2001'::timestamptz; + timestamptz +------------------------------ + Wed Jul 11 07:51:14 2001 PDT +(1 row) + +SELECT 'Wed Jul 11 10:51:14 GMT-4 2001'::timestamptz; + timestamptz +------------------------------ + Tue Jul 10 23:51:14 2001 PDT +(1 row) + +SELECT 'Wed Jul 11 10:51:14 GMT+4 2001'::timestamptz; + timestamptz +------------------------------ + Wed Jul 11 07:51:14 2001 PDT +(1 row) + +SELECT 'Wed Jul 11 10:51:14 PST-03:00 2001'::timestamptz; + timestamptz +------------------------------ + Wed Jul 11 00:51:14 2001 PDT +(1 row) + +SELECT 'Wed Jul 11 10:51:14 PST+03:00 2001'::timestamptz; + timestamptz +------------------------------ + Wed Jul 11 06:51:14 2001 PDT +(1 row) + +SELECT d1 FROM TIMESTAMPTZ_TBL; + d1 +--------------------------------- + -infinity + infinity + Wed Dec 31 16:00:00 1969 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:02 1997 PST + Mon Feb 10 17:32:01.4 1997 PST + Mon Feb 10 17:32:01.5 1997 PST + Mon Feb 10 17:32:01.6 1997 PST + Thu Jan 02 00:00:00 1997 PST + Thu Jan 02 03:04:05 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Tue Jun 10 17:32:01 1997 PDT + Sat Sep 22 18:19:20 2001 PDT + Wed Mar 15 08:14:01 2000 PST + Wed Mar 15 04:14:02 2000 PST + Wed Mar 15 02:14:03 2000 PST + Wed Mar 15 03:14:04 2000 PST + Wed Mar 15 01:14:05 2000 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:00 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 14:32:01 1997 PST + Thu Jul 10 14:32:01 1997 PDT + Tue Jun 10 18:32:01 1997 PDT + Mon Feb 10 17:32:01 1997 PST + Tue Feb 11 17:32:01 1997 PST + Wed Feb 12 17:32:01 1997 PST + Thu Feb 13 17:32:01 1997 PST + Fri Feb 14 17:32:01 1997 PST + Sat Feb 15 17:32:01 1997 PST + Sun Feb 16 17:32:01 1997 PST + Tue Feb 16 17:32:01 0097 PST BC + Sat Feb 16 17:32:01 0097 PST + Thu Feb 16 17:32:01 0597 PST + Tue Feb 16 17:32:01 1097 PST + Sat Feb 16 17:32:01 1697 PST + Thu Feb 16 17:32:01 1797 PST + Tue Feb 16 17:32:01 1897 PST + Sun Feb 16 17:32:01 1997 PST + Sat Feb 16 17:32:01 2097 PST + Wed Feb 28 17:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST + Wed Jan 01 17:32:01 1997 PST + Fri Feb 28 17:32:01 1997 PST + Sat Mar 01 17:32:01 1997 PST + Tue Dec 30 17:32:01 1997 PST + Wed Dec 31 17:32:01 1997 PST + Fri Dec 31 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST + Sun Dec 31 
17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST +(66 rows) + +-- Check behavior at the boundaries of the timestamp range +SELECT '4714-11-24 00:00:00+00 BC'::timestamptz; + timestamptz +--------------------------------- + Sun Nov 23 16:00:00 4714 PST BC +(1 row) + +SELECT '4714-11-23 16:00:00-08 BC'::timestamptz; + timestamptz +--------------------------------- + Sun Nov 23 16:00:00 4714 PST BC +(1 row) + +SELECT 'Sun Nov 23 16:00:00 4714 PST BC'::timestamptz; + timestamptz +--------------------------------- + Sun Nov 23 16:00:00 4714 PST BC +(1 row) + +SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; -- out of range +ERROR: timestamp out of range: "4714-11-23 23:59:59+00 BC" +LINE 1: SELECT '4714-11-23 23:59:59+00 BC'::timestamptz; + ^ +SELECT '294276-12-31 23:59:59+00'::timestamptz; + timestamptz +-------------------------------- + Sun Dec 31 15:59:59 294276 PST +(1 row) + +SELECT '294276-12-31 15:59:59-08'::timestamptz; + timestamptz +-------------------------------- + Sun Dec 31 15:59:59 294276 PST +(1 row) + +SELECT '294277-01-01 00:00:00+00'::timestamptz; -- out of range +ERROR: timestamp out of range: "294277-01-01 00:00:00+00" +LINE 1: SELECT '294277-01-01 00:00:00+00'::timestamptz; + ^ +SELECT '294277-12-31 16:00:00-08'::timestamptz; -- out of range +ERROR: timestamp out of range: "294277-12-31 16:00:00-08" +LINE 1: SELECT '294277-12-31 16:00:00-08'::timestamptz; + ^ +-- Demonstrate functions and operators +SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 > timestamp with time zone '1997-01-02'; + d1 +-------------------------------- + infinity + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:02 1997 PST + Mon Feb 10 17:32:01.4 1997 PST + Mon Feb 10 17:32:01.5 1997 PST + Mon Feb 10 17:32:01.6 1997 PST + Thu Jan 02 03:04:05 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Tue Jun 10 17:32:01 1997 PDT + Sat Sep 22 18:19:20 2001 PDT + Wed Mar 15 08:14:01 2000 PST + Wed Mar 15 04:14:02 2000 PST + Wed Mar 15 02:14:03 2000 PST + Wed Mar 15 03:14:04 2000 PST + Wed Mar 15 01:14:05 2000 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:00 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 14:32:01 1997 PST + Thu Jul 10 14:32:01 1997 PDT + Tue Jun 10 18:32:01 1997 PDT + Mon Feb 10 17:32:01 1997 PST + Tue Feb 11 17:32:01 1997 PST + Wed Feb 12 17:32:01 1997 PST + Thu Feb 13 17:32:01 1997 PST + Fri Feb 14 17:32:01 1997 PST + Sat Feb 15 17:32:01 1997 PST + Sun Feb 16 17:32:01 1997 PST + Sun Feb 16 17:32:01 1997 PST + Sat Feb 16 17:32:01 2097 PST + Fri Feb 28 17:32:01 1997 PST + Sat Mar 01 17:32:01 1997 PST + Tue Dec 30 17:32:01 1997 PST + Wed Dec 31 17:32:01 1997 PST + Fri Dec 31 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST + Sun Dec 31 17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST +(50 rows) + +SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 < timestamp with time zone '1997-01-02'; + d1 +--------------------------------- + -infinity + Wed Dec 31 16:00:00 1969 PST + Tue Feb 16 17:32:01 0097 PST BC + Sat Feb 16 17:32:01 0097 PST + Thu Feb 16 17:32:01 0597 PST + Tue Feb 16 17:32:01 1097 PST + Sat Feb 16 17:32:01 1697 PST + Thu Feb 16 17:32:01 1797 PST + Tue Feb 16 17:32:01 1897 PST + Wed Feb 28 17:32:01 1996 PST + Thu Feb 29 17:32:01 
1996 PST + Fri Mar 01 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST + Wed Jan 01 17:32:01 1997 PST +(15 rows) + +SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 = timestamp with time zone '1997-01-02'; + d1 +------------------------------ + Thu Jan 02 00:00:00 1997 PST +(1 row) + +SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 != timestamp with time zone '1997-01-02'; + d1 +--------------------------------- + -infinity + infinity + Wed Dec 31 16:00:00 1969 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:02 1997 PST + Mon Feb 10 17:32:01.4 1997 PST + Mon Feb 10 17:32:01.5 1997 PST + Mon Feb 10 17:32:01.6 1997 PST + Thu Jan 02 03:04:05 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Tue Jun 10 17:32:01 1997 PDT + Sat Sep 22 18:19:20 2001 PDT + Wed Mar 15 08:14:01 2000 PST + Wed Mar 15 04:14:02 2000 PST + Wed Mar 15 02:14:03 2000 PST + Wed Mar 15 03:14:04 2000 PST + Wed Mar 15 01:14:05 2000 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:00 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 14:32:01 1997 PST + Thu Jul 10 14:32:01 1997 PDT + Tue Jun 10 18:32:01 1997 PDT + Mon Feb 10 17:32:01 1997 PST + Tue Feb 11 17:32:01 1997 PST + Wed Feb 12 17:32:01 1997 PST + Thu Feb 13 17:32:01 1997 PST + Fri Feb 14 17:32:01 1997 PST + Sat Feb 15 17:32:01 1997 PST + Sun Feb 16 17:32:01 1997 PST + Tue Feb 16 17:32:01 0097 PST BC + Sat Feb 16 17:32:01 0097 PST + Thu Feb 16 17:32:01 0597 PST + Tue Feb 16 17:32:01 1097 PST + Sat Feb 16 17:32:01 1697 PST + Thu Feb 16 17:32:01 1797 PST + Tue Feb 16 17:32:01 1897 PST + Sun Feb 16 17:32:01 1997 PST + Sat Feb 16 17:32:01 2097 PST + Wed Feb 28 17:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST + Wed Jan 01 17:32:01 1997 PST + Fri Feb 28 17:32:01 1997 PST + Sat Mar 01 17:32:01 1997 PST + Tue Dec 30 17:32:01 1997 PST + Wed Dec 31 17:32:01 1997 PST + Fri Dec 31 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST + Sun Dec 31 17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST +(65 rows) + +SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 <= timestamp with time zone '1997-01-02'; + d1 +--------------------------------- + -infinity + Wed Dec 31 16:00:00 1969 PST + Thu Jan 02 00:00:00 1997 PST + Tue Feb 16 17:32:01 0097 PST BC + Sat Feb 16 17:32:01 0097 PST + Thu Feb 16 17:32:01 0597 PST + Tue Feb 16 17:32:01 1097 PST + Sat Feb 16 17:32:01 1697 PST + Thu Feb 16 17:32:01 1797 PST + Tue Feb 16 17:32:01 1897 PST + Wed Feb 28 17:32:01 1996 PST + Thu Feb 29 17:32:01 1996 PST + Fri Mar 01 17:32:01 1996 PST + Mon Dec 30 17:32:01 1996 PST + Tue Dec 31 17:32:01 1996 PST + Wed Jan 01 17:32:01 1997 PST +(16 rows) + +SELECT d1 FROM TIMESTAMPTZ_TBL + WHERE d1 >= timestamp with time zone '1997-01-02'; + d1 +-------------------------------- + infinity + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:02 1997 PST + Mon Feb 10 17:32:01.4 1997 PST + Mon Feb 10 17:32:01.5 1997 PST + Mon Feb 10 17:32:01.6 1997 PST + Thu Jan 02 00:00:00 1997 PST + Thu Jan 02 03:04:05 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 
1997 PST + Tue Jun 10 17:32:01 1997 PDT + Sat Sep 22 18:19:20 2001 PDT + Wed Mar 15 08:14:01 2000 PST + Wed Mar 15 04:14:02 2000 PST + Wed Mar 15 02:14:03 2000 PST + Wed Mar 15 03:14:04 2000 PST + Wed Mar 15 01:14:05 2000 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:00 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 17:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 09:32:01 1997 PST + Mon Feb 10 14:32:01 1997 PST + Thu Jul 10 14:32:01 1997 PDT + Tue Jun 10 18:32:01 1997 PDT + Mon Feb 10 17:32:01 1997 PST + Tue Feb 11 17:32:01 1997 PST + Wed Feb 12 17:32:01 1997 PST + Thu Feb 13 17:32:01 1997 PST + Fri Feb 14 17:32:01 1997 PST + Sat Feb 15 17:32:01 1997 PST + Sun Feb 16 17:32:01 1997 PST + Sun Feb 16 17:32:01 1997 PST + Sat Feb 16 17:32:01 2097 PST + Fri Feb 28 17:32:01 1997 PST + Sat Mar 01 17:32:01 1997 PST + Tue Dec 30 17:32:01 1997 PST + Wed Dec 31 17:32:01 1997 PST + Fri Dec 31 17:32:01 1999 PST + Sat Jan 01 17:32:01 2000 PST + Sun Dec 31 17:32:01 2000 PST + Mon Jan 01 17:32:01 2001 PST +(51 rows) + +SELECT d1 - timestamp with time zone '1997-01-02' AS diff + FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; + diff +---------------------------------------- + @ 9863 days 8 hours ago + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 2 secs + @ 39 days 17 hours 32 mins 1.4 secs + @ 39 days 17 hours 32 mins 1.5 secs + @ 39 days 17 hours 32 mins 1.6 secs + @ 0 + @ 3 hours 4 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 159 days 16 hours 32 mins 1 sec + @ 1724 days 17 hours 19 mins 20 secs + @ 1168 days 8 hours 14 mins 1 sec + @ 1168 days 4 hours 14 mins 2 secs + @ 1168 days 2 hours 14 mins 3 secs + @ 1168 days 3 hours 14 mins 4 secs + @ 1168 days 1 hour 14 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 9 hours 32 mins 1 sec + @ 39 days 9 hours 32 mins 1 sec + @ 39 days 9 hours 32 mins 1 sec + @ 39 days 14 hours 32 mins 1 sec + @ 189 days 13 hours 32 mins 1 sec + @ 159 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 40 days 17 hours 32 mins 1 sec + @ 41 days 17 hours 32 mins 1 sec + @ 42 days 17 hours 32 mins 1 sec + @ 43 days 17 hours 32 mins 1 sec + @ 44 days 17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 308 days 6 hours 27 mins 59 secs ago + @ 307 days 6 hours 27 mins 59 secs ago + @ 306 days 6 hours 27 mins 59 secs ago + @ 2 days 6 hours 27 mins 59 secs ago + @ 1 day 6 hours 27 mins 59 secs ago + @ 6 hours 27 mins 59 secs ago + @ 57 days 17 hours 32 mins 1 sec + @ 58 days 17 hours 32 mins 1 sec + @ 362 days 17 hours 32 mins 1 sec + @ 363 days 17 hours 32 mins 1 sec + @ 1093 days 17 hours 32 mins 1 sec + @ 1094 days 17 hours 32 mins 1 sec + @ 1459 days 17 hours 32 mins 1 sec + @ 1460 days 17 hours 32 mins 1 sec +(56 rows) + +SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc; + week_trunc +------------------------------ + Mon Feb 23 00:00:00 2004 PST +(1 row) + +SELECT 
date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name + sydney_trunc +------------------------------ + Fri Feb 16 05:00:00 2001 PST +(1 row) + +SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation + gmt_trunc +------------------------------ + Thu Feb 15 16:00:00 2001 PST +(1 row) + +SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation + vet_trunc +------------------------------ + Thu Feb 15 20:00:00 2001 PST +(1 row) + +-- verify date_bin behaves the same as date_trunc for relevant intervals +SELECT + str, + interval, + date_trunc(str, ts, 'Australia/Sydney') = date_bin(interval::interval, ts, timestamp with time zone '2001-01-01+11') AS equal +FROM ( + VALUES + ('day', '1 d'), + ('hour', '1 h'), + ('minute', '1 m'), + ('second', '1 s'), + ('millisecond', '1 ms'), + ('microsecond', '1 us') +) intervals (str, interval), +(VALUES (timestamptz '2020-02-29 15:44:17.71393+00')) ts (ts); + str | interval | equal +-------------+----------+------- + day | 1 d | t + hour | 1 h | t + minute | 1 m | t + second | 1 s | t + millisecond | 1 ms | t + microsecond | 1 us | t +(6 rows) + +-- bin timestamps into arbitrary intervals +SELECT + interval, + ts, + origin, + date_bin(interval::interval, ts, origin) +FROM ( + VALUES + ('15 days'), + ('2 hours'), + ('1 hour 30 minutes'), + ('15 minutes'), + ('10 seconds'), + ('100 milliseconds'), + ('250 microseconds') +) intervals (interval), +(VALUES (timestamptz '2020-02-11 15:44:17.71393')) ts (ts), +(VALUES (timestamptz '2001-01-01')) origin (origin); + interval | ts | origin | date_bin +-------------------+------------------------------------+------------------------------+------------------------------------ + 15 days | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Thu Feb 06 00:00:00 2020 PST + 2 hours | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 14:00:00 2020 PST + 1 hour 30 minutes | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:00:00 2020 PST + 15 minutes | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:30:00 2020 PST + 10 seconds | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:44:10 2020 PST + 100 milliseconds | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:44:17.7 2020 PST + 250 microseconds | Tue Feb 11 15:44:17.71393 2020 PST | Mon Jan 01 00:00:00 2001 PST | Tue Feb 11 15:44:17.71375 2020 PST +(7 rows) + +-- shift bins using the origin parameter: +SELECT date_bin('5 min'::interval, timestamptz '2020-02-01 01:01:01+00', timestamptz '2020-02-01 00:02:30+00'); + date_bin +------------------------------ + Fri Jan 31 16:57:30 2020 PST +(1 row) + +-- disallow intervals with months or years +SELECT date_bin('5 months'::interval, timestamp with time zone '2020-02-01 01:01:01+00', timestamp with time zone '2001-01-01+00'); +ERROR: timestamps cannot be binned into intervals containing months or years +SELECT date_bin('5 years'::interval, timestamp with time zone '2020-02-01 01:01:01+00', timestamp with time zone '2001-01-01+00'); +ERROR: timestamps cannot be binned into intervals containing months or years +-- disallow zero intervals +SELECT date_bin('0 days'::interval, timestamp with time zone '1970-01-01 01:00:00+00' , timestamp with time zone 
'1970-01-01 00:00:00+00'); +ERROR: stride must be greater than zero +-- disallow negative intervals +SELECT date_bin('-2 days'::interval, timestamp with time zone '1970-01-01 01:00:00+00' , timestamp with time zone '1970-01-01 00:00:00+00'); +ERROR: stride must be greater than zero +-- Test casting within a BETWEEN qualifier +SELECT d1 - timestamp with time zone '1997-01-02' AS diff + FROM TIMESTAMPTZ_TBL + WHERE d1 BETWEEN timestamp with time zone '1902-01-01' AND timestamp with time zone '2038-01-01'; + diff +---------------------------------------- + @ 9863 days 8 hours ago + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 2 secs + @ 39 days 17 hours 32 mins 1.4 secs + @ 39 days 17 hours 32 mins 1.5 secs + @ 39 days 17 hours 32 mins 1.6 secs + @ 0 + @ 3 hours 4 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 159 days 16 hours 32 mins 1 sec + @ 1724 days 17 hours 19 mins 20 secs + @ 1168 days 8 hours 14 mins 1 sec + @ 1168 days 4 hours 14 mins 2 secs + @ 1168 days 2 hours 14 mins 3 secs + @ 1168 days 3 hours 14 mins 4 secs + @ 1168 days 1 hour 14 mins 5 secs + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 39 days 9 hours 32 mins 1 sec + @ 39 days 9 hours 32 mins 1 sec + @ 39 days 9 hours 32 mins 1 sec + @ 39 days 14 hours 32 mins 1 sec + @ 189 days 13 hours 32 mins 1 sec + @ 159 days 17 hours 32 mins 1 sec + @ 39 days 17 hours 32 mins 1 sec + @ 40 days 17 hours 32 mins 1 sec + @ 41 days 17 hours 32 mins 1 sec + @ 42 days 17 hours 32 mins 1 sec + @ 43 days 17 hours 32 mins 1 sec + @ 44 days 17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 45 days 17 hours 32 mins 1 sec + @ 308 days 6 hours 27 mins 59 secs ago + @ 307 days 6 hours 27 mins 59 secs ago + @ 306 days 6 hours 27 mins 59 secs ago + @ 2 days 6 hours 27 mins 59 secs ago + @ 1 day 6 hours 27 mins 59 secs ago + @ 6 hours 27 mins 59 secs ago + @ 57 days 17 hours 32 mins 1 sec + @ 58 days 17 hours 32 mins 1 sec + @ 362 days 17 hours 32 mins 1 sec + @ 363 days 17 hours 32 mins 1 sec + @ 1093 days 17 hours 32 mins 1 sec + @ 1094 days 17 hours 32 mins 1 sec + @ 1459 days 17 hours 32 mins 1 sec + @ 1460 days 17 hours 32 mins 1 sec +(56 rows) + +-- DATE_PART (timestamptz_part) +SELECT d1 as timestamptz, + date_part( 'year', d1) AS year, date_part( 'month', d1) AS month, + date_part( 'day', d1) AS day, date_part( 'hour', d1) AS hour, + date_part( 'minute', d1) AS minute, date_part( 'second', d1) AS second + FROM TIMESTAMPTZ_TBL; + timestamptz | year | month | day | hour | minute | second +---------------------------------+-----------+-------+-----+------+--------+-------- + -infinity | -Infinity | | | | | + infinity | Infinity | | | | | + Wed Dec 31 16:00:00 1969 PST | 1969 | 12 | 31 | 16 | 0 | 0 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:02 1997 PST | 1997 | 2 | 10 | 17 | 32 | 2 + Mon Feb 10 17:32:01.4 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1.4 + Mon Feb 10 17:32:01.5 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1.5 + Mon Feb 10 17:32:01.6 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1.6 + Thu Jan 02 00:00:00 1997 PST | 1997 | 1 | 2 | 0 | 0 | 0 + Thu Jan 02 03:04:05 
1997 PST | 1997 | 1 | 2 | 3 | 4 | 5 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Tue Jun 10 17:32:01 1997 PDT | 1997 | 6 | 10 | 17 | 32 | 1 + Sat Sep 22 18:19:20 2001 PDT | 2001 | 9 | 22 | 18 | 19 | 20 + Wed Mar 15 08:14:01 2000 PST | 2000 | 3 | 15 | 8 | 14 | 1 + Wed Mar 15 04:14:02 2000 PST | 2000 | 3 | 15 | 4 | 14 | 2 + Wed Mar 15 02:14:03 2000 PST | 2000 | 3 | 15 | 2 | 14 | 3 + Wed Mar 15 03:14:04 2000 PST | 2000 | 3 | 15 | 3 | 14 | 4 + Wed Mar 15 01:14:05 2000 PST | 2000 | 3 | 15 | 1 | 14 | 5 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:00 1997 PST | 1997 | 2 | 10 | 17 | 32 | 0 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Mon Feb 10 09:32:01 1997 PST | 1997 | 2 | 10 | 9 | 32 | 1 + Mon Feb 10 09:32:01 1997 PST | 1997 | 2 | 10 | 9 | 32 | 1 + Mon Feb 10 09:32:01 1997 PST | 1997 | 2 | 10 | 9 | 32 | 1 + Mon Feb 10 14:32:01 1997 PST | 1997 | 2 | 10 | 14 | 32 | 1 + Thu Jul 10 14:32:01 1997 PDT | 1997 | 7 | 10 | 14 | 32 | 1 + Tue Jun 10 18:32:01 1997 PDT | 1997 | 6 | 10 | 18 | 32 | 1 + Mon Feb 10 17:32:01 1997 PST | 1997 | 2 | 10 | 17 | 32 | 1 + Tue Feb 11 17:32:01 1997 PST | 1997 | 2 | 11 | 17 | 32 | 1 + Wed Feb 12 17:32:01 1997 PST | 1997 | 2 | 12 | 17 | 32 | 1 + Thu Feb 13 17:32:01 1997 PST | 1997 | 2 | 13 | 17 | 32 | 1 + Fri Feb 14 17:32:01 1997 PST | 1997 | 2 | 14 | 17 | 32 | 1 + Sat Feb 15 17:32:01 1997 PST | 1997 | 2 | 15 | 17 | 32 | 1 + Sun Feb 16 17:32:01 1997 PST | 1997 | 2 | 16 | 17 | 32 | 1 + Tue Feb 16 17:32:01 0097 PST BC | -97 | 2 | 16 | 17 | 32 | 1 + Sat Feb 16 17:32:01 0097 PST | 97 | 2 | 16 | 17 | 32 | 1 + Thu Feb 16 17:32:01 0597 PST | 597 | 2 | 16 | 17 | 32 | 1 + Tue Feb 16 17:32:01 1097 PST | 1097 | 2 | 16 | 17 | 32 | 1 + Sat Feb 16 17:32:01 1697 PST | 1697 | 2 | 16 | 17 | 32 | 1 + Thu Feb 16 17:32:01 1797 PST | 1797 | 2 | 16 | 17 | 32 | 1 + Tue Feb 16 17:32:01 1897 PST | 1897 | 2 | 16 | 17 | 32 | 1 + Sun Feb 16 17:32:01 1997 PST | 1997 | 2 | 16 | 17 | 32 | 1 + Sat Feb 16 17:32:01 2097 PST | 2097 | 2 | 16 | 17 | 32 | 1 + Wed Feb 28 17:32:01 1996 PST | 1996 | 2 | 28 | 17 | 32 | 1 + Thu Feb 29 17:32:01 1996 PST | 1996 | 2 | 29 | 17 | 32 | 1 + Fri Mar 01 17:32:01 1996 PST | 1996 | 3 | 1 | 17 | 32 | 1 + Mon Dec 30 17:32:01 1996 PST | 1996 | 12 | 30 | 17 | 32 | 1 + Tue Dec 31 17:32:01 1996 PST | 1996 | 12 | 31 | 17 | 32 | 1 + Wed Jan 01 17:32:01 1997 PST | 1997 | 1 | 1 | 17 | 32 | 1 + Fri Feb 28 17:32:01 1997 PST | 1997 | 2 | 28 | 17 | 32 | 1 + Sat Mar 01 17:32:01 1997 PST | 1997 | 3 | 1 | 17 | 32 | 1 + Tue Dec 30 17:32:01 1997 PST | 1997 | 12 | 30 | 17 | 32 | 1 + Wed Dec 31 17:32:01 1997 PST | 1997 | 12 | 31 | 17 | 32 | 1 + Fri Dec 31 17:32:01 1999 PST | 1999 | 12 | 31 | 17 | 32 | 1 + Sat Jan 01 17:32:01 2000 PST | 2000 | 1 | 1 | 17 | 32 | 1 + Sun Dec 31 17:32:01 2000 PST | 2000 | 12 | 31 | 17 | 32 | 1 + Mon Jan 01 17:32:01 2001 PST | 2001 | 1 | 1 | 17 | 32 | 1 +(66 rows) + +SELECT d1 as timestamptz, + date_part( 'quarter', d1) AS quarter, date_part( 'msec', d1) AS msec, + date_part( 'usec', d1) AS usec + FROM TIMESTAMPTZ_TBL; + 
timestamptz | quarter | msec | usec +---------------------------------+---------+-------+---------- + -infinity | | | + infinity | | | + Wed Dec 31 16:00:00 1969 PST | 4 | 0 | 0 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:02 1997 PST | 1 | 2000 | 2000000 + Mon Feb 10 17:32:01.4 1997 PST | 1 | 1400 | 1400000 + Mon Feb 10 17:32:01.5 1997 PST | 1 | 1500 | 1500000 + Mon Feb 10 17:32:01.6 1997 PST | 1 | 1600 | 1600000 + Thu Jan 02 00:00:00 1997 PST | 1 | 0 | 0 + Thu Jan 02 03:04:05 1997 PST | 1 | 5000 | 5000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Tue Jun 10 17:32:01 1997 PDT | 2 | 1000 | 1000000 + Sat Sep 22 18:19:20 2001 PDT | 3 | 20000 | 20000000 + Wed Mar 15 08:14:01 2000 PST | 1 | 1000 | 1000000 + Wed Mar 15 04:14:02 2000 PST | 1 | 2000 | 2000000 + Wed Mar 15 02:14:03 2000 PST | 1 | 3000 | 3000000 + Wed Mar 15 03:14:04 2000 PST | 1 | 4000 | 4000000 + Wed Mar 15 01:14:05 2000 PST | 1 | 5000 | 5000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:00 1997 PST | 1 | 0 | 0 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 09:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 09:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 09:32:01 1997 PST | 1 | 1000 | 1000000 + Mon Feb 10 14:32:01 1997 PST | 1 | 1000 | 1000000 + Thu Jul 10 14:32:01 1997 PDT | 3 | 1000 | 1000000 + Tue Jun 10 18:32:01 1997 PDT | 2 | 1000 | 1000000 + Mon Feb 10 17:32:01 1997 PST | 1 | 1000 | 1000000 + Tue Feb 11 17:32:01 1997 PST | 1 | 1000 | 1000000 + Wed Feb 12 17:32:01 1997 PST | 1 | 1000 | 1000000 + Thu Feb 13 17:32:01 1997 PST | 1 | 1000 | 1000000 + Fri Feb 14 17:32:01 1997 PST | 1 | 1000 | 1000000 + Sat Feb 15 17:32:01 1997 PST | 1 | 1000 | 1000000 + Sun Feb 16 17:32:01 1997 PST | 1 | 1000 | 1000000 + Tue Feb 16 17:32:01 0097 PST BC | 1 | 1000 | 1000000 + Sat Feb 16 17:32:01 0097 PST | 1 | 1000 | 1000000 + Thu Feb 16 17:32:01 0597 PST | 1 | 1000 | 1000000 + Tue Feb 16 17:32:01 1097 PST | 1 | 1000 | 1000000 + Sat Feb 16 17:32:01 1697 PST | 1 | 1000 | 1000000 + Thu Feb 16 17:32:01 1797 PST | 1 | 1000 | 1000000 + Tue Feb 16 17:32:01 1897 PST | 1 | 1000 | 1000000 + Sun Feb 16 17:32:01 1997 PST | 1 | 1000 | 1000000 + Sat Feb 16 17:32:01 2097 PST | 1 | 1000 | 1000000 + Wed Feb 28 17:32:01 1996 PST | 1 | 1000 | 1000000 + Thu Feb 29 17:32:01 1996 PST | 1 | 1000 | 1000000 + Fri Mar 01 17:32:01 1996 PST | 1 | 1000 | 1000000 + Mon Dec 30 17:32:01 1996 PST | 4 | 1000 | 1000000 + Tue Dec 31 17:32:01 1996 PST | 4 | 1000 | 1000000 + Wed Jan 01 17:32:01 1997 PST | 1 | 1000 | 1000000 + Fri Feb 28 17:32:01 1997 PST | 1 | 1000 | 1000000 + Sat Mar 01 17:32:01 1997 PST | 1 | 1000 | 1000000 + Tue Dec 30 17:32:01 1997 PST | 4 | 1000 | 1000000 + Wed Dec 31 17:32:01 1997 PST | 4 | 1000 | 1000000 + Fri Dec 31 17:32:01 1999 PST | 4 | 1000 | 1000000 + Sat Jan 01 17:32:01 2000 PST | 1 | 1000 | 1000000 + Sun Dec 31 17:32:01 2000 PST | 4 | 1000 | 1000000 + Mon Jan 01 17:32:01 2001 PST | 1 | 1000 | 1000000 +(66 rows) + +SELECT d1 as timestamptz, + date_part( 'isoyear', d1) AS isoyear, date_part( 'week', d1) AS 
week, + date_part( 'isodow', d1) AS isodow, date_part( 'dow', d1) AS dow, + date_part( 'doy', d1) AS doy + FROM TIMESTAMPTZ_TBL; + timestamptz | isoyear | week | isodow | dow | doy +---------------------------------+-----------+------+--------+-----+----- + -infinity | -Infinity | | | | + infinity | Infinity | | | | + Wed Dec 31 16:00:00 1969 PST | 1970 | 1 | 3 | 3 | 365 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:02 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01.4 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01.5 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01.6 1997 PST | 1997 | 7 | 1 | 1 | 41 + Thu Jan 02 00:00:00 1997 PST | 1997 | 1 | 4 | 4 | 2 + Thu Jan 02 03:04:05 1997 PST | 1997 | 1 | 4 | 4 | 2 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Tue Jun 10 17:32:01 1997 PDT | 1997 | 24 | 2 | 2 | 161 + Sat Sep 22 18:19:20 2001 PDT | 2001 | 38 | 6 | 6 | 265 + Wed Mar 15 08:14:01 2000 PST | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 04:14:02 2000 PST | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 02:14:03 2000 PST | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 03:14:04 2000 PST | 2000 | 11 | 3 | 3 | 75 + Wed Mar 15 01:14:05 2000 PST | 2000 | 11 | 3 | 3 | 75 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:00 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 09:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 09:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 09:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Mon Feb 10 14:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Thu Jul 10 14:32:01 1997 PDT | 1997 | 28 | 4 | 4 | 191 + Tue Jun 10 18:32:01 1997 PDT | 1997 | 24 | 2 | 2 | 161 + Mon Feb 10 17:32:01 1997 PST | 1997 | 7 | 1 | 1 | 41 + Tue Feb 11 17:32:01 1997 PST | 1997 | 7 | 2 | 2 | 42 + Wed Feb 12 17:32:01 1997 PST | 1997 | 7 | 3 | 3 | 43 + Thu Feb 13 17:32:01 1997 PST | 1997 | 7 | 4 | 4 | 44 + Fri Feb 14 17:32:01 1997 PST | 1997 | 7 | 5 | 5 | 45 + Sat Feb 15 17:32:01 1997 PST | 1997 | 7 | 6 | 6 | 46 + Sun Feb 16 17:32:01 1997 PST | 1997 | 7 | 7 | 0 | 47 + Tue Feb 16 17:32:01 0097 PST BC | -97 | 7 | 2 | 2 | 47 + Sat Feb 16 17:32:01 0097 PST | 97 | 7 | 6 | 6 | 47 + Thu Feb 16 17:32:01 0597 PST | 597 | 7 | 4 | 4 | 47 + Tue Feb 16 17:32:01 1097 PST | 1097 | 7 | 2 | 2 | 47 + Sat Feb 16 17:32:01 1697 PST | 1697 | 7 | 6 | 6 | 47 + Thu Feb 16 17:32:01 1797 PST | 1797 | 7 | 4 | 4 | 47 + Tue Feb 16 17:32:01 1897 PST | 1897 | 7 | 2 | 2 | 47 + Sun Feb 16 17:32:01 1997 PST | 1997 | 7 | 7 | 0 | 47 + Sat Feb 16 17:32:01 2097 PST | 2097 | 7 | 6 | 6 | 47 + Wed Feb 28 17:32:01 1996 PST | 1996 | 9 | 3 | 3 | 59 + Thu Feb 29 17:32:01 1996 PST | 1996 | 9 | 4 | 4 | 60 + Fri Mar 01 17:32:01 1996 PST | 1996 | 9 | 5 | 5 | 61 + Mon Dec 30 17:32:01 1996 PST | 1997 | 1 | 1 | 1 | 365 + Tue Dec 31 17:32:01 1996 PST | 1997 | 1 | 2 | 2 | 366 + Wed Jan 01 17:32:01 1997 PST | 1997 | 1 | 3 | 3 | 1 + Fri Feb 28 17:32:01 1997 PST | 1997 | 9 | 5 | 5 | 59 + Sat Mar 01 17:32:01 1997 PST | 1997 | 9 | 6 | 6 | 60 + Tue Dec 30 17:32:01 1997 
PST | 1998 | 1 | 2 | 2 | 364 + Wed Dec 31 17:32:01 1997 PST | 1998 | 1 | 3 | 3 | 365 + Fri Dec 31 17:32:01 1999 PST | 1999 | 52 | 5 | 5 | 365 + Sat Jan 01 17:32:01 2000 PST | 1999 | 52 | 6 | 6 | 1 + Sun Dec 31 17:32:01 2000 PST | 2000 | 52 | 7 | 0 | 366 + Mon Jan 01 17:32:01 2001 PST | 2001 | 1 | 1 | 1 | 1 +(66 rows) + +SELECT d1 as timestamptz, + date_part( 'decade', d1) AS decade, + date_part( 'century', d1) AS century, + date_part( 'millennium', d1) AS millennium, + round(date_part( 'julian', d1)) AS julian, + date_part( 'epoch', d1) AS epoch + FROM TIMESTAMPTZ_TBL; + timestamptz | decade | century | millennium | julian | epoch +---------------------------------+-----------+-----------+------------+-----------+-------------- + -infinity | -Infinity | -Infinity | -Infinity | -Infinity | -Infinity + infinity | Infinity | Infinity | Infinity | Infinity | Infinity + Wed Dec 31 16:00:00 1969 PST | 196 | 20 | 2 | 2440588 | 0 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:02 1997 PST | 199 | 20 | 2 | 2450491 | 855624722 + Mon Feb 10 17:32:01.4 1997 PST | 199 | 20 | 2 | 2450491 | 855624721.4 + Mon Feb 10 17:32:01.5 1997 PST | 199 | 20 | 2 | 2450491 | 855624721.5 + Mon Feb 10 17:32:01.6 1997 PST | 199 | 20 | 2 | 2450491 | 855624721.6 + Thu Jan 02 00:00:00 1997 PST | 199 | 20 | 2 | 2450451 | 852192000 + Thu Jan 02 03:04:05 1997 PST | 199 | 20 | 2 | 2450451 | 852203045 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Tue Jun 10 17:32:01 1997 PDT | 199 | 20 | 2 | 2450611 | 865989121 + Sat Sep 22 18:19:20 2001 PDT | 200 | 21 | 3 | 2452176 | 1001207960 + Wed Mar 15 08:14:01 2000 PST | 200 | 20 | 2 | 2451619 | 953136841 + Wed Mar 15 04:14:02 2000 PST | 200 | 20 | 2 | 2451619 | 953122442 + Wed Mar 15 02:14:03 2000 PST | 200 | 20 | 2 | 2451619 | 953115243 + Wed Mar 15 03:14:04 2000 PST | 200 | 20 | 2 | 2451619 | 953118844 + Wed Mar 15 01:14:05 2000 PST | 200 | 20 | 2 | 2451619 | 953111645 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:00 1997 PST | 199 | 20 | 2 | 2450491 | 855624720 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Mon Feb 10 09:32:01 1997 PST | 199 | 20 | 2 | 2450490 | 855595921 + Mon Feb 10 09:32:01 1997 PST | 199 | 20 | 2 | 2450490 | 855595921 + Mon Feb 10 09:32:01 1997 PST | 199 | 20 | 2 | 2450490 | 855595921 + Mon Feb 10 14:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855613921 + Thu Jul 10 14:32:01 1997 PDT | 199 | 20 | 2 | 2450641 | 868570321 + Tue Jun 10 18:32:01 1997 PDT | 199 | 20 | 2 | 2450611 | 865992721 + Mon Feb 10 17:32:01 1997 PST | 199 | 20 | 2 | 2450491 | 855624721 + Tue Feb 11 17:32:01 1997 PST | 199 | 20 | 2 | 2450492 | 855711121 + Wed Feb 12 17:32:01 1997 PST | 199 | 20 | 2 | 2450493 | 855797521 + Thu Feb 13 17:32:01 1997 PST | 199 | 20 | 2 | 2450494 | 855883921 + Fri Feb 14 17:32:01 1997 PST | 199 | 20 | 2 | 2450495 | 855970321 + Sat Feb 15 17:32:01 
1997 PST | 199 | 20 | 2 | 2450496 | 856056721 + Sun Feb 16 17:32:01 1997 PST | 199 | 20 | 2 | 2450497 | 856143121 + Tue Feb 16 17:32:01 0097 PST BC | -10 | -1 | -1 | 1686043 | -65192682479 + Sat Feb 16 17:32:01 0097 PST | 9 | 1 | 1 | 1756537 | -59102000879 + Thu Feb 16 17:32:01 0597 PST | 59 | 6 | 1 | 1939158 | -43323546479 + Tue Feb 16 17:32:01 1097 PST | 109 | 11 | 2 | 2121779 | -27545092079 + Sat Feb 16 17:32:01 1697 PST | 169 | 17 | 2 | 2340925 | -8610877679 + Thu Feb 16 17:32:01 1797 PST | 179 | 18 | 2 | 2377449 | -5455204079 + Tue Feb 16 17:32:01 1897 PST | 189 | 19 | 2 | 2413973 | -2299530479 + Sun Feb 16 17:32:01 1997 PST | 199 | 20 | 2 | 2450497 | 856143121 + Sat Feb 16 17:32:01 2097 PST | 209 | 21 | 3 | 2487022 | 4011903121 + Wed Feb 28 17:32:01 1996 PST | 199 | 20 | 2 | 2450143 | 825557521 + Thu Feb 29 17:32:01 1996 PST | 199 | 20 | 2 | 2450144 | 825643921 + Fri Mar 01 17:32:01 1996 PST | 199 | 20 | 2 | 2450145 | 825730321 + Mon Dec 30 17:32:01 1996 PST | 199 | 20 | 2 | 2450449 | 851995921 + Tue Dec 31 17:32:01 1996 PST | 199 | 20 | 2 | 2450450 | 852082321 + Wed Jan 01 17:32:01 1997 PST | 199 | 20 | 2 | 2450451 | 852168721 + Fri Feb 28 17:32:01 1997 PST | 199 | 20 | 2 | 2450509 | 857179921 + Sat Mar 01 17:32:01 1997 PST | 199 | 20 | 2 | 2450510 | 857266321 + Tue Dec 30 17:32:01 1997 PST | 199 | 20 | 2 | 2450814 | 883531921 + Wed Dec 31 17:32:01 1997 PST | 199 | 20 | 2 | 2450815 | 883618321 + Fri Dec 31 17:32:01 1999 PST | 199 | 20 | 2 | 2451545 | 946690321 + Sat Jan 01 17:32:01 2000 PST | 200 | 20 | 2 | 2451546 | 946776721 + Sun Dec 31 17:32:01 2000 PST | 200 | 20 | 2 | 2451911 | 978312721 + Mon Jan 01 17:32:01 2001 PST | 200 | 21 | 3 | 2451912 | 978399121 +(66 rows) + +SELECT d1 as timestamptz, + date_part( 'timezone', d1) AS timezone, + date_part( 'timezone_hour', d1) AS timezone_hour, + date_part( 'timezone_minute', d1) AS timezone_minute + FROM TIMESTAMPTZ_TBL; + timestamptz | timezone | timezone_hour | timezone_minute +---------------------------------+----------+---------------+----------------- + -infinity | | | + infinity | | | + Wed Dec 31 16:00:00 1969 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:02 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01.4 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01.5 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01.6 1997 PST | -28800 | -8 | 0 + Thu Jan 02 00:00:00 1997 PST | -28800 | -8 | 0 + Thu Jan 02 03:04:05 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Tue Jun 10 17:32:01 1997 PDT | -25200 | -7 | 0 + Sat Sep 22 18:19:20 2001 PDT | -25200 | -7 | 0 + Wed Mar 15 08:14:01 2000 PST | -28800 | -8 | 0 + Wed Mar 15 04:14:02 2000 PST | -28800 | -8 | 0 + Wed Mar 15 02:14:03 2000 PST | -28800 | -8 | 0 + Wed Mar 15 03:14:04 2000 PST | -28800 | -8 | 0 + Wed Mar 15 01:14:05 2000 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:00 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 09:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 09:32:01 1997 PST | -28800 | -8 
| 0 + Mon Feb 10 09:32:01 1997 PST | -28800 | -8 | 0 + Mon Feb 10 14:32:01 1997 PST | -28800 | -8 | 0 + Thu Jul 10 14:32:01 1997 PDT | -25200 | -7 | 0 + Tue Jun 10 18:32:01 1997 PDT | -25200 | -7 | 0 + Mon Feb 10 17:32:01 1997 PST | -28800 | -8 | 0 + Tue Feb 11 17:32:01 1997 PST | -28800 | -8 | 0 + Wed Feb 12 17:32:01 1997 PST | -28800 | -8 | 0 + Thu Feb 13 17:32:01 1997 PST | -28800 | -8 | 0 + Fri Feb 14 17:32:01 1997 PST | -28800 | -8 | 0 + Sat Feb 15 17:32:01 1997 PST | -28800 | -8 | 0 + Sun Feb 16 17:32:01 1997 PST | -28800 | -8 | 0 + Tue Feb 16 17:32:01 0097 PST BC | -28800 | -8 | 0 + Sat Feb 16 17:32:01 0097 PST | -28800 | -8 | 0 + Thu Feb 16 17:32:01 0597 PST | -28800 | -8 | 0 + Tue Feb 16 17:32:01 1097 PST | -28800 | -8 | 0 + Sat Feb 16 17:32:01 1697 PST | -28800 | -8 | 0 + Thu Feb 16 17:32:01 1797 PST | -28800 | -8 | 0 + Tue Feb 16 17:32:01 1897 PST | -28800 | -8 | 0 + Sun Feb 16 17:32:01 1997 PST | -28800 | -8 | 0 + Sat Feb 16 17:32:01 2097 PST | -28800 | -8 | 0 + Wed Feb 28 17:32:01 1996 PST | -28800 | -8 | 0 + Thu Feb 29 17:32:01 1996 PST | -28800 | -8 | 0 + Fri Mar 01 17:32:01 1996 PST | -28800 | -8 | 0 + Mon Dec 30 17:32:01 1996 PST | -28800 | -8 | 0 + Tue Dec 31 17:32:01 1996 PST | -28800 | -8 | 0 + Wed Jan 01 17:32:01 1997 PST | -28800 | -8 | 0 + Fri Feb 28 17:32:01 1997 PST | -28800 | -8 | 0 + Sat Mar 01 17:32:01 1997 PST | -28800 | -8 | 0 + Tue Dec 30 17:32:01 1997 PST | -28800 | -8 | 0 + Wed Dec 31 17:32:01 1997 PST | -28800 | -8 | 0 + Fri Dec 31 17:32:01 1999 PST | -28800 | -8 | 0 + Sat Jan 01 17:32:01 2000 PST | -28800 | -8 | 0 + Sun Dec 31 17:32:01 2000 PST | -28800 | -8 | 0 + Mon Jan 01 17:32:01 2001 PST | -28800 | -8 | 0 +(66 rows) + +-- extract implementation is mostly the same as date_part, so only +-- test a few cases for additional coverage. 
+SELECT d1 as "timestamp", + extract(microseconds from d1) AS microseconds, + extract(milliseconds from d1) AS milliseconds, + extract(seconds from d1) AS seconds, + round(extract(julian from d1)) AS julian, + extract(epoch from d1) AS epoch + FROM TIMESTAMPTZ_TBL; + timestamp | microseconds | milliseconds | seconds | julian | epoch +---------------------------------+--------------+--------------+-----------+-----------+--------------------- + -infinity | | | | -Infinity | -Infinity + infinity | | | | Infinity | Infinity + Wed Dec 31 16:00:00 1969 PST | 0 | 0.000 | 0.000000 | 2440588 | 0.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:02 1997 PST | 2000000 | 2000.000 | 2.000000 | 2450491 | 855624722.000000 + Mon Feb 10 17:32:01.4 1997 PST | 1400000 | 1400.000 | 1.400000 | 2450491 | 855624721.400000 + Mon Feb 10 17:32:01.5 1997 PST | 1500000 | 1500.000 | 1.500000 | 2450491 | 855624721.500000 + Mon Feb 10 17:32:01.6 1997 PST | 1600000 | 1600.000 | 1.600000 | 2450491 | 855624721.600000 + Thu Jan 02 00:00:00 1997 PST | 0 | 0.000 | 0.000000 | 2450451 | 852192000.000000 + Thu Jan 02 03:04:05 1997 PST | 5000000 | 5000.000 | 5.000000 | 2450451 | 852203045.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Tue Jun 10 17:32:01 1997 PDT | 1000000 | 1000.000 | 1.000000 | 2450611 | 865989121.000000 + Sat Sep 22 18:19:20 2001 PDT | 20000000 | 20000.000 | 20.000000 | 2452176 | 1001207960.000000 + Wed Mar 15 08:14:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451619 | 953136841.000000 + Wed Mar 15 04:14:02 2000 PST | 2000000 | 2000.000 | 2.000000 | 2451619 | 953122442.000000 + Wed Mar 15 02:14:03 2000 PST | 3000000 | 3000.000 | 3.000000 | 2451619 | 953115243.000000 + Wed Mar 15 03:14:04 2000 PST | 4000000 | 4000.000 | 4.000000 | 2451619 | 953118844.000000 + Wed Mar 15 01:14:05 2000 PST | 5000000 | 5000.000 | 5.000000 | 2451619 | 953111645.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:00 1997 PST | 0 | 0.000 | 0.000000 | 2450491 | 855624720.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Mon Feb 10 09:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450490 | 855595921.000000 + Mon Feb 10 09:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450490 | 855595921.000000 + Mon Feb 10 09:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450490 | 855595921.000000 + Mon Feb 10 14:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855613921.000000 + Thu Jul 10 14:32:01 1997 PDT | 1000000 | 1000.000 | 1.000000 | 2450641 | 868570321.000000 + Tue Jun 10 
18:32:01 1997 PDT | 1000000 | 1000.000 | 1.000000 | 2450611 | 865992721.000000 + Mon Feb 10 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450491 | 855624721.000000 + Tue Feb 11 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450492 | 855711121.000000 + Wed Feb 12 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450493 | 855797521.000000 + Thu Feb 13 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450494 | 855883921.000000 + Fri Feb 14 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450495 | 855970321.000000 + Sat Feb 15 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450496 | 856056721.000000 + Sun Feb 16 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450497 | 856143121.000000 + Tue Feb 16 17:32:01 0097 PST BC | 1000000 | 1000.000 | 1.000000 | 1686043 | -65192682479.000000 + Sat Feb 16 17:32:01 0097 PST | 1000000 | 1000.000 | 1.000000 | 1756537 | -59102000879.000000 + Thu Feb 16 17:32:01 0597 PST | 1000000 | 1000.000 | 1.000000 | 1939158 | -43323546479.000000 + Tue Feb 16 17:32:01 1097 PST | 1000000 | 1000.000 | 1.000000 | 2121779 | -27545092079.000000 + Sat Feb 16 17:32:01 1697 PST | 1000000 | 1000.000 | 1.000000 | 2340925 | -8610877679.000000 + Thu Feb 16 17:32:01 1797 PST | 1000000 | 1000.000 | 1.000000 | 2377449 | -5455204079.000000 + Tue Feb 16 17:32:01 1897 PST | 1000000 | 1000.000 | 1.000000 | 2413973 | -2299530479.000000 + Sun Feb 16 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450497 | 856143121.000000 + Sat Feb 16 17:32:01 2097 PST | 1000000 | 1000.000 | 1.000000 | 2487022 | 4011903121.000000 + Wed Feb 28 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450143 | 825557521.000000 + Thu Feb 29 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450144 | 825643921.000000 + Fri Mar 01 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450145 | 825730321.000000 + Mon Dec 30 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450449 | 851995921.000000 + Tue Dec 31 17:32:01 1996 PST | 1000000 | 1000.000 | 1.000000 | 2450450 | 852082321.000000 + Wed Jan 01 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450451 | 852168721.000000 + Fri Feb 28 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450509 | 857179921.000000 + Sat Mar 01 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450510 | 857266321.000000 + Tue Dec 30 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450814 | 883531921.000000 + Wed Dec 31 17:32:01 1997 PST | 1000000 | 1000.000 | 1.000000 | 2450815 | 883618321.000000 + Fri Dec 31 17:32:01 1999 PST | 1000000 | 1000.000 | 1.000000 | 2451545 | 946690321.000000 + Sat Jan 01 17:32:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451546 | 946776721.000000 + Sun Dec 31 17:32:01 2000 PST | 1000000 | 1000.000 | 1.000000 | 2451911 | 978312721.000000 + Mon Jan 01 17:32:01 2001 PST | 1000000 | 1000.000 | 1.000000 | 2451912 | 978399121.000000 +(66 rows) + +-- value near upper bound uses special case in code +SELECT date_part('epoch', '294270-01-01 00:00:00+00'::timestamptz); + date_part +--------------- + 9224097091200 +(1 row) + +SELECT extract(epoch from '294270-01-01 00:00:00+00'::timestamptz); + extract +---------------------- + 9224097091200.000000 +(1 row) + +-- another internal overflow test case +SELECT extract(epoch from '5000-01-01 00:00:00+00'::timestamptz); + extract +-------------------- + 95617584000.000000 +(1 row) + +-- test edge-case overflow in timestamp subtraction +SELECT timestamptz '294276-12-31 23:59:59 UTC' - timestamptz '1999-12-23 19:59:04.224193 UTC' AS ok; + ok 
+----------------------------------------- + @ 106751991 days 4 hours 54.775807 secs +(1 row) + +SELECT timestamptz '294276-12-31 23:59:59 UTC' - timestamptz '1999-12-23 19:59:04.224192 UTC' AS overflows; +ERROR: interval out of range +-- TO_CHAR() +SELECT to_char(d1, 'DAY Day day DY Dy dy MONTH Month month RM MON Mon mon') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------------------------------------------------------------------------ + + + WEDNESDAY Wednesday wednesday WED Wed wed DECEMBER December december XII DEC Dec dec + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan + THURSDAY Thursday thursday THU Thu thu JANUARY January january I JAN Jan jan + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun + SATURDAY Saturday saturday SAT Sat sat SEPTEMBER September september IX SEP Sep sep + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + WEDNESDAY Wednesday wednesday WED Wed wed MARCH March march III MAR Mar mar + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu JULY July july VII JUL Jul jul + TUESDAY Tuesday tuesday TUE Tue tue JUNE June june VI JUN Jun jun + MONDAY Monday monday MON Mon mon FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II 
FEB Feb feb + SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + TUESDAY Tuesday tuesday TUE Tue tue FEBRUARY February february II FEB Feb feb + SUNDAY Sunday sunday SUN Sun sun FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat FEBRUARY February february II FEB Feb feb + WEDNESDAY Wednesday wednesday WED Wed wed FEBRUARY February february II FEB Feb feb + THURSDAY Thursday thursday THU Thu thu FEBRUARY February february II FEB Feb feb + FRIDAY Friday friday FRI Fri fri MARCH March march III MAR Mar mar + MONDAY Monday monday MON Mon mon DECEMBER December december XII DEC Dec dec + TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec + WEDNESDAY Wednesday wednesday WED Wed wed JANUARY January january I JAN Jan jan + FRIDAY Friday friday FRI Fri fri FEBRUARY February february II FEB Feb feb + SATURDAY Saturday saturday SAT Sat sat MARCH March march III MAR Mar mar + TUESDAY Tuesday tuesday TUE Tue tue DECEMBER December december XII DEC Dec dec + WEDNESDAY Wednesday wednesday WED Wed wed DECEMBER December december XII DEC Dec dec + FRIDAY Friday friday FRI Fri fri DECEMBER December december XII DEC Dec dec + SATURDAY Saturday saturday SAT Sat sat JANUARY January january I JAN Jan jan + SUNDAY Sunday sunday SUN Sun sun DECEMBER December december XII DEC Dec dec + MONDAY Monday monday MON Mon mon JANUARY January january I JAN Jan jan +(66 rows) + +SELECT to_char(d1, 'FMDAY FMDay FMday FMMONTH FMMonth FMmonth FMRM') + FROM TIMESTAMPTZ_TBL; + to_char +-------------------------------------------------------------- + + + WEDNESDAY Wednesday wednesday DECEMBER December december XII + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + THURSDAY Thursday thursday JANUARY January january I + THURSDAY Thursday thursday JANUARY January january I + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + TUESDAY Tuesday tuesday JUNE June june VI + SATURDAY Saturday saturday SEPTEMBER September september IX + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + WEDNESDAY Wednesday wednesday MARCH March march III + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February 
february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + MONDAY Monday monday FEBRUARY February february II + THURSDAY Thursday thursday JULY July july VII + TUESDAY Tuesday tuesday JUNE June june VI + MONDAY Monday monday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + WEDNESDAY Wednesday wednesday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + FRIDAY Friday friday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + SUNDAY Sunday sunday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + TUESDAY Tuesday tuesday FEBRUARY February february II + SUNDAY Sunday sunday FEBRUARY February february II + SATURDAY Saturday saturday FEBRUARY February february II + WEDNESDAY Wednesday wednesday FEBRUARY February february II + THURSDAY Thursday thursday FEBRUARY February february II + FRIDAY Friday friday MARCH March march III + MONDAY Monday monday DECEMBER December december XII + TUESDAY Tuesday tuesday DECEMBER December december XII + WEDNESDAY Wednesday wednesday JANUARY January january I + FRIDAY Friday friday FEBRUARY February february II + SATURDAY Saturday saturday MARCH March march III + TUESDAY Tuesday tuesday DECEMBER December december XII + WEDNESDAY Wednesday wednesday DECEMBER December december XII + FRIDAY Friday friday DECEMBER December december XII + SATURDAY Saturday saturday JANUARY January january I + SUNDAY Sunday sunday DECEMBER December december XII + MONDAY Monday monday JANUARY January january I +(66 rows) + +SELECT to_char(d1, 'Y,YYY YYYY YYY YY Y CC Q MM WW DDD DD D J') + FROM TIMESTAMPTZ_TBL; + to_char +-------------------------------------------------- + + + 1,969 1969 969 69 9 20 4 12 53 365 31 4 2440587 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 + 1,997 1997 997 97 7 20 1 01 01 002 02 5 2450451 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610 + 2,001 2001 001 01 1 21 3 09 38 265 22 7 2452175 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 2,000 2000 000 00 0 20 1 03 11 075 15 4 2451619 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 
041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 3 07 28 191 10 5 2450640 + 1,997 1997 997 97 7 20 2 06 23 161 10 3 2450610 + 1,997 1997 997 97 7 20 1 02 06 041 10 2 2450490 + 1,997 1997 997 97 7 20 1 02 06 042 11 3 2450491 + 1,997 1997 997 97 7 20 1 02 07 043 12 4 2450492 + 1,997 1997 997 97 7 20 1 02 07 044 13 5 2450493 + 1,997 1997 997 97 7 20 1 02 07 045 14 6 2450494 + 1,997 1997 997 97 7 20 1 02 07 046 15 7 2450495 + 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496 + 0,097 0097 097 97 7 -01 1 02 07 047 16 3 1686042 + 0,097 0097 097 97 7 01 1 02 07 047 16 7 1756536 + 0,597 0597 597 97 7 06 1 02 07 047 16 5 1939157 + 1,097 1097 097 97 7 11 1 02 07 047 16 3 2121778 + 1,697 1697 697 97 7 17 1 02 07 047 16 7 2340924 + 1,797 1797 797 97 7 18 1 02 07 047 16 5 2377448 + 1,897 1897 897 97 7 19 1 02 07 047 16 3 2413972 + 1,997 1997 997 97 7 20 1 02 07 047 16 1 2450496 + 2,097 2097 097 97 7 21 1 02 07 047 16 7 2487021 + 1,996 1996 996 96 6 20 1 02 09 059 28 4 2450142 + 1,996 1996 996 96 6 20 1 02 09 060 29 5 2450143 + 1,996 1996 996 96 6 20 1 03 09 061 01 6 2450144 + 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448 + 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449 + 1,997 1997 997 97 7 20 1 01 01 001 01 4 2450450 + 1,997 1997 997 97 7 20 1 02 09 059 28 6 2450508 + 1,997 1997 997 97 7 20 1 03 09 060 01 7 2450509 + 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813 + 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814 + 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544 + 2,000 2000 000 00 0 20 1 01 01 001 01 7 2451545 + 2,000 2000 000 00 0 20 4 12 53 366 31 1 2451910 + 2,001 2001 001 01 1 21 1 01 01 001 01 2 2451911 +(66 rows) + +SELECT to_char(d1, 'FMY,YYY FMYYYY FMYYY FMYY FMY FMCC FMQ FMMM FMWW FMDDD FMDD FMD FMJ') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------------------------------- + + + 1,969 1969 969 69 9 20 4 12 53 365 31 4 2440587 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 + 1,997 1997 997 97 7 20 1 1 1 2 2 5 2450451 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610 + 2,001 2001 1 1 1 21 3 9 38 265 22 7 2452175 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 2,000 2000 0 0 0 20 1 3 11 75 15 4 2451619 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 
+ 1,997 1997 997 97 7 20 3 7 28 191 10 5 2450640 + 1,997 1997 997 97 7 20 2 6 23 161 10 3 2450610 + 1,997 1997 997 97 7 20 1 2 6 41 10 2 2450490 + 1,997 1997 997 97 7 20 1 2 6 42 11 3 2450491 + 1,997 1997 997 97 7 20 1 2 7 43 12 4 2450492 + 1,997 1997 997 97 7 20 1 2 7 44 13 5 2450493 + 1,997 1997 997 97 7 20 1 2 7 45 14 6 2450494 + 1,997 1997 997 97 7 20 1 2 7 46 15 7 2450495 + 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496 + 0,097 97 97 97 7 -1 1 2 7 47 16 3 1686042 + 0,097 97 97 97 7 1 1 2 7 47 16 7 1756536 + 0,597 597 597 97 7 6 1 2 7 47 16 5 1939157 + 1,097 1097 97 97 7 11 1 2 7 47 16 3 2121778 + 1,697 1697 697 97 7 17 1 2 7 47 16 7 2340924 + 1,797 1797 797 97 7 18 1 2 7 47 16 5 2377448 + 1,897 1897 897 97 7 19 1 2 7 47 16 3 2413972 + 1,997 1997 997 97 7 20 1 2 7 47 16 1 2450496 + 2,097 2097 97 97 7 21 1 2 7 47 16 7 2487021 + 1,996 1996 996 96 6 20 1 2 9 59 28 4 2450142 + 1,996 1996 996 96 6 20 1 2 9 60 29 5 2450143 + 1,996 1996 996 96 6 20 1 3 9 61 1 6 2450144 + 1,996 1996 996 96 6 20 4 12 53 365 30 2 2450448 + 1,996 1996 996 96 6 20 4 12 53 366 31 3 2450449 + 1,997 1997 997 97 7 20 1 1 1 1 1 4 2450450 + 1,997 1997 997 97 7 20 1 2 9 59 28 6 2450508 + 1,997 1997 997 97 7 20 1 3 9 60 1 7 2450509 + 1,997 1997 997 97 7 20 4 12 52 364 30 3 2450813 + 1,997 1997 997 97 7 20 4 12 53 365 31 4 2450814 + 1,999 1999 999 99 9 20 4 12 53 365 31 6 2451544 + 2,000 2000 0 0 0 20 1 1 1 1 1 7 2451545 + 2,000 2000 0 0 0 20 4 12 53 366 31 1 2451910 + 2,001 2001 1 1 1 21 1 1 1 1 1 2 2451911 +(66 rows) + +SELECT to_char(d1, 'HH HH12 HH24 MI SS SSSS') + FROM TIMESTAMPTZ_TBL; + to_char +---------------------- + + + 04 04 16 00 00 57600 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 02 63122 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 12 12 00 00 00 0 + 03 03 03 04 05 11045 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 06 06 18 19 20 65960 + 08 08 08 14 01 29641 + 04 04 04 14 02 15242 + 02 02 02 14 03 8043 + 03 03 03 14 04 11644 + 01 01 01 14 05 4445 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 00 63120 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 09 09 09 32 01 34321 + 09 09 09 32 01 34321 + 09 09 09 32 01 34321 + 02 02 14 32 01 52321 + 02 02 14 32 01 52321 + 06 06 18 32 01 66721 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 + 05 05 17 32 01 63121 +(66 rows) + +SELECT to_char(d1, E'"HH:MI:SS is" HH:MI:SS "\\"text between quote marks\\""') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------------------------------- + + + HH:MI:SS is 04:00:00 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:02 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between 
quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 12:00:00 "text between quote marks" + HH:MI:SS is 03:04:05 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 06:19:20 "text between quote marks" + HH:MI:SS is 08:14:01 "text between quote marks" + HH:MI:SS is 04:14:02 "text between quote marks" + HH:MI:SS is 02:14:03 "text between quote marks" + HH:MI:SS is 03:14:04 "text between quote marks" + HH:MI:SS is 01:14:05 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:00 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 09:32:01 "text between quote marks" + HH:MI:SS is 09:32:01 "text between quote marks" + HH:MI:SS is 09:32:01 "text between quote marks" + HH:MI:SS is 02:32:01 "text between quote marks" + HH:MI:SS is 02:32:01 "text between quote marks" + HH:MI:SS is 06:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" + HH:MI:SS is 05:32:01 "text between quote marks" +(66 rows) + +SELECT to_char(d1, 'HH24--text--MI--text--SS') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------ + + + 16--text--00--text--00 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--02 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 00--text--00--text--00 + 03--text--04--text--05 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 18--text--19--text--20 + 08--text--14--text--01 + 04--text--14--text--02 + 02--text--14--text--03 + 03--text--14--text--04 + 01--text--14--text--05 + 
17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--00 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 09--text--32--text--01 + 09--text--32--text--01 + 09--text--32--text--01 + 14--text--32--text--01 + 14--text--32--text--01 + 18--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 + 17--text--32--text--01 +(66 rows) + +SELECT to_char(d1, 'YYYYTH YYYYth Jth') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------- + + + 1969TH 1969th 2440587th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450451st + 1997TH 1997th 2450451st + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450610th + 2001ST 2001st 2452175th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 2000TH 2000th 2451619th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450490th + 1997TH 1997th 2450640th + 1997TH 1997th 2450610th + 1997TH 1997th 2450490th + 1997TH 1997th 2450491st + 1997TH 1997th 2450492nd + 1997TH 1997th 2450493rd + 1997TH 1997th 2450494th + 1997TH 1997th 2450495th + 1997TH 1997th 2450496th + 0097TH 0097th 1686042nd + 0097TH 0097th 1756536th + 0597TH 0597th 1939157th + 1097TH 1097th 2121778th + 1697TH 1697th 2340924th + 1797TH 1797th 2377448th + 1897TH 1897th 2413972nd + 1997TH 1997th 2450496th + 2097TH 2097th 2487021st + 1996TH 1996th 2450142nd + 1996TH 1996th 2450143rd + 1996TH 1996th 2450144th + 1996TH 1996th 2450448th + 1996TH 1996th 2450449th + 1997TH 1997th 2450450th + 1997TH 1997th 2450508th + 1997TH 1997th 2450509th + 1997TH 1997th 2450813th + 1997TH 1997th 2450814th + 1999TH 1999th 2451544th + 2000TH 2000th 2451545th + 2000TH 2000th 2451910th + 2001ST 2001st 2451911th +(66 rows) + +SELECT to_char(d1, 'YYYY A.D. YYYY a.d. YYYY bc HH:MI:SS P.M. HH:MI:SS p.m. HH:MI:SS pm') + FROM TIMESTAMPTZ_TBL; + to_char +--------------------------------------------------------------------- + + + 1969 A.D. 1969 a.d. 1969 ad 04:00:00 P.M. 04:00:00 p.m. 04:00:00 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:02 P.M. 05:32:02 p.m. 05:32:02 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 
1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 12:00:00 A.M. 12:00:00 a.m. 12:00:00 am + 1997 A.D. 1997 a.d. 1997 ad 03:04:05 A.M. 03:04:05 a.m. 03:04:05 am + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2001 A.D. 2001 a.d. 2001 ad 06:19:20 P.M. 06:19:20 p.m. 06:19:20 pm + 2000 A.D. 2000 a.d. 2000 ad 08:14:01 A.M. 08:14:01 a.m. 08:14:01 am + 2000 A.D. 2000 a.d. 2000 ad 04:14:02 A.M. 04:14:02 a.m. 04:14:02 am + 2000 A.D. 2000 a.d. 2000 ad 02:14:03 A.M. 02:14:03 a.m. 02:14:03 am + 2000 A.D. 2000 a.d. 2000 ad 03:14:04 A.M. 03:14:04 a.m. 03:14:04 am + 2000 A.D. 2000 a.d. 2000 ad 01:14:05 A.M. 01:14:05 a.m. 01:14:05 am + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:00 P.M. 05:32:00 p.m. 05:32:00 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 09:32:01 A.M. 09:32:01 a.m. 09:32:01 am + 1997 A.D. 1997 a.d. 1997 ad 09:32:01 A.M. 09:32:01 a.m. 09:32:01 am + 1997 A.D. 1997 a.d. 1997 ad 09:32:01 A.M. 09:32:01 a.m. 09:32:01 am + 1997 A.D. 1997 a.d. 1997 ad 02:32:01 P.M. 02:32:01 p.m. 02:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 02:32:01 P.M. 02:32:01 p.m. 02:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 06:32:01 P.M. 06:32:01 p.m. 06:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 0097 B.C. 0097 b.c. 0097 bc 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 0097 A.D. 0097 a.d. 0097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 0597 A.D. 0597 a.d. 0597 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1097 A.D. 1097 a.d. 1097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1697 A.D. 1697 a.d. 1697 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1797 A.D. 1797 a.d. 1797 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1897 A.D. 1897 a.d. 1897 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2097 A.D. 2097 a.d. 2097 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1996 A.D. 1996 a.d. 1996 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 
05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1997 A.D. 1997 a.d. 1997 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 1999 A.D. 1999 a.d. 1999 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2000 A.D. 2000 a.d. 2000 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm + 2001 A.D. 2001 a.d. 2001 ad 05:32:01 P.M. 05:32:01 p.m. 05:32:01 pm +(66 rows) + +SELECT to_char(d1, 'IYYY IYY IY I IW IDDD ID') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------ + + + 1970 970 70 0 01 003 3 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 01 004 4 + 1997 997 97 7 01 004 4 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 24 163 2 + 2001 001 01 1 38 265 6 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 2000 000 00 0 11 073 3 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 043 1 + 1997 997 97 7 28 193 4 + 1997 997 97 7 24 163 2 + 1997 997 97 7 07 043 1 + 1997 997 97 7 07 044 2 + 1997 997 97 7 07 045 3 + 1997 997 97 7 07 046 4 + 1997 997 97 7 07 047 5 + 1997 997 97 7 07 048 6 + 1997 997 97 7 07 049 7 + 0097 097 97 7 07 044 2 + 0097 097 97 7 07 048 6 + 0597 597 97 7 07 046 4 + 1097 097 97 7 07 044 2 + 1697 697 97 7 07 048 6 + 1797 797 97 7 07 046 4 + 1897 897 97 7 07 044 2 + 1997 997 97 7 07 049 7 + 2097 097 97 7 07 048 6 + 1996 996 96 6 09 059 3 + 1996 996 96 6 09 060 4 + 1996 996 96 6 09 061 5 + 1997 997 97 7 01 001 1 + 1997 997 97 7 01 002 2 + 1997 997 97 7 01 003 3 + 1997 997 97 7 09 061 5 + 1997 997 97 7 09 062 6 + 1998 998 98 8 01 002 2 + 1998 998 98 8 01 003 3 + 1999 999 99 9 52 362 5 + 1999 999 99 9 52 363 6 + 2000 000 00 0 52 364 7 + 2001 001 01 1 01 001 1 +(66 rows) + +SELECT to_char(d1, 'FMIYYY FMIYY FMIY FMI FMIW FMIDDD FMID') + FROM TIMESTAMPTZ_TBL; + to_char +------------------------ + + + 1970 970 70 0 1 3 3 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 1 4 4 + 1997 997 97 7 1 4 4 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 24 163 2 + 2001 1 1 1 38 265 6 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 2000 0 0 0 11 73 3 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 43 1 + 1997 997 97 7 28 193 4 + 1997 997 97 7 24 163 2 + 1997 997 97 7 7 43 1 + 1997 997 97 7 7 44 2 + 1997 997 97 7 7 45 3 + 1997 997 97 7 7 46 4 + 1997 997 97 7 7 47 5 + 1997 997 97 7 7 48 6 + 1997 997 97 7 7 49 7 + 97 97 97 7 7 44 2 + 97 97 97 7 7 48 6 + 597 597 97 7 7 46 4 + 1097 97 97 7 7 44 2 + 1697 697 97 7 7 48 6 + 1797 797 97 7 7 46 4 + 1897 897 97 7 7 44 2 + 1997 997 97 7 7 49 7 + 2097 97 97 7 7 48 6 + 1996 996 96 6 9 59 3 + 1996 996 96 6 9 60 4 + 1996 
996 96 6 9 61 5 + 1997 997 97 7 1 1 1 + 1997 997 97 7 1 2 2 + 1997 997 97 7 1 3 3 + 1997 997 97 7 9 61 5 + 1997 997 97 7 9 62 6 + 1998 998 98 8 1 2 2 + 1998 998 98 8 1 3 3 + 1999 999 99 9 52 362 5 + 1999 999 99 9 52 363 6 + 2000 0 0 0 52 364 7 + 2001 1 1 1 1 1 1 +(66 rows) + +SELECT to_char(d, 'FF1 FF2 FF3 FF4 FF5 FF6 ff1 ff2 ff3 ff4 ff5 ff6 MS US') + FROM (VALUES + ('2018-11-02 12:34:56'::timestamptz), + ('2018-11-02 12:34:56.78'), + ('2018-11-02 12:34:56.78901'), + ('2018-11-02 12:34:56.78901234') + ) d(d); + to_char +-------------------------------------------------------------------- + 0 00 000 0000 00000 000000 0 00 000 0000 00000 000000 000 000000 + 7 78 780 7800 78000 780000 7 78 780 7800 78000 780000 780 780000 + 7 78 789 7890 78901 789010 7 78 789 7890 78901 789010 789 789010 + 7 78 789 7890 78901 789012 7 78 789 7890 78901 789012 789 789012 +(4 rows) + +-- Check OF, TZH, TZM with various zone offsets, particularly fractional hours +SET timezone = '00:00'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +-----+--------- + +00 | +00:00 +(1 row) + +SET timezone = '+02:00'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +-----+--------- + -02 | -02:00 +(1 row) + +SET timezone = '-13:00'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +-----+--------- + +13 | +13:00 +(1 row) + +SET timezone = '-00:30'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + +00:30 | +00:30 +(1 row) + +SET timezone = '00:30'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + -00:30 | -00:30 +(1 row) + +SET timezone = '-04:30'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + +04:30 | +04:30 +(1 row) + +SET timezone = '04:30'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + -04:30 | -04:30 +(1 row) + +SET timezone = '-04:15'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + +04:15 | +04:15 +(1 row) + +SET timezone = '04:15'; +SELECT to_char(now(), 'OF') as "OF", to_char(now(), 'TZH:TZM') as "TZH:TZM"; + OF | TZH:TZM +--------+--------- + -04:15 | -04:15 +(1 row) + +RESET timezone; +-- Check of, tzh, tzm with various zone offsets. 
+SET timezone = '00:00'; +SELECT to_char(now(), 'of') as "Of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + Of | tzh:tzm +-----+--------- + +00 | +00:00 +(1 row) + +SET timezone = '+02:00'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +-----+--------- + -02 | -02:00 +(1 row) + +SET timezone = '-13:00'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +-----+--------- + +13 | +13:00 +(1 row) + +SET timezone = '-00:30'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +--------+--------- + +00:30 | +00:30 +(1 row) + +SET timezone = '00:30'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +--------+--------- + -00:30 | -00:30 +(1 row) + +SET timezone = '-04:30'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +--------+--------- + +04:30 | +04:30 +(1 row) + +SET timezone = '04:30'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +--------+--------- + -04:30 | -04:30 +(1 row) + +SET timezone = '-04:15'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +--------+--------- + +04:15 | +04:15 +(1 row) + +SET timezone = '04:15'; +SELECT to_char(now(), 'of') as "of", to_char(now(), 'tzh:tzm') as "tzh:tzm"; + of | tzh:tzm +--------+--------- + -04:15 | -04:15 +(1 row) + +RESET timezone; +CREATE TABLE TIMESTAMPTZ_TST (a int , b timestamptz); +-- Test year field value with len > 4 +INSERT INTO TIMESTAMPTZ_TST VALUES(1, 'Sat Mar 12 23:58:48 1000 IST'); +INSERT INTO TIMESTAMPTZ_TST VALUES(2, 'Sat Mar 12 23:58:48 10000 IST'); +INSERT INTO TIMESTAMPTZ_TST VALUES(3, 'Sat Mar 12 23:58:48 100000 IST'); +INSERT INTO TIMESTAMPTZ_TST VALUES(3, '10000 Mar 12 23:58:48 IST'); +INSERT INTO TIMESTAMPTZ_TST VALUES(4, '100000312 23:58:48 IST'); +INSERT INTO TIMESTAMPTZ_TST VALUES(4, '1000000312 23:58:48 IST'); +--Verify data +SELECT * FROM TIMESTAMPTZ_TST ORDER BY a; + a | b +---+-------------------------------- + 1 | Wed Mar 12 13:58:48 1000 PST + 2 | Sun Mar 12 14:58:48 10000 PDT + 3 | Sun Mar 12 14:58:48 100000 PDT + 3 | Sun Mar 12 14:58:48 10000 PDT + 4 | Sun Mar 12 14:58:48 10000 PDT + 4 | Sun Mar 12 14:58:48 100000 PDT +(6 rows) + +--Cleanup +DROP TABLE TIMESTAMPTZ_TST; +-- test timestamptz constructors +set TimeZone to 'America/New_York'; +-- numeric timezone +SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33); + make_timestamptz +--------------------------------- + Sun Jul 15 08:15:55.33 1973 EDT +(1 row) + +SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '+2'); + make_timestamptz +--------------------------------- + Sun Jul 15 02:15:55.33 1973 EDT +(1 row) + +SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '-2'); + make_timestamptz +--------------------------------- + Sun Jul 15 06:15:55.33 1973 EDT +(1 row) + +WITH tzs (tz) AS (VALUES + ('+1'), ('+1:'), ('+1:0'), ('+100'), ('+1:00'), ('+01:00'), + ('+10'), ('+1000'), ('+10:'), ('+10:0'), ('+10:00'), ('+10:00:'), + ('+10:00:1'), ('+10:00:01'), + ('+10:00:10')) + SELECT make_timestamptz(2010, 2, 27, 3, 45, 00, tz), tz FROM tzs; + make_timestamptz | tz +------------------------------+----------- + Fri Feb 26 21:45:00 2010 EST | +1 + Fri Feb 26 21:45:00 2010 EST | +1: + Fri Feb 26 21:45:00 2010 EST | +1:0 + Fri Feb 26 21:45:00 2010 EST | +100 + Fri Feb 26 21:45:00 2010 EST | +1:00 + Fri Feb 26 21:45:00 2010 EST | +01:00 + Fri Feb 26 12:45:00 
2010 EST | +10 + Fri Feb 26 12:45:00 2010 EST | +1000 + Fri Feb 26 12:45:00 2010 EST | +10: + Fri Feb 26 12:45:00 2010 EST | +10:0 + Fri Feb 26 12:45:00 2010 EST | +10:00 + Fri Feb 26 12:45:00 2010 EST | +10:00: + Fri Feb 26 12:44:59 2010 EST | +10:00:1 + Fri Feb 26 12:44:59 2010 EST | +10:00:01 + Fri Feb 26 12:44:50 2010 EST | +10:00:10 +(15 rows) + +-- these should fail +SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '2'); +ERROR: invalid input syntax for type numeric time zone: "2" +HINT: Numeric time zones must have "-" or "+" as first character. +SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '+16'); +ERROR: numeric time zone "+16" out of range +SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, '-16'); +ERROR: numeric time zone "-16" out of range +-- should be true +SELECT make_timestamptz(1973, 07, 15, 08, 15, 55.33, '+2') = '1973-07-15 08:15:55.33+02'::timestamptz; + ?column? +---------- + t +(1 row) + +-- full timezone names +SELECT make_timestamptz(2014, 12, 10, 0, 0, 0, 'Europe/Prague') = timestamptz '2014-12-10 00:00:00 Europe/Prague'; + ?column? +---------- + t +(1 row) + +SELECT make_timestamptz(2014, 12, 10, 0, 0, 0, 'Europe/Prague') AT TIME ZONE 'UTC'; + timezone +-------------------------- + Tue Dec 09 23:00:00 2014 +(1 row) + +SELECT make_timestamptz(1846, 12, 10, 0, 0, 0, 'Asia/Manila') AT TIME ZONE 'UTC'; + timezone +-------------------------- + Wed Dec 09 15:56:00 1846 +(1 row) + +SELECT make_timestamptz(1881, 12, 10, 0, 0, 0, 'Europe/Paris') AT TIME ZONE 'UTC'; + timezone +-------------------------- + Fri Dec 09 23:50:39 1881 +(1 row) + +SELECT make_timestamptz(1910, 12, 24, 0, 0, 0, 'Nehwon/Lankhmar'); +ERROR: time zone "Nehwon/Lankhmar" not recognized +-- abbreviations +SELECT make_timestamptz(2008, 12, 10, 10, 10, 10, 'EST'); + make_timestamptz +------------------------------ + Wed Dec 10 10:10:10 2008 EST +(1 row) + +SELECT make_timestamptz(2008, 12, 10, 10, 10, 10, 'EDT'); + make_timestamptz +------------------------------ + Wed Dec 10 09:10:10 2008 EST +(1 row) + +SELECT make_timestamptz(2014, 12, 10, 10, 10, 10, 'PST8PDT'); + make_timestamptz +------------------------------ + Wed Dec 10 13:10:10 2014 EST +(1 row) + +RESET TimeZone; +-- generate_series for timestamptz +select * from generate_series('2020-01-01 00:00'::timestamptz, + '2020-01-02 03:00'::timestamptz, + '1 hour'::interval); + generate_series +------------------------------ + Wed Jan 01 00:00:00 2020 PST + Wed Jan 01 01:00:00 2020 PST + Wed Jan 01 02:00:00 2020 PST + Wed Jan 01 03:00:00 2020 PST + Wed Jan 01 04:00:00 2020 PST + Wed Jan 01 05:00:00 2020 PST + Wed Jan 01 06:00:00 2020 PST + Wed Jan 01 07:00:00 2020 PST + Wed Jan 01 08:00:00 2020 PST + Wed Jan 01 09:00:00 2020 PST + Wed Jan 01 10:00:00 2020 PST + Wed Jan 01 11:00:00 2020 PST + Wed Jan 01 12:00:00 2020 PST + Wed Jan 01 13:00:00 2020 PST + Wed Jan 01 14:00:00 2020 PST + Wed Jan 01 15:00:00 2020 PST + Wed Jan 01 16:00:00 2020 PST + Wed Jan 01 17:00:00 2020 PST + Wed Jan 01 18:00:00 2020 PST + Wed Jan 01 19:00:00 2020 PST + Wed Jan 01 20:00:00 2020 PST + Wed Jan 01 21:00:00 2020 PST + Wed Jan 01 22:00:00 2020 PST + Wed Jan 01 23:00:00 2020 PST + Thu Jan 02 00:00:00 2020 PST + Thu Jan 02 01:00:00 2020 PST + Thu Jan 02 02:00:00 2020 PST + Thu Jan 02 03:00:00 2020 PST +(28 rows) + +-- the LIMIT should allow this to terminate in a reasonable amount of time +-- (but that unfortunately doesn't work yet for SELECT * FROM ...) 
+select generate_series('2022-01-01 00:00'::timestamptz, + 'infinity'::timestamptz, + '1 month'::interval) limit 10; + generate_series +------------------------------ + Sat Jan 01 00:00:00 2022 PST + Tue Feb 01 00:00:00 2022 PST + Tue Mar 01 00:00:00 2022 PST + Fri Apr 01 00:00:00 2022 PDT + Sun May 01 00:00:00 2022 PDT + Wed Jun 01 00:00:00 2022 PDT + Fri Jul 01 00:00:00 2022 PDT + Mon Aug 01 00:00:00 2022 PDT + Thu Sep 01 00:00:00 2022 PDT + Sat Oct 01 00:00:00 2022 PDT +(10 rows) + +-- errors +select * from generate_series('2020-01-01 00:00'::timestamptz, + '2020-01-02 03:00'::timestamptz, + '0 hour'::interval); +ERROR: step size cannot equal zero +-- Interval crossing time shift for Europe/Warsaw timezone (with DST) +SET TimeZone to 'UTC'; +SELECT date_add('2022-10-30 00:00:00+01'::timestamptz, + '1 day'::interval); + date_add +------------------------------ + Sun Oct 30 23:00:00 2022 UTC +(1 row) + +SELECT date_add('2021-10-31 00:00:00+02'::timestamptz, + '1 day'::interval, + 'Europe/Warsaw'); + date_add +------------------------------ + Sun Oct 31 23:00:00 2021 UTC +(1 row) + +SELECT date_subtract('2022-10-30 00:00:00+01'::timestamptz, + '1 day'::interval); + date_subtract +------------------------------ + Fri Oct 28 23:00:00 2022 UTC +(1 row) + +SELECT date_subtract('2021-10-31 00:00:00+02'::timestamptz, + '1 day'::interval, + 'Europe/Warsaw'); + date_subtract +------------------------------ + Fri Oct 29 22:00:00 2021 UTC +(1 row) + +SELECT * FROM generate_series('2021-12-31 23:00:00+00'::timestamptz, + '2020-12-31 23:00:00+00'::timestamptz, + '-1 month'::interval, + 'Europe/Warsaw'); + generate_series +------------------------------ + Fri Dec 31 23:00:00 2021 UTC + Tue Nov 30 23:00:00 2021 UTC + Sun Oct 31 23:00:00 2021 UTC + Thu Sep 30 22:00:00 2021 UTC + Tue Aug 31 22:00:00 2021 UTC + Sat Jul 31 22:00:00 2021 UTC + Wed Jun 30 22:00:00 2021 UTC + Mon May 31 22:00:00 2021 UTC + Fri Apr 30 22:00:00 2021 UTC + Wed Mar 31 22:00:00 2021 UTC + Sun Feb 28 23:00:00 2021 UTC + Sun Jan 31 23:00:00 2021 UTC + Thu Dec 31 23:00:00 2020 UTC +(13 rows) + +RESET TimeZone; +-- +-- Test behavior with a dynamic (time-varying) timezone abbreviation. +-- These tests rely on the knowledge that MSK (Europe/Moscow standard time) +-- moved forwards in Mar 2011 and backwards again in Oct 2014. 
+-- +SET TimeZone to 'UTC'; +SELECT '2011-03-27 00:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 21:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:59:59 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:01 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 02:59:59 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:01 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 04:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 00:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 00:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 21:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:59:59 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:01 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 02:59:59 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 22:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:01 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Mar 26 23:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 04:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 00:00:00 2011 UTC +(1 row) + +SELECT '2014-10-26 00:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 20:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 00:59:59 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 20:59:59 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 22:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:01 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 22:00:01 2014 UTC +(1 row) + +SELECT '2014-10-26 02:00:00 Europe/Moscow'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 23:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 00:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 20:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 00:59:59 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 20:59:59 2014 
UTC +(1 row) + +SELECT '2014-10-26 01:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 22:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:01 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 22:00:01 2014 UTC +(1 row) + +SELECT '2014-10-26 02:00:00 MSK'::timestamptz; + timestamptz +------------------------------ + Sat Oct 25 23:00:00 2014 UTC +(1 row) + +SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 21:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 22:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:59:59'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 22:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 23:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:01'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 23:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 02:59:59'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 23:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 23:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:01'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Mar 26 23:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 04:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sun Mar 27 00:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 00:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 21:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 22:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 01:59:59'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 22:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 22:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 02:00:01'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 22:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 02:59:59'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 22:59:59 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 23:00:00 2011 UTC +(1 row) + +SELECT '2011-03-27 03:00:01'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Mar 26 23:00:01 2011 UTC +(1 row) + +SELECT '2011-03-27 04:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sun Mar 27 00:00:00 2011 UTC +(1 row) + +SELECT '2014-10-26 00:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Oct 25 20:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 00:59:59'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Oct 25 20:59:59 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone 
+------------------------------ + Sat Oct 25 22:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:01'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Oct 25 22:00:01 2014 UTC +(1 row) + +SELECT '2014-10-26 02:00:00'::timestamp AT TIME ZONE 'Europe/Moscow'; + timezone +------------------------------ + Sat Oct 25 23:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 00:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Oct 25 20:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 00:59:59'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Oct 25 20:59:59 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Oct 25 22:00:00 2014 UTC +(1 row) + +SELECT '2014-10-26 01:00:01'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Oct 25 22:00:01 2014 UTC +(1 row) + +SELECT '2014-10-26 02:00:00'::timestamp AT TIME ZONE 'MSK'; + timezone +------------------------------ + Sat Oct 25 23:00:00 2014 UTC +(1 row) + +SELECT make_timestamptz(2014, 10, 26, 0, 0, 0, 'MSK'); + make_timestamptz +------------------------------ + Sat Oct 25 20:00:00 2014 UTC +(1 row) + +SELECT make_timestamptz(2014, 10, 26, 1, 0, 0, 'MSK'); + make_timestamptz +------------------------------ + Sat Oct 25 22:00:00 2014 UTC +(1 row) + +SELECT to_timestamp( 0); -- 1970-01-01 00:00:00+00 + to_timestamp +------------------------------ + Thu Jan 01 00:00:00 1970 UTC +(1 row) + +SELECT to_timestamp( 946684800); -- 2000-01-01 00:00:00+00 + to_timestamp +------------------------------ + Sat Jan 01 00:00:00 2000 UTC +(1 row) + +SELECT to_timestamp(1262349296.7890123); -- 2010-01-01 12:34:56.789012+00 + to_timestamp +------------------------------------- + Fri Jan 01 12:34:56.789012 2010 UTC +(1 row) + +-- edge cases +SELECT to_timestamp(-210866803200); -- 4714-11-24 00:00:00+00 BC + to_timestamp +--------------------------------- + Mon Nov 24 00:00:00 4714 UTC BC +(1 row) + +-- upper limit varies between integer and float timestamps, so hard to test +-- nonfinite values +SELECT to_timestamp(' Infinity'::float); + to_timestamp +-------------- + infinity +(1 row) + +SELECT to_timestamp('-Infinity'::float); + to_timestamp +-------------- + -infinity +(1 row) + +SELECT to_timestamp('NaN'::float); +ERROR: timestamp cannot be NaN +SET TimeZone to 'Europe/Moscow'; +SELECT '2011-03-26 21:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 00:00:00 2011 MSK +(1 row) + +SELECT '2011-03-26 22:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 01:00:00 2011 MSK +(1 row) + +SELECT '2011-03-26 22:59:59 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 01:59:59 2011 MSK +(1 row) + +SELECT '2011-03-26 23:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 03:00:00 2011 MSK +(1 row) + +SELECT '2011-03-26 23:00:01 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 03:00:01 2011 MSK +(1 row) + +SELECT '2011-03-26 23:59:59 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 03:59:59 2011 MSK +(1 row) + +SELECT '2011-03-27 00:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Mar 27 04:00:00 2011 MSK +(1 row) + +SELECT '2014-10-25 21:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Oct 26 01:00:00 2014 MSK +(1 row) + +SELECT 
'2014-10-25 21:59:59 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Oct 26 01:59:59 2014 MSK +(1 row) + +SELECT '2014-10-25 22:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Oct 26 01:00:00 2014 MSK +(1 row) + +SELECT '2014-10-25 22:00:01 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Oct 26 01:00:01 2014 MSK +(1 row) + +SELECT '2014-10-25 23:00:00 UTC'::timestamptz; + timestamptz +------------------------------ + Sun Oct 26 02:00:00 2014 MSK +(1 row) + +RESET TimeZone; +SELECT '2011-03-26 21:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 00:00:00 2011 +(1 row) + +SELECT '2011-03-26 22:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 01:00:00 2011 +(1 row) + +SELECT '2011-03-26 22:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 01:59:59 2011 +(1 row) + +SELECT '2011-03-26 23:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 03:00:00 2011 +(1 row) + +SELECT '2011-03-26 23:00:01 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 03:00:01 2011 +(1 row) + +SELECT '2011-03-26 23:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 03:59:59 2011 +(1 row) + +SELECT '2011-03-27 00:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Mar 27 04:00:00 2011 +(1 row) + +SELECT '2014-10-25 21:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Oct 26 01:00:00 2014 +(1 row) + +SELECT '2014-10-25 21:59:59 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Oct 26 01:59:59 2014 +(1 row) + +SELECT '2014-10-25 22:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Oct 26 01:00:00 2014 +(1 row) + +SELECT '2014-10-25 22:00:01 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Oct 26 01:00:01 2014 +(1 row) + +SELECT '2014-10-25 23:00:00 UTC'::timestamptz AT TIME ZONE 'Europe/Moscow'; + timezone +-------------------------- + Sun Oct 26 02:00:00 2014 +(1 row) + +SELECT '2011-03-26 21:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 00:00:00 2011 +(1 row) + +SELECT '2011-03-26 22:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 01:00:00 2011 +(1 row) + +SELECT '2011-03-26 22:59:59 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 01:59:59 2011 +(1 row) + +SELECT '2011-03-26 23:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 03:00:00 2011 +(1 row) + +SELECT '2011-03-26 23:00:01 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 03:00:01 2011 +(1 row) + +SELECT '2011-03-26 23:59:59 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 03:59:59 2011 +(1 row) + +SELECT '2011-03-27 00:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Mar 27 04:00:00 2011 +(1 row) + +SELECT '2014-10-25 21:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Oct 26 01:00:00 2014 +(1 row) + 
+SELECT '2014-10-25 21:59:59 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Oct 26 01:59:59 2014 +(1 row) + +SELECT '2014-10-25 22:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Oct 26 01:00:00 2014 +(1 row) + +SELECT '2014-10-25 22:00:01 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Oct 26 01:00:01 2014 +(1 row) + +SELECT '2014-10-25 23:00:00 UTC'::timestamptz AT TIME ZONE 'MSK'; + timezone +-------------------------- + Sun Oct 26 02:00:00 2014 +(1 row) + +-- +-- Test that AT TIME ZONE isn't misoptimized when using an index (bug #14504) +-- +create temp table tmptz (f1 timestamptz primary key); +insert into tmptz values ('2017-01-18 00:00+00'); +explain (costs off) +select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00'; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Seq Scan on tmptz + Filter: ((f1 AT TIME ZONE 'utc'::text) = 'Wed Jan 18 00:00:00 2017'::timestamp without time zone) +(2 rows) + +select * from tmptz where f1 at time zone 'utc' = '2017-01-18 00:00'; + f1 +------------------------------ + Tue Jan 17 16:00:00 2017 PST +(1 row) + diff --git a/src/test/regress/expected/timetz.out b/src/test/regress/expected/timetz.out new file mode 100644 index 0000000..658a839 --- /dev/null +++ b/src/test/regress/expected/timetz.out @@ -0,0 +1,324 @@ +-- +-- TIMETZ +-- +CREATE TABLE TIMETZ_TBL (f1 time(2) with time zone); +INSERT INTO TIMETZ_TBL VALUES ('00:01 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('01:00 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('02:03 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('07:07 PST'); +INSERT INTO TIMETZ_TBL VALUES ('08:08 EDT'); +INSERT INTO TIMETZ_TBL VALUES ('11:59 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('12:00 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('12:01 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('23:59 PDT'); +INSERT INTO TIMETZ_TBL VALUES ('11:59:59.99 PM PDT'); +INSERT INTO TIMETZ_TBL VALUES ('2003-03-07 15:36:39 America/New_York'); +INSERT INTO TIMETZ_TBL VALUES ('2003-07-07 15:36:39 America/New_York'); +-- this should fail (the timezone offset is not known) +INSERT INTO TIMETZ_TBL VALUES ('15:36:39 America/New_York'); +ERROR: invalid input syntax for type time with time zone: "15:36:39 America/New_York" +LINE 1: INSERT INTO TIMETZ_TBL VALUES ('15:36:39 America/New_York'); + ^ +-- this should fail (timezone not specified without a date) +INSERT INTO TIMETZ_TBL VALUES ('15:36:39 m2'); +ERROR: invalid input syntax for type time with time zone: "15:36:39 m2" +LINE 1: INSERT INTO TIMETZ_TBL VALUES ('15:36:39 m2'); + ^ +-- this should fail (dynamic timezone abbreviation without a date) +INSERT INTO TIMETZ_TBL VALUES ('15:36:39 MSK m2'); +ERROR: invalid input syntax for type time with time zone: "15:36:39 MSK m2" +LINE 1: INSERT INTO TIMETZ_TBL VALUES ('15:36:39 MSK m2'); + ^ +SELECT f1 AS "Time TZ" FROM TIMETZ_TBL; + Time TZ +---------------- + 00:01:00-07 + 01:00:00-07 + 02:03:00-07 + 07:07:00-08 + 08:08:00-04 + 11:59:00-07 + 12:00:00-07 + 12:01:00-07 + 23:59:00-07 + 23:59:59.99-07 + 15:36:39-05 + 15:36:39-04 +(12 rows) + +SELECT f1 AS "Three" FROM TIMETZ_TBL WHERE f1 < '05:06:07-07'; + Three +------------- + 00:01:00-07 + 01:00:00-07 + 02:03:00-07 +(3 rows) + +SELECT f1 AS "Seven" FROM TIMETZ_TBL WHERE f1 > '05:06:07-07'; + Seven +---------------- + 07:07:00-08 + 08:08:00-04 + 11:59:00-07 + 12:00:00-07 + 12:01:00-07 + 23:59:00-07 + 23:59:59.99-07 + 15:36:39-05 + 15:36:39-04 +(9 
rows) + +SELECT f1 AS "None" FROM TIMETZ_TBL WHERE f1 < '00:00-07'; + None +------ +(0 rows) + +SELECT f1 AS "Ten" FROM TIMETZ_TBL WHERE f1 >= '00:00-07'; + Ten +---------------- + 00:01:00-07 + 01:00:00-07 + 02:03:00-07 + 07:07:00-08 + 08:08:00-04 + 11:59:00-07 + 12:00:00-07 + 12:01:00-07 + 23:59:00-07 + 23:59:59.99-07 + 15:36:39-05 + 15:36:39-04 +(12 rows) + +-- Check edge cases +SELECT '23:59:59.999999 PDT'::timetz; + timetz +-------------------- + 23:59:59.999999-07 +(1 row) + +SELECT '23:59:59.9999999 PDT'::timetz; -- rounds up + timetz +------------- + 24:00:00-07 +(1 row) + +SELECT '23:59:60 PDT'::timetz; -- rounds up + timetz +------------- + 24:00:00-07 +(1 row) + +SELECT '24:00:00 PDT'::timetz; -- allowed + timetz +------------- + 24:00:00-07 +(1 row) + +SELECT '24:00:00.01 PDT'::timetz; -- not allowed +ERROR: date/time field value out of range: "24:00:00.01 PDT" +LINE 1: SELECT '24:00:00.01 PDT'::timetz; + ^ +SELECT '23:59:60.01 PDT'::timetz; -- not allowed +ERROR: date/time field value out of range: "23:59:60.01 PDT" +LINE 1: SELECT '23:59:60.01 PDT'::timetz; + ^ +SELECT '24:01:00 PDT'::timetz; -- not allowed +ERROR: date/time field value out of range: "24:01:00 PDT" +LINE 1: SELECT '24:01:00 PDT'::timetz; + ^ +SELECT '25:00:00 PDT'::timetz; -- not allowed +ERROR: date/time field value out of range: "25:00:00 PDT" +LINE 1: SELECT '25:00:00 PDT'::timetz; + ^ +-- Test non-error-throwing API +SELECT pg_input_is_valid('12:00:00 PDT', 'timetz'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('25:00:00 PDT', 'timetz'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT pg_input_is_valid('15:36:39 America/New_York', 'timetz'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('25:00:00 PDT', 'timetz'); + message | detail | hint | sql_error_code +----------------------------------------------------+--------+------+---------------- + date/time field value out of range: "25:00:00 PDT" | | | 22008 +(1 row) + +SELECT * FROM pg_input_error_info('15:36:39 America/New_York', 'timetz'); + message | detail | hint | sql_error_code +--------------------------------------------------------------------------------+--------+------+---------------- + invalid input syntax for type time with time zone: "15:36:39 America/New_York" | | | 22007 +(1 row) + +-- +-- TIME simple math +-- +-- We now make a distinction between time and intervals, +-- and adding two times together makes no sense at all. +-- Leave in one query to show that it is rejected, +-- and do the rest of the testing in horology.sql +-- where we do mixed-type arithmetic. - thomas 2000-12-02 +SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TIMETZ_TBL; +ERROR: operator does not exist: time with time zone + time with time zone +LINE 1: SELECT f1 + time with time zone '00:01' AS "Illegal" FROM TI... + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
+-- +-- test EXTRACT +-- +SELECT EXTRACT(MICROSECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + extract +---------- + 25575401 +(1 row) + +SELECT EXTRACT(MILLISECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + extract +----------- + 25575.401 +(1 row) + +SELECT EXTRACT(SECOND FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + extract +----------- + 25.575401 +(1 row) + +SELECT EXTRACT(MINUTE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + extract +--------- + 30 +(1 row) + +SELECT EXTRACT(HOUR FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + extract +--------- + 13 +(1 row) + +SELECT EXTRACT(DAY FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); -- error +ERROR: unit "day" not supported for type time with time zone +SELECT EXTRACT(FORTNIGHT FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); -- error +ERROR: unit "fortnight" not recognized for type time with time zone +SELECT EXTRACT(TIMEZONE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30'); + extract +--------- + -16200 +(1 row) + +SELECT EXTRACT(TIMEZONE_HOUR FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30'); + extract +--------- + -4 +(1 row) + +SELECT EXTRACT(TIMEZONE_MINUTE FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04:30'); + extract +--------- + -30 +(1 row) + +SELECT EXTRACT(EPOCH FROM TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + extract +-------------- + 63025.575401 +(1 row) + +-- date_part implementation is mostly the same as extract, so only +-- test a few cases for additional coverage. +SELECT date_part('microsecond', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + date_part +----------- + 25575401 +(1 row) + +SELECT date_part('millisecond', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + date_part +----------- + 25575.401 +(1 row) + +SELECT date_part('second', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + date_part +----------- + 25.575401 +(1 row) + +SELECT date_part('epoch', TIME WITH TIME ZONE '2020-05-26 13:30:25.575401-04'); + date_part +-------------- + 63025.575401 +(1 row) + +-- +-- Test timetz_zone, timetz_izone +-- +BEGIN; +SET LOCAL TimeZone TO 'UTC'; +CREATE VIEW timetz_local_view AS + SELECT f1 AS dat, + f1 AT TIME ZONE current_setting('TimeZone') AS dat_at_tz, + f1 AT TIME ZONE INTERVAL '00:00' AS dat_at_int + FROM TIMETZ_TBL + ORDER BY f1; +SELECT pg_get_viewdef('timetz_local_view', true); + pg_get_viewdef +----------------------------------------------------------------------- + SELECT f1 AS dat, + + (f1 AT TIME ZONE current_setting('TimeZone'::text)) AS dat_at_tz,+ + (f1 AT TIME ZONE '@ 0'::interval) AS dat_at_int + + FROM timetz_tbl + + ORDER BY f1; +(1 row) + +TABLE timetz_local_view; + dat | dat_at_tz | dat_at_int +----------------+----------------+---------------- + 00:01:00-07 | 07:01:00+00 | 07:01:00+00 + 01:00:00-07 | 08:00:00+00 | 08:00:00+00 + 02:03:00-07 | 09:03:00+00 | 09:03:00+00 + 08:08:00-04 | 12:08:00+00 | 12:08:00+00 + 07:07:00-08 | 15:07:00+00 | 15:07:00+00 + 11:59:00-07 | 18:59:00+00 | 18:59:00+00 + 12:00:00-07 | 19:00:00+00 | 19:00:00+00 + 12:01:00-07 | 19:01:00+00 | 19:01:00+00 + 15:36:39-04 | 19:36:39+00 | 19:36:39+00 + 15:36:39-05 | 20:36:39+00 | 20:36:39+00 + 23:59:00-07 | 06:59:00+00 | 06:59:00+00 + 23:59:59.99-07 | 06:59:59.99+00 | 06:59:59.99+00 +(12 rows) + +SELECT f1 AS dat, + f1 AT TIME ZONE 'UTC+10' AS dat_at_tz, + f1 AT TIME ZONE INTERVAL '-10:00' AS dat_at_int + FROM TIMETZ_TBL + ORDER BY f1; + dat | dat_at_tz | dat_at_int 
+----------------+----------------+---------------- + 00:01:00-07 | 21:01:00-10 | 21:01:00-10 + 01:00:00-07 | 22:00:00-10 | 22:00:00-10 + 02:03:00-07 | 23:03:00-10 | 23:03:00-10 + 08:08:00-04 | 02:08:00-10 | 02:08:00-10 + 07:07:00-08 | 05:07:00-10 | 05:07:00-10 + 11:59:00-07 | 08:59:00-10 | 08:59:00-10 + 12:00:00-07 | 09:00:00-10 | 09:00:00-10 + 12:01:00-07 | 09:01:00-10 | 09:01:00-10 + 15:36:39-04 | 09:36:39-10 | 09:36:39-10 + 15:36:39-05 | 10:36:39-10 | 10:36:39-10 + 23:59:00-07 | 20:59:00-10 | 20:59:00-10 + 23:59:59.99-07 | 20:59:59.99-10 | 20:59:59.99-10 +(12 rows) + +ROLLBACK; diff --git a/src/test/regress/expected/transactions.out b/src/test/regress/expected/transactions.out new file mode 100644 index 0000000..535f73c --- /dev/null +++ b/src/test/regress/expected/transactions.out @@ -0,0 +1,1198 @@ +-- +-- TRANSACTIONS +-- +BEGIN; +CREATE TABLE xacttest (a smallint, b real); +INSERT INTO xacttest VALUES + (56, 7.8), + (100, 99.097), + (0, 0.09561), + (42, 324.78); +INSERT INTO xacttest (a, b) VALUES (777, 777.777); +END; +-- should retrieve one value-- +SELECT a FROM xacttest WHERE a > 100; + a +----- + 777 +(1 row) + +BEGIN; +CREATE TABLE disappear (a int4); +DELETE FROM xacttest; +-- should be empty +SELECT * FROM xacttest; + a | b +---+--- +(0 rows) + +ABORT; +-- should not exist +SELECT oid FROM pg_class WHERE relname = 'disappear'; + oid +----- +(0 rows) + +-- should have members again +SELECT * FROM xacttest; + a | b +-----+--------- + 56 | 7.8 + 100 | 99.097 + 0 | 0.09561 + 42 | 324.78 + 777 | 777.777 +(5 rows) + +-- Test that transaction characteristics cannot be reset. +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +SELECT COUNT(*) FROM xacttest; + count +------- + 5 +(1 row) + +RESET transaction_isolation; -- error +ERROR: parameter "transaction_isolation" cannot be reset +END; +BEGIN TRANSACTION READ ONLY; +SELECT COUNT(*) FROM xacttest; + count +------- + 5 +(1 row) + +RESET transaction_read_only; -- error +ERROR: parameter "transaction_read_only" cannot be reset +END; +BEGIN TRANSACTION DEFERRABLE; +SELECT COUNT(*) FROM xacttest; + count +------- + 5 +(1 row) + +RESET transaction_deferrable; -- error +ERROR: parameter "transaction_deferrable" cannot be reset +END; +CREATE FUNCTION errfunc() RETURNS int LANGUAGE SQL AS 'SELECT 1' +SET transaction_read_only = on; -- error +ERROR: parameter "transaction_read_only" cannot be set locally in functions +-- Read-only tests +CREATE TABLE writetest (a int); +CREATE TEMPORARY TABLE temptest (a int); +BEGIN; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ ONLY, DEFERRABLE; -- ok +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +SET TRANSACTION READ WRITE; --fail +ERROR: transaction read-write mode must be set before any query +COMMIT; +BEGIN; +SET TRANSACTION READ ONLY; -- ok +SET TRANSACTION READ WRITE; -- ok +SET TRANSACTION READ ONLY; -- ok +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +SAVEPOINT x; +SET TRANSACTION READ ONLY; -- ok +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +SET TRANSACTION READ ONLY; -- ok +SET TRANSACTION READ WRITE; --fail +ERROR: cannot set transaction read-write mode inside a read-only transaction +COMMIT; +BEGIN; +SET TRANSACTION READ WRITE; -- ok +SAVEPOINT x; +SET TRANSACTION READ WRITE; -- ok +SET TRANSACTION READ ONLY; -- ok +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +SET TRANSACTION READ ONLY; -- ok +SET TRANSACTION READ WRITE; --fail +ERROR: cannot set transaction read-write mode inside a read-only transaction +COMMIT; +BEGIN; +SET TRANSACTION READ WRITE; -- ok 
+SAVEPOINT x; +SET TRANSACTION READ ONLY; -- ok +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +ROLLBACK TO SAVEPOINT x; +SHOW transaction_read_only; -- off + transaction_read_only +----------------------- + off +(1 row) + +SAVEPOINT y; +SET TRANSACTION READ ONLY; -- ok +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +RELEASE SAVEPOINT y; +SHOW transaction_read_only; -- off + transaction_read_only +----------------------- + off +(1 row) + +COMMIT; +SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY; +DROP TABLE writetest; -- fail +ERROR: cannot execute DROP TABLE in a read-only transaction +INSERT INTO writetest VALUES (1); -- fail +ERROR: cannot execute INSERT in a read-only transaction +SELECT * FROM writetest; -- ok + a +--- +(0 rows) + +DELETE FROM temptest; -- ok +UPDATE temptest SET a = 0 FROM writetest WHERE temptest.a = 1 AND writetest.a = temptest.a; -- ok +PREPARE test AS UPDATE writetest SET a = 0; -- ok +EXECUTE test; -- fail +ERROR: cannot execute UPDATE in a read-only transaction +SELECT * FROM writetest, temptest; -- ok + a | a +---+--- +(0 rows) + +CREATE TABLE test AS SELECT * FROM writetest; -- fail +ERROR: cannot execute CREATE TABLE AS in a read-only transaction +START TRANSACTION READ WRITE; +DROP TABLE writetest; -- ok +COMMIT; +-- Subtransactions, basic tests +-- create & drop tables +SET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE; +CREATE TABLE trans_foobar (a int); +BEGIN; + CREATE TABLE trans_foo (a int); + SAVEPOINT one; + DROP TABLE trans_foo; + CREATE TABLE trans_bar (a int); + ROLLBACK TO SAVEPOINT one; + RELEASE SAVEPOINT one; + SAVEPOINT two; + CREATE TABLE trans_baz (a int); + RELEASE SAVEPOINT two; + drop TABLE trans_foobar; + CREATE TABLE trans_barbaz (a int); +COMMIT; +-- should exist: trans_barbaz, trans_baz, trans_foo +SELECT * FROM trans_foo; -- should be empty + a +--- +(0 rows) + +SELECT * FROM trans_bar; -- shouldn't exist +ERROR: relation "trans_bar" does not exist +LINE 1: SELECT * FROM trans_bar; + ^ +SELECT * FROM trans_barbaz; -- should be empty + a +--- +(0 rows) + +SELECT * FROM trans_baz; -- should be empty + a +--- +(0 rows) + +-- inserts +BEGIN; + INSERT INTO trans_foo VALUES (1); + SAVEPOINT one; + INSERT into trans_bar VALUES (1); +ERROR: relation "trans_bar" does not exist +LINE 1: INSERT into trans_bar VALUES (1); + ^ + ROLLBACK TO one; + RELEASE SAVEPOINT one; + SAVEPOINT two; + INSERT into trans_barbaz VALUES (1); + RELEASE two; + SAVEPOINT three; + SAVEPOINT four; + INSERT INTO trans_foo VALUES (2); + RELEASE SAVEPOINT four; + ROLLBACK TO SAVEPOINT three; + RELEASE SAVEPOINT three; + INSERT INTO trans_foo VALUES (3); +COMMIT; +SELECT * FROM trans_foo; -- should have 1 and 3 + a +--- + 1 + 3 +(2 rows) + +SELECT * FROM trans_barbaz; -- should have 1 + a +--- + 1 +(1 row) + +-- test whole-tree commit +BEGIN; + SAVEPOINT one; + SELECT trans_foo; +ERROR: column "trans_foo" does not exist +LINE 1: SELECT trans_foo; + ^ + ROLLBACK TO SAVEPOINT one; + RELEASE SAVEPOINT one; + SAVEPOINT two; + CREATE TABLE savepoints (a int); + SAVEPOINT three; + INSERT INTO savepoints VALUES (1); + SAVEPOINT four; + INSERT INTO savepoints VALUES (2); + SAVEPOINT five; + INSERT INTO savepoints VALUES (3); + ROLLBACK TO SAVEPOINT five; +COMMIT; +COMMIT; -- should not be in a transaction block +WARNING: there is no transaction in progress +SELECT * FROM savepoints; + a +--- + 1 + 2 +(2 rows) + +-- test whole-tree rollback +BEGIN; + SAVEPOINT one; + DELETE FROM savepoints WHERE a=1; + RELEASE SAVEPOINT one; + SAVEPOINT two; + DELETE FROM 
savepoints WHERE a=1; + SAVEPOINT three; + DELETE FROM savepoints WHERE a=2; +ROLLBACK; +COMMIT; -- should not be in a transaction block +WARNING: there is no transaction in progress +SELECT * FROM savepoints; + a +--- + 1 + 2 +(2 rows) + +-- test whole-tree commit on an aborted subtransaction +BEGIN; + INSERT INTO savepoints VALUES (4); + SAVEPOINT one; + INSERT INTO savepoints VALUES (5); + SELECT trans_foo; +ERROR: column "trans_foo" does not exist +LINE 1: SELECT trans_foo; + ^ +COMMIT; +SELECT * FROM savepoints; + a +--- + 1 + 2 +(2 rows) + +BEGIN; + INSERT INTO savepoints VALUES (6); + SAVEPOINT one; + INSERT INTO savepoints VALUES (7); + RELEASE SAVEPOINT one; + INSERT INTO savepoints VALUES (8); +COMMIT; +-- rows 6 and 8 should have been created by the same xact +SELECT a.xmin = b.xmin FROM savepoints a, savepoints b WHERE a.a=6 AND b.a=8; + ?column? +---------- + t +(1 row) + +-- rows 6 and 7 should have been created by different xacts +SELECT a.xmin = b.xmin FROM savepoints a, savepoints b WHERE a.a=6 AND b.a=7; + ?column? +---------- + f +(1 row) + +BEGIN; + INSERT INTO savepoints VALUES (9); + SAVEPOINT one; + INSERT INTO savepoints VALUES (10); + ROLLBACK TO SAVEPOINT one; + INSERT INTO savepoints VALUES (11); +COMMIT; +SELECT a FROM savepoints WHERE a in (9, 10, 11); + a +---- + 9 + 11 +(2 rows) + +-- rows 9 and 11 should have been created by different xacts +SELECT a.xmin = b.xmin FROM savepoints a, savepoints b WHERE a.a=9 AND b.a=11; + ?column? +---------- + f +(1 row) + +BEGIN; + INSERT INTO savepoints VALUES (12); + SAVEPOINT one; + INSERT INTO savepoints VALUES (13); + SAVEPOINT two; + INSERT INTO savepoints VALUES (14); + ROLLBACK TO SAVEPOINT one; + INSERT INTO savepoints VALUES (15); + SAVEPOINT two; + INSERT INTO savepoints VALUES (16); + SAVEPOINT three; + INSERT INTO savepoints VALUES (17); +COMMIT; +SELECT a FROM savepoints WHERE a BETWEEN 12 AND 17; + a +---- + 12 + 15 + 16 + 17 +(4 rows) + +BEGIN; + INSERT INTO savepoints VALUES (18); + SAVEPOINT one; + INSERT INTO savepoints VALUES (19); + SAVEPOINT two; + INSERT INTO savepoints VALUES (20); + ROLLBACK TO SAVEPOINT one; + INSERT INTO savepoints VALUES (21); + ROLLBACK TO SAVEPOINT one; + INSERT INTO savepoints VALUES (22); +COMMIT; +SELECT a FROM savepoints WHERE a BETWEEN 18 AND 22; + a +---- + 18 + 22 +(2 rows) + +DROP TABLE savepoints; +-- only in a transaction block: +SAVEPOINT one; +ERROR: SAVEPOINT can only be used in transaction blocks +ROLLBACK TO SAVEPOINT one; +ERROR: ROLLBACK TO SAVEPOINT can only be used in transaction blocks +RELEASE SAVEPOINT one; +ERROR: RELEASE SAVEPOINT can only be used in transaction blocks +-- Only "rollback to" allowed in aborted state +BEGIN; + SAVEPOINT one; + SELECT 0/0; +ERROR: division by zero + SAVEPOINT two; -- ignored till the end of ... +ERROR: current transaction is aborted, commands ignored until end of transaction block + RELEASE SAVEPOINT one; -- ignored till the end of ... +ERROR: current transaction is aborted, commands ignored until end of transaction block + ROLLBACK TO SAVEPOINT one; + SELECT 1; + ?column? +---------- + 1 +(1 row) + +COMMIT; +SELECT 1; -- this should work + ?column? 
+---------- + 1 +(1 row) + +-- check non-transactional behavior of cursors +BEGIN; + DECLARE c CURSOR FOR SELECT unique2 FROM tenk1 ORDER BY unique2; + SAVEPOINT one; + FETCH 10 FROM c; + unique2 +--------- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +(10 rows) + + ROLLBACK TO SAVEPOINT one; + FETCH 10 FROM c; + unique2 +--------- + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 +(10 rows) + + RELEASE SAVEPOINT one; + FETCH 10 FROM c; + unique2 +--------- + 20 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 +(10 rows) + + CLOSE c; + DECLARE c CURSOR FOR SELECT unique2/0 FROM tenk1 ORDER BY unique2; + SAVEPOINT two; + FETCH 10 FROM c; +ERROR: division by zero + ROLLBACK TO SAVEPOINT two; + -- c is now dead to the world ... + FETCH 10 FROM c; +ERROR: portal "c" cannot be run + ROLLBACK TO SAVEPOINT two; + RELEASE SAVEPOINT two; + FETCH 10 FROM c; +ERROR: portal "c" cannot be run +COMMIT; +-- +-- Check that "stable" functions are really stable. They should not be +-- able to see the partial results of the calling query. (Ideally we would +-- also check that they don't see commits of concurrent transactions, but +-- that's a mite hard to do within the limitations of pg_regress.) +-- +select * from xacttest; + a | b +-----+--------- + 56 | 7.8 + 100 | 99.097 + 0 | 0.09561 + 42 | 324.78 + 777 | 777.777 +(5 rows) + +create or replace function max_xacttest() returns smallint language sql as +'select max(a) from xacttest' stable; +begin; +update xacttest set a = max_xacttest() + 10 where a > 0; +select * from xacttest; + a | b +-----+--------- + 0 | 0.09561 + 787 | 7.8 + 787 | 99.097 + 787 | 324.78 + 787 | 777.777 +(5 rows) + +rollback; +-- But a volatile function can see the partial results of the calling query +create or replace function max_xacttest() returns smallint language sql as +'select max(a) from xacttest' volatile; +begin; +update xacttest set a = max_xacttest() + 10 where a > 0; +select * from xacttest; + a | b +-----+--------- + 0 | 0.09561 + 787 | 7.8 + 797 | 99.097 + 807 | 324.78 + 817 | 777.777 +(5 rows) + +rollback; +-- Now the same test with plpgsql (since it depends on SPI which is different) +create or replace function max_xacttest() returns smallint language plpgsql as +'begin return max(a) from xacttest; end' stable; +begin; +update xacttest set a = max_xacttest() + 10 where a > 0; +select * from xacttest; + a | b +-----+--------- + 0 | 0.09561 + 787 | 7.8 + 787 | 99.097 + 787 | 324.78 + 787 | 777.777 +(5 rows) + +rollback; +create or replace function max_xacttest() returns smallint language plpgsql as +'begin return max(a) from xacttest; end' volatile; +begin; +update xacttest set a = max_xacttest() + 10 where a > 0; +select * from xacttest; + a | b +-----+--------- + 0 | 0.09561 + 787 | 7.8 + 797 | 99.097 + 807 | 324.78 + 817 | 777.777 +(5 rows) + +rollback; +-- test case for problems with dropping an open relation during abort +BEGIN; + savepoint x; + CREATE TABLE koju (a INT UNIQUE); + INSERT INTO koju VALUES (1); + INSERT INTO koju VALUES (1); +ERROR: duplicate key value violates unique constraint "koju_a_key" +DETAIL: Key (a)=(1) already exists. + rollback to x; + CREATE TABLE koju (a INT UNIQUE); + INSERT INTO koju VALUES (1); + INSERT INTO koju VALUES (1); +ERROR: duplicate key value violates unique constraint "koju_a_key" +DETAIL: Key (a)=(1) already exists. 
+ROLLBACK; +DROP TABLE trans_foo; +DROP TABLE trans_baz; +DROP TABLE trans_barbaz; +-- test case for problems with revalidating an open relation during abort +create function inverse(int) returns float8 as +$$ +begin + analyze revalidate_bug; + return 1::float8/$1; +exception + when division_by_zero then return 0; +end$$ language plpgsql volatile; +create table revalidate_bug (c float8 unique); +insert into revalidate_bug values (1); +insert into revalidate_bug values (inverse(0)); +drop table revalidate_bug; +drop function inverse(int); +-- verify that cursors created during an aborted subtransaction are +-- closed, but that we do not rollback the effect of any FETCHs +-- performed in the aborted subtransaction +begin; +savepoint x; +create table trans_abc (a int); +insert into trans_abc values (5); +insert into trans_abc values (10); +declare foo cursor for select * from trans_abc; +fetch from foo; + a +--- + 5 +(1 row) + +rollback to x; +-- should fail +fetch from foo; +ERROR: cursor "foo" does not exist +commit; +begin; +create table trans_abc (a int); +insert into trans_abc values (5); +insert into trans_abc values (10); +insert into trans_abc values (15); +declare foo cursor for select * from trans_abc; +fetch from foo; + a +--- + 5 +(1 row) + +savepoint x; +fetch from foo; + a +---- + 10 +(1 row) + +rollback to x; +fetch from foo; + a +---- + 15 +(1 row) + +abort; +-- Test for proper cleanup after a failure in a cursor portal +-- that was created in an outer subtransaction +CREATE FUNCTION invert(x float8) RETURNS float8 LANGUAGE plpgsql AS +$$ begin return 1/x; end $$; +CREATE FUNCTION create_temp_tab() RETURNS text +LANGUAGE plpgsql AS $$ +BEGIN + CREATE TEMP TABLE new_table (f1 float8); + -- case of interest is that we fail while holding an open + -- relcache reference to new_table + INSERT INTO new_table SELECT invert(0.0); + RETURN 'foo'; +END $$; +BEGIN; +DECLARE ok CURSOR FOR SELECT * FROM int8_tbl; +DECLARE ctt CURSOR FOR SELECT create_temp_tab(); +FETCH ok; + q1 | q2 +-----+----- + 123 | 456 +(1 row) + +SAVEPOINT s1; +FETCH ok; -- should work + q1 | q2 +-----+------------------ + 123 | 4567890123456789 +(1 row) + +FETCH ctt; -- error occurs here +ERROR: division by zero +CONTEXT: PL/pgSQL function invert(double precision) line 1 at RETURN +SQL statement "INSERT INTO new_table SELECT invert(0.0)" +PL/pgSQL function create_temp_tab() line 6 at SQL statement +ROLLBACK TO s1; +FETCH ok; -- should work + q1 | q2 +------------------+----- + 4567890123456789 | 123 +(1 row) + +FETCH ctt; -- must be rejected +ERROR: portal "ctt" cannot be run +COMMIT; +DROP FUNCTION create_temp_tab(); +DROP FUNCTION invert(x float8); +-- Tests for AND CHAIN +CREATE TABLE trans_abc (a int); +-- set nondefault value so we have something to override below +SET default_transaction_read_only = on; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE; +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +INSERT INTO trans_abc VALUES (1); +INSERT INTO trans_abc VALUES (2); +COMMIT AND CHAIN; -- TBLOCK_END +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + 
transaction_deferrable +------------------------ + on +(1 row) + +INSERT INTO trans_abc VALUES ('error'); +ERROR: invalid input syntax for type integer: "error" +LINE 1: INSERT INTO trans_abc VALUES ('error'); + ^ +INSERT INTO trans_abc VALUES (3); -- check it's really aborted +ERROR: current transaction is aborted, commands ignored until end of transaction block +COMMIT AND CHAIN; -- TBLOCK_ABORT_END +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +INSERT INTO trans_abc VALUES (4); +COMMIT; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE; +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +SAVEPOINT x; +INSERT INTO trans_abc VALUES ('error'); +ERROR: invalid input syntax for type integer: "error" +LINE 1: INSERT INTO trans_abc VALUES ('error'); + ^ +COMMIT AND CHAIN; -- TBLOCK_ABORT_PENDING +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +INSERT INTO trans_abc VALUES (5); +COMMIT; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE, DEFERRABLE; +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +SAVEPOINT x; +COMMIT AND CHAIN; -- TBLOCK_SUBCOMMIT +SHOW transaction_isolation; + transaction_isolation +----------------------- + repeatable read +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +COMMIT; +START TRANSACTION ISOLATION LEVEL READ COMMITTED, READ WRITE, DEFERRABLE; +SHOW transaction_isolation; + transaction_isolation +----------------------- + read committed +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +SAVEPOINT x; +COMMIT AND CHAIN; -- TBLOCK_SUBCOMMIT +SHOW transaction_isolation; + transaction_isolation +----------------------- + read committed +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + on +(1 row) + +COMMIT; +-- different mix of options just for fun +START TRANSACTION ISOLATION LEVEL SERIALIZABLE, READ WRITE, NOT DEFERRABLE; +SHOW transaction_isolation; + transaction_isolation +----------------------- + serializable +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + off +(1 row) 
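The SHOW statements surrounding each COMMIT AND CHAIN / ROLLBACK AND CHAIN verify that the chained transaction keeps the isolation level, read-only flag, and deferrable flag of the transaction that just ended. A minimal stand-alone sketch of the same behavior, with a purely illustrative table name (chain_demo is not part of the test):

CREATE TABLE chain_demo (a int);
START TRANSACTION ISOLATION LEVEL REPEATABLE READ;
INSERT INTO chain_demo VALUES (1);
COMMIT AND CHAIN;            -- commits, then immediately opens a new transaction
SHOW transaction_isolation;  -- still "repeatable read" in the chained transaction
INSERT INTO chain_demo VALUES (2);
COMMIT;
DROP TABLE chain_demo;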
+ +INSERT INTO trans_abc VALUES (6); +ROLLBACK AND CHAIN; -- TBLOCK_ABORT_PENDING +SHOW transaction_isolation; + transaction_isolation +----------------------- + serializable +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + off +(1 row) + +INSERT INTO trans_abc VALUES ('error'); +ERROR: invalid input syntax for type integer: "error" +LINE 1: INSERT INTO trans_abc VALUES ('error'); + ^ +ROLLBACK AND CHAIN; -- TBLOCK_ABORT_END +SHOW transaction_isolation; + transaction_isolation +----------------------- + serializable +(1 row) + +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SHOW transaction_deferrable; + transaction_deferrable +------------------------ + off +(1 row) + +ROLLBACK; +-- not allowed outside a transaction block +COMMIT AND CHAIN; -- error +ERROR: COMMIT AND CHAIN can only be used in transaction blocks +ROLLBACK AND CHAIN; -- error +ERROR: ROLLBACK AND CHAIN can only be used in transaction blocks +SELECT * FROM trans_abc ORDER BY 1; + a +--- + 1 + 2 + 4 + 5 +(4 rows) + +RESET default_transaction_read_only; +DROP TABLE trans_abc; +-- Test assorted behaviors around the implicit transaction block created +-- when multiple SQL commands are sent in a single Query message. These +-- tests rely on the fact that psql will not break SQL commands apart at a +-- backslash-quoted semicolon, but will send them as one Query. +create temp table i_table (f1 int); +-- psql will show all results of a multi-statement Query +SELECT 1\; SELECT 2\; SELECT 3; + ?column? +---------- + 1 +(1 row) + + ?column? +---------- + 2 +(1 row) + + ?column? +---------- + 3 +(1 row) + +-- this implicitly commits: +insert into i_table values(1)\; select * from i_table; + f1 +---- + 1 +(1 row) + +-- 1/0 error will cause rolling back the whole implicit transaction +insert into i_table values(2)\; select * from i_table\; select 1/0; + f1 +---- + 1 + 2 +(2 rows) + +ERROR: division by zero +select * from i_table; + f1 +---- + 1 +(1 row) + +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +-- can use regular begin/commit/rollback within a single Query +begin\; insert into i_table values(3)\; commit; +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +begin\; insert into i_table values(4)\; rollback; +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +-- begin converts implicit transaction into a regular one that +-- can extend past the end of the Query +select 1\; begin\; insert into i_table values(5); + ?column? +---------- + 1 +(1 row) + +commit; +select 1\; begin\; insert into i_table values(6); + ?column? +---------- + 1 +(1 row) + +rollback; +-- commit in implicit-transaction state commits but issues a warning. +insert into i_table values(7)\; commit\; insert into i_table values(8)\; select 1/0; +WARNING: there is no transaction in progress +ERROR: division by zero +-- similarly, rollback aborts but issues a warning. +insert into i_table values(9)\; rollback\; select 2; +WARNING: there is no transaction in progress + ?column? 
+---------- + 2 +(1 row) + +select * from i_table; + f1 +---- + 1 + 3 + 5 + 7 +(4 rows) + +rollback; -- we are not in a transaction at this point +WARNING: there is no transaction in progress +-- implicit transaction block is still a transaction block, for e.g. VACUUM +SELECT 1\; VACUUM; + ?column? +---------- + 1 +(1 row) + +ERROR: VACUUM cannot run inside a transaction block +SELECT 1\; COMMIT\; VACUUM; +WARNING: there is no transaction in progress + ?column? +---------- + 1 +(1 row) + +ERROR: VACUUM cannot run inside a transaction block +-- we disallow savepoint-related commands in implicit-transaction state +SELECT 1\; SAVEPOINT sp; + ?column? +---------- + 1 +(1 row) + +ERROR: SAVEPOINT can only be used in transaction blocks +SELECT 1\; COMMIT\; SAVEPOINT sp; +WARNING: there is no transaction in progress + ?column? +---------- + 1 +(1 row) + +ERROR: SAVEPOINT can only be used in transaction blocks +ROLLBACK TO SAVEPOINT sp\; SELECT 2; +ERROR: ROLLBACK TO SAVEPOINT can only be used in transaction blocks +SELECT 2\; RELEASE SAVEPOINT sp\; SELECT 3; + ?column? +---------- + 2 +(1 row) + +ERROR: RELEASE SAVEPOINT can only be used in transaction blocks +-- but this is OK, because the BEGIN converts it to a regular xact +SELECT 1\; BEGIN\; SAVEPOINT sp\; ROLLBACK TO SAVEPOINT sp\; COMMIT; + ?column? +---------- + 1 +(1 row) + +-- Tests for AND CHAIN in implicit transaction blocks +SET TRANSACTION READ ONLY\; COMMIT AND CHAIN; -- error +ERROR: COMMIT AND CHAIN can only be used in transaction blocks +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +SET TRANSACTION READ ONLY\; ROLLBACK AND CHAIN; -- error +ERROR: ROLLBACK AND CHAIN can only be used in transaction blocks +SHOW transaction_read_only; + transaction_read_only +----------------------- + off +(1 row) + +CREATE TABLE trans_abc (a int); +-- COMMIT/ROLLBACK + COMMIT/ROLLBACK AND CHAIN +INSERT INTO trans_abc VALUES (7)\; COMMIT\; INSERT INTO trans_abc VALUES (8)\; COMMIT AND CHAIN; -- 7 commit, 8 error +WARNING: there is no transaction in progress +ERROR: COMMIT AND CHAIN can only be used in transaction blocks +INSERT INTO trans_abc VALUES (9)\; ROLLBACK\; INSERT INTO trans_abc VALUES (10)\; ROLLBACK AND CHAIN; -- 9 rollback, 10 error +WARNING: there is no transaction in progress +ERROR: ROLLBACK AND CHAIN can only be used in transaction blocks +-- COMMIT/ROLLBACK AND CHAIN + COMMIT/ROLLBACK +INSERT INTO trans_abc VALUES (11)\; COMMIT AND CHAIN\; INSERT INTO trans_abc VALUES (12)\; COMMIT; -- 11 error, 12 not reached +ERROR: COMMIT AND CHAIN can only be used in transaction blocks +INSERT INTO trans_abc VALUES (13)\; ROLLBACK AND CHAIN\; INSERT INTO trans_abc VALUES (14)\; ROLLBACK; -- 13 error, 14 not reached +ERROR: ROLLBACK AND CHAIN can only be used in transaction blocks +-- START TRANSACTION + COMMIT/ROLLBACK AND CHAIN +START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (15)\; COMMIT AND CHAIN; -- 15 ok +SHOW transaction_isolation; -- transaction is active at this point + transaction_isolation +----------------------- + repeatable read +(1 row) + +COMMIT; +START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (16)\; ROLLBACK AND CHAIN; -- 16 ok +SHOW transaction_isolation; -- transaction is active at this point + transaction_isolation +----------------------- + repeatable read +(1 row) + +ROLLBACK; +SET default_transaction_isolation = 'read committed'; +-- START TRANSACTION + COMMIT/ROLLBACK + COMMIT/ROLLBACK AND CHAIN +START 
TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (17)\; COMMIT\; INSERT INTO trans_abc VALUES (18)\; COMMIT AND CHAIN; -- 17 commit, 18 error +ERROR: COMMIT AND CHAIN can only be used in transaction blocks +SHOW transaction_isolation; -- out of transaction block + transaction_isolation +----------------------- + read committed +(1 row) + +START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (19)\; ROLLBACK\; INSERT INTO trans_abc VALUES (20)\; ROLLBACK AND CHAIN; -- 19 rollback, 20 error +ERROR: ROLLBACK AND CHAIN can only be used in transaction blocks +SHOW transaction_isolation; -- out of transaction block + transaction_isolation +----------------------- + read committed +(1 row) + +RESET default_transaction_isolation; +SELECT * FROM trans_abc ORDER BY 1; + a +---- + 7 + 15 + 17 +(3 rows) + +DROP TABLE trans_abc; +-- Test for successful cleanup of an aborted transaction at session exit. +-- THIS MUST BE THE LAST TEST IN THIS FILE. +begin; +select 1/0; +ERROR: division by zero +rollback to X; +ERROR: savepoint "x" does not exist +-- DO NOT ADD ANYTHING HERE. diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out new file mode 100644 index 0000000..78e9030 --- /dev/null +++ b/src/test/regress/expected/triggers.out @@ -0,0 +1,3711 @@ +-- +-- TRIGGERS +-- +-- directory paths and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set autoinclib :libdir '/autoinc' :dlsuffix +\set refintlib :libdir '/refint' :dlsuffix +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION autoinc () + RETURNS trigger + AS :'autoinclib' + LANGUAGE C; +CREATE FUNCTION check_primary_key () + RETURNS trigger + AS :'refintlib' + LANGUAGE C; +CREATE FUNCTION check_foreign_key () + RETURNS trigger + AS :'refintlib' + LANGUAGE C; +CREATE FUNCTION trigger_return_old () + RETURNS trigger + AS :'regresslib' + LANGUAGE C; +CREATE FUNCTION set_ttdummy (int4) + RETURNS int4 + AS :'regresslib' + LANGUAGE C STRICT; +create table pkeys (pkey1 int4 not null, pkey2 text not null); +create table fkeys (fkey1 int4, fkey2 text, fkey3 int); +create table fkeys2 (fkey21 int4, fkey22 text, pkey23 int not null); +create index fkeys_i on fkeys (fkey1, fkey2); +create index fkeys2_i on fkeys2 (fkey21, fkey22); +create index fkeys2p_i on fkeys2 (pkey23); +insert into pkeys values (10, '1'); +insert into pkeys values (20, '2'); +insert into pkeys values (30, '3'); +insert into pkeys values (40, '4'); +insert into pkeys values (50, '5'); +insert into pkeys values (60, '6'); +create unique index pkeys_i on pkeys (pkey1, pkey2); +-- +-- For fkeys: +-- (fkey1, fkey2) --> pkeys (pkey1, pkey2) +-- (fkey3) --> fkeys2 (pkey23) +-- +create trigger check_fkeys_pkey_exist + before insert or update on fkeys + for each row + execute function + check_primary_key ('fkey1', 'fkey2', 'pkeys', 'pkey1', 'pkey2'); +create trigger check_fkeys_pkey2_exist + before insert or update on fkeys + for each row + execute function check_primary_key ('fkey3', 'fkeys2', 'pkey23'); +-- +-- For fkeys2: +-- (fkey21, fkey22) --> pkeys (pkey1, pkey2) +-- +create trigger check_fkeys2_pkey_exist + before insert or update on fkeys2 + for each row + execute procedure + check_primary_key ('fkey21', 'fkey22', 'pkeys', 'pkey1', 'pkey2'); +-- Test comments +COMMENT ON TRIGGER check_fkeys2_pkey_bad ON fkeys2 IS 'wrong'; +ERROR: trigger "check_fkeys2_pkey_bad" for table "fkeys2" does not exist +COMMENT ON TRIGGER 
check_fkeys2_pkey_exist ON fkeys2 IS 'right'; +COMMENT ON TRIGGER check_fkeys2_pkey_exist ON fkeys2 IS NULL; +-- +-- For pkeys: +-- ON DELETE/UPDATE (pkey1, pkey2) CASCADE: +-- fkeys (fkey1, fkey2) and fkeys2 (fkey21, fkey22) +-- +create trigger check_pkeys_fkey_cascade + before delete or update on pkeys + for each row + execute procedure + check_foreign_key (2, 'cascade', 'pkey1', 'pkey2', + 'fkeys', 'fkey1', 'fkey2', 'fkeys2', 'fkey21', 'fkey22'); +-- +-- For fkeys2: +-- ON DELETE/UPDATE (pkey23) RESTRICT: +-- fkeys (fkey3) +-- +create trigger check_fkeys2_fkey_restrict + before delete or update on fkeys2 + for each row + execute procedure check_foreign_key (1, 'restrict', 'pkey23', 'fkeys', 'fkey3'); +insert into fkeys2 values (10, '1', 1); +insert into fkeys2 values (30, '3', 2); +insert into fkeys2 values (40, '4', 5); +insert into fkeys2 values (50, '5', 3); +-- no key in pkeys +insert into fkeys2 values (70, '5', 3); +ERROR: tuple references non-existent key +DETAIL: Trigger "check_fkeys2_pkey_exist" found tuple referencing non-existent key in "pkeys". +insert into fkeys values (10, '1', 2); +insert into fkeys values (30, '3', 3); +insert into fkeys values (40, '4', 2); +insert into fkeys values (50, '5', 2); +-- no key in pkeys +insert into fkeys values (70, '5', 1); +ERROR: tuple references non-existent key +DETAIL: Trigger "check_fkeys_pkey_exist" found tuple referencing non-existent key in "pkeys". +-- no key in fkeys2 +insert into fkeys values (60, '6', 4); +ERROR: tuple references non-existent key +DETAIL: Trigger "check_fkeys_pkey2_exist" found tuple referencing non-existent key in "fkeys2". +delete from pkeys where pkey1 = 30 and pkey2 = '3'; +NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys are deleted +ERROR: "check_fkeys2_fkey_restrict": tuple is referenced in "fkeys" +CONTEXT: SQL statement "delete from fkeys2 where fkey21 = $1 and fkey22 = $2 " +delete from pkeys where pkey1 = 40 and pkey2 = '4'; +NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys are deleted +NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys2 are deleted +update pkeys set pkey1 = 7, pkey2 = '70' where pkey1 = 50 and pkey2 = '5'; +NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys are deleted +ERROR: "check_fkeys2_fkey_restrict": tuple is referenced in "fkeys" +CONTEXT: SQL statement "delete from fkeys2 where fkey21 = $1 and fkey22 = $2 " +update pkeys set pkey1 = 7, pkey2 = '70' where pkey1 = 10 and pkey2 = '1'; +NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys are deleted +NOTICE: check_pkeys_fkey_cascade: 1 tuple(s) of fkeys2 are deleted +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table in ('pkeys', 'fkeys', 'fkeys2') + ORDER BY trigger_name COLLATE "C", 2; + trigger_name | event_manipulation | event_object_schema | event_object_table | action_order | action_condition | action_orientation | action_timing | action_reference_old_table | action_reference_new_table +----------------------------+--------------------+---------------------+--------------------+--------------+------------------+--------------------+---------------+----------------------------+---------------------------- + check_fkeys2_fkey_restrict | DELETE | public | fkeys2 | 1 | | ROW | BEFORE | | + check_fkeys2_fkey_restrict | UPDATE | public | fkeys2 | 1 | | ROW | BEFORE | | + check_fkeys2_pkey_exist 
| INSERT | public | fkeys2 | 1 | | ROW | BEFORE | | + check_fkeys2_pkey_exist | UPDATE | public | fkeys2 | 2 | | ROW | BEFORE | | + check_fkeys_pkey2_exist | INSERT | public | fkeys | 1 | | ROW | BEFORE | | + check_fkeys_pkey2_exist | UPDATE | public | fkeys | 1 | | ROW | BEFORE | | + check_fkeys_pkey_exist | INSERT | public | fkeys | 2 | | ROW | BEFORE | | + check_fkeys_pkey_exist | UPDATE | public | fkeys | 2 | | ROW | BEFORE | | + check_pkeys_fkey_cascade | DELETE | public | pkeys | 1 | | ROW | BEFORE | | + check_pkeys_fkey_cascade | UPDATE | public | pkeys | 1 | | ROW | BEFORE | | +(10 rows) + +DROP TABLE pkeys; +DROP TABLE fkeys; +DROP TABLE fkeys2; +-- Check behavior when trigger returns unmodified trigtuple +create table trigtest (f1 int, f2 text); +create trigger trigger_return_old + before insert or delete or update on trigtest + for each row execute procedure trigger_return_old(); +insert into trigtest values(1, 'foo'); +select * from trigtest; + f1 | f2 +----+----- + 1 | foo +(1 row) + +update trigtest set f2 = f2 || 'bar'; +select * from trigtest; + f1 | f2 +----+----- + 1 | foo +(1 row) + +delete from trigtest; +select * from trigtest; + f1 | f2 +----+---- +(0 rows) + +-- Also check what happens when such a trigger runs before or after others +create function f1_times_10() returns trigger as +$$ begin new.f1 := new.f1 * 10; return new; end $$ language plpgsql; +create trigger trigger_alpha + before insert or update on trigtest + for each row execute procedure f1_times_10(); +insert into trigtest values(1, 'foo'); +select * from trigtest; + f1 | f2 +----+----- + 10 | foo +(1 row) + +update trigtest set f2 = f2 || 'bar'; +select * from trigtest; + f1 | f2 +----+----- + 10 | foo +(1 row) + +delete from trigtest; +select * from trigtest; + f1 | f2 +----+---- +(0 rows) + +create trigger trigger_zed + before insert or update on trigtest + for each row execute procedure f1_times_10(); +insert into trigtest values(1, 'foo'); +select * from trigtest; + f1 | f2 +-----+----- + 100 | foo +(1 row) + +update trigtest set f2 = f2 || 'bar'; +select * from trigtest; + f1 | f2 +------+----- + 1000 | foo +(1 row) + +delete from trigtest; +select * from trigtest; + f1 | f2 +----+---- +(0 rows) + +drop trigger trigger_alpha on trigtest; +insert into trigtest values(1, 'foo'); +select * from trigtest; + f1 | f2 +----+----- + 10 | foo +(1 row) + +update trigtest set f2 = f2 || 'bar'; +select * from trigtest; + f1 | f2 +-----+----- + 100 | foo +(1 row) + +delete from trigtest; +select * from trigtest; + f1 | f2 +----+---- +(0 rows) + +drop table trigtest; +-- Check behavior with an implicit column default, too (bug #16644) +create table trigtest ( + a integer, + b bool default true not null, + c text default 'xyzzy' not null); +create trigger trigger_return_old + before insert or delete or update on trigtest + for each row execute procedure trigger_return_old(); +insert into trigtest values(1); +select * from trigtest; + a | b | c +---+---+------- + 1 | t | xyzzy +(1 row) + +alter table trigtest add column d integer default 42 not null; +select * from trigtest; + a | b | c | d +---+---+-------+---- + 1 | t | xyzzy | 42 +(1 row) + +update trigtest set a = 2 where a = 1 returning *; + a | b | c | d +---+---+-------+---- + 1 | t | xyzzy | 42 +(1 row) + +select * from trigtest; + a | b | c | d +---+---+-------+---- + 1 | t | xyzzy | 42 +(1 row) + +alter table trigtest drop column b; +select * from trigtest; + a | c | d +---+-------+---- + 1 | xyzzy | 42 +(1 row) + +update trigtest set a = 2 where a = 1 
returning *; + a | c | d +---+-------+---- + 1 | xyzzy | 42 +(1 row) + +select * from trigtest; + a | c | d +---+-------+---- + 1 | xyzzy | 42 +(1 row) + +drop table trigtest; +create sequence ttdummy_seq increment 10 start 0 minvalue 0; +create table tttest ( + price_id int4, + price_val int4, + price_on int4, + price_off int4 default 999999 +); +create trigger ttdummy + before delete or update on tttest + for each row + execute procedure + ttdummy (price_on, price_off); +create trigger ttserial + before insert or update on tttest + for each row + execute procedure + autoinc (price_on, ttdummy_seq); +insert into tttest values (1, 1, null); +insert into tttest values (2, 2, null); +insert into tttest values (3, 3, 0); +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 2 | 2 | 20 | 999999 + 3 | 3 | 30 | 999999 +(3 rows) + +delete from tttest where price_id = 2; +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 3 | 3 | 30 | 999999 + 2 | 2 | 20 | 40 +(3 rows) + +-- what do we see ? +-- get current prices +select * from tttest where price_off = 999999; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 3 | 3 | 30 | 999999 +(2 rows) + +-- change price for price_id == 3 +update tttest set price_val = 30 where price_id = 3; +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 2 | 2 | 20 | 40 + 3 | 30 | 50 | 999999 + 3 | 3 | 30 | 50 +(4 rows) + +-- now we want to change pric_id in ALL tuples +-- this gets us not what we need +update tttest set price_id = 5 where price_id = 3; +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 2 | 2 | 20 | 40 + 3 | 3 | 30 | 50 + 5 | 30 | 60 | 999999 + 3 | 30 | 50 | 60 +(5 rows) + +-- restore data as before last update: +select set_ttdummy(0); + set_ttdummy +------------- + 1 +(1 row) + +delete from tttest where price_id = 5; +update tttest set price_off = 999999 where price_val = 30; +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 2 | 2 | 20 | 40 + 3 | 3 | 30 | 50 + 3 | 30 | 50 | 999999 +(4 rows) + +-- and try change price_id now! +update tttest set price_id = 5 where price_id = 3; +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 1 | 1 | 10 | 999999 + 2 | 2 | 20 | 40 + 5 | 3 | 30 | 50 + 5 | 30 | 50 | 999999 +(4 rows) + +-- isn't it what we need ? +select set_ttdummy(1); + set_ttdummy +------------- + 0 +(1 row) + +-- we want to correct some "date" +update tttest set price_on = -1 where price_id = 1; +ERROR: ttdummy (tttest): you cannot change price_on and/or price_off columns (use set_ttdummy) +-- but this doesn't work +-- try in this way +select set_ttdummy(0); + set_ttdummy +------------- + 1 +(1 row) + +update tttest set price_on = -1 where price_id = 1; +select * from tttest; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 2 | 2 | 20 | 40 + 5 | 3 | 30 | 50 + 5 | 30 | 50 | 999999 + 1 | 1 | -1 | 999999 +(4 rows) + +-- isn't it what we need ? 
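The ttdummy trigger exercised above comes from the regress C library: on UPDATE it keeps the outgoing row as a closed version and stamps the incoming row with the next value of ttdummy_seq, and on DELETE it closes the row instead of removing it, which is why tttest keeps growing. A rough plpgsql rendering of just the UPDATE half of that idea, offered only as a sketch (tt_update_sketch is a hypothetical name and makes no attempt to match the C implementation exactly):

create function tt_update_sketch() returns trigger language plpgsql as $$
declare
  t integer := nextval('ttdummy_seq');
begin
  -- preserve the outgoing version, closing its validity interval at "time" t
  insert into tttest values (old.price_id, old.price_val, old.price_on, t);
  -- the incoming version becomes current from t onwards
  new.price_on := t;
  new.price_off := 999999;
  return new;
end $$;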
+-- get price for price_id == 5 as it was @ "date" 35 +select * from tttest where price_on <= 35 and price_off > 35 and price_id = 5; + price_id | price_val | price_on | price_off +----------+-----------+----------+----------- + 5 | 3 | 30 | 50 +(1 row) + +drop table tttest; +drop sequence ttdummy_seq; +-- +-- tests for per-statement triggers +-- +CREATE TABLE log_table (tstamp timestamp default timeofday()::timestamp); +CREATE TABLE main_table (a int unique, b int); +COPY main_table (a,b) FROM stdin; +CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS ' +BEGIN + RAISE NOTICE ''trigger_func(%) called: action = %, when = %, level = %'', TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL; + RETURN NULL; +END;'; +CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_table +FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('before_ins_stmt'); +CREATE TRIGGER after_ins_stmt_trig AFTER INSERT ON main_table +FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_ins_stmt'); +-- +-- if neither 'FOR EACH ROW' nor 'FOR EACH STATEMENT' was specified, +-- CREATE TRIGGER should default to 'FOR EACH STATEMENT' +-- +CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_table +EXECUTE PROCEDURE trigger_func('after_upd_stmt'); +-- Both insert and update statement level triggers (before and after) should +-- fire. Doesn't fire UPDATE before trigger, but only because one isn't +-- defined. +INSERT INTO main_table (a, b) VALUES (5, 10) ON CONFLICT (a) + DO UPDATE SET b = EXCLUDED.b; +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +CREATE TRIGGER after_upd_row_trig AFTER UPDATE ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_row'); +INSERT INTO main_table DEFAULT VALUES; +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +UPDATE main_table SET a = a + 1 WHERE b < 30; +NOTICE: trigger_func(after_upd_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +-- UPDATE that effects zero rows should still call per-statement trigger +UPDATE main_table SET a = a + 2 WHERE b > 100; +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +-- constraint now unneeded +ALTER TABLE main_table DROP CONSTRAINT main_table_a_key; +-- COPY should fire per-row and per-statement INSERT triggers +COPY main_table (a, b) FROM stdin; +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +SELECT * FROM main_table ORDER BY a, b; + a | b +----+---- + 6 | 10 + 21 | 20 + 30 | 40 + 31 | 10 + 50 | 35 + 50 | 60 + 81 | 15 + | +(8 rows) + +-- +-- test triggers with WHEN clause +-- +CREATE TRIGGER modified_a BEFORE UPDATE OF a ON main_table +FOR EACH ROW WHEN (OLD.a <> NEW.a) EXECUTE PROCEDURE 
trigger_func('modified_a'); +CREATE TRIGGER modified_any BEFORE UPDATE OF a ON main_table +FOR EACH ROW WHEN (OLD.* IS DISTINCT FROM NEW.*) EXECUTE PROCEDURE trigger_func('modified_any'); +CREATE TRIGGER insert_a AFTER INSERT ON main_table +FOR EACH ROW WHEN (NEW.a = 123) EXECUTE PROCEDURE trigger_func('insert_a'); +CREATE TRIGGER delete_a AFTER DELETE ON main_table +FOR EACH ROW WHEN (OLD.a = 123) EXECUTE PROCEDURE trigger_func('delete_a'); +CREATE TRIGGER insert_when BEFORE INSERT ON main_table +FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('insert_when'); +CREATE TRIGGER delete_when AFTER DELETE ON main_table +FOR EACH STATEMENT WHEN (true) EXECUTE PROCEDURE trigger_func('delete_when'); +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table IN ('main_table') + ORDER BY trigger_name COLLATE "C", 2; + trigger_name | event_manipulation | event_object_schema | event_object_table | action_order | action_condition | action_orientation | action_timing | action_reference_old_table | action_reference_new_table +----------------------+--------------------+---------------------+--------------------+--------------+--------------------------------+--------------------+---------------+----------------------------+---------------------------- + after_ins_stmt_trig | INSERT | public | main_table | 1 | | STATEMENT | AFTER | | + after_upd_row_trig | UPDATE | public | main_table | 1 | | ROW | AFTER | | + after_upd_stmt_trig | UPDATE | public | main_table | 1 | | STATEMENT | AFTER | | + before_ins_stmt_trig | INSERT | public | main_table | 1 | | STATEMENT | BEFORE | | + delete_a | DELETE | public | main_table | 1 | (old.a = 123) | ROW | AFTER | | + delete_when | DELETE | public | main_table | 1 | true | STATEMENT | AFTER | | + insert_a | INSERT | public | main_table | 1 | (new.a = 123) | ROW | AFTER | | + insert_when | INSERT | public | main_table | 2 | true | STATEMENT | BEFORE | | + modified_a | UPDATE | public | main_table | 1 | (old.a <> new.a) | ROW | BEFORE | | + modified_any | UPDATE | public | main_table | 2 | (old.* IS DISTINCT FROM new.*) | ROW | BEFORE | | +(10 rows) + +INSERT INTO main_table (a) VALUES (123), (456); +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(insert_when) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(insert_a) called: action = INSERT, when = AFTER, level = ROW +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +COPY main_table FROM stdin; +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(insert_when) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(insert_a) called: action = INSERT, when = AFTER, level = ROW +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +DELETE FROM main_table WHERE a IN (123, 456); +NOTICE: trigger_func(delete_a) called: action = DELETE, when = AFTER, level = ROW +NOTICE: trigger_func(delete_a) called: action = DELETE, when = AFTER, level = ROW +NOTICE: trigger_func(delete_when) called: action = DELETE, when = AFTER, level = STATEMENT +UPDATE main_table SET a = 50, b = 60; +NOTICE: trigger_func(modified_any) called: action = UPDATE, 
when = BEFORE, level = ROW +NOTICE: trigger_func(modified_any) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(modified_a) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(modified_a) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(modified_a) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(modified_a) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(modified_a) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(after_upd_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +SELECT * FROM main_table ORDER BY a, b; + a | b +----+---- + 6 | 10 + 21 | 20 + 30 | 40 + 31 | 10 + 50 | 35 + 50 | 60 + 81 | 15 + | +(8 rows) + +SELECT pg_get_triggerdef(oid, true) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_a'; + pg_get_triggerdef +------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER modified_a BEFORE UPDATE OF a ON main_table FOR EACH ROW WHEN (old.a <> new.a) EXECUTE FUNCTION trigger_func('modified_a') +(1 row) + +SELECT pg_get_triggerdef(oid, false) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_a'; + pg_get_triggerdef +---------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER modified_a BEFORE UPDATE OF a ON public.main_table FOR EACH ROW WHEN ((old.a <> new.a)) EXECUTE FUNCTION trigger_func('modified_a') +(1 row) + +SELECT pg_get_triggerdef(oid, true) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_any'; + pg_get_triggerdef +------------------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER modified_any BEFORE UPDATE OF a ON main_table FOR EACH ROW WHEN (old.* IS DISTINCT FROM new.*) EXECUTE FUNCTION trigger_func('modified_any') +(1 row) + +-- Test RENAME TRIGGER +ALTER TRIGGER modified_a ON main_table RENAME TO modified_modified_a; +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_a'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'modified_modified_a'; + count +------- + 1 +(1 row) + +DROP TRIGGER modified_modified_a ON main_table; +DROP TRIGGER modified_any ON main_table; +DROP TRIGGER insert_a ON main_table; +DROP TRIGGER delete_a ON main_table; +DROP TRIGGER insert_when ON main_table; +DROP TRIGGER delete_when ON main_table; +-- Test WHEN condition accessing system columns. 
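tableoid, referenced in the WHEN clause below, is the system column holding the OID of the table a row physically came from; it is mostly of interest when scanning an inheritance or partitioning hierarchy, and for a plain table the OLD and NEW values necessarily agree, so the trigger fires on every update. The column can also be read directly, e.g. (illustrative query only):

select tableoid::regclass as source_table, a, b from main_table limit 1;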
+create table table_with_oids(a int); +insert into table_with_oids values (1); +create trigger oid_unchanged_trig after update on table_with_oids + for each row + when (new.tableoid = old.tableoid AND new.tableoid <> 0) + execute procedure trigger_func('after_upd_oid_unchanged'); +update table_with_oids set a = a + 1; +NOTICE: trigger_func(after_upd_oid_unchanged) called: action = UPDATE, when = AFTER, level = ROW +drop table table_with_oids; +-- Test column-level triggers +DROP TRIGGER after_upd_row_trig ON main_table; +CREATE TRIGGER before_upd_a_row_trig BEFORE UPDATE OF a ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_upd_a_row'); +CREATE TRIGGER after_upd_b_row_trig AFTER UPDATE OF b ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_b_row'); +CREATE TRIGGER after_upd_a_b_row_trig AFTER UPDATE OF a, b ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('after_upd_a_b_row'); +CREATE TRIGGER before_upd_a_stmt_trig BEFORE UPDATE OF a ON main_table +FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('before_upd_a_stmt'); +CREATE TRIGGER after_upd_b_stmt_trig AFTER UPDATE OF b ON main_table +FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func('after_upd_b_stmt'); +SELECT pg_get_triggerdef(oid) FROM pg_trigger WHERE tgrelid = 'main_table'::regclass AND tgname = 'after_upd_a_b_row_trig'; + pg_get_triggerdef +------------------------------------------------------------------------------------------------------------------------------------------------- + CREATE TRIGGER after_upd_a_b_row_trig AFTER UPDATE OF a, b ON public.main_table FOR EACH ROW EXECUTE FUNCTION trigger_func('after_upd_a_b_row') +(1 row) + +UPDATE main_table SET a = 50; +NOTICE: trigger_func(before_upd_a_stmt) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +UPDATE main_table SET b = 10; +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when 
= AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +-- +-- Test case for bug with BEFORE trigger followed by AFTER trigger with WHEN +-- +CREATE TABLE some_t (some_col boolean NOT NULL); +CREATE FUNCTION dummy_update_func() RETURNS trigger AS $$ +BEGIN + RAISE NOTICE 'dummy_update_func(%) called: action = %, old = %, new = %', + TG_ARGV[0], TG_OP, OLD, NEW; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER some_trig_before BEFORE UPDATE ON some_t FOR EACH ROW + EXECUTE PROCEDURE dummy_update_func('before'); +CREATE TRIGGER some_trig_aftera AFTER UPDATE ON some_t FOR EACH ROW + WHEN (NOT OLD.some_col AND NEW.some_col) + EXECUTE PROCEDURE dummy_update_func('aftera'); +CREATE TRIGGER some_trig_afterb AFTER UPDATE ON some_t FOR EACH ROW + WHEN (NOT NEW.some_col) + EXECUTE PROCEDURE dummy_update_func('afterb'); +INSERT INTO some_t VALUES (TRUE); +UPDATE some_t SET some_col = TRUE; +NOTICE: dummy_update_func(before) called: action = UPDATE, old = (t), new = (t) +UPDATE some_t SET some_col = FALSE; +NOTICE: dummy_update_func(before) called: action = UPDATE, old = (t), new = (f) +NOTICE: dummy_update_func(afterb) called: action = UPDATE, old = (t), new = (f) +UPDATE some_t SET some_col = TRUE; +NOTICE: dummy_update_func(before) called: action = UPDATE, old = (f), new = (t) +NOTICE: dummy_update_func(aftera) called: action = UPDATE, old = (f), new = (t) +DROP TABLE some_t; +-- bogus cases +CREATE TRIGGER error_upd_and_col BEFORE UPDATE OR UPDATE OF a ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('error_upd_and_col'); +ERROR: duplicate trigger events specified at or near "ON" +LINE 1: ...ER error_upd_and_col BEFORE UPDATE OR UPDATE OF a ON main_ta... 
+ ^ +CREATE TRIGGER error_upd_a_a BEFORE UPDATE OF a, a ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('error_upd_a_a'); +ERROR: column "a" specified more than once +CREATE TRIGGER error_ins_a BEFORE INSERT OF a ON main_table +FOR EACH ROW EXECUTE PROCEDURE trigger_func('error_ins_a'); +ERROR: syntax error at or near "OF" +LINE 1: CREATE TRIGGER error_ins_a BEFORE INSERT OF a ON main_table + ^ +CREATE TRIGGER error_ins_when BEFORE INSERT OR UPDATE ON main_table +FOR EACH ROW WHEN (OLD.a <> NEW.a) +EXECUTE PROCEDURE trigger_func('error_ins_old'); +ERROR: INSERT trigger's WHEN condition cannot reference OLD values +LINE 2: FOR EACH ROW WHEN (OLD.a <> NEW.a) + ^ +CREATE TRIGGER error_del_when BEFORE DELETE OR UPDATE ON main_table +FOR EACH ROW WHEN (OLD.a <> NEW.a) +EXECUTE PROCEDURE trigger_func('error_del_new'); +ERROR: DELETE trigger's WHEN condition cannot reference NEW values +LINE 2: FOR EACH ROW WHEN (OLD.a <> NEW.a) + ^ +CREATE TRIGGER error_del_when BEFORE INSERT OR UPDATE ON main_table +FOR EACH ROW WHEN (NEW.tableoid <> 0) +EXECUTE PROCEDURE trigger_func('error_when_sys_column'); +ERROR: BEFORE trigger's WHEN condition cannot reference NEW system columns +LINE 2: FOR EACH ROW WHEN (NEW.tableoid <> 0) + ^ +CREATE TRIGGER error_stmt_when BEFORE UPDATE OF a ON main_table +FOR EACH STATEMENT WHEN (OLD.* IS DISTINCT FROM NEW.*) +EXECUTE PROCEDURE trigger_func('error_stmt_when'); +ERROR: statement trigger's WHEN condition cannot reference column values +LINE 2: FOR EACH STATEMENT WHEN (OLD.* IS DISTINCT FROM NEW.*) + ^ +-- check dependency restrictions +ALTER TABLE main_table DROP COLUMN b; +ERROR: cannot drop column b of table main_table because other objects depend on it +DETAIL: trigger after_upd_b_row_trig on table main_table depends on column b of table main_table +trigger after_upd_a_b_row_trig on table main_table depends on column b of table main_table +trigger after_upd_b_stmt_trig on table main_table depends on column b of table main_table +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
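The HINT above names the alternative to dropping the three column-level triggers one by one: a CASCADE drop removes the column together with the triggers that depend on it. A minimal, illustrative sketch of that route, wrapped in a transaction and rolled back so the triggers are kept around, just like the manual variant that follows:

BEGIN;
ALTER TABLE main_table DROP COLUMN b CASCADE;  -- would also drop the three dependent triggers
ROLLBACK;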
+-- this should succeed, but we'll roll it back to keep the triggers around +begin; +DROP TRIGGER after_upd_a_b_row_trig ON main_table; +DROP TRIGGER after_upd_b_row_trig ON main_table; +DROP TRIGGER after_upd_b_stmt_trig ON main_table; +ALTER TABLE main_table DROP COLUMN b; +rollback; +-- Test enable/disable triggers +create table trigtest (i serial primary key); +-- test that disabling RI triggers works +create table trigtest2 (i int references trigtest(i) on delete cascade); +create function trigtest() returns trigger as $$ +begin + raise notice '% % % %', TG_TABLE_NAME, TG_OP, TG_WHEN, TG_LEVEL; + return new; +end;$$ language plpgsql; +create trigger trigtest_b_row_tg before insert or update or delete on trigtest +for each row execute procedure trigtest(); +create trigger trigtest_a_row_tg after insert or update or delete on trigtest +for each row execute procedure trigtest(); +create trigger trigtest_b_stmt_tg before insert or update or delete on trigtest +for each statement execute procedure trigtest(); +create trigger trigtest_a_stmt_tg after insert or update or delete on trigtest +for each statement execute procedure trigtest(); +insert into trigtest default values; +NOTICE: trigtest INSERT BEFORE STATEMENT +NOTICE: trigtest INSERT BEFORE ROW +NOTICE: trigtest INSERT AFTER ROW +NOTICE: trigtest INSERT AFTER STATEMENT +alter table trigtest disable trigger trigtest_b_row_tg; +insert into trigtest default values; +NOTICE: trigtest INSERT BEFORE STATEMENT +NOTICE: trigtest INSERT AFTER ROW +NOTICE: trigtest INSERT AFTER STATEMENT +alter table trigtest disable trigger user; +insert into trigtest default values; +alter table trigtest enable trigger trigtest_a_stmt_tg; +insert into trigtest default values; +NOTICE: trigtest INSERT AFTER STATEMENT +set session_replication_role = replica; +insert into trigtest default values; -- does not trigger +alter table trigtest enable always trigger trigtest_a_stmt_tg; +insert into trigtest default values; -- now it does +NOTICE: trigtest INSERT AFTER STATEMENT +reset session_replication_role; +insert into trigtest2 values(1); +insert into trigtest2 values(2); +delete from trigtest where i=2; +NOTICE: trigtest DELETE AFTER STATEMENT +select * from trigtest2; + i +--- + 1 +(1 row) + +alter table trigtest disable trigger all; +delete from trigtest where i=1; +select * from trigtest2; + i +--- + 1 +(1 row) + +-- ensure we still insert, even when all triggers are disabled +insert into trigtest default values; +select * from trigtest; + i +--- + 3 + 4 + 5 + 6 + 7 +(5 rows) + +drop table trigtest2; +drop table trigtest; +-- dump trigger data +CREATE TABLE trigger_test ( + i int, + v varchar +); +CREATE OR REPLACE FUNCTION trigger_data() RETURNS trigger +LANGUAGE plpgsql AS $$ + +declare + + argstr text; + relid text; + +begin + + relid := TG_relid::regclass; + + -- plpgsql can't discover its trigger data in a hash like perl and python + -- can, or by a sort of reflection like tcl can, + -- so we have to hard code the names. + raise NOTICE 'TG_NAME: %', TG_name; + raise NOTICE 'TG_WHEN: %', TG_when; + raise NOTICE 'TG_LEVEL: %', TG_level; + raise NOTICE 'TG_OP: %', TG_op; + raise NOTICE 'TG_RELID::regclass: %', relid; + raise NOTICE 'TG_RELNAME: %', TG_relname; + raise NOTICE 'TG_TABLE_NAME: %', TG_table_name; + raise NOTICE 'TG_TABLE_SCHEMA: %', TG_table_schema; + raise NOTICE 'TG_NARGS: %', TG_nargs; + + argstr := '['; + for i in 0 .. 
TG_nargs - 1 loop + if i > 0 then + argstr := argstr || ', '; + end if; + argstr := argstr || TG_argv[i]; + end loop; + argstr := argstr || ']'; + raise NOTICE 'TG_ARGV: %', argstr; + + if TG_OP != 'INSERT' then + raise NOTICE 'OLD: %', OLD; + end if; + + if TG_OP != 'DELETE' then + raise NOTICE 'NEW: %', NEW; + end if; + + if TG_OP = 'DELETE' then + return OLD; + else + return NEW; + end if; + +end; +$$; +CREATE TRIGGER show_trigger_data_trig +BEFORE INSERT OR UPDATE OR DELETE ON trigger_test +FOR EACH ROW EXECUTE PROCEDURE trigger_data(23,'skidoo'); +insert into trigger_test values(1,'insert'); +NOTICE: TG_NAME: show_trigger_data_trig +NOTICE: TG_WHEN: BEFORE +NOTICE: TG_LEVEL: ROW +NOTICE: TG_OP: INSERT +NOTICE: TG_RELID::regclass: trigger_test +NOTICE: TG_RELNAME: trigger_test +NOTICE: TG_TABLE_NAME: trigger_test +NOTICE: TG_TABLE_SCHEMA: public +NOTICE: TG_NARGS: 2 +NOTICE: TG_ARGV: [23, skidoo] +NOTICE: NEW: (1,insert) +update trigger_test set v = 'update' where i = 1; +NOTICE: TG_NAME: show_trigger_data_trig +NOTICE: TG_WHEN: BEFORE +NOTICE: TG_LEVEL: ROW +NOTICE: TG_OP: UPDATE +NOTICE: TG_RELID::regclass: trigger_test +NOTICE: TG_RELNAME: trigger_test +NOTICE: TG_TABLE_NAME: trigger_test +NOTICE: TG_TABLE_SCHEMA: public +NOTICE: TG_NARGS: 2 +NOTICE: TG_ARGV: [23, skidoo] +NOTICE: OLD: (1,insert) +NOTICE: NEW: (1,update) +delete from trigger_test; +NOTICE: TG_NAME: show_trigger_data_trig +NOTICE: TG_WHEN: BEFORE +NOTICE: TG_LEVEL: ROW +NOTICE: TG_OP: DELETE +NOTICE: TG_RELID::regclass: trigger_test +NOTICE: TG_RELNAME: trigger_test +NOTICE: TG_TABLE_NAME: trigger_test +NOTICE: TG_TABLE_SCHEMA: public +NOTICE: TG_NARGS: 2 +NOTICE: TG_ARGV: [23, skidoo] +NOTICE: OLD: (1,update) +DROP TRIGGER show_trigger_data_trig on trigger_test; +DROP FUNCTION trigger_data(); +DROP TABLE trigger_test; +-- +-- Test use of row comparisons on OLD/NEW +-- +CREATE TABLE trigger_test (f1 int, f2 text, f3 text); +-- this is the obvious (and wrong...) 
way to compare rows +CREATE FUNCTION mytrigger() RETURNS trigger LANGUAGE plpgsql as $$ +begin + if row(old.*) = row(new.*) then + raise notice 'row % not changed', new.f1; + else + raise notice 'row % changed', new.f1; + end if; + return new; +end$$; +CREATE TRIGGER t +BEFORE UPDATE ON trigger_test +FOR EACH ROW EXECUTE PROCEDURE mytrigger(); +INSERT INTO trigger_test VALUES(1, 'foo', 'bar'); +INSERT INTO trigger_test VALUES(2, 'baz', 'quux'); +UPDATE trigger_test SET f3 = 'bar'; +NOTICE: row 1 not changed +NOTICE: row 2 changed +UPDATE trigger_test SET f3 = NULL; +NOTICE: row 1 changed +NOTICE: row 2 changed +-- this demonstrates that the above isn't really working as desired: +UPDATE trigger_test SET f3 = NULL; +NOTICE: row 1 changed +NOTICE: row 2 changed +-- the right way when considering nulls is +CREATE OR REPLACE FUNCTION mytrigger() RETURNS trigger LANGUAGE plpgsql as $$ +begin + if row(old.*) is distinct from row(new.*) then + raise notice 'row % changed', new.f1; + else + raise notice 'row % not changed', new.f1; + end if; + return new; +end$$; +UPDATE trigger_test SET f3 = 'bar'; +NOTICE: row 1 changed +NOTICE: row 2 changed +UPDATE trigger_test SET f3 = NULL; +NOTICE: row 1 changed +NOTICE: row 2 changed +UPDATE trigger_test SET f3 = NULL; +NOTICE: row 1 not changed +NOTICE: row 2 not changed +DROP TABLE trigger_test; +DROP FUNCTION mytrigger(); +-- Test snapshot management in serializable transactions involving triggers +-- per bug report in 6bc73d4c0910042358k3d1adff3qa36f8df75198ecea@mail.gmail.com +CREATE FUNCTION serializable_update_trig() RETURNS trigger LANGUAGE plpgsql AS +$$ +declare + rec record; +begin + new.description = 'updated in trigger'; + return new; +end; +$$; +CREATE TABLE serializable_update_tab ( + id int, + filler text, + description text +); +CREATE TRIGGER serializable_update_trig BEFORE UPDATE ON serializable_update_tab + FOR EACH ROW EXECUTE PROCEDURE serializable_update_trig(); +INSERT INTO serializable_update_tab SELECT a, repeat('xyzxz', 100), 'new' + FROM generate_series(1, 50) a; +BEGIN; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +UPDATE serializable_update_tab SET description = 'no no', id = 1 WHERE id = 1; +COMMIT; +SELECT description FROM serializable_update_tab WHERE id = 1; + description +-------------------- + updated in trigger +(1 row) + +DROP TABLE serializable_update_tab; +-- minimal update trigger +CREATE TABLE min_updates_test ( + f1 text, + f2 int, + f3 int); +INSERT INTO min_updates_test VALUES ('a',1,2),('b','2',null); +CREATE TRIGGER z_min_update +BEFORE UPDATE ON min_updates_test +FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); +\set QUIET false +UPDATE min_updates_test SET f1 = f1; +UPDATE 0 +UPDATE min_updates_test SET f2 = f2 + 1; +UPDATE 2 +UPDATE min_updates_test SET f3 = 2 WHERE f3 is null; +UPDATE 1 +\set QUIET true +SELECT * FROM min_updates_test; + f1 | f2 | f3 +----+----+---- + a | 2 | 2 + b | 3 | 2 +(2 rows) + +DROP TABLE min_updates_test; +-- +-- Test triggers on views +-- +CREATE VIEW main_view AS SELECT a, b FROM main_table; +-- VIEW trigger function +CREATE OR REPLACE FUNCTION view_trigger() RETURNS trigger +LANGUAGE plpgsql AS $$ +declare + argstr text := ''; +begin + for i in 0 .. 
TG_nargs - 1 loop + if i > 0 then + argstr := argstr || ', '; + end if; + argstr := argstr || TG_argv[i]; + end loop; + + raise notice '% % % % (%)', TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, argstr; + + if TG_LEVEL = 'ROW' then + if TG_OP = 'INSERT' then + raise NOTICE 'NEW: %', NEW; + INSERT INTO main_table VALUES (NEW.a, NEW.b); + RETURN NEW; + end if; + + if TG_OP = 'UPDATE' then + raise NOTICE 'OLD: %, NEW: %', OLD, NEW; + UPDATE main_table SET a = NEW.a, b = NEW.b WHERE a = OLD.a AND b = OLD.b; + if NOT FOUND then RETURN NULL; end if; + RETURN NEW; + end if; + + if TG_OP = 'DELETE' then + raise NOTICE 'OLD: %', OLD; + DELETE FROM main_table WHERE a = OLD.a AND b = OLD.b; + if NOT FOUND then RETURN NULL; end if; + RETURN OLD; + end if; + end if; + + RETURN NULL; +end; +$$; +-- Before row triggers aren't allowed on views +CREATE TRIGGER invalid_trig BEFORE INSERT ON main_view +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_ins_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have row-level BEFORE or AFTER triggers. +CREATE TRIGGER invalid_trig BEFORE UPDATE ON main_view +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_upd_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have row-level BEFORE or AFTER triggers. +CREATE TRIGGER invalid_trig BEFORE DELETE ON main_view +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_del_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have row-level BEFORE or AFTER triggers. +-- After row triggers aren't allowed on views +CREATE TRIGGER invalid_trig AFTER INSERT ON main_view +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_ins_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have row-level BEFORE or AFTER triggers. +CREATE TRIGGER invalid_trig AFTER UPDATE ON main_view +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_upd_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have row-level BEFORE or AFTER triggers. +CREATE TRIGGER invalid_trig AFTER DELETE ON main_view +FOR EACH ROW EXECUTE PROCEDURE trigger_func('before_del_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have row-level BEFORE or AFTER triggers. +-- Truncate triggers aren't allowed on views +CREATE TRIGGER invalid_trig BEFORE TRUNCATE ON main_view +EXECUTE PROCEDURE trigger_func('before_tru_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have TRUNCATE triggers. +CREATE TRIGGER invalid_trig AFTER TRUNCATE ON main_view +EXECUTE PROCEDURE trigger_func('before_tru_row'); +ERROR: "main_view" is a view +DETAIL: Views cannot have TRUNCATE triggers. +-- INSTEAD OF triggers aren't allowed on tables +CREATE TRIGGER invalid_trig INSTEAD OF INSERT ON main_table +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_ins'); +ERROR: "main_table" is a table +DETAIL: Tables cannot have INSTEAD OF triggers. +CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_table +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_upd'); +ERROR: "main_table" is a table +DETAIL: Tables cannot have INSTEAD OF triggers. +CREATE TRIGGER invalid_trig INSTEAD OF DELETE ON main_table +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_del'); +ERROR: "main_table" is a table +DETAIL: Tables cannot have INSTEAD OF triggers. 
+-- Don't support WHEN clauses with INSTEAD OF triggers +CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_view +FOR EACH ROW WHEN (OLD.a <> NEW.a) EXECUTE PROCEDURE view_trigger('instead_of_upd'); +ERROR: INSTEAD OF triggers cannot have WHEN conditions +-- Don't support column-level INSTEAD OF triggers +CREATE TRIGGER invalid_trig INSTEAD OF UPDATE OF a ON main_view +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_upd'); +ERROR: INSTEAD OF triggers cannot have column lists +-- Don't support statement-level INSTEAD OF triggers +CREATE TRIGGER invalid_trig INSTEAD OF UPDATE ON main_view +EXECUTE PROCEDURE view_trigger('instead_of_upd'); +ERROR: INSTEAD OF triggers must be FOR EACH ROW +-- Valid INSTEAD OF triggers +CREATE TRIGGER instead_of_insert_trig INSTEAD OF INSERT ON main_view +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_ins'); +CREATE TRIGGER instead_of_update_trig INSTEAD OF UPDATE ON main_view +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_upd'); +CREATE TRIGGER instead_of_delete_trig INSTEAD OF DELETE ON main_view +FOR EACH ROW EXECUTE PROCEDURE view_trigger('instead_of_del'); +-- Valid BEFORE statement VIEW triggers +CREATE TRIGGER before_ins_stmt_trig BEFORE INSERT ON main_view +FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('before_view_ins_stmt'); +CREATE TRIGGER before_upd_stmt_trig BEFORE UPDATE ON main_view +FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('before_view_upd_stmt'); +CREATE TRIGGER before_del_stmt_trig BEFORE DELETE ON main_view +FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('before_view_del_stmt'); +-- Valid AFTER statement VIEW triggers +CREATE TRIGGER after_ins_stmt_trig AFTER INSERT ON main_view +FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_ins_stmt'); +CREATE TRIGGER after_upd_stmt_trig AFTER UPDATE ON main_view +FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_upd_stmt'); +CREATE TRIGGER after_del_stmt_trig AFTER DELETE ON main_view +FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_del_stmt'); +\set QUIET false +-- Insert into view using trigger +INSERT INTO main_view VALUES (20, 30); +NOTICE: main_view BEFORE INSERT STATEMENT (before_view_ins_stmt) +NOTICE: main_view INSTEAD OF INSERT ROW (instead_of_ins) +NOTICE: NEW: (20,30) +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +NOTICE: main_view AFTER INSERT STATEMENT (after_view_ins_stmt) +INSERT 0 1 +INSERT INTO main_view VALUES (21, 31) RETURNING a, b; +NOTICE: main_view BEFORE INSERT STATEMENT (before_view_ins_stmt) +NOTICE: main_view INSTEAD OF INSERT ROW (instead_of_ins) +NOTICE: NEW: (21,31) +NOTICE: trigger_func(before_ins_stmt) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_ins_stmt) called: action = INSERT, when = AFTER, level = STATEMENT +NOTICE: main_view AFTER INSERT STATEMENT (after_view_ins_stmt) + a | b +----+---- + 21 | 31 +(1 row) + +INSERT 0 1 +-- Table trigger will prevent updates +UPDATE main_view SET b = 31 WHERE a = 20; +NOTICE: main_view BEFORE UPDATE STATEMENT (before_view_upd_stmt) +NOTICE: main_view INSTEAD OF UPDATE ROW (instead_of_upd) +NOTICE: OLD: (20,30), NEW: (20,31) +NOTICE: trigger_func(before_upd_a_stmt) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(after_upd_b_stmt) called: action = 
UPDATE, when = AFTER, level = STATEMENT +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: main_view AFTER UPDATE STATEMENT (after_view_upd_stmt) +UPDATE 0 +UPDATE main_view SET b = 32 WHERE a = 21 AND b = 31 RETURNING a, b; +NOTICE: main_view BEFORE UPDATE STATEMENT (before_view_upd_stmt) +NOTICE: main_view INSTEAD OF UPDATE ROW (instead_of_upd) +NOTICE: OLD: (21,31), NEW: (21,32) +NOTICE: trigger_func(before_upd_a_stmt) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(before_upd_a_row) called: action = UPDATE, when = BEFORE, level = ROW +NOTICE: trigger_func(after_upd_b_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: main_view AFTER UPDATE STATEMENT (after_view_upd_stmt) + a | b +---+--- +(0 rows) + +UPDATE 0 +-- Remove table trigger to allow updates +DROP TRIGGER before_upd_a_row_trig ON main_table; +DROP TRIGGER +UPDATE main_view SET b = 31 WHERE a = 20; +NOTICE: main_view BEFORE UPDATE STATEMENT (before_view_upd_stmt) +NOTICE: main_view INSTEAD OF UPDATE ROW (instead_of_upd) +NOTICE: OLD: (20,30), NEW: (20,31) +NOTICE: trigger_func(before_upd_a_stmt) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: main_view AFTER UPDATE STATEMENT (after_view_upd_stmt) +UPDATE 1 +UPDATE main_view SET b = 32 WHERE a = 21 AND b = 31 RETURNING a, b; +NOTICE: main_view BEFORE UPDATE STATEMENT (before_view_upd_stmt) +NOTICE: main_view INSTEAD OF UPDATE ROW (instead_of_upd) +NOTICE: OLD: (21,31), NEW: (21,32) +NOTICE: trigger_func(before_upd_a_stmt) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger_func(after_upd_a_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_row) called: action = UPDATE, when = AFTER, level = ROW +NOTICE: trigger_func(after_upd_b_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: trigger_func(after_upd_stmt) called: action = UPDATE, when = AFTER, level = STATEMENT +NOTICE: main_view AFTER UPDATE STATEMENT (after_view_upd_stmt) + a | b +----+---- + 21 | 32 +(1 row) + +UPDATE 1 +-- Before and after stmt triggers should fire even when no rows are affected +UPDATE main_view SET b = 0 WHERE false; +NOTICE: main_view BEFORE UPDATE STATEMENT (before_view_upd_stmt) +NOTICE: main_view AFTER UPDATE STATEMENT (after_view_upd_stmt) +UPDATE 0 +-- Delete from view using trigger +DELETE FROM main_view WHERE a IN (20,21); +NOTICE: main_view BEFORE DELETE STATEMENT (before_view_del_stmt) +NOTICE: main_view INSTEAD OF DELETE ROW (instead_of_del) +NOTICE: OLD: (21,10) +NOTICE: main_view INSTEAD OF DELETE ROW (instead_of_del) +NOTICE: OLD: (20,31) +NOTICE: main_view INSTEAD OF DELETE ROW (instead_of_del) +NOTICE: OLD: (21,32) +NOTICE: main_view AFTER DELETE STATEMENT (after_view_del_stmt) +DELETE 3 +DELETE FROM main_view WHERE a = 31 RETURNING a, b; +NOTICE: main_view BEFORE DELETE STATEMENT (before_view_del_stmt) +NOTICE: main_view INSTEAD OF DELETE ROW (instead_of_del) +NOTICE: OLD: (31,10) +NOTICE: main_view AFTER DELETE STATEMENT 
(after_view_del_stmt) + a | b +----+---- + 31 | 10 +(1 row) + +DELETE 1 +\set QUIET true +-- Describe view should list triggers +\d main_view + View "public.main_view" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Triggers: + after_del_stmt_trig AFTER DELETE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('after_view_del_stmt') + after_ins_stmt_trig AFTER INSERT ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('after_view_ins_stmt') + after_upd_stmt_trig AFTER UPDATE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('after_view_upd_stmt') + before_del_stmt_trig BEFORE DELETE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('before_view_del_stmt') + before_ins_stmt_trig BEFORE INSERT ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('before_view_ins_stmt') + before_upd_stmt_trig BEFORE UPDATE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('before_view_upd_stmt') + instead_of_delete_trig INSTEAD OF DELETE ON main_view FOR EACH ROW EXECUTE FUNCTION view_trigger('instead_of_del') + instead_of_insert_trig INSTEAD OF INSERT ON main_view FOR EACH ROW EXECUTE FUNCTION view_trigger('instead_of_ins') + instead_of_update_trig INSTEAD OF UPDATE ON main_view FOR EACH ROW EXECUTE FUNCTION view_trigger('instead_of_upd') + +-- Test dropping view triggers +DROP TRIGGER instead_of_insert_trig ON main_view; +DROP TRIGGER instead_of_delete_trig ON main_view; +\d+ main_view + View "public.main_view" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | + b | integer | | | | plain | +View definition: + SELECT a, + b + FROM main_table; +Triggers: + after_del_stmt_trig AFTER DELETE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('after_view_del_stmt') + after_ins_stmt_trig AFTER INSERT ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('after_view_ins_stmt') + after_upd_stmt_trig AFTER UPDATE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('after_view_upd_stmt') + before_del_stmt_trig BEFORE DELETE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('before_view_del_stmt') + before_ins_stmt_trig BEFORE INSERT ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('before_view_ins_stmt') + before_upd_stmt_trig BEFORE UPDATE ON main_view FOR EACH STATEMENT EXECUTE FUNCTION view_trigger('before_view_upd_stmt') + instead_of_update_trig INSTEAD OF UPDATE ON main_view FOR EACH ROW EXECUTE FUNCTION view_trigger('instead_of_upd') + +DROP VIEW main_view; +-- +-- Test triggers on a join view +-- +CREATE TABLE country_table ( + country_id serial primary key, + country_name text unique not null, + continent text not null +); +INSERT INTO country_table (country_name, continent) + VALUES ('Japan', 'Asia'), + ('UK', 'Europe'), + ('USA', 'North America') + RETURNING *; + country_id | country_name | continent +------------+--------------+--------------- + 1 | Japan | Asia + 2 | UK | Europe + 3 | USA | North America +(3 rows) + +CREATE TABLE city_table ( + city_id serial primary key, + city_name text not null, + population bigint, + country_id int references country_table +); +CREATE VIEW city_view AS + SELECT city_id, city_name, population, country_name, continent + FROM city_table ci + LEFT JOIN country_table co ON co.country_id = ci.country_id; +CREATE FUNCTION city_insert() 
RETURNS trigger LANGUAGE plpgsql AS $$ +declare + ctry_id int; +begin + if NEW.country_name IS NOT NULL then + SELECT country_id, continent INTO ctry_id, NEW.continent + FROM country_table WHERE country_name = NEW.country_name; + if NOT FOUND then + raise exception 'No such country: "%"', NEW.country_name; + end if; + else + NEW.continent := NULL; + end if; + + if NEW.city_id IS NOT NULL then + INSERT INTO city_table + VALUES(NEW.city_id, NEW.city_name, NEW.population, ctry_id); + else + INSERT INTO city_table(city_name, population, country_id) + VALUES(NEW.city_name, NEW.population, ctry_id) + RETURNING city_id INTO NEW.city_id; + end if; + + RETURN NEW; +end; +$$; +CREATE TRIGGER city_insert_trig INSTEAD OF INSERT ON city_view +FOR EACH ROW EXECUTE PROCEDURE city_insert(); +CREATE FUNCTION city_delete() RETURNS trigger LANGUAGE plpgsql AS $$ +begin + DELETE FROM city_table WHERE city_id = OLD.city_id; + if NOT FOUND then RETURN NULL; end if; + RETURN OLD; +end; +$$; +CREATE TRIGGER city_delete_trig INSTEAD OF DELETE ON city_view +FOR EACH ROW EXECUTE PROCEDURE city_delete(); +CREATE FUNCTION city_update() RETURNS trigger LANGUAGE plpgsql AS $$ +declare + ctry_id int; +begin + if NEW.country_name IS DISTINCT FROM OLD.country_name then + SELECT country_id, continent INTO ctry_id, NEW.continent + FROM country_table WHERE country_name = NEW.country_name; + if NOT FOUND then + raise exception 'No such country: "%"', NEW.country_name; + end if; + + UPDATE city_table SET city_name = NEW.city_name, + population = NEW.population, + country_id = ctry_id + WHERE city_id = OLD.city_id; + else + UPDATE city_table SET city_name = NEW.city_name, + population = NEW.population + WHERE city_id = OLD.city_id; + NEW.continent := OLD.continent; + end if; + + if NOT FOUND then RETURN NULL; end if; + RETURN NEW; +end; +$$; +CREATE TRIGGER city_update_trig INSTEAD OF UPDATE ON city_view +FOR EACH ROW EXECUTE PROCEDURE city_update(); +\set QUIET false +-- INSERT .. RETURNING +INSERT INTO city_view(city_name) VALUES('Tokyo') RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 1 | Tokyo | | | +(1 row) + +INSERT 0 1 +INSERT INTO city_view(city_name, population) VALUES('London', 7556900) RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 2 | London | 7556900 | | +(1 row) + +INSERT 0 1 +INSERT INTO city_view(city_name, country_name) VALUES('Washington DC', 'USA') RETURNING *; + city_id | city_name | population | country_name | continent +---------+---------------+------------+--------------+--------------- + 3 | Washington DC | | USA | North America +(1 row) + +INSERT 0 1 +INSERT INTO city_view(city_id, city_name) VALUES(123456, 'New York') RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 123456 | New York | | | +(1 row) + +INSERT 0 1 +INSERT INTO city_view VALUES(234567, 'Birmingham', 1016800, 'UK', 'EU') RETURNING *; + city_id | city_name | population | country_name | continent +---------+------------+------------+--------------+----------- + 234567 | Birmingham | 1016800 | UK | Europe +(1 row) + +INSERT 0 1 +-- UPDATE .. 
RETURNING +UPDATE city_view SET country_name = 'Japon' WHERE city_name = 'Tokyo'; -- error +ERROR: No such country: "Japon" +CONTEXT: PL/pgSQL function city_update() line 9 at RAISE +UPDATE city_view SET country_name = 'Japan' WHERE city_name = 'Takyo'; -- no match +UPDATE 0 +UPDATE city_view SET country_name = 'Japan' WHERE city_name = 'Tokyo' RETURNING *; -- OK + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 1 | Tokyo | | Japan | Asia +(1 row) + +UPDATE 1 +UPDATE city_view SET population = 13010279 WHERE city_name = 'Tokyo' RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 1 | Tokyo | 13010279 | Japan | Asia +(1 row) + +UPDATE 1 +UPDATE city_view SET country_name = 'UK' WHERE city_name = 'New York' RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 123456 | New York | | UK | Europe +(1 row) + +UPDATE 1 +UPDATE city_view SET country_name = 'USA', population = 8391881 WHERE city_name = 'New York' RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+--------------- + 123456 | New York | 8391881 | USA | North America +(1 row) + +UPDATE 1 +UPDATE city_view SET continent = 'EU' WHERE continent = 'Europe' RETURNING *; + city_id | city_name | population | country_name | continent +---------+------------+------------+--------------+----------- + 234567 | Birmingham | 1016800 | UK | Europe +(1 row) + +UPDATE 1 +UPDATE city_view v1 SET country_name = v2.country_name FROM city_view v2 + WHERE v2.city_name = 'Birmingham' AND v1.city_name = 'London' RETURNING *; + city_id | city_name | population | country_name | continent | city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+-----------+---------+------------+------------+--------------+----------- + 2 | London | 7556900 | UK | Europe | 234567 | Birmingham | 1016800 | UK | Europe +(1 row) + +UPDATE 1 +-- DELETE .. 
RETURNING +DELETE FROM city_view WHERE city_name = 'Birmingham' RETURNING *; + city_id | city_name | population | country_name | continent +---------+------------+------------+--------------+----------- + 234567 | Birmingham | 1016800 | UK | Europe +(1 row) + +DELETE 1 +\set QUIET true +-- read-only view with WHERE clause +CREATE VIEW european_city_view AS + SELECT * FROM city_view WHERE continent = 'Europe'; +SELECT count(*) FROM european_city_view; + count +------- + 1 +(1 row) + +CREATE FUNCTION no_op_trig_fn() RETURNS trigger LANGUAGE plpgsql +AS 'begin RETURN NULL; end'; +CREATE TRIGGER no_op_trig INSTEAD OF INSERT OR UPDATE OR DELETE +ON european_city_view FOR EACH ROW EXECUTE PROCEDURE no_op_trig_fn(); +\set QUIET false +INSERT INTO european_city_view VALUES (0, 'x', 10000, 'y', 'z'); +INSERT 0 0 +UPDATE european_city_view SET population = 10000; +UPDATE 0 +DELETE FROM european_city_view; +DELETE 0 +\set QUIET true +-- rules bypassing no-op triggers +CREATE RULE european_city_insert_rule AS ON INSERT TO european_city_view +DO INSTEAD INSERT INTO city_view +VALUES (NEW.city_id, NEW.city_name, NEW.population, NEW.country_name, NEW.continent) +RETURNING *; +CREATE RULE european_city_update_rule AS ON UPDATE TO european_city_view +DO INSTEAD UPDATE city_view SET + city_name = NEW.city_name, + population = NEW.population, + country_name = NEW.country_name +WHERE city_id = OLD.city_id +RETURNING NEW.*; +CREATE RULE european_city_delete_rule AS ON DELETE TO european_city_view +DO INSTEAD DELETE FROM city_view WHERE city_id = OLD.city_id RETURNING *; +\set QUIET false +-- INSERT not limited by view's WHERE clause, but UPDATE AND DELETE are +INSERT INTO european_city_view(city_name, country_name) + VALUES ('Cambridge', 'USA') RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+--------------- + 4 | Cambridge | | USA | North America +(1 row) + +INSERT 0 1 +UPDATE european_city_view SET country_name = 'UK' + WHERE city_name = 'Cambridge'; +UPDATE 0 +DELETE FROM european_city_view WHERE city_name = 'Cambridge'; +DELETE 0 +-- UPDATE and DELETE via rule and trigger +UPDATE city_view SET country_name = 'UK' + WHERE city_name = 'Cambridge' RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 4 | Cambridge | | UK | Europe +(1 row) + +UPDATE 1 +UPDATE european_city_view SET population = 122800 + WHERE city_name = 'Cambridge' RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 4 | Cambridge | 122800 | UK | Europe +(1 row) + +UPDATE 1 +DELETE FROM european_city_view WHERE city_name = 'Cambridge' RETURNING *; + city_id | city_name | population | country_name | continent +---------+-----------+------------+--------------+----------- + 4 | Cambridge | 122800 | UK | Europe +(1 row) + +DELETE 1 +-- join UPDATE test +UPDATE city_view v SET population = 599657 + FROM city_table ci, country_table co + WHERE ci.city_name = 'Washington DC' and co.country_name = 'USA' + AND v.city_id = ci.city_id AND v.country_name = co.country_name + RETURNING co.country_id, v.country_name, + v.city_id, v.city_name, v.population; + country_id | country_name | city_id | city_name | population +------------+--------------+---------+---------------+------------ + 3 | USA | 3 | Washington DC | 599657 +(1 row) + +UPDATE 1 +\set QUIET true +SELECT * FROM city_view; + city_id | city_name | 
population | country_name | continent +---------+---------------+------------+--------------+--------------- + 1 | Tokyo | 13010279 | Japan | Asia + 123456 | New York | 8391881 | USA | North America + 2 | London | 7556900 | UK | Europe + 3 | Washington DC | 599657 | USA | North America +(4 rows) + +DROP TABLE city_table CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view city_view +drop cascades to view european_city_view +DROP TABLE country_table; +-- Test pg_trigger_depth() +create table depth_a (id int not null primary key); +create table depth_b (id int not null primary key); +create table depth_c (id int not null primary key); +create function depth_a_tf() returns trigger + language plpgsql as $$ +begin + raise notice '%: depth = %', tg_name, pg_trigger_depth(); + insert into depth_b values (new.id); + raise notice '%: depth = %', tg_name, pg_trigger_depth(); + return new; +end; +$$; +create trigger depth_a_tr before insert on depth_a + for each row execute procedure depth_a_tf(); +create function depth_b_tf() returns trigger + language plpgsql as $$ +begin + raise notice '%: depth = %', tg_name, pg_trigger_depth(); + begin + execute 'insert into depth_c values (' || new.id::text || ')'; + exception + when sqlstate 'U9999' then + raise notice 'SQLSTATE = U9999: depth = %', pg_trigger_depth(); + end; + raise notice '%: depth = %', tg_name, pg_trigger_depth(); + if new.id = 1 then + execute 'insert into depth_c values (' || new.id::text || ')'; + end if; + return new; +end; +$$; +create trigger depth_b_tr before insert on depth_b + for each row execute procedure depth_b_tf(); +create function depth_c_tf() returns trigger + language plpgsql as $$ +begin + raise notice '%: depth = %', tg_name, pg_trigger_depth(); + if new.id = 1 then + raise exception sqlstate 'U9999'; + end if; + raise notice '%: depth = %', tg_name, pg_trigger_depth(); + return new; +end; +$$; +create trigger depth_c_tr before insert on depth_c + for each row execute procedure depth_c_tf(); +select pg_trigger_depth(); + pg_trigger_depth +------------------ + 0 +(1 row) + +insert into depth_a values (1); +NOTICE: depth_a_tr: depth = 1 +NOTICE: depth_b_tr: depth = 2 +NOTICE: depth_c_tr: depth = 3 +NOTICE: SQLSTATE = U9999: depth = 2 +NOTICE: depth_b_tr: depth = 2 +NOTICE: depth_c_tr: depth = 3 +ERROR: U9999 +CONTEXT: PL/pgSQL function depth_c_tf() line 5 at RAISE +SQL statement "insert into depth_c values (1)" +PL/pgSQL function depth_b_tf() line 12 at EXECUTE +SQL statement "insert into depth_b values (new.id)" +PL/pgSQL function depth_a_tf() line 4 at SQL statement +select pg_trigger_depth(); + pg_trigger_depth +------------------ + 0 +(1 row) + +insert into depth_a values (2); +NOTICE: depth_a_tr: depth = 1 +NOTICE: depth_b_tr: depth = 2 +NOTICE: depth_c_tr: depth = 3 +NOTICE: depth_c_tr: depth = 3 +NOTICE: depth_b_tr: depth = 2 +NOTICE: depth_a_tr: depth = 1 +select pg_trigger_depth(); + pg_trigger_depth +------------------ + 0 +(1 row) + +drop table depth_a, depth_b, depth_c; +drop function depth_a_tf(); +drop function depth_b_tf(); +drop function depth_c_tf(); +-- +-- Test updates to rows during firing of BEFORE ROW triggers. +-- As of 9.2, such cases should be rejected (see bug #6123). 
+-- +create temp table parent ( + aid int not null primary key, + val1 text, + val2 text, + val3 text, + val4 text, + bcnt int not null default 0); +create temp table child ( + bid int not null primary key, + aid int not null, + val1 text); +create function parent_upd_func() + returns trigger language plpgsql as +$$ +begin + if old.val1 <> new.val1 then + new.val2 = new.val1; + delete from child where child.aid = new.aid and child.val1 = new.val1; + end if; + return new; +end; +$$; +create trigger parent_upd_trig before update on parent + for each row execute procedure parent_upd_func(); +create function parent_del_func() + returns trigger language plpgsql as +$$ +begin + delete from child where aid = old.aid; + return old; +end; +$$; +create trigger parent_del_trig before delete on parent + for each row execute procedure parent_del_func(); +create function child_ins_func() + returns trigger language plpgsql as +$$ +begin + update parent set bcnt = bcnt + 1 where aid = new.aid; + return new; +end; +$$; +create trigger child_ins_trig after insert on child + for each row execute procedure child_ins_func(); +create function child_del_func() + returns trigger language plpgsql as +$$ +begin + update parent set bcnt = bcnt - 1 where aid = old.aid; + return old; +end; +$$; +create trigger child_del_trig after delete on child + for each row execute procedure child_del_func(); +insert into parent values (1, 'a', 'a', 'a', 'a', 0); +insert into child values (10, 1, 'b'); +select * from parent; select * from child; + aid | val1 | val2 | val3 | val4 | bcnt +-----+------+------+------+------+------ + 1 | a | a | a | a | 1 +(1 row) + + bid | aid | val1 +-----+-----+------ + 10 | 1 | b +(1 row) + +update parent set val1 = 'b' where aid = 1; -- should fail +ERROR: tuple to be updated was already modified by an operation triggered by the current command +HINT: Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows. +select * from parent; select * from child; + aid | val1 | val2 | val3 | val4 | bcnt +-----+------+------+------+------+------ + 1 | a | a | a | a | 1 +(1 row) + + bid | aid | val1 +-----+-----+------ + 10 | 1 | b +(1 row) + +delete from parent where aid = 1; -- should fail +ERROR: tuple to be deleted was already modified by an operation triggered by the current command +HINT: Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows. 
+select * from parent; select * from child; + aid | val1 | val2 | val3 | val4 | bcnt +-----+------+------+------+------+------ + 1 | a | a | a | a | 1 +(1 row) + + bid | aid | val1 +-----+-----+------ + 10 | 1 | b +(1 row) + +-- replace the trigger function with one that restarts the deletion after +-- having modified a child +create or replace function parent_del_func() + returns trigger language plpgsql as +$$ +begin + delete from child where aid = old.aid; + if found then + delete from parent where aid = old.aid; + return null; -- cancel outer deletion + end if; + return old; +end; +$$; +delete from parent where aid = 1; +select * from parent; select * from child; + aid | val1 | val2 | val3 | val4 | bcnt +-----+------+------+------+------+------ +(0 rows) + + bid | aid | val1 +-----+-----+------ +(0 rows) + +drop table parent, child; +drop function parent_upd_func(); +drop function parent_del_func(); +drop function child_ins_func(); +drop function child_del_func(); +-- similar case, but with a self-referencing FK so that parent and child +-- rows can be affected by a single operation +create temp table self_ref_trigger ( + id int primary key, + parent int references self_ref_trigger, + data text, + nchildren int not null default 0 +); +create function self_ref_trigger_ins_func() + returns trigger language plpgsql as +$$ +begin + if new.parent is not null then + update self_ref_trigger set nchildren = nchildren + 1 + where id = new.parent; + end if; + return new; +end; +$$; +create trigger self_ref_trigger_ins_trig before insert on self_ref_trigger + for each row execute procedure self_ref_trigger_ins_func(); +create function self_ref_trigger_del_func() + returns trigger language plpgsql as +$$ +begin + if old.parent is not null then + update self_ref_trigger set nchildren = nchildren - 1 + where id = old.parent; + end if; + return old; +end; +$$; +create trigger self_ref_trigger_del_trig before delete on self_ref_trigger + for each row execute procedure self_ref_trigger_del_func(); +insert into self_ref_trigger values (1, null, 'root'); +insert into self_ref_trigger values (2, 1, 'root child A'); +insert into self_ref_trigger values (3, 1, 'root child B'); +insert into self_ref_trigger values (4, 2, 'grandchild 1'); +insert into self_ref_trigger values (5, 3, 'grandchild 2'); +update self_ref_trigger set data = 'root!' where id = 1; +select * from self_ref_trigger; + id | parent | data | nchildren +----+--------+--------------+----------- + 2 | 1 | root child A | 1 + 4 | 2 | grandchild 1 | 0 + 3 | 1 | root child B | 1 + 5 | 3 | grandchild 2 | 0 + 1 | | root! | 2 +(5 rows) + +delete from self_ref_trigger; +ERROR: tuple to be updated was already modified by an operation triggered by the current command +HINT: Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows. +select * from self_ref_trigger; + id | parent | data | nchildren +----+--------+--------------+----------- + 2 | 1 | root child A | 1 + 4 | 2 | grandchild 1 | 0 + 3 | 1 | root child B | 1 + 5 | 3 | grandchild 2 | 0 + 1 | | root! 
| 2 +(5 rows) + +drop table self_ref_trigger; +drop function self_ref_trigger_ins_func(); +drop function self_ref_trigger_del_func(); +-- +-- Check that statement triggers work correctly even with all children excluded +-- +create table stmt_trig_on_empty_upd (a int); +create table stmt_trig_on_empty_upd1 () inherits (stmt_trig_on_empty_upd); +create function update_stmt_notice() returns trigger as $$ +begin + raise notice 'updating %', TG_TABLE_NAME; + return null; +end; +$$ language plpgsql; +create trigger before_stmt_trigger + before update on stmt_trig_on_empty_upd + execute procedure update_stmt_notice(); +create trigger before_stmt_trigger + before update on stmt_trig_on_empty_upd1 + execute procedure update_stmt_notice(); +-- inherited no-op update +update stmt_trig_on_empty_upd set a = a where false returning a+1 as aa; +NOTICE: updating stmt_trig_on_empty_upd + aa +---- +(0 rows) + +-- simple no-op update +update stmt_trig_on_empty_upd1 set a = a where false returning a+1 as aa; +NOTICE: updating stmt_trig_on_empty_upd1 + aa +---- +(0 rows) + +drop table stmt_trig_on_empty_upd cascade; +NOTICE: drop cascades to table stmt_trig_on_empty_upd1 +drop function update_stmt_notice(); +-- +-- Check that index creation (or DDL in general) is prohibited in a trigger +-- +create table trigger_ddl_table ( + col1 integer, + col2 integer +); +create function trigger_ddl_func() returns trigger as $$ +begin + alter table trigger_ddl_table add primary key (col1); + return new; +end$$ language plpgsql; +create trigger trigger_ddl_func before insert on trigger_ddl_table for each row + execute procedure trigger_ddl_func(); +insert into trigger_ddl_table values (1, 42); -- fail +ERROR: cannot ALTER TABLE "trigger_ddl_table" because it is being used by active queries in this session +CONTEXT: SQL statement "alter table trigger_ddl_table add primary key (col1)" +PL/pgSQL function trigger_ddl_func() line 3 at SQL statement +create or replace function trigger_ddl_func() returns trigger as $$ +begin + create index on trigger_ddl_table (col2); + return new; +end$$ language plpgsql; +insert into trigger_ddl_table values (1, 42); -- fail +ERROR: cannot CREATE INDEX "trigger_ddl_table" because it is being used by active queries in this session +CONTEXT: SQL statement "create index on trigger_ddl_table (col2)" +PL/pgSQL function trigger_ddl_func() line 3 at SQL statement +drop table trigger_ddl_table; +drop function trigger_ddl_func(); +-- +-- Verify behavior of before and after triggers with INSERT...ON CONFLICT +-- DO UPDATE +-- +create table upsert (key int4 primary key, color text); +create function upsert_before_func() + returns trigger language plpgsql as +$$ +begin + if (TG_OP = 'UPDATE') then + raise warning 'before update (old): %', old.*::text; + raise warning 'before update (new): %', new.*::text; + elsif (TG_OP = 'INSERT') then + raise warning 'before insert (new): %', new.*::text; + if new.key % 2 = 0 then + new.key := new.key + 1; + new.color := new.color || ' trig modified'; + raise warning 'before insert (new, modified): %', new.*::text; + end if; + end if; + return new; +end; +$$; +create trigger upsert_before_trig before insert or update on upsert + for each row execute procedure upsert_before_func(); +create function upsert_after_func() + returns trigger language plpgsql as +$$ +begin + if (TG_OP = 'UPDATE') then + raise warning 'after update (old): %', old.*::text; + raise warning 'after update (new): %', new.*::text; + elsif (TG_OP = 'INSERT') then + raise warning 'after insert (new): %', 
new.*::text; + end if; + return null; +end; +$$; +create trigger upsert_after_trig after insert or update on upsert + for each row execute procedure upsert_after_func(); +insert into upsert values(1, 'black') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (1,black) +WARNING: after insert (new): (1,black) +insert into upsert values(2, 'red') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (2,red) +WARNING: before insert (new, modified): (3,"red trig modified") +WARNING: after insert (new): (3,"red trig modified") +insert into upsert values(3, 'orange') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (3,orange) +WARNING: before update (old): (3,"red trig modified") +WARNING: before update (new): (3,"updated red trig modified") +WARNING: after update (old): (3,"red trig modified") +WARNING: after update (new): (3,"updated red trig modified") +insert into upsert values(4, 'green') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (4,green) +WARNING: before insert (new, modified): (5,"green trig modified") +WARNING: after insert (new): (5,"green trig modified") +insert into upsert values(5, 'purple') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (5,purple) +WARNING: before update (old): (5,"green trig modified") +WARNING: before update (new): (5,"updated green trig modified") +WARNING: after update (old): (5,"green trig modified") +WARNING: after update (new): (5,"updated green trig modified") +insert into upsert values(6, 'white') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (6,white) +WARNING: before insert (new, modified): (7,"white trig modified") +WARNING: after insert (new): (7,"white trig modified") +insert into upsert values(7, 'pink') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (7,pink) +WARNING: before update (old): (7,"white trig modified") +WARNING: before update (new): (7,"updated white trig modified") +WARNING: after update (old): (7,"white trig modified") +WARNING: after update (new): (7,"updated white trig modified") +insert into upsert values(8, 'yellow') on conflict (key) do update set color = 'updated ' || upsert.color; +WARNING: before insert (new): (8,yellow) +WARNING: before insert (new, modified): (9,"yellow trig modified") +WARNING: after insert (new): (9,"yellow trig modified") +select * from upsert; + key | color +-----+----------------------------- + 1 | black + 3 | updated red trig modified + 5 | updated green trig modified + 7 | updated white trig modified + 9 | yellow trig modified +(5 rows) + +drop table upsert; +drop function upsert_before_func(); +drop function upsert_after_func(); +-- +-- Verify that triggers with transition tables are not allowed on +-- views +-- +create table my_table (i int); +create view my_view as select * from my_table; +create function my_trigger_function() returns trigger as $$ begin end; $$ language plpgsql; +create trigger my_trigger after update on my_view referencing old table as old_table + for each statement execute procedure my_trigger_function(); +ERROR: "my_view" is a view +DETAIL: Triggers on views cannot have transition tables. 
+drop function my_trigger_function(); +drop view my_view; +drop table my_table; +-- +-- Verify cases that are unsupported with partitioned tables +-- +create table parted_trig (a int) partition by list (a); +create function trigger_nothing() returns trigger + language plpgsql as $$ begin end; $$; +create trigger failed instead of update on parted_trig + for each row execute procedure trigger_nothing(); +ERROR: "parted_trig" is a table +DETAIL: Tables cannot have INSTEAD OF triggers. +create trigger failed after update on parted_trig + referencing old table as old_table + for each row execute procedure trigger_nothing(); +ERROR: "parted_trig" is a partitioned table +DETAIL: ROW triggers with transition tables are not supported on partitioned tables. +drop table parted_trig; +-- +-- Verify trigger creation for partitioned tables, and drop behavior +-- +create table trigpart (a int, b int) partition by range (a); +create table trigpart1 partition of trigpart for values from (0) to (1000); +create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); +create table trigpart2 partition of trigpart for values from (1000) to (2000); +create table trigpart3 (like trigpart); +alter table trigpart attach partition trigpart3 for values from (2000) to (3000); +create table trigpart4 partition of trigpart for values from (3000) to (4000) partition by range (a); +create table trigpart41 partition of trigpart4 for values from (3000) to (3500); +create table trigpart42 (like trigpart); +alter table trigpart4 attach partition trigpart42 for values from (3500) to (4000); +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + tgrelid | tgname | tgfoid +------------+--------+----------------- + trigpart | trg1 | trigger_nothing + trigpart1 | trg1 | trigger_nothing + trigpart2 | trg1 | trigger_nothing + trigpart3 | trg1 | trigger_nothing + trigpart4 | trg1 | trigger_nothing + trigpart41 | trg1 | trigger_nothing + trigpart42 | trg1 | trigger_nothing +(7 rows) + +drop trigger trg1 on trigpart1; -- fail +ERROR: cannot drop trigger trg1 on table trigpart1 because trigger trg1 on table trigpart requires it +HINT: You can drop trigger trg1 on table trigpart instead. +drop trigger trg1 on trigpart2; -- fail +ERROR: cannot drop trigger trg1 on table trigpart2 because trigger trg1 on table trigpart requires it +HINT: You can drop trigger trg1 on table trigpart instead. +drop trigger trg1 on trigpart3; -- fail +ERROR: cannot drop trigger trg1 on table trigpart3 because trigger trg1 on table trigpart requires it +HINT: You can drop trigger trg1 on table trigpart instead. 
+drop table trigpart2; -- ok, trigger should be gone in that partition +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + tgrelid | tgname | tgfoid +------------+--------+----------------- + trigpart | trg1 | trigger_nothing + trigpart1 | trg1 | trigger_nothing + trigpart3 | trg1 | trigger_nothing + trigpart4 | trg1 | trigger_nothing + trigpart41 | trg1 | trigger_nothing + trigpart42 | trg1 | trigger_nothing +(6 rows) + +drop trigger trg1 on trigpart; -- ok, all gone +select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger + where tgrelid::regclass::text like 'trigpart%' order by tgrelid::regclass::text; + tgrelid | tgname | tgfoid +---------+--------+-------- +(0 rows) + +-- check detach behavior +create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); +\d trigpart3 + Table "public.trigpart3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: trigpart FOR VALUES FROM (2000) TO (3000) +Triggers: + trg1 AFTER INSERT ON trigpart3 FOR EACH ROW EXECUTE FUNCTION trigger_nothing(), ON TABLE trigpart + +alter table trigpart detach partition trigpart3; +drop trigger trg1 on trigpart3; -- fail due to "does not exist" +ERROR: trigger "trg1" for table "trigpart3" does not exist +alter table trigpart detach partition trigpart4; +drop trigger trg1 on trigpart41; -- fail due to "does not exist" +ERROR: trigger "trg1" for table "trigpart41" does not exist +drop table trigpart4; +alter table trigpart attach partition trigpart3 for values from (2000) to (3000); +alter table trigpart detach partition trigpart3; +alter table trigpart attach partition trigpart3 for values from (2000) to (3000); +drop table trigpart3; +select tgrelid::regclass::text, tgname, tgfoid::regproc, tgenabled, tgisinternal from pg_trigger + where tgname ~ '^trg1' order by 1; + tgrelid | tgname | tgfoid | tgenabled | tgisinternal +-----------+--------+-----------------+-----------+-------------- + trigpart | trg1 | trigger_nothing | O | f + trigpart1 | trg1 | trigger_nothing | O | f +(2 rows) + +create table trigpart3 (like trigpart); +create trigger trg1 after insert on trigpart3 for each row execute procedure trigger_nothing(); +\d trigpart3 + Table "public.trigpart3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Triggers: + trg1 AFTER INSERT ON trigpart3 FOR EACH ROW EXECUTE FUNCTION trigger_nothing() + +alter table trigpart attach partition trigpart3 FOR VALUES FROM (2000) to (3000); -- fail +ERROR: trigger "trg1" for relation "trigpart3" already exists +drop table trigpart3; +-- check display of unrelated triggers +create trigger samename after delete on trigpart execute function trigger_nothing(); +create trigger samename after delete on trigpart1 execute function trigger_nothing(); +\d trigpart1 + Table "public.trigpart1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | + b | integer | | | +Partition of: trigpart FOR VALUES FROM (0) TO (1000) +Triggers: + samename AFTER DELETE ON trigpart1 FOR EACH STATEMENT EXECUTE FUNCTION trigger_nothing() + trg1 AFTER INSERT ON trigpart1 FOR EACH ROW EXECUTE FUNCTION trigger_nothing(), ON TABLE trigpart + +drop table trigpart; +drop function trigger_nothing(); +-- +-- 
Verify that triggers are fired for partitioned tables +-- +create table parted_stmt_trig (a int) partition by list (a); +create table parted_stmt_trig1 partition of parted_stmt_trig for values in (1); +create table parted_stmt_trig2 partition of parted_stmt_trig for values in (2); +create table parted2_stmt_trig (a int) partition by list (a); +create table parted2_stmt_trig1 partition of parted2_stmt_trig for values in (1); +create table parted2_stmt_trig2 partition of parted2_stmt_trig for values in (2); +create or replace function trigger_notice() returns trigger as $$ + begin + raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL; + if TG_LEVEL = 'ROW' then + return NEW; + end if; + return null; + end; + $$ language plpgsql; +-- insert/update/delete statement-level triggers on the parent +create trigger trig_ins_before before insert on parted_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_ins_after after insert on parted_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_upd_before before update on parted_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_upd_after after update on parted_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_del_before before delete on parted_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_del_after after delete on parted_stmt_trig + for each statement execute procedure trigger_notice(); +-- insert/update/delete row-level triggers on the parent +create trigger trig_ins_after_parent after insert on parted_stmt_trig + for each row execute procedure trigger_notice(); +create trigger trig_upd_after_parent after update on parted_stmt_trig + for each row execute procedure trigger_notice(); +create trigger trig_del_after_parent after delete on parted_stmt_trig + for each row execute procedure trigger_notice(); +-- insert/update/delete row-level triggers on the first partition +create trigger trig_ins_before_child before insert on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_ins_after_child after insert on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_upd_before_child before update on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_upd_after_child after update on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_del_before_child before delete on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +create trigger trig_del_after_child after delete on parted_stmt_trig1 + for each row execute procedure trigger_notice(); +-- insert/update/delete statement-level triggers on the parent +create trigger trig_ins_before_3 before insert on parted2_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_ins_after_3 after insert on parted2_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_upd_before_3 before update on parted2_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_upd_after_3 after update on parted2_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_del_before_3 before delete on parted2_stmt_trig + for each statement execute procedure trigger_notice(); +create trigger trig_del_after_3 after delete on 
parted2_stmt_trig + for each statement execute procedure trigger_notice(); +with ins (a) as ( + insert into parted2_stmt_trig values (1), (2) returning a +) insert into parted_stmt_trig select a from ins returning tableoid::regclass, a; +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_3 on parted2_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig2 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_3 on parted2_stmt_trig AFTER INSERT for STATEMENT +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT + tableoid | a +-------------------+--- + parted_stmt_trig1 | 1 + parted_stmt_trig2 | 2 +(2 rows) + +with upd as ( + update parted2_stmt_trig set a = a +) update parted_stmt_trig set a = a; +NOTICE: trigger trig_upd_before on parted_stmt_trig BEFORE UPDATE for STATEMENT +NOTICE: trigger trig_upd_before_child on parted_stmt_trig1 BEFORE UPDATE for ROW +NOTICE: trigger trig_upd_before_3 on parted2_stmt_trig BEFORE UPDATE for STATEMENT +NOTICE: trigger trig_upd_after_child on parted_stmt_trig1 AFTER UPDATE for ROW +NOTICE: trigger trig_upd_after_parent on parted_stmt_trig1 AFTER UPDATE for ROW +NOTICE: trigger trig_upd_after_parent on parted_stmt_trig2 AFTER UPDATE for ROW +NOTICE: trigger trig_upd_after on parted_stmt_trig AFTER UPDATE for STATEMENT +NOTICE: trigger trig_upd_after_3 on parted2_stmt_trig AFTER UPDATE for STATEMENT +delete from parted_stmt_trig; +NOTICE: trigger trig_del_before on parted_stmt_trig BEFORE DELETE for STATEMENT +NOTICE: trigger trig_del_before_child on parted_stmt_trig1 BEFORE DELETE for ROW +NOTICE: trigger trig_del_after_parent on parted_stmt_trig2 AFTER DELETE for ROW +NOTICE: trigger trig_del_after on parted_stmt_trig AFTER DELETE for STATEMENT +-- insert via copy on the parent +copy parted_stmt_trig(a) from stdin; +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig2 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT +-- insert via copy on the first partition +copy parted_stmt_trig1(a) from stdin; +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +-- Disabling a trigger in the parent table should disable children triggers too +alter table parted_stmt_trig disable trigger trig_ins_after_parent; +insert into parted_stmt_trig values (1); +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT +alter table parted_stmt_trig enable trigger trig_ins_after_parent; +insert 
into parted_stmt_trig values (1); +NOTICE: trigger trig_ins_before on parted_stmt_trig BEFORE INSERT for STATEMENT +NOTICE: trigger trig_ins_before_child on parted_stmt_trig1 BEFORE INSERT for ROW +NOTICE: trigger trig_ins_after_child on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after_parent on parted_stmt_trig1 AFTER INSERT for ROW +NOTICE: trigger trig_ins_after on parted_stmt_trig AFTER INSERT for STATEMENT +drop table parted_stmt_trig, parted2_stmt_trig; +-- Verify that triggers fire in alphabetical order +create table parted_trig (a int) partition by range (a); +create table parted_trig_1 partition of parted_trig for values from (0) to (1000) + partition by range (a); +create table parted_trig_1_1 partition of parted_trig_1 for values from (0) to (100); +create table parted_trig_2 partition of parted_trig for values from (1000) to (2000); +create trigger zzz after insert on parted_trig for each row execute procedure trigger_notice(); +create trigger mmm after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); +create trigger aaa after insert on parted_trig_1 for each row execute procedure trigger_notice(); +create trigger bbb after insert on parted_trig for each row execute procedure trigger_notice(); +create trigger qqq after insert on parted_trig_1_1 for each row execute procedure trigger_notice(); +insert into parted_trig values (50), (1500); +NOTICE: trigger aaa on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger bbb on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger mmm on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger qqq on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger zzz on parted_trig_1_1 AFTER INSERT for ROW +NOTICE: trigger bbb on parted_trig_2 AFTER INSERT for ROW +NOTICE: trigger zzz on parted_trig_2 AFTER INSERT for ROW +drop table parted_trig; +-- Verify that the correct triggers fire for cross-partition updates +create table parted_trig (a int) partition by list (a); +create table parted_trig1 partition of parted_trig for values in (1); +create table parted_trig2 partition of parted_trig for values in (2); +insert into parted_trig values (1); +create or replace function trigger_notice() returns trigger as $$ + begin + raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL; + if TG_LEVEL = 'ROW' then + if TG_OP = 'DELETE' then + return OLD; + else + return NEW; + end if; + end if; + return null; + end; + $$ language plpgsql; +create trigger parted_trig_before_stmt before insert or update or delete on parted_trig + for each statement execute procedure trigger_notice(); +create trigger parted_trig_before_row before insert or update or delete on parted_trig + for each row execute procedure trigger_notice(); +create trigger parted_trig_after_row after insert or update or delete on parted_trig + for each row execute procedure trigger_notice(); +create trigger parted_trig_after_stmt after insert or update or delete on parted_trig + for each statement execute procedure trigger_notice(); +update parted_trig set a = 2 where a = 1; +NOTICE: trigger parted_trig_before_stmt on parted_trig BEFORE UPDATE for STATEMENT +NOTICE: trigger parted_trig_before_row on parted_trig1 BEFORE UPDATE for ROW +NOTICE: trigger parted_trig_before_row on parted_trig1 BEFORE DELETE for ROW +NOTICE: trigger parted_trig_before_row on parted_trig2 BEFORE INSERT for ROW +NOTICE: trigger parted_trig_after_row on parted_trig1 AFTER DELETE for ROW +NOTICE: trigger parted_trig_after_row on parted_trig2 
AFTER INSERT for ROW +NOTICE: trigger parted_trig_after_stmt on parted_trig AFTER UPDATE for STATEMENT +-- update action in merge should behave the same +merge into parted_trig using (select 1) as ss on true + when matched and a = 2 then update set a = 1; +NOTICE: trigger parted_trig_before_stmt on parted_trig BEFORE UPDATE for STATEMENT +NOTICE: trigger parted_trig_before_row on parted_trig2 BEFORE UPDATE for ROW +NOTICE: trigger parted_trig_before_row on parted_trig2 BEFORE DELETE for ROW +NOTICE: trigger parted_trig_before_row on parted_trig1 BEFORE INSERT for ROW +NOTICE: trigger parted_trig_after_row on parted_trig2 AFTER DELETE for ROW +NOTICE: trigger parted_trig_after_row on parted_trig1 AFTER INSERT for ROW +NOTICE: trigger parted_trig_after_stmt on parted_trig AFTER UPDATE for STATEMENT +drop table parted_trig; +-- Verify propagation of trigger arguments to partitions +create table parted_trig (a int) partition by list (a); +create table parted_trig1 partition of parted_trig for values in (1); +create or replace function trigger_notice() returns trigger as $$ + declare + arg1 text = TG_ARGV[0]; + arg2 integer = TG_ARGV[1]; + begin + raise notice 'trigger % on % % % for % args % %', + TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, arg1, arg2; + return null; + end; + $$ language plpgsql; +create trigger aaa after insert on parted_trig + for each row execute procedure trigger_notice('quirky', 1); +-- Verify propagation of trigger arguments to partitions attached after creating trigger +create table parted_trig2 partition of parted_trig for values in (2); +create table parted_trig3 (like parted_trig); +alter table parted_trig attach partition parted_trig3 for values in (3); +insert into parted_trig values (1), (2), (3); +NOTICE: trigger aaa on parted_trig1 AFTER INSERT for ROW args quirky 1 +NOTICE: trigger aaa on parted_trig2 AFTER INSERT for ROW args quirky 1 +NOTICE: trigger aaa on parted_trig3 AFTER INSERT for ROW args quirky 1 +drop table parted_trig; +-- test irregular partitions (i.e., different column definitions), +-- including that the WHEN clause works +create function bark(text) returns bool language plpgsql immutable + as $$ begin raise notice '% <- woof!', $1; return true; end; $$; +create or replace function trigger_notice_ab() returns trigger as $$ + begin + raise notice 'trigger % on % % % for %: (a,b)=(%,%)', + TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, + NEW.a, NEW.b; + if TG_LEVEL = 'ROW' then + return NEW; + end if; + return null; + end; + $$ language plpgsql; +create table parted_irreg_ancestor (fd text, b text, fd2 int, fd3 int, a int) + partition by range (b); +alter table parted_irreg_ancestor drop column fd, + drop column fd2, drop column fd3; +create table parted_irreg (fd int, a int, fd2 int, b text) + partition by range (b); +alter table parted_irreg drop column fd, drop column fd2; +alter table parted_irreg_ancestor attach partition parted_irreg + for values from ('aaaa') to ('zzzz'); +create table parted1_irreg (b text, fd int, a int); +alter table parted1_irreg drop column fd; +alter table parted_irreg attach partition parted1_irreg + for values from ('aaaa') to ('bbbb'); +create trigger parted_trig after insert on parted_irreg + for each row execute procedure trigger_notice_ab(); +create trigger parted_trig_odd after insert on parted_irreg for each row + when (bark(new.b) AND new.a % 2 = 1) execute procedure trigger_notice_ab(); +-- we should hear barking for every insert, but parted_trig_odd only emits +-- noise for odd values of a. 
parted_trig does it for all inserts. +insert into parted_irreg values (1, 'aardvark'), (2, 'aanimals'); +NOTICE: aardvark <- woof! +NOTICE: aanimals <- woof! +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig_odd on parted1_irreg AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(2,aanimals) +insert into parted1_irreg values ('aardwolf', 2); +NOTICE: aardwolf <- woof! +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(2,aardwolf) +insert into parted_irreg_ancestor values ('aasvogel', 3); +NOTICE: aasvogel <- woof! +NOTICE: trigger parted_trig on parted1_irreg AFTER INSERT for ROW: (a,b)=(3,aasvogel) +NOTICE: trigger parted_trig_odd on parted1_irreg AFTER INSERT for ROW: (a,b)=(3,aasvogel) +drop table parted_irreg_ancestor; +-- Before triggers and partitions +create table parted (a int, b int, c text) partition by list (a); +create table parted_1 partition of parted for values in (1) + partition by list (b); +create table parted_1_1 partition of parted_1 for values in (1); +create function parted_trigfunc() returns trigger language plpgsql as $$ +begin + new.a = new.a + 1; + return new; +end; +$$; +insert into parted values (1, 1, 'uno uno v1'); -- works +create trigger t before insert or update or delete on parted + for each row execute function parted_trigfunc(); +insert into parted values (1, 1, 'uno uno v2'); -- fail +ERROR: moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported +DETAIL: Before executing trigger "t", the row was to be in partition "public.parted_1_1". +update parted set c = c || 'v3'; -- fail +ERROR: no partition of relation "parted" found for row +DETAIL: Partition key of the failing row contains (a) = (2). +create or replace function parted_trigfunc() returns trigger language plpgsql as $$ +begin + new.b = new.b + 1; + return new; +end; +$$; +insert into parted values (1, 1, 'uno uno v4'); -- fail +ERROR: moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported +DETAIL: Before executing trigger "t", the row was to be in partition "public.parted_1_1". +update parted set c = c || 'v5'; -- fail +ERROR: no partition of relation "parted_1" found for row +DETAIL: Partition key of the failing row contains (b) = (2). 
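+-- (Recap of the failures above: a BEFORE FOR EACH ROW trigger may modify
+-- NEW, but it may not change the row so that it no longer belongs in the
+-- partition it was originally routed to.  A minimal illustrative sketch of
+-- the failing pattern, using hypothetical names that are not part of this
+-- test:
+--   create table t (a int) partition by list (a);
+--   create table t1 partition of t for values in (1);
+--   create table t2 partition of t for values in (2);
+--   create function bump() returns trigger language plpgsql
+--     as $$ begin new.a := new.a + 1; return new; end $$;
+--   create trigger t_bump before insert on t
+--     for each row execute function bump();
+--   insert into t values (1);  -- fails: the modified row would belong in t2
+-- )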
+create or replace function parted_trigfunc() returns trigger language plpgsql as $$ +begin + new.c = new.c || ' did '|| TG_OP; + return new; +end; +$$; +insert into parted values (1, 1, 'uno uno'); -- works +update parted set c = c || ' v6'; -- works +select tableoid::regclass, * from parted; + tableoid | a | b | c +------------+---+---+---------------------------------- + parted_1_1 | 1 | 1 | uno uno v1 v6 did UPDATE + parted_1_1 | 1 | 1 | uno uno did INSERT v6 did UPDATE +(2 rows) + +-- update itself moves tuple to new partition; trigger still works +truncate table parted; +create table parted_2 partition of parted for values in (2); +insert into parted values (1, 1, 'uno uno v5'); +update parted set a = 2; +select tableoid::regclass, * from parted; + tableoid | a | b | c +----------+---+---+--------------------------------------------- + parted_2 | 2 | 1 | uno uno v5 did INSERT did UPDATE did INSERT +(1 row) + +-- both trigger and update change the partition +create or replace function parted_trigfunc2() returns trigger language plpgsql as $$ +begin + new.a = new.a + 1; + return new; +end; +$$; +create trigger t2 before update on parted + for each row execute function parted_trigfunc2(); +truncate table parted; +insert into parted values (1, 1, 'uno uno v6'); +create table parted_3 partition of parted for values in (3); +update parted set a = a + 1; +select tableoid::regclass, * from parted; + tableoid | a | b | c +----------+---+---+--------------------------------------------- + parted_3 | 3 | 1 | uno uno v6 did INSERT did UPDATE did INSERT +(1 row) + +-- there's no partition for a=0, but this update works anyway because +-- the trigger causes the tuple to be routed to another partition +update parted set a = 0; +select tableoid::regclass, * from parted; + tableoid | a | b | c +------------+---+---+------------------------------------------------------------------- + parted_1_1 | 1 | 1 | uno uno v6 did INSERT did UPDATE did INSERT did UPDATE did INSERT +(1 row) + +drop table parted; +create table parted (a int, b int, c text) partition by list ((a + b)); +create or replace function parted_trigfunc() returns trigger language plpgsql as $$ +begin + new.a = new.a + new.b; + return new; +end; +$$; +create table parted_1 partition of parted for values in (1, 2); +create table parted_2 partition of parted for values in (3, 4); +create trigger t before insert or update on parted + for each row execute function parted_trigfunc(); +insert into parted values (0, 1, 'zero win'); +insert into parted values (1, 1, 'one fail'); +ERROR: moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported +DETAIL: Before executing trigger "t", the row was to be in partition "public.parted_1". +insert into parted values (1, 2, 'two fail'); +ERROR: moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported +DETAIL: Before executing trigger "t", the row was to be in partition "public.parted_2". 
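+-- (The trailing "did INSERT" values above arise because an UPDATE that moves
+-- a row to another partition is carried out as a DELETE from the source
+-- partition plus an INSERT into the destination, so BEFORE INSERT row
+-- triggers run again on the destination partition.  A minimal illustrative
+-- sketch, with hypothetical names that are not part of this test:
+--   create table t (a int, note text) partition by list (a);
+--   create table t1 partition of t for values in (1);
+--   create table t2 partition of t for values in (2);
+--   create function log_op() returns trigger language plpgsql as
+--     $$ begin new.note := new.note || ' ' || TG_OP; return new; end $$;
+--   create trigger t_log before insert or update on t
+--     for each row execute function log_op();
+--   insert into t values (1, 'x');   -- note becomes 'x INSERT'
+--   update t set a = 2;              -- note becomes 'x INSERT UPDATE INSERT'
+-- )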
+select * from parted; + a | b | c +---+---+---------- + 1 | 1 | zero win +(1 row) + +drop table parted; +drop function parted_trigfunc(); +-- +-- Constraint triggers and partitioned tables +create table parted_constr_ancestor (a int, b text) + partition by range (b); +create table parted_constr (a int, b text) + partition by range (b); +alter table parted_constr_ancestor attach partition parted_constr + for values from ('aaaa') to ('zzzz'); +create table parted1_constr (a int, b text); +alter table parted_constr attach partition parted1_constr + for values from ('aaaa') to ('bbbb'); +create constraint trigger parted_trig after insert on parted_constr_ancestor + deferrable + for each row execute procedure trigger_notice_ab(); +create constraint trigger parted_trig_two after insert on parted_constr + deferrable initially deferred + for each row when (bark(new.b) AND new.a % 2 = 1) + execute procedure trigger_notice_ab(); +-- The immediate constraint is fired immediately; the WHEN clause of the +-- deferred constraint is also called immediately. The deferred constraint +-- is fired at commit time. +begin; +insert into parted_constr values (1, 'aardvark'); +NOTICE: aardvark <- woof! +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +insert into parted1_constr values (2, 'aardwolf'); +NOTICE: aardwolf <- woof! +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(2,aardwolf) +insert into parted_constr_ancestor values (3, 'aasvogel'); +NOTICE: aasvogel <- woof! +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +commit; +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +-- The WHEN clause is immediate, and both constraint triggers are fired at +-- commit time. +begin; +set constraints parted_trig deferred; +insert into parted_constr values (1, 'aardvark'); +NOTICE: aardvark <- woof! +insert into parted1_constr values (2, 'aardwolf'), (3, 'aasvogel'); +NOTICE: aardwolf <- woof! +NOTICE: aasvogel <- woof! 
+commit; +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(1,aardvark) +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(2,aardwolf) +NOTICE: trigger parted_trig on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +NOTICE: trigger parted_trig_two on parted1_constr AFTER INSERT for ROW: (a,b)=(3,aasvogel) +drop table parted_constr_ancestor; +drop function bark(text); +-- Test that the WHEN clause is set properly to partitions +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create trigger parted_trigger after update on parted_trigger + for each row when (new.a % 2 = 1 and length(old.b) >= 2) execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (3); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (3) to (5); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +insert into parted_trigger values + (0, 'a'), (1, 'bbb'), (2, 'bcd'), (3, 'c'), + (1000, 'c'), (1001, 'ddd'), (1002, 'efg'), (1003, 'f'), + (2000, 'e'), (2001, 'fff'), (2002, 'ghi'), (2003, 'h'); +update parted_trigger set a = a + 2; -- notice for odd 'a' values, long 'b' values +NOTICE: trigger parted_trigger on parted_trigger_1 AFTER UPDATE for ROW: (a,b)=(3,bbb) +NOTICE: trigger parted_trigger on parted_trigger_2 AFTER UPDATE for ROW: (a,b)=(1003,ddd) +NOTICE: trigger parted_trigger on parted_trigger_3_2 AFTER UPDATE for ROW: (a,b)=(2003,fff) +drop table parted_trigger; +-- try a constraint trigger, also +create table parted_referenced (a int); +create table unparted_trigger (a int, b text); -- for comparison purposes +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create constraint trigger parted_trigger after update on parted_trigger + from parted_referenced + for each row execute procedure trigger_notice_ab(); +create constraint trigger parted_trigger after update on unparted_trigger + from parted_referenced + for each row execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (3); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (3) to (5); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +select tgname, conname, t.tgrelid::regclass, t.tgconstrrelid::regclass, + c.conrelid::regclass, c.confrelid::regclass + from pg_trigger t join pg_constraint c on (t.tgconstraint = c.oid) + where tgname = 'parted_trigger' + order by t.tgrelid::regclass::text; + tgname | conname | tgrelid | tgconstrrelid | conrelid | confrelid 
+----------------+----------------+--------------------+-------------------+--------------------+----------- + parted_trigger | parted_trigger | parted_trigger | parted_referenced | parted_trigger | - + parted_trigger | parted_trigger | parted_trigger_1 | parted_referenced | parted_trigger_1 | - + parted_trigger | parted_trigger | parted_trigger_2 | parted_referenced | parted_trigger_2 | - + parted_trigger | parted_trigger | parted_trigger_3 | parted_referenced | parted_trigger_3 | - + parted_trigger | parted_trigger | parted_trigger_3_1 | parted_referenced | parted_trigger_3_1 | - + parted_trigger | parted_trigger | parted_trigger_3_2 | parted_referenced | parted_trigger_3_2 | - + parted_trigger | parted_trigger | unparted_trigger | parted_referenced | unparted_trigger | - +(7 rows) + +drop table parted_referenced, parted_trigger, unparted_trigger; +-- verify that the "AFTER UPDATE OF columns" event is propagated correctly +create table parted_trigger (a int, b text) partition by range (a); +create table parted_trigger_1 partition of parted_trigger for values from (0) to (1000); +create table parted_trigger_2 (drp int, a int, b text); +alter table parted_trigger_2 drop column drp; +alter table parted_trigger attach partition parted_trigger_2 for values from (1000) to (2000); +create trigger parted_trigger after update of b on parted_trigger + for each row execute procedure trigger_notice_ab(); +create table parted_trigger_3 (b text, a int) partition by range (length(b)); +create table parted_trigger_3_1 partition of parted_trigger_3 for values from (1) to (4); +create table parted_trigger_3_2 partition of parted_trigger_3 for values from (4) to (8); +alter table parted_trigger attach partition parted_trigger_3 for values from (2000) to (3000); +insert into parted_trigger values (0, 'a'), (1000, 'c'), (2000, 'e'), (2001, 'eeee'); +update parted_trigger set a = a + 2; -- no notices here +update parted_trigger set b = b || 'b'; -- all triggers should fire +NOTICE: trigger parted_trigger on parted_trigger_1 AFTER UPDATE for ROW: (a,b)=(2,ab) +NOTICE: trigger parted_trigger on parted_trigger_2 AFTER UPDATE for ROW: (a,b)=(1002,cb) +NOTICE: trigger parted_trigger on parted_trigger_3_1 AFTER UPDATE for ROW: (a,b)=(2002,eb) +NOTICE: trigger parted_trigger on parted_trigger_3_2 AFTER UPDATE for ROW: (a,b)=(2003,eeeeb) +drop table parted_trigger; +drop function trigger_notice_ab(); +-- Make sure we don't end up with unnecessary copies of triggers, when +-- cloning them. +create table trg_clone (a int) partition by range (a); +create table trg_clone1 partition of trg_clone for values from (0) to (1000); +alter table trg_clone add constraint uniq unique (a) deferrable; +create table trg_clone2 partition of trg_clone for values from (1000) to (2000); +create table trg_clone3 partition of trg_clone for values from (2000) to (3000) + partition by range (a); +create table trg_clone_3_3 partition of trg_clone3 for values from (2000) to (2100); +select tgrelid::regclass, count(*) from pg_trigger + where tgrelid::regclass in ('trg_clone', 'trg_clone1', 'trg_clone2', + 'trg_clone3', 'trg_clone_3_3') + group by tgrelid::regclass order by tgrelid::regclass; + tgrelid | count +---------------+------- + trg_clone | 1 + trg_clone1 | 1 + trg_clone2 | 1 + trg_clone3 | 1 + trg_clone_3_3 | 1 +(5 rows) + +drop table trg_clone; +-- Test the interaction between ALTER TABLE .. DISABLE TRIGGER and +-- both kinds of inheritance. 
Historically, legacy inheritance has +-- not recursed to children, so that behavior is preserved. +create table parent (a int); +create table child1 () inherits (parent); +create function trig_nothing() returns trigger language plpgsql + as $$ begin return null; end $$; +create trigger tg after insert on parent + for each row execute function trig_nothing(); +create trigger tg after insert on child1 + for each row execute function trig_nothing(); +alter table parent disable trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; + tgrelid | tgname | tgenabled +---------+--------+----------- + child1 | tg | O + parent | tg | D +(2 rows) + +alter table only parent enable always trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text; + tgrelid | tgname | tgenabled +---------+--------+----------- + child1 | tg | O + parent | tg | A +(2 rows) + +drop table parent, child1; +create table parent (a int) partition by list (a); +create table child1 partition of parent for values in (1); +create trigger tg after insert on parent + for each row execute procedure trig_nothing(); +create trigger tg_stmt after insert on parent + for statement execute procedure trig_nothing(); +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text, tgname; + tgrelid | tgname | tgenabled +---------+---------+----------- + child1 | tg | O + parent | tg | O + parent | tg_stmt | O +(3 rows) + +alter table only parent enable always trigger tg; -- no recursion because ONLY +alter table parent enable always trigger tg_stmt; -- no recursion because statement trigger +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text, tgname; + tgrelid | tgname | tgenabled +---------+---------+----------- + child1 | tg | O + parent | tg | A + parent | tg_stmt | A +(3 rows) + +-- The following is a no-op for the parent trigger but not so +-- for the child trigger, so recursion should be applied. +alter table parent enable always trigger tg; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text, tgname; + tgrelid | tgname | tgenabled +---------+---------+----------- + child1 | tg | A + parent | tg | A + parent | tg_stmt | A +(3 rows) + +-- This variant malfunctioned in some releases. 
+alter table parent disable trigger user; +select tgrelid::regclass, tgname, tgenabled from pg_trigger + where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text, tgname; + tgrelid | tgname | tgenabled +---------+---------+----------- + child1 | tg | D + parent | tg | D + parent | tg_stmt | D +(3 rows) + +drop table parent, child1; +-- Check processing of foreign key triggers +create table parent (a int primary key, f int references parent) + partition by list (a); +create table child1 partition of parent for values in (1); +select tgrelid::regclass, rtrim(tgname, '0123456789') as tgname, + tgfoid::regproc, tgenabled + from pg_trigger where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text, tgfoid; + tgrelid | tgname | tgfoid | tgenabled +---------+-------------------------+------------------------+----------- + child1 | RI_ConstraintTrigger_c_ | "RI_FKey_check_ins" | O + child1 | RI_ConstraintTrigger_c_ | "RI_FKey_check_upd" | O + parent | RI_ConstraintTrigger_c_ | "RI_FKey_check_ins" | O + parent | RI_ConstraintTrigger_c_ | "RI_FKey_check_upd" | O + parent | RI_ConstraintTrigger_a_ | "RI_FKey_noaction_del" | O + parent | RI_ConstraintTrigger_a_ | "RI_FKey_noaction_upd" | O +(6 rows) + +alter table parent disable trigger all; +select tgrelid::regclass, rtrim(tgname, '0123456789') as tgname, + tgfoid::regproc, tgenabled + from pg_trigger where tgrelid in ('parent'::regclass, 'child1'::regclass) + order by tgrelid::regclass::text, tgfoid; + tgrelid | tgname | tgfoid | tgenabled +---------+-------------------------+------------------------+----------- + child1 | RI_ConstraintTrigger_c_ | "RI_FKey_check_ins" | D + child1 | RI_ConstraintTrigger_c_ | "RI_FKey_check_upd" | D + parent | RI_ConstraintTrigger_c_ | "RI_FKey_check_ins" | D + parent | RI_ConstraintTrigger_c_ | "RI_FKey_check_upd" | D + parent | RI_ConstraintTrigger_a_ | "RI_FKey_noaction_del" | D + parent | RI_ConstraintTrigger_a_ | "RI_FKey_noaction_upd" | D +(6 rows) + +drop table parent, child1; +-- Verify that firing state propagates correctly on creation, too +CREATE TABLE trgfire (i int) PARTITION BY RANGE (i); +CREATE TABLE trgfire1 PARTITION OF trgfire FOR VALUES FROM (1) TO (10); +CREATE OR REPLACE FUNCTION tgf() RETURNS trigger LANGUAGE plpgsql + AS $$ begin raise exception 'except'; end $$; +CREATE TRIGGER tg AFTER INSERT ON trgfire FOR EACH ROW EXECUTE FUNCTION tgf(); +INSERT INTO trgfire VALUES (1); +ERROR: except +CONTEXT: PL/pgSQL function tgf() line 1 at RAISE +ALTER TABLE trgfire DISABLE TRIGGER tg; +INSERT INTO trgfire VALUES (1); +CREATE TABLE trgfire2 PARTITION OF trgfire FOR VALUES FROM (10) TO (20); +INSERT INTO trgfire VALUES (11); +CREATE TABLE trgfire3 (LIKE trgfire); +ALTER TABLE trgfire ATTACH PARTITION trgfire3 FOR VALUES FROM (20) TO (30); +INSERT INTO trgfire VALUES (21); +CREATE TABLE trgfire4 PARTITION OF trgfire FOR VALUES FROM (30) TO (40) PARTITION BY LIST (i); +CREATE TABLE trgfire4_30 PARTITION OF trgfire4 FOR VALUES IN (30); +INSERT INTO trgfire VALUES (30); +CREATE TABLE trgfire5 (LIKE trgfire) PARTITION BY LIST (i); +CREATE TABLE trgfire5_40 PARTITION OF trgfire5 FOR VALUES IN (40); +ALTER TABLE trgfire ATTACH PARTITION trgfire5 FOR VALUES FROM (40) TO (50); +INSERT INTO trgfire VALUES (40); +SELECT tgrelid::regclass, tgenabled FROM pg_trigger + WHERE tgrelid::regclass IN (SELECT oid from pg_class where relname LIKE 'trgfire%') + ORDER BY tgrelid::regclass::text; + tgrelid | tgenabled +-------------+----------- + trgfire | 
D + trgfire1 | D + trgfire2 | D + trgfire3 | D + trgfire4 | D + trgfire4_30 | D + trgfire5 | D + trgfire5_40 | D +(8 rows) + +ALTER TABLE trgfire ENABLE TRIGGER tg; +INSERT INTO trgfire VALUES (1); +ERROR: except +CONTEXT: PL/pgSQL function tgf() line 1 at RAISE +INSERT INTO trgfire VALUES (11); +ERROR: except +CONTEXT: PL/pgSQL function tgf() line 1 at RAISE +INSERT INTO trgfire VALUES (21); +ERROR: except +CONTEXT: PL/pgSQL function tgf() line 1 at RAISE +INSERT INTO trgfire VALUES (30); +ERROR: except +CONTEXT: PL/pgSQL function tgf() line 1 at RAISE +INSERT INTO trgfire VALUES (40); +ERROR: except +CONTEXT: PL/pgSQL function tgf() line 1 at RAISE +DROP TABLE trgfire; +DROP FUNCTION tgf(); +-- +-- Test the interaction between transition tables and both kinds of +-- inheritance. We'll dump the contents of the transition tables in a +-- format that shows the attribute order, so that we can distinguish +-- tuple formats (though not dropped attributes). +-- +create or replace function dump_insert() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = %, new table = %', + TG_NAME, + (select string_agg(new_table::text, ', ' order by a) from new_table); + return null; + end; +$$; +create or replace function dump_update() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = %, old table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' order by a) from old_table), + (select string_agg(new_table::text, ', ' order by a) from new_table); + return null; + end; +$$; +create or replace function dump_delete() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = %, old table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' order by a) from old_table); + return null; + end; +$$; +-- +-- Verify behavior of statement triggers on partition hierarchy with +-- transition tables. Tuples should appear to each trigger in the +-- format of the relation the trigger is attached to. 
+-- +-- set up a partition hierarchy with some different TupleDescriptors +create table parent (a text, b int) partition by list (a); +-- a child matching parent +create table child1 partition of parent for values in ('AAA'); +-- a child with a dropped column +create table child2 (x int, a text, b int); +alter table child2 drop column x; +alter table parent attach partition child2 for values in ('BBB'); +-- a child with a different column order +create table child3 (b int, a text); +alter table parent attach partition child3 for values in ('CCC'); +create trigger parent_insert_trig + after insert on parent referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger parent_update_trig + after update on parent referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger parent_delete_trig + after delete on parent referencing old table as old_table + for each statement execute procedure dump_delete(); +create trigger child1_insert_trig + after insert on child1 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger child1_update_trig + after update on child1 referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger child1_delete_trig + after delete on child1 referencing old table as old_table + for each statement execute procedure dump_delete(); +create trigger child2_insert_trig + after insert on child2 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger child2_update_trig + after update on child2 referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger child2_delete_trig + after delete on child2 referencing old table as old_table + for each statement execute procedure dump_delete(); +create trigger child3_insert_trig + after insert on child3 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger child3_update_trig + after update on child3 referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger child3_delete_trig + after delete on child3 referencing old table as old_table + for each statement execute procedure dump_delete(); +SELECT trigger_name, event_manipulation, event_object_schema, event_object_table, + action_order, action_condition, action_orientation, action_timing, + action_reference_old_table, action_reference_new_table + FROM information_schema.triggers + WHERE event_object_table IN ('parent', 'child1', 'child2', 'child3') + ORDER BY trigger_name COLLATE "C", 2; + trigger_name | event_manipulation | event_object_schema | event_object_table | action_order | action_condition | action_orientation | action_timing | action_reference_old_table | action_reference_new_table +--------------------+--------------------+---------------------+--------------------+--------------+------------------+--------------------+---------------+----------------------------+---------------------------- + child1_delete_trig | DELETE | public | child1 | 1 | | STATEMENT | AFTER | old_table | + child1_insert_trig | INSERT | public | child1 | 1 | | STATEMENT | AFTER | | new_table + child1_update_trig | UPDATE | public | child1 | 1 | | STATEMENT | AFTER | old_table | new_table + child2_delete_trig | DELETE | public | child2 | 1 | | STATEMENT | 
AFTER | old_table | + child2_insert_trig | INSERT | public | child2 | 1 | | STATEMENT | AFTER | | new_table + child2_update_trig | UPDATE | public | child2 | 1 | | STATEMENT | AFTER | old_table | new_table + child3_delete_trig | DELETE | public | child3 | 1 | | STATEMENT | AFTER | old_table | + child3_insert_trig | INSERT | public | child3 | 1 | | STATEMENT | AFTER | | new_table + child3_update_trig | UPDATE | public | child3 | 1 | | STATEMENT | AFTER | old_table | new_table + parent_delete_trig | DELETE | public | parent | 1 | | STATEMENT | AFTER | old_table | + parent_insert_trig | INSERT | public | parent | 1 | | STATEMENT | AFTER | | new_table + parent_update_trig | UPDATE | public | parent | 1 | | STATEMENT | AFTER | old_table | new_table +(12 rows) + +-- insert directly into children sees respective child-format tuples +insert into child1 values ('AAA', 42); +NOTICE: trigger = child1_insert_trig, new table = (AAA,42) +insert into child2 values ('BBB', 42); +NOTICE: trigger = child2_insert_trig, new table = (BBB,42) +insert into child3 values (42, 'CCC'); +NOTICE: trigger = child3_insert_trig, new table = (42,CCC) +-- update via parent sees parent-format tuples +update parent set b = b + 1; +NOTICE: trigger = parent_update_trig, old table = (AAA,42), (BBB,42), (CCC,42), new table = (AAA,43), (BBB,43), (CCC,43) +-- delete via parent sees parent-format tuples +delete from parent; +NOTICE: trigger = parent_delete_trig, old table = (AAA,43), (BBB,43), (CCC,43) +-- insert into parent sees parent-format tuples +insert into parent values ('AAA', 42); +NOTICE: trigger = parent_insert_trig, new table = (AAA,42) +insert into parent values ('BBB', 42); +NOTICE: trigger = parent_insert_trig, new table = (BBB,42) +insert into parent values ('CCC', 42); +NOTICE: trigger = parent_insert_trig, new table = (CCC,42) +-- delete from children sees respective child-format tuples +delete from child1; +NOTICE: trigger = child1_delete_trig, old table = (AAA,42) +delete from child2; +NOTICE: trigger = child2_delete_trig, old table = (BBB,42) +delete from child3; +NOTICE: trigger = child3_delete_trig, old table = (42,CCC) +-- copy into parent sees parent-format tuples +copy parent (a, b) from stdin; +NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42) +-- DML affecting parent sees tuples collected from children even if +-- there is no transition table trigger on the children +drop trigger child1_insert_trig on child1; +drop trigger child1_update_trig on child1; +drop trigger child1_delete_trig on child1; +drop trigger child2_insert_trig on child2; +drop trigger child2_update_trig on child2; +drop trigger child2_delete_trig on child2; +drop trigger child3_insert_trig on child3; +drop trigger child3_update_trig on child3; +drop trigger child3_delete_trig on child3; +delete from parent; +NOTICE: trigger = parent_delete_trig, old table = (AAA,42), (BBB,42), (CCC,42) +-- copy into parent sees tuples collected from children even if there +-- is no transition-table trigger on the children +copy parent (a, b) from stdin; +NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42) +-- insert into parent with a before trigger on a child tuple before +-- insertion, and we capture the newly modified row in parent format +create or replace function intercept_insert() returns trigger language plpgsql as +$$ + begin + new.b = new.b + 1000; + return new; + end; +$$; +create trigger intercept_insert_child3 + before insert on child3 + for each row execute procedure 
intercept_insert(); +-- insert, parent trigger sees post-modification parent-format tuple +insert into parent values ('AAA', 42), ('BBB', 42), ('CCC', 66); +NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,1066) +-- copy, parent trigger sees post-modification parent-format tuple +copy parent (a, b) from stdin; +NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,1234) +drop table child1, child2, child3, parent; +drop function intercept_insert(); +-- +-- Verify prohibition of row triggers with transition triggers on +-- partitions +-- +create table parent (a text, b int) partition by list (a); +create table child partition of parent for values in ('AAA'); +-- adding row trigger with transition table fails +create trigger child_row_trig + after insert on child referencing new table as new_table + for each row execute procedure dump_insert(); +ERROR: ROW triggers with transition tables are not supported on partitions +-- detaching it first works +alter table parent detach partition child; +create trigger child_row_trig + after insert on child referencing new table as new_table + for each row execute procedure dump_insert(); +-- but now we're not allowed to reattach it +alter table parent attach partition child for values in ('AAA'); +ERROR: trigger "child_row_trig" prevents table "child" from becoming a partition +DETAIL: ROW triggers with transition tables are not supported on partitions. +-- drop the trigger, and now we're allowed to attach it again +drop trigger child_row_trig on child; +alter table parent attach partition child for values in ('AAA'); +drop table child, parent; +-- +-- Verify behavior of statement triggers on (non-partition) +-- inheritance hierarchy with transition tables; similar to the +-- partition case, except there is no rerouting on insertion and child +-- tables can have extra columns +-- +-- set up inheritance hierarchy with different TupleDescriptors +create table parent (a text, b int); +-- a child matching parent +create table child1 () inherits (parent); +-- a child with a different column order +create table child2 (b int, a text); +alter table child2 inherit parent; +-- a child with an extra column +create table child3 (c text) inherits (parent); +create trigger parent_insert_trig + after insert on parent referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger parent_update_trig + after update on parent referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger parent_delete_trig + after delete on parent referencing old table as old_table + for each statement execute procedure dump_delete(); +create trigger child1_insert_trig + after insert on child1 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger child1_update_trig + after update on child1 referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger child1_delete_trig + after delete on child1 referencing old table as old_table + for each statement execute procedure dump_delete(); +create trigger child2_insert_trig + after insert on child2 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger child2_update_trig + after update on child2 referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger 
child2_delete_trig + after delete on child2 referencing old table as old_table + for each statement execute procedure dump_delete(); +create trigger child3_insert_trig + after insert on child3 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger child3_update_trig + after update on child3 referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger child3_delete_trig + after delete on child3 referencing old table as old_table + for each statement execute procedure dump_delete(); +-- insert directly into children sees respective child-format tuples +insert into child1 values ('AAA', 42); +NOTICE: trigger = child1_insert_trig, new table = (AAA,42) +insert into child2 values (42, 'BBB'); +NOTICE: trigger = child2_insert_trig, new table = (42,BBB) +insert into child3 values ('CCC', 42, 'foo'); +NOTICE: trigger = child3_insert_trig, new table = (CCC,42,foo) +-- update via parent sees parent-format tuples +update parent set b = b + 1; +NOTICE: trigger = parent_update_trig, old table = (AAA,42), (BBB,42), (CCC,42), new table = (AAA,43), (BBB,43), (CCC,43) +-- delete via parent sees parent-format tuples +delete from parent; +NOTICE: trigger = parent_delete_trig, old table = (AAA,43), (BBB,43), (CCC,43) +-- reinsert values into children for next test... +insert into child1 values ('AAA', 42); +NOTICE: trigger = child1_insert_trig, new table = (AAA,42) +insert into child2 values (42, 'BBB'); +NOTICE: trigger = child2_insert_trig, new table = (42,BBB) +insert into child3 values ('CCC', 42, 'foo'); +NOTICE: trigger = child3_insert_trig, new table = (CCC,42,foo) +-- delete from children sees respective child-format tuples +delete from child1; +NOTICE: trigger = child1_delete_trig, old table = (AAA,42) +delete from child2; +NOTICE: trigger = child2_delete_trig, old table = (42,BBB) +delete from child3; +NOTICE: trigger = child3_delete_trig, old table = (CCC,42,foo) +-- copy into parent sees parent-format tuples (no rerouting, so these +-- are really inserted into the parent) +copy parent (a, b) from stdin; +NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42) +-- same behavior for copy if there is an index (interesting because rows are +-- captured by a different code path in copyfrom.c if there are indexes) +create index on parent(b); +copy parent (a, b) from stdin; +NOTICE: trigger = parent_insert_trig, new table = (DDD,42) +-- DML affecting parent sees tuples collected from children even if +-- there is no transition table trigger on the children +drop trigger child1_insert_trig on child1; +drop trigger child1_update_trig on child1; +drop trigger child1_delete_trig on child1; +drop trigger child2_insert_trig on child2; +drop trigger child2_update_trig on child2; +drop trigger child2_delete_trig on child2; +drop trigger child3_insert_trig on child3; +drop trigger child3_update_trig on child3; +drop trigger child3_delete_trig on child3; +delete from parent; +NOTICE: trigger = parent_delete_trig, old table = (AAA,42), (BBB,42), (CCC,42), (DDD,42) +drop table child1, child2, child3, parent; +-- +-- Verify prohibition of row triggers with transition triggers on +-- inheritance children +-- +create table parent (a text, b int); +create table child () inherits (parent); +-- adding row trigger with transition table fails +create trigger child_row_trig + after insert on child referencing new table as new_table + for each row execute procedure dump_insert(); +ERROR: 
ROW triggers with transition tables are not supported on inheritance children +-- disinheriting it first works +alter table child no inherit parent; +create trigger child_row_trig + after insert on child referencing new table as new_table + for each row execute procedure dump_insert(); +-- but now we're not allowed to make it inherit anymore +alter table child inherit parent; +ERROR: trigger "child_row_trig" prevents table "child" from becoming an inheritance child +DETAIL: ROW triggers with transition tables are not supported in inheritance hierarchies. +-- drop the trigger, and now we're allowed to make it inherit again +drop trigger child_row_trig on child; +alter table child inherit parent; +drop table child, parent; +-- +-- Verify behavior of queries with wCTEs, where multiple transition +-- tuplestores can be active at the same time because there are +-- multiple DML statements that might fire triggers with transition +-- tables +-- +create table table1 (a int); +create table table2 (a text); +create trigger table1_trig + after insert on table1 referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger table2_trig + after insert on table2 referencing new table as new_table + for each statement execute procedure dump_insert(); +with wcte as (insert into table1 values (42)) + insert into table2 values ('hello world'); +NOTICE: trigger = table2_trig, new table = ("hello world") +NOTICE: trigger = table1_trig, new table = (42) +with wcte as (insert into table1 values (43)) + insert into table1 values (44); +NOTICE: trigger = table1_trig, new table = (43), (44) +select * from table1; + a +---- + 42 + 44 + 43 +(3 rows) + +select * from table2; + a +------------- + hello world +(1 row) + +drop table table1; +drop table table2; +-- +-- Verify behavior of INSERT ... ON CONFLICT DO UPDATE ... with +-- transition tables. 
+-- +create table my_table (a int primary key, b text); +create trigger my_table_insert_trig + after insert on my_table referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger my_table_update_trig + after update on my_table referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +-- inserts only +insert into my_table values (1, 'AAA'), (2, 'BBB') + on conflict (a) do + update set b = my_table.b || ':' || excluded.b; +NOTICE: trigger = my_table_update_trig, old table = , new table = +NOTICE: trigger = my_table_insert_trig, new table = (1,AAA), (2,BBB) +-- mixture of inserts and updates +insert into my_table values (1, 'AAA'), (2, 'BBB'), (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = my_table.b || ':' || excluded.b; +NOTICE: trigger = my_table_update_trig, old table = (1,AAA), (2,BBB), new table = (1,AAA:AAA), (2,BBB:BBB) +NOTICE: trigger = my_table_insert_trig, new table = (3,CCC), (4,DDD) +-- updates only +insert into my_table values (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = my_table.b || ':' || excluded.b; +NOTICE: trigger = my_table_update_trig, old table = (3,CCC), (4,DDD), new table = (3,CCC:CCC), (4,DDD:DDD) +NOTICE: trigger = my_table_insert_trig, new table = +-- +-- now using a partitioned table +-- +create table iocdu_tt_parted (a int primary key, b text) partition by list (a); +create table iocdu_tt_parted1 partition of iocdu_tt_parted for values in (1); +create table iocdu_tt_parted2 partition of iocdu_tt_parted for values in (2); +create table iocdu_tt_parted3 partition of iocdu_tt_parted for values in (3); +create table iocdu_tt_parted4 partition of iocdu_tt_parted for values in (4); +create trigger iocdu_tt_parted_insert_trig + after insert on iocdu_tt_parted referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger iocdu_tt_parted_update_trig + after update on iocdu_tt_parted referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +-- inserts only +insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; +NOTICE: trigger = iocdu_tt_parted_update_trig, old table = , new table = +NOTICE: trigger = iocdu_tt_parted_insert_trig, new table = (1,AAA), (2,BBB) +-- mixture of inserts and updates +insert into iocdu_tt_parted values (1, 'AAA'), (2, 'BBB'), (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; +NOTICE: trigger = iocdu_tt_parted_update_trig, old table = (1,AAA), (2,BBB), new table = (1,AAA:AAA), (2,BBB:BBB) +NOTICE: trigger = iocdu_tt_parted_insert_trig, new table = (3,CCC), (4,DDD) +-- updates only +insert into iocdu_tt_parted values (3, 'CCC'), (4, 'DDD') + on conflict (a) do + update set b = iocdu_tt_parted.b || ':' || excluded.b; +NOTICE: trigger = iocdu_tt_parted_update_trig, old table = (3,CCC), (4,DDD), new table = (3,CCC:CCC), (4,DDD:DDD) +NOTICE: trigger = iocdu_tt_parted_insert_trig, new table = +drop table iocdu_tt_parted; +-- +-- Verify that you can't create a trigger with transition tables for +-- more than one event. 
+-- +create trigger my_table_multievent_trig + after insert or update on my_table referencing new table as new_table + for each statement execute procedure dump_insert(); +ERROR: transition tables cannot be specified for triggers with more than one event +-- +-- Verify that you can't create a trigger with transition tables with +-- a column list. +-- +create trigger my_table_col_update_trig + after update of b on my_table referencing new table as new_table + for each statement execute procedure dump_insert(); +ERROR: transition tables cannot be specified for triggers with column lists +drop table my_table; +-- +-- Test firing of triggers with transition tables by foreign key cascades +-- +create table refd_table (a int primary key, b text); +create table trig_table (a int, b text, + foreign key (a) references refd_table on update cascade on delete cascade +); +create trigger trig_table_before_trig + before insert or update or delete on trig_table + for each statement execute procedure trigger_func('trig_table'); +create trigger trig_table_insert_trig + after insert on trig_table referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger trig_table_update_trig + after update on trig_table referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger trig_table_delete_trig + after delete on trig_table referencing old table as old_table + for each statement execute procedure dump_delete(); +insert into refd_table values + (1, 'one'), + (2, 'two'), + (3, 'three'); +insert into trig_table values + (1, 'one a'), + (1, 'one b'), + (2, 'two a'), + (2, 'two b'), + (3, 'three a'), + (3, 'three b'); +NOTICE: trigger_func(trig_table) called: action = INSERT, when = BEFORE, level = STATEMENT +NOTICE: trigger = trig_table_insert_trig, new table = (1,"one a"), (1,"one b"), (2,"two a"), (2,"two b"), (3,"three a"), (3,"three b") +update refd_table set a = 11 where b = 'one'; +NOTICE: trigger_func(trig_table) called: action = UPDATE, when = BEFORE, level = STATEMENT +NOTICE: trigger = trig_table_update_trig, old table = (1,"one a"), (1,"one b"), new table = (11,"one a"), (11,"one b") +select * from trig_table; + a | b +----+--------- + 2 | two a + 2 | two b + 3 | three a + 3 | three b + 11 | one a + 11 | one b +(6 rows) + +delete from refd_table where length(b) = 3; +NOTICE: trigger_func(trig_table) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = trig_table_delete_trig, old table = (2,"two a"), (2,"two b"), (11,"one a"), (11,"one b") +select * from trig_table; + a | b +---+--------- + 3 | three a + 3 | three b +(2 rows) + +drop table refd_table, trig_table; +-- +-- self-referential FKs are even more fun +-- +create table self_ref (a int primary key, + b int references self_ref(a) on delete cascade); +create trigger self_ref_before_trig + before delete on self_ref + for each statement execute procedure trigger_func('self_ref'); +create trigger self_ref_r_trig + after delete on self_ref referencing old table as old_table + for each row execute procedure dump_delete(); +create trigger self_ref_s_trig + after delete on self_ref referencing old table as old_table + for each statement execute procedure dump_delete(); +insert into self_ref values (1, null), (2, 1), (3, 2); +delete from self_ref where a = 1; +NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = self_ref_r_trig, old table = (1,), (2,1) +NOTICE: 
trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = self_ref_r_trig, old table = (1,), (2,1) +NOTICE: trigger = self_ref_s_trig, old table = (1,), (2,1) +NOTICE: trigger = self_ref_r_trig, old table = (3,2) +NOTICE: trigger = self_ref_s_trig, old table = (3,2) +-- without AR trigger, cascaded deletes all end up in one transition table +drop trigger self_ref_r_trig on self_ref; +insert into self_ref values (1, null), (2, 1), (3, 2), (4, 3); +delete from self_ref where a = 1; +NOTICE: trigger_func(self_ref) called: action = DELETE, when = BEFORE, level = STATEMENT +NOTICE: trigger = self_ref_s_trig, old table = (1,), (2,1), (3,2), (4,3) +drop table self_ref; +-- +-- test transition tables with MERGE +-- +create table merge_target_table (a int primary key, b text); +create trigger merge_target_table_insert_trig + after insert on merge_target_table referencing new table as new_table + for each statement execute procedure dump_insert(); +create trigger merge_target_table_update_trig + after update on merge_target_table referencing old table as old_table new table as new_table + for each statement execute procedure dump_update(); +create trigger merge_target_table_delete_trig + after delete on merge_target_table referencing old table as old_table + for each statement execute procedure dump_delete(); +create table merge_source_table (a int, b text); +insert into merge_source_table + values (1, 'initial1'), (2, 'initial2'), + (3, 'initial3'), (4, 'initial4'); +merge into merge_target_table t +using merge_source_table s +on t.a = s.a +when not matched then + insert values (a, b); +NOTICE: trigger = merge_target_table_insert_trig, new table = (1,initial1), (2,initial2), (3,initial3), (4,initial4) +merge into merge_target_table t +using merge_source_table s +on t.a = s.a +when matched and s.a <= 2 then + update set b = t.b || ' updated by merge' +when matched and s.a > 2 then + delete +when not matched then + insert values (a, b); +NOTICE: trigger = merge_target_table_delete_trig, old table = (3,initial3), (4,initial4) +NOTICE: trigger = merge_target_table_update_trig, old table = (1,initial1), (2,initial2), new table = (1,"initial1 updated by merge"), (2,"initial2 updated by merge") +NOTICE: trigger = merge_target_table_insert_trig, new table = +merge into merge_target_table t +using merge_source_table s +on t.a = s.a +when matched and s.a <= 2 then + update set b = t.b || ' updated again by merge' +when matched and s.a > 2 then + delete +when not matched then + insert values (a, b); +NOTICE: trigger = merge_target_table_delete_trig, old table = +NOTICE: trigger = merge_target_table_update_trig, old table = (1,"initial1 updated by merge"), (2,"initial2 updated by merge"), new table = (1,"initial1 updated by merge updated again by merge"), (2,"initial2 updated by merge updated again by merge") +NOTICE: trigger = merge_target_table_insert_trig, new table = (3,initial3), (4,initial4) +drop table merge_source_table, merge_target_table; +-- cleanup +drop function dump_insert(); +drop function dump_update(); +drop function dump_delete(); +-- +-- Tests for CREATE OR REPLACE TRIGGER +-- +create table my_table (id integer); +create function funcA() returns trigger as $$ +begin + raise notice 'hello from funcA'; + return null; +end; $$ language plpgsql; +create function funcB() returns trigger as $$ +begin + raise notice 'hello from funcB'; + return null; +end; $$ language plpgsql; +create trigger my_trig + after insert on my_table + for each row execute 
procedure funcA(); +create trigger my_trig + before insert on my_table + for each row execute procedure funcB(); -- should fail +ERROR: trigger "my_trig" for relation "my_table" already exists +insert into my_table values (1); +NOTICE: hello from funcA +create or replace trigger my_trig + before insert on my_table + for each row execute procedure funcB(); -- OK +insert into my_table values (2); -- this insert should become a no-op +NOTICE: hello from funcB +table my_table; + id +---- + 1 +(1 row) + +drop table my_table; +-- test CREATE OR REPLACE TRIGGER on partition table +create table parted_trig (a int) partition by range (a); +create table parted_trig_1 partition of parted_trig + for values from (0) to (1000) partition by range (a); +create table parted_trig_1_1 partition of parted_trig_1 for values from (0) to (100); +create table parted_trig_2 partition of parted_trig for values from (1000) to (2000); +create table default_parted_trig partition of parted_trig default; +-- test that trigger can be replaced by another one +-- at the same level of partition table +create or replace trigger my_trig + after insert on parted_trig + for each row execute procedure funcA(); +insert into parted_trig (a) values (50); +NOTICE: hello from funcA +create or replace trigger my_trig + after insert on parted_trig + for each row execute procedure funcB(); +insert into parted_trig (a) values (50); +NOTICE: hello from funcB +-- test that child trigger cannot be replaced directly +create or replace trigger my_trig + after insert on parted_trig + for each row execute procedure funcA(); +insert into parted_trig (a) values (50); +NOTICE: hello from funcA +create or replace trigger my_trig + after insert on parted_trig_1 + for each row execute procedure funcB(); -- should fail +ERROR: trigger "my_trig" for relation "parted_trig_1" is an internal or a child trigger +insert into parted_trig (a) values (50); +NOTICE: hello from funcA +drop trigger my_trig on parted_trig; +insert into parted_trig (a) values (50); +-- test that user trigger can be overwritten by one defined at upper level +create trigger my_trig + after insert on parted_trig_1 + for each row execute procedure funcA(); +insert into parted_trig (a) values (50); +NOTICE: hello from funcA +create trigger my_trig + after insert on parted_trig + for each row execute procedure funcB(); -- should fail +ERROR: trigger "my_trig" for relation "parted_trig_1" already exists +insert into parted_trig (a) values (50); +NOTICE: hello from funcA +create or replace trigger my_trig + after insert on parted_trig + for each row execute procedure funcB(); +insert into parted_trig (a) values (50); +NOTICE: hello from funcB +-- cleanup +drop table parted_trig; +drop function funcA(); +drop function funcB(); +-- Leave around some objects for other tests +create table trigger_parted (a int primary key) partition by list (a); +create function trigger_parted_trigfunc() returns trigger language plpgsql as + $$ begin end; $$; +create trigger aft_row after insert or update on trigger_parted + for each row execute function trigger_parted_trigfunc(); +create table trigger_parted_p1 partition of trigger_parted for values in (1) + partition by list (a); +create table trigger_parted_p1_1 partition of trigger_parted_p1 for values in (1); +create table trigger_parted_p2 partition of trigger_parted for values in (2) + partition by list (a); +create table trigger_parted_p2_2 partition of trigger_parted_p2 for values in (2); +alter table only trigger_parted_p2 disable trigger aft_row; 
+alter table trigger_parted_p2_2 enable always trigger aft_row; +-- verify transition table conversion slot's lifetime +-- https://postgr.es/m/39a71864-b120-5a5c-8cc5-c632b6f16761@amazon.com +create table convslot_test_parent (col1 text primary key); +create table convslot_test_child (col1 text primary key, + foreign key (col1) references convslot_test_parent(col1) on delete cascade on update cascade +); +alter table convslot_test_child add column col2 text not null default 'tutu'; +insert into convslot_test_parent(col1) values ('1'); +insert into convslot_test_child(col1) values ('1'); +insert into convslot_test_parent(col1) values ('3'); +insert into convslot_test_child(col1) values ('3'); +create function convslot_trig1() +returns trigger +language plpgsql +AS $$ +begin +raise notice 'trigger = %, old_table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' order by col1) from old_table); +return null; +end; $$; +create function convslot_trig2() +returns trigger +language plpgsql +AS $$ +begin +raise notice 'trigger = %, new table = %', + TG_NAME, + (select string_agg(new_table::text, ', ' order by col1) from new_table); +return null; +end; $$; +create trigger but_trigger after update on convslot_test_child +referencing new table as new_table +for each statement execute function convslot_trig2(); +update convslot_test_parent set col1 = col1 || '1'; +NOTICE: trigger = but_trigger, new table = (11,tutu), (31,tutu) +create function convslot_trig3() +returns trigger +language plpgsql +AS $$ +begin +raise notice 'trigger = %, old_table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' order by col1) from old_table), + (select string_agg(new_table::text, ', ' order by col1) from new_table); +return null; +end; $$; +create trigger but_trigger2 after update on convslot_test_child +referencing old table as old_table new table as new_table +for each statement execute function convslot_trig3(); +update convslot_test_parent set col1 = col1 || '1'; +NOTICE: trigger = but_trigger, new table = (111,tutu), (311,tutu) +NOTICE: trigger = but_trigger2, old_table = (11,tutu), (31,tutu), new table = (111,tutu), (311,tutu) +create trigger bdt_trigger after delete on convslot_test_child +referencing old table as old_table +for each statement execute function convslot_trig1(); +delete from convslot_test_parent; +NOTICE: trigger = bdt_trigger, old_table = (111,tutu), (311,tutu) +drop table convslot_test_child, convslot_test_parent; +drop function convslot_trig1(); +drop function convslot_trig2(); +drop function convslot_trig3(); +-- Bug #17607: variant of above in which trigger function raises an error; +-- we don't see any ill effects unless trigger tuple requires mapping +create table convslot_test_parent (id int primary key, val int) +partition by range (id); +create table convslot_test_part (val int, id int not null); +alter table convslot_test_parent + attach partition convslot_test_part for values from (1) to (1000); +create function convslot_trig4() returns trigger as +$$begin raise exception 'BOOM!'; end$$ language plpgsql; +create trigger convslot_test_parent_update + after update on convslot_test_parent + referencing old table as old_rows new table as new_rows + for each statement execute procedure convslot_trig4(); +insert into convslot_test_parent (id, val) values (1, 2); +begin; +savepoint svp; +update convslot_test_parent set val = 3; -- error expected +ERROR: BOOM! 
+CONTEXT: PL/pgSQL function convslot_trig4() line 1 at RAISE +rollback to savepoint svp; +rollback; +drop table convslot_test_parent; +drop function convslot_trig4(); +-- Test trigger renaming on partitioned tables +create table grandparent (id int, primary key (id)) partition by range (id); +create table middle partition of grandparent for values from (1) to (10) +partition by range (id); +create table chi partition of middle for values from (1) to (5); +create table cho partition of middle for values from (6) to (10); +create function f () returns trigger as +$$ begin return new; end; $$ +language plpgsql; +create trigger a after insert on grandparent +for each row execute procedure f(); +alter trigger a on grandparent rename to b; +select tgrelid::regclass, tgname, +(select tgname from pg_trigger tr where tr.oid = pg_trigger.tgparentid) parent_tgname +from pg_trigger where tgrelid in (select relid from pg_partition_tree('grandparent')) +order by tgname, tgrelid::regclass::text COLLATE "C"; + tgrelid | tgname | parent_tgname +-------------+--------+--------------- + chi | b | b + cho | b | b + grandparent | b | + middle | b | b +(4 rows) + +alter trigger a on only grandparent rename to b; -- ONLY not supported +ERROR: syntax error at or near "only" +LINE 1: alter trigger a on only grandparent rename to b; + ^ +alter trigger b on middle rename to c; -- can't rename trigger on partition +ERROR: cannot rename trigger "b" on table "middle" +HINT: Rename the trigger on the partitioned table "grandparent" instead. +create trigger c after insert on middle +for each row execute procedure f(); +alter trigger b on grandparent rename to c; +ERROR: trigger "c" for relation "middle" already exists +-- Rename cascading does not affect statement triggers +create trigger p after insert on grandparent for each statement execute function f(); +create trigger p after insert on middle for each statement execute function f(); +alter trigger p on grandparent rename to q; +select tgrelid::regclass, tgname, +(select tgname from pg_trigger tr where tr.oid = pg_trigger.tgparentid) parent_tgname +from pg_trigger where tgrelid in (select relid from pg_partition_tree('grandparent')) +order by tgname, tgrelid::regclass::text COLLATE "C"; + tgrelid | tgname | parent_tgname +-------------+--------+--------------- + chi | b | b + cho | b | b + grandparent | b | + middle | b | b + chi | c | c + cho | c | c + middle | c | + middle | p | + grandparent | q | +(9 rows) + +drop table grandparent; +-- Trigger renaming does not recurse on legacy inheritance +create table parent (a int); +create table child () inherits (parent); +create trigger parenttrig after insert on parent +for each row execute procedure f(); +create trigger parenttrig after insert on child +for each row execute procedure f(); +alter trigger parenttrig on parent rename to anothertrig; +\d+ child + Table "public.child" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Triggers: + parenttrig AFTER INSERT ON child FOR EACH ROW EXECUTE FUNCTION f() +Inherits: parent + +drop table parent, child; +drop function f(); diff --git a/src/test/regress/expected/truncate.out b/src/test/regress/expected/truncate.out new file mode 100644 index 0000000..1e88e86 --- /dev/null +++ b/src/test/regress/expected/truncate.out @@ -0,0 +1,594 @@ +-- Test basic TRUNCATE functionality. 
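The expectations that follow first confirm that TRUNCATE is transactional: a truncation issued inside a transaction that is later rolled back leaves the original rows in place, while a committed truncation empties the table. A minimal sketch of the behavior being verified, using a hypothetical pre-populated table t (not one of the regression tables):

    -- minimal sketch, assuming table t already exists and holds rows
    BEGIN;
    TRUNCATE t;              -- takes ACCESS EXCLUSIVE lock; rows are gone within this transaction
    SELECT count(*) FROM t;  -- 0 here
    ROLLBACK;                -- undoes the truncation
    SELECT count(*) FROM t;  -- original row count is back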
+CREATE TABLE truncate_a (col1 integer primary key); +INSERT INTO truncate_a VALUES (1); +INSERT INTO truncate_a VALUES (2); +SELECT * FROM truncate_a; + col1 +------ + 1 + 2 +(2 rows) + +-- Roll truncate back +BEGIN; +TRUNCATE truncate_a; +ROLLBACK; +SELECT * FROM truncate_a; + col1 +------ + 1 + 2 +(2 rows) + +-- Commit the truncate this time +BEGIN; +TRUNCATE truncate_a; +COMMIT; +SELECT * FROM truncate_a; + col1 +------ +(0 rows) + +-- Test foreign-key checks +CREATE TABLE trunc_b (a int REFERENCES truncate_a); +CREATE TABLE trunc_c (a serial PRIMARY KEY); +CREATE TABLE trunc_d (a int REFERENCES trunc_c); +CREATE TABLE trunc_e (a int REFERENCES truncate_a, b int REFERENCES trunc_c); +TRUNCATE TABLE truncate_a; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_b" references "truncate_a". +HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE truncate_a,trunc_b; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_e" references "truncate_a". +HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE truncate_a,trunc_b,trunc_e; -- ok +TRUNCATE TABLE truncate_a,trunc_e; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_b" references "truncate_a". +HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_d" references "trunc_c". +HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c,trunc_d; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_e" references "trunc_c". +HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c,trunc_d,trunc_e; -- ok +TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_b" references "truncate_a". +HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c,trunc_d,trunc_e,truncate_a,trunc_b; -- ok +TRUNCATE TABLE truncate_a RESTRICT; -- fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_b" references "truncate_a". +HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE truncate_a CASCADE; -- ok +NOTICE: truncate cascades to table "trunc_b" +NOTICE: truncate cascades to table "trunc_e" +-- circular references +ALTER TABLE truncate_a ADD FOREIGN KEY (col1) REFERENCES trunc_c; +-- Add some data to verify that truncating actually works ... +INSERT INTO trunc_c VALUES (1); +INSERT INTO truncate_a VALUES (1); +INSERT INTO trunc_b VALUES (1); +INSERT INTO trunc_d VALUES (1); +INSERT INTO trunc_e VALUES (1,1); +TRUNCATE TABLE trunc_c; +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "truncate_a" references "trunc_c". +HINT: Truncate table "truncate_a" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c,truncate_a; +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_d" references "trunc_c". +HINT: Truncate table "trunc_d" at the same time, or use TRUNCATE ... CASCADE. 
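The errors above, and the progression continuing below, reflect the rule that a table referenced by a foreign key cannot be truncated unless every referencing table is truncated in the same command; each HINT names one more referencing table to add, and CASCADE simply makes PostgreSQL pull in the referencing tables itself (reported with NOTICEs). A minimal sketch with hypothetical tables fk_parent and fk_child:

    -- minimal sketch, hypothetical tables
    CREATE TABLE fk_parent (id int PRIMARY KEY);
    CREATE TABLE fk_child  (parent_id int REFERENCES fk_parent);
    TRUNCATE fk_parent;            -- ERROR: fk_child still references fk_parent
    TRUNCATE fk_parent, fk_child;  -- OK: both emptied in one command
    TRUNCATE fk_parent CASCADE;    -- OK: fk_child is truncated too, with a NOTICE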
+TRUNCATE TABLE trunc_c,truncate_a,trunc_d; +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_e" references "trunc_c". +HINT: Truncate table "trunc_e" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e; +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "trunc_b" references "truncate_a". +HINT: Truncate table "trunc_b" at the same time, or use TRUNCATE ... CASCADE. +TRUNCATE TABLE trunc_c,truncate_a,trunc_d,trunc_e,trunc_b; +-- Verify that truncating did actually work +SELECT * FROM truncate_a + UNION ALL + SELECT * FROM trunc_c + UNION ALL + SELECT * FROM trunc_b + UNION ALL + SELECT * FROM trunc_d; + col1 +------ +(0 rows) + +SELECT * FROM trunc_e; + a | b +---+--- +(0 rows) + +-- Add data again to test TRUNCATE ... CASCADE +INSERT INTO trunc_c VALUES (1); +INSERT INTO truncate_a VALUES (1); +INSERT INTO trunc_b VALUES (1); +INSERT INTO trunc_d VALUES (1); +INSERT INTO trunc_e VALUES (1,1); +TRUNCATE TABLE trunc_c CASCADE; -- ok +NOTICE: truncate cascades to table "truncate_a" +NOTICE: truncate cascades to table "trunc_d" +NOTICE: truncate cascades to table "trunc_e" +NOTICE: truncate cascades to table "trunc_b" +SELECT * FROM truncate_a + UNION ALL + SELECT * FROM trunc_c + UNION ALL + SELECT * FROM trunc_b + UNION ALL + SELECT * FROM trunc_d; + col1 +------ +(0 rows) + +SELECT * FROM trunc_e; + a | b +---+--- +(0 rows) + +DROP TABLE truncate_a,trunc_c,trunc_b,trunc_d,trunc_e CASCADE; +-- Test TRUNCATE with inheritance +CREATE TABLE trunc_f (col1 integer primary key); +INSERT INTO trunc_f VALUES (1); +INSERT INTO trunc_f VALUES (2); +CREATE TABLE trunc_fa (col2a text) INHERITS (trunc_f); +INSERT INTO trunc_fa VALUES (3, 'three'); +CREATE TABLE trunc_fb (col2b int) INHERITS (trunc_f); +INSERT INTO trunc_fb VALUES (4, 444); +CREATE TABLE trunc_faa (col3 text) INHERITS (trunc_fa); +INSERT INTO trunc_faa VALUES (5, 'five', 'FIVE'); +BEGIN; +SELECT * FROM trunc_f; + col1 +------ + 1 + 2 + 3 + 4 + 5 +(5 rows) + +TRUNCATE trunc_f; +SELECT * FROM trunc_f; + col1 +------ +(0 rows) + +ROLLBACK; +BEGIN; +SELECT * FROM trunc_f; + col1 +------ + 1 + 2 + 3 + 4 + 5 +(5 rows) + +TRUNCATE ONLY trunc_f; +SELECT * FROM trunc_f; + col1 +------ + 3 + 4 + 5 +(3 rows) + +ROLLBACK; +BEGIN; +SELECT * FROM trunc_f; + col1 +------ + 1 + 2 + 3 + 4 + 5 +(5 rows) + +SELECT * FROM trunc_fa; + col1 | col2a +------+------- + 3 | three + 5 | five +(2 rows) + +SELECT * FROM trunc_faa; + col1 | col2a | col3 +------+-------+------ + 5 | five | FIVE +(1 row) + +TRUNCATE ONLY trunc_fb, ONLY trunc_fa; +SELECT * FROM trunc_f; + col1 +------ + 1 + 2 + 5 +(3 rows) + +SELECT * FROM trunc_fa; + col1 | col2a +------+------- + 5 | five +(1 row) + +SELECT * FROM trunc_faa; + col1 | col2a | col3 +------+-------+------ + 5 | five | FIVE +(1 row) + +ROLLBACK; +BEGIN; +SELECT * FROM trunc_f; + col1 +------ + 1 + 2 + 3 + 4 + 5 +(5 rows) + +SELECT * FROM trunc_fa; + col1 | col2a +------+------- + 3 | three + 5 | five +(2 rows) + +SELECT * FROM trunc_faa; + col1 | col2a | col3 +------+-------+------ + 5 | five | FIVE +(1 row) + +TRUNCATE ONLY trunc_fb, trunc_fa; +SELECT * FROM trunc_f; + col1 +------ + 1 + 2 +(2 rows) + +SELECT * FROM trunc_fa; + col1 | col2a +------+------- +(0 rows) + +SELECT * FROM trunc_faa; + col1 | col2a | col3 +------+-------+------ +(0 rows) + +ROLLBACK; +DROP TABLE trunc_f CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to table trunc_fa +drop cascades to table 
trunc_faa +drop cascades to table trunc_fb +-- Test ON TRUNCATE triggers +CREATE TABLE trunc_trigger_test (f1 int, f2 text, f3 text); +CREATE TABLE trunc_trigger_log (tgop text, tglevel text, tgwhen text, + tgargv text, tgtable name, rowcount bigint); +CREATE FUNCTION trunctrigger() RETURNS trigger as $$ +declare c bigint; +begin + execute 'select count(*) from ' || quote_ident(tg_table_name) into c; + insert into trunc_trigger_log values + (TG_OP, TG_LEVEL, TG_WHEN, TG_ARGV[0], tg_table_name, c); + return null; +end; +$$ LANGUAGE plpgsql; +-- basic before trigger +INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); +CREATE TRIGGER t +BEFORE TRUNCATE ON trunc_trigger_test +FOR EACH STATEMENT +EXECUTE PROCEDURE trunctrigger('before trigger truncate'); +SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; + Row count in test table +------------------------- + 2 +(1 row) + +SELECT * FROM trunc_trigger_log; + tgop | tglevel | tgwhen | tgargv | tgtable | rowcount +------+---------+--------+--------+---------+---------- +(0 rows) + +TRUNCATE trunc_trigger_test; +SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; + Row count in test table +------------------------- + 0 +(1 row) + +SELECT * FROM trunc_trigger_log; + tgop | tglevel | tgwhen | tgargv | tgtable | rowcount +----------+-----------+--------+-------------------------+--------------------+---------- + TRUNCATE | STATEMENT | BEFORE | before trigger truncate | trunc_trigger_test | 2 +(1 row) + +DROP TRIGGER t ON trunc_trigger_test; +truncate trunc_trigger_log; +-- same test with an after trigger +INSERT INTO trunc_trigger_test VALUES(1, 'foo', 'bar'), (2, 'baz', 'quux'); +CREATE TRIGGER tt +AFTER TRUNCATE ON trunc_trigger_test +FOR EACH STATEMENT +EXECUTE PROCEDURE trunctrigger('after trigger truncate'); +SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; + Row count in test table +------------------------- + 2 +(1 row) + +SELECT * FROM trunc_trigger_log; + tgop | tglevel | tgwhen | tgargv | tgtable | rowcount +------+---------+--------+--------+---------+---------- +(0 rows) + +TRUNCATE trunc_trigger_test; +SELECT count(*) as "Row count in test table" FROM trunc_trigger_test; + Row count in test table +------------------------- + 0 +(1 row) + +SELECT * FROM trunc_trigger_log; + tgop | tglevel | tgwhen | tgargv | tgtable | rowcount +----------+-----------+--------+------------------------+--------------------+---------- + TRUNCATE | STATEMENT | AFTER | after trigger truncate | trunc_trigger_test | 0 +(1 row) + +DROP TABLE trunc_trigger_test; +DROP TABLE trunc_trigger_log; +DROP FUNCTION trunctrigger(); +-- test TRUNCATE ... 
RESTART IDENTITY +CREATE SEQUENCE truncate_a_id1 START WITH 33; +CREATE TABLE truncate_a (id serial, + id1 integer default nextval('truncate_a_id1')); +ALTER SEQUENCE truncate_a_id1 OWNED BY truncate_a.id1; +INSERT INTO truncate_a DEFAULT VALUES; +INSERT INTO truncate_a DEFAULT VALUES; +SELECT * FROM truncate_a; + id | id1 +----+----- + 1 | 33 + 2 | 34 +(2 rows) + +TRUNCATE truncate_a; +INSERT INTO truncate_a DEFAULT VALUES; +INSERT INTO truncate_a DEFAULT VALUES; +SELECT * FROM truncate_a; + id | id1 +----+----- + 3 | 35 + 4 | 36 +(2 rows) + +TRUNCATE truncate_a RESTART IDENTITY; +INSERT INTO truncate_a DEFAULT VALUES; +INSERT INTO truncate_a DEFAULT VALUES; +SELECT * FROM truncate_a; + id | id1 +----+----- + 1 | 33 + 2 | 34 +(2 rows) + +CREATE TABLE truncate_b (id int GENERATED ALWAYS AS IDENTITY (START WITH 44)); +INSERT INTO truncate_b DEFAULT VALUES; +INSERT INTO truncate_b DEFAULT VALUES; +SELECT * FROM truncate_b; + id +---- + 44 + 45 +(2 rows) + +TRUNCATE truncate_b; +INSERT INTO truncate_b DEFAULT VALUES; +INSERT INTO truncate_b DEFAULT VALUES; +SELECT * FROM truncate_b; + id +---- + 46 + 47 +(2 rows) + +TRUNCATE truncate_b RESTART IDENTITY; +INSERT INTO truncate_b DEFAULT VALUES; +INSERT INTO truncate_b DEFAULT VALUES; +SELECT * FROM truncate_b; + id +---- + 44 + 45 +(2 rows) + +-- check rollback of a RESTART IDENTITY operation +BEGIN; +TRUNCATE truncate_a RESTART IDENTITY; +INSERT INTO truncate_a DEFAULT VALUES; +SELECT * FROM truncate_a; + id | id1 +----+----- + 1 | 33 +(1 row) + +ROLLBACK; +INSERT INTO truncate_a DEFAULT VALUES; +INSERT INTO truncate_a DEFAULT VALUES; +SELECT * FROM truncate_a; + id | id1 +----+----- + 1 | 33 + 2 | 34 + 3 | 35 + 4 | 36 +(4 rows) + +DROP TABLE truncate_a; +SELECT nextval('truncate_a_id1'); -- fail, seq should have been dropped +ERROR: relation "truncate_a_id1" does not exist +LINE 1: SELECT nextval('truncate_a_id1'); + ^ +-- partitioned table +CREATE TABLE truncparted (a int, b char) PARTITION BY LIST (a); +-- error, can't truncate a partitioned table +TRUNCATE ONLY truncparted; +ERROR: cannot truncate only a partitioned table +HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly. +CREATE TABLE truncparted1 PARTITION OF truncparted FOR VALUES IN (1); +INSERT INTO truncparted VALUES (1, 'a'); +-- error, must truncate partitions +TRUNCATE ONLY truncparted; +ERROR: cannot truncate only a partitioned table +HINT: Do not specify the ONLY keyword, or use TRUNCATE ONLY on the partitions directly. +TRUNCATE truncparted; +DROP TABLE truncparted; +-- foreign key on partitioned table: partition key is referencing column. 
+-- Make sure truncate did execute on all tables +CREATE FUNCTION tp_ins_data() RETURNS void LANGUAGE plpgsql AS $$ + BEGIN + INSERT INTO truncprim VALUES (1), (100), (150); + INSERT INTO truncpart VALUES (1), (100), (150); + END +$$; +CREATE FUNCTION tp_chk_data(OUT pktb regclass, OUT pkval int, OUT fktb regclass, OUT fkval int) + RETURNS SETOF record LANGUAGE plpgsql AS $$ + BEGIN + RETURN QUERY SELECT + pk.tableoid::regclass, pk.a, fk.tableoid::regclass, fk.a + FROM truncprim pk FULL JOIN truncpart fk USING (a) + ORDER BY 2, 4; + END +$$; +CREATE TABLE truncprim (a int PRIMARY KEY); +CREATE TABLE truncpart (a int REFERENCES truncprim) + PARTITION BY RANGE (a); +CREATE TABLE truncpart_1 PARTITION OF truncpart FOR VALUES FROM (0) TO (100); +CREATE TABLE truncpart_2 PARTITION OF truncpart FOR VALUES FROM (100) TO (200) + PARTITION BY RANGE (a); +CREATE TABLE truncpart_2_1 PARTITION OF truncpart_2 FOR VALUES FROM (100) TO (150); +CREATE TABLE truncpart_2_d PARTITION OF truncpart_2 DEFAULT; +TRUNCATE TABLE truncprim; -- should fail +ERROR: cannot truncate a table referenced in a foreign key constraint +DETAIL: Table "truncpart" references "truncprim". +HINT: Truncate table "truncpart" at the same time, or use TRUNCATE ... CASCADE. +select tp_ins_data(); + tp_ins_data +------------- + +(1 row) + +-- should truncate everything +TRUNCATE TABLE truncprim, truncpart; +select * from tp_chk_data(); + pktb | pkval | fktb | fkval +------+-------+------+------- +(0 rows) + +select tp_ins_data(); + tp_ins_data +------------- + +(1 row) + +-- should truncate everything +TRUNCATE TABLE truncprim CASCADE; +NOTICE: truncate cascades to table "truncpart" +NOTICE: truncate cascades to table "truncpart_1" +NOTICE: truncate cascades to table "truncpart_2" +NOTICE: truncate cascades to table "truncpart_2_1" +NOTICE: truncate cascades to table "truncpart_2_d" +SELECT * FROM tp_chk_data(); + pktb | pkval | fktb | fkval +------+-------+------+------- +(0 rows) + +SELECT tp_ins_data(); + tp_ins_data +------------- + +(1 row) + +-- should truncate all partitions +TRUNCATE TABLE truncpart; +SELECT * FROM tp_chk_data(); + pktb | pkval | fktb | fkval +-----------+-------+------+------- + truncprim | 1 | | + truncprim | 100 | | + truncprim | 150 | | +(3 rows) + +DROP TABLE truncprim, truncpart; +DROP FUNCTION tp_ins_data(), tp_chk_data(); +-- test cascade when referencing a partitioned table +CREATE TABLE trunc_a (a INT PRIMARY KEY) PARTITION BY RANGE (a); +CREATE TABLE trunc_a1 PARTITION OF trunc_a FOR VALUES FROM (0) TO (10); +CREATE TABLE trunc_a2 PARTITION OF trunc_a FOR VALUES FROM (10) TO (20) + PARTITION BY RANGE (a); +CREATE TABLE trunc_a21 PARTITION OF trunc_a2 FOR VALUES FROM (10) TO (12); +CREATE TABLE trunc_a22 PARTITION OF trunc_a2 FOR VALUES FROM (12) TO (16); +CREATE TABLE trunc_a2d PARTITION OF trunc_a2 DEFAULT; +CREATE TABLE trunc_a3 PARTITION OF trunc_a FOR VALUES FROM (20) TO (30); +INSERT INTO trunc_a VALUES (0), (5), (10), (15), (20), (25); +-- truncate a partition cascading to a table +CREATE TABLE ref_b ( + b INT PRIMARY KEY, + a INT REFERENCES trunc_a(a) ON DELETE CASCADE +); +INSERT INTO ref_b VALUES (10, 0), (50, 5), (100, 10), (150, 15); +TRUNCATE TABLE trunc_a1 CASCADE; +NOTICE: truncate cascades to table "ref_b" +SELECT a FROM ref_b; + a +--- +(0 rows) + +DROP TABLE ref_b; +-- truncate a partition cascading to a partitioned table +CREATE TABLE ref_c ( + c INT PRIMARY KEY, + a INT REFERENCES trunc_a(a) ON DELETE CASCADE +) PARTITION BY RANGE (c); +CREATE TABLE ref_c1 PARTITION OF ref_c FOR 
VALUES FROM (100) TO (200); +CREATE TABLE ref_c2 PARTITION OF ref_c FOR VALUES FROM (200) TO (300); +INSERT INTO ref_c VALUES (100, 10), (150, 15), (200, 20), (250, 25); +TRUNCATE TABLE trunc_a21 CASCADE; +NOTICE: truncate cascades to table "ref_c" +NOTICE: truncate cascades to table "ref_c1" +NOTICE: truncate cascades to table "ref_c2" +SELECT a as "from table ref_c" FROM ref_c; + from table ref_c +------------------ +(0 rows) + +SELECT a as "from table trunc_a" FROM trunc_a ORDER BY a; + from table trunc_a +-------------------- + 15 + 20 + 25 +(3 rows) + +DROP TABLE trunc_a, ref_c; diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out new file mode 100644 index 0000000..4eff85d --- /dev/null +++ b/src/test/regress/expected/tsdicts.out @@ -0,0 +1,723 @@ +--Test text search dictionaries and configurations +-- Test ISpell dictionary with ispell affix file +CREATE TEXT SEARCH DICTIONARY ispell ( + Template=ispell, + DictFile=ispell_sample, + AffFile=ispell_sample +); +SELECT ts_lexize('ispell', 'skies'); + ts_lexize +----------- + {sky} +(1 row) + +SELECT ts_lexize('ispell', 'bookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('ispell', 'booking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('ispell', 'foot'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('ispell', 'foots'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('ispell', 'rebookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('ispell', 'rebooking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('ispell', 'rebook'); + ts_lexize +----------- + +(1 row) + +SELECT ts_lexize('ispell', 'unbookings'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('ispell', 'unbooking'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('ispell', 'unbook'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('ispell', 'footklubber'); + ts_lexize +---------------- + {foot,klubber} +(1 row) + +SELECT ts_lexize('ispell', 'footballklubber'); + ts_lexize +------------------------------------------------------ + {footballklubber,foot,ball,klubber,football,klubber} +(1 row) + +SELECT ts_lexize('ispell', 'ballyklubber'); + ts_lexize +---------------- + {ball,klubber} +(1 row) + +SELECT ts_lexize('ispell', 'footballyklubber'); + ts_lexize +--------------------- + {foot,ball,klubber} +(1 row) + +-- Test ISpell dictionary with hunspell affix file +CREATE TEXT SEARCH DICTIONARY hunspell ( + Template=ispell, + DictFile=ispell_sample, + AffFile=hunspell_sample +); +SELECT ts_lexize('hunspell', 'skies'); + ts_lexize +----------- + {sky} +(1 row) + +SELECT ts_lexize('hunspell', 'bookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell', 'booking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell', 'foot'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('hunspell', 'foots'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('hunspell', 'rebookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell', 'rebooking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell', 'rebook'); + ts_lexize +----------- + +(1 row) + +SELECT ts_lexize('hunspell', 'unbookings'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell', 'unbooking'); + ts_lexize 
+----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell', 'unbook'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell', 'footklubber'); + ts_lexize +---------------- + {foot,klubber} +(1 row) + +SELECT ts_lexize('hunspell', 'footballklubber'); + ts_lexize +------------------------------------------------------ + {footballklubber,foot,ball,klubber,football,klubber} +(1 row) + +SELECT ts_lexize('hunspell', 'ballyklubber'); + ts_lexize +---------------- + {ball,klubber} +(1 row) + +SELECT ts_lexize('hunspell', 'footballyklubber'); + ts_lexize +--------------------- + {foot,ball,klubber} +(1 row) + +-- Test ISpell dictionary with hunspell affix file with FLAG long parameter +CREATE TEXT SEARCH DICTIONARY hunspell_long ( + Template=ispell, + DictFile=hunspell_sample_long, + AffFile=hunspell_sample_long +); +SELECT ts_lexize('hunspell_long', 'skies'); + ts_lexize +----------- + {sky} +(1 row) + +SELECT ts_lexize('hunspell_long', 'bookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'booking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'foot'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('hunspell_long', 'foots'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('hunspell_long', 'rebookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'rebooking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'rebook'); + ts_lexize +----------- + +(1 row) + +SELECT ts_lexize('hunspell_long', 'unbookings'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'unbooking'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'unbook'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'booked'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_long', 'footklubber'); + ts_lexize +---------------- + {foot,klubber} +(1 row) + +SELECT ts_lexize('hunspell_long', 'footballklubber'); + ts_lexize +------------------------------------------------------ + {footballklubber,foot,ball,klubber,football,klubber} +(1 row) + +SELECT ts_lexize('hunspell_long', 'ballyklubber'); + ts_lexize +---------------- + {ball,klubber} +(1 row) + +SELECT ts_lexize('hunspell_long', 'ballsklubber'); + ts_lexize +---------------- + {ball,klubber} +(1 row) + +SELECT ts_lexize('hunspell_long', 'footballyklubber'); + ts_lexize +--------------------- + {foot,ball,klubber} +(1 row) + +SELECT ts_lexize('hunspell_long', 'ex-machina'); + ts_lexize +--------------- + {ex-,machina} +(1 row) + +-- Test ISpell dictionary with hunspell affix file with FLAG num parameter +CREATE TEXT SEARCH DICTIONARY hunspell_num ( + Template=ispell, + DictFile=hunspell_sample_num, + AffFile=hunspell_sample_num +); +SELECT ts_lexize('hunspell_num', 'skies'); + ts_lexize +----------- + {sky} +(1 row) + +SELECT ts_lexize('hunspell_num', 'sk'); + ts_lexize +----------- + {sky} +(1 row) + +SELECT ts_lexize('hunspell_num', 'bookings'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'booking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'foot'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('hunspell_num', 'foots'); + ts_lexize +----------- + {foot} +(1 row) + +SELECT ts_lexize('hunspell_num', 'rebookings'); 
+ ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'rebooking'); + ts_lexize +---------------- + {booking,book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'rebook'); + ts_lexize +----------- + +(1 row) + +SELECT ts_lexize('hunspell_num', 'unbookings'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'unbooking'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'unbook'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'booked'); + ts_lexize +----------- + {book} +(1 row) + +SELECT ts_lexize('hunspell_num', 'footklubber'); + ts_lexize +---------------- + {foot,klubber} +(1 row) + +SELECT ts_lexize('hunspell_num', 'footballklubber'); + ts_lexize +------------------------------------------------------ + {footballklubber,foot,ball,klubber,football,klubber} +(1 row) + +SELECT ts_lexize('hunspell_num', 'ballyklubber'); + ts_lexize +---------------- + {ball,klubber} +(1 row) + +SELECT ts_lexize('hunspell_num', 'footballyklubber'); + ts_lexize +--------------------- + {foot,ball,klubber} +(1 row) + +-- Test suitability of affix and dict files +CREATE TEXT SEARCH DICTIONARY hunspell_err ( + Template=ispell, + DictFile=ispell_sample, + AffFile=hunspell_sample_long +); +ERROR: invalid affix alias "GJUS" +CREATE TEXT SEARCH DICTIONARY hunspell_err ( + Template=ispell, + DictFile=ispell_sample, + AffFile=hunspell_sample_num +); +ERROR: invalid affix flag "SZ\" +CREATE TEXT SEARCH DICTIONARY hunspell_invalid_1 ( + Template=ispell, + DictFile=hunspell_sample_long, + AffFile=ispell_sample +); +CREATE TEXT SEARCH DICTIONARY hunspell_invalid_2 ( + Template=ispell, + DictFile=hunspell_sample_long, + AffFile=hunspell_sample_num +); +CREATE TEXT SEARCH DICTIONARY hunspell_invalid_3 ( + Template=ispell, + DictFile=hunspell_sample_num, + AffFile=ispell_sample +); +CREATE TEXT SEARCH DICTIONARY hunspell_err ( + Template=ispell, + DictFile=hunspell_sample_num, + AffFile=hunspell_sample_long +); +ERROR: invalid affix alias "302,301,202,303" +-- Synonym dictionary +CREATE TEXT SEARCH DICTIONARY synonym ( + Template=synonym, + Synonyms=synonym_sample +); +SELECT ts_lexize('synonym', 'PoStGrEs'); + ts_lexize +----------- + {pgsql} +(1 row) + +SELECT ts_lexize('synonym', 'Gogle'); + ts_lexize +----------- + {googl} +(1 row) + +SELECT ts_lexize('synonym', 'indices'); + ts_lexize +----------- + {index} +(1 row) + +-- test altering boolean parameters +SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; + dictinitoption +----------------------------- + synonyms = 'synonym_sample' +(1 row) + +ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 1); +SELECT ts_lexize('synonym', 'PoStGrEs'); + ts_lexize +----------- + +(1 row) + +SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; + dictinitoption +------------------------------------------------ + synonyms = 'synonym_sample', casesensitive = 1 +(1 row) + +ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = 2); -- fail +ERROR: casesensitive requires a Boolean value +ALTER TEXT SEARCH DICTIONARY synonym (CaseSensitive = off); +SELECT ts_lexize('synonym', 'PoStGrEs'); + ts_lexize +----------- + {pgsql} +(1 row) + +SELECT dictinitoption FROM pg_ts_dict WHERE dictname = 'synonym'; + dictinitoption +---------------------------------------------------- + synonyms = 'synonym_sample', casesensitive = 'off' +(1 row) + +-- Create and simple test thesaurus dictionary +-- More tests in configuration checks because 
ts_lexize() +-- cannot pass more than one word to thesaurus. +CREATE TEXT SEARCH DICTIONARY thesaurus ( + Template=thesaurus, + DictFile=thesaurus_sample, + Dictionary=english_stem +); +SELECT ts_lexize('thesaurus', 'one'); + ts_lexize +----------- + {1} +(1 row) + +-- Test ispell dictionary in configuration +CREATE TEXT SEARCH CONFIGURATION ispell_tst ( + COPY=english +); +ALTER TEXT SEARCH CONFIGURATION ispell_tst ALTER MAPPING FOR + word, numword, asciiword, hword, numhword, asciihword, hword_part, hword_numpart, hword_asciipart + WITH ispell, english_stem; +SELECT to_tsvector('ispell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); + to_tsvector +---------------------------------------------------------------------------------------------------- + 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 +(1 row) + +SELECT to_tsquery('ispell_tst', 'footballklubber'); + to_tsquery +-------------------------------------------------------------------------- + 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' +(1 row) + +SELECT to_tsquery('ispell_tst', 'footballyklubber:b & rebookings:A & sky'); + to_tsquery +------------------------------------------------------------------------ + 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' +(1 row) + +-- Test ispell dictionary with hunspell affix in configuration +CREATE TEXT SEARCH CONFIGURATION hunspell_tst ( + COPY=ispell_tst +); +ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING + REPLACE ispell WITH hunspell; +SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); + to_tsvector +---------------------------------------------------------------------------------------------------- + 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 +(1 row) + +SELECT to_tsquery('hunspell_tst', 'footballklubber'); + to_tsquery +-------------------------------------------------------------------------- + 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' +(1 row) + +SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); + to_tsquery +------------------------------------------------------------------------ + 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' +(1 row) + +SELECT to_tsquery('hunspell_tst', 'footballyklubber:b <-> sky'); + to_tsquery +------------------------------------------------- + ( 'foot':B & 'ball':B & 'klubber':B ) <-> 'sky' +(1 row) + +SELECT phraseto_tsquery('hunspell_tst', 'footballyklubber sky'); + phraseto_tsquery +------------------------------------------- + ( 'foot' & 'ball' & 'klubber' ) <-> 'sky' +(1 row) + +-- Test ispell dictionary with hunspell affix with FLAG long in configuration +ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING + REPLACE hunspell WITH hunspell_long; +SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); + to_tsvector +---------------------------------------------------------------------------------------------------- + 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 +(1 row) + +SELECT to_tsquery('hunspell_tst', 'footballklubber'); + to_tsquery +-------------------------------------------------------------------------- + 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' +(1 row) + +SELECT 
to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); + to_tsquery +------------------------------------------------------------------------ + 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' +(1 row) + +-- Test ispell dictionary with hunspell affix with FLAG num in configuration +ALTER TEXT SEARCH CONFIGURATION hunspell_tst ALTER MAPPING + REPLACE hunspell_long WITH hunspell_num; +SELECT to_tsvector('hunspell_tst', 'Booking the skies after rebookings for footballklubber from a foot'); + to_tsvector +---------------------------------------------------------------------------------------------------- + 'ball':7 'book':1,5 'booking':1,5 'foot':7,10 'football':7 'footballklubber':7 'klubber':7 'sky':3 +(1 row) + +SELECT to_tsquery('hunspell_tst', 'footballklubber'); + to_tsquery +-------------------------------------------------------------------------- + 'footballklubber' | 'foot' & 'ball' & 'klubber' | 'football' & 'klubber' +(1 row) + +SELECT to_tsquery('hunspell_tst', 'footballyklubber:b & rebookings:A & sky'); + to_tsquery +------------------------------------------------------------------------ + 'foot':B & 'ball':B & 'klubber':B & ( 'booking':A | 'book':A ) & 'sky' +(1 row) + +-- Test synonym dictionary in configuration +CREATE TEXT SEARCH CONFIGURATION synonym_tst ( + COPY=english +); +ALTER TEXT SEARCH CONFIGURATION synonym_tst ALTER MAPPING FOR + asciiword, hword_asciipart, asciihword + WITH synonym, english_stem; +SELECT to_tsvector('synonym_tst', 'Postgresql is often called as postgres or pgsql and pronounced as postgre'); + to_tsvector +--------------------------------------------------- + 'call':4 'often':3 'pgsql':1,6,8,12 'pronounc':10 +(1 row) + +SELECT to_tsvector('synonym_tst', 'Most common mistake is to write Gogle instead of Google'); + to_tsvector +---------------------------------------------------------- + 'common':2 'googl':7,10 'instead':8 'mistak':3 'write':6 +(1 row) + +SELECT to_tsvector('synonym_tst', 'Indexes or indices - Which is right plural form of index?'); + to_tsvector +---------------------------------------------- + 'form':8 'index':1,3,10 'plural':7 'right':6 +(1 row) + +SELECT to_tsquery('synonym_tst', 'Index & indices'); + to_tsquery +--------------------- + 'index' & 'index':* +(1 row) + +-- test thesaurus in configuration +-- see thesaurus_sample.ths to understand 'odd' resulting tsvector +CREATE TEXT SEARCH CONFIGURATION thesaurus_tst ( + COPY=synonym_tst +); +ALTER TEXT SEARCH CONFIGURATION thesaurus_tst ALTER MAPPING FOR + asciiword, hword_asciipart, asciihword + WITH synonym, thesaurus, english_stem; +SELECT to_tsvector('thesaurus_tst', 'one postgres one two one two three one'); + to_tsvector +---------------------------------- + '1':1,5 '12':3 '123':4 'pgsql':2 +(1 row) + +SELECT to_tsvector('thesaurus_tst', 'Supernovae star is very new star and usually called supernovae (abbreviation SN)'); + to_tsvector +-------------------------------------------------------------- + 'abbrevi':10 'call':8 'new':4 'sn':1,9,11 'star':5 'usual':7 +(1 row) + +SELECT to_tsvector('thesaurus_tst', 'Booking tickets is looking like a booking a tickets'); + to_tsvector +------------------------------------------------------- + 'card':3,10 'invit':2,9 'like':6 'look':5 'order':1,8 +(1 row) + +-- invalid: non-lowercase quoted identifiers +CREATE TEXT SEARCH DICTIONARY tsdict_case +( + Template = ispell, + "DictFile" = ispell_sample, + "AffFile" = ispell_sample +); +ERROR: unrecognized Ispell parameter: "DictFile" +-- Test grammar 
for configurations +CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english); +-- Overriden mapping change with duplicated tokens. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + ALTER MAPPING FOR word, word WITH ispell; +-- Not a token supported by the configuration's parser, fails. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + DROP MAPPING FOR not_a_token, not_a_token; +ERROR: token type "not_a_token" does not exist +-- Not a token supported by the configuration's parser, fails even +-- with IF EXISTS. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + DROP MAPPING IF EXISTS FOR not_a_token, not_a_token; +ERROR: token type "not_a_token" does not exist +-- Token supported by the configuration's parser, succeeds. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + DROP MAPPING FOR word, word; +-- No mapping for token supported by the configuration's parser, fails. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + DROP MAPPING FOR word; +ERROR: mapping for token type "word" does not exist +-- Token supported by the configuration's parser, cannot be found, +-- succeeds with IF EXISTS. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + DROP MAPPING IF EXISTS FOR word, word; +NOTICE: mapping for token type "word" does not exist, skipping +-- Re-add mapping, with duplicated tokens supported by the parser. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + ADD MAPPING FOR word, word WITH ispell; +-- Not a token supported by the configuration's parser, fails. +ALTER TEXT SEARCH CONFIGURATION dummy_tst + ADD MAPPING FOR not_a_token WITH ispell; +ERROR: token type "not_a_token" does not exist +DROP TEXT SEARCH CONFIGURATION dummy_tst; diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out new file mode 100644 index 0000000..cfa391a --- /dev/null +++ b/src/test/regress/expected/tsearch.out @@ -0,0 +1,3007 @@ +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +-- +-- Sanity checks for text search catalogs +-- +-- NB: we assume the oidjoins test will have caught any dangling links, +-- that is OID or REGPROC fields that are not zero and do not match some +-- row in the linked-to table. However, if we want to enforce that a link +-- field can't be 0, we have to check it here. 
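The sanity checks that follow look for zero values in required OID link columns of the text search catalogs, and then for pg_ts_config_map rows whose configuration or token type has no match in the owning parser. The same dangling-link idea can be applied to the dictionary side of the map; a minimal illustrative sketch (not one of the queries in the test file):

    -- minimal sketch: map entries whose dictionary OID has no matching pg_ts_dict row
    SELECT m.mapcfg, m.maptokentype, m.mapdict
    FROM pg_ts_config_map AS m
    LEFT JOIN pg_ts_dict AS d ON d.oid = m.mapdict
    WHERE d.oid IS NULL;   -- expect zero rows on a healthy catalog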
+-- Find unexpected zero link entries +SELECT oid, prsname +FROM pg_ts_parser +WHERE prsnamespace = 0 OR prsstart = 0 OR prstoken = 0 OR prsend = 0 OR + -- prsheadline is optional + prslextype = 0; + oid | prsname +-----+--------- +(0 rows) + +SELECT oid, dictname +FROM pg_ts_dict +WHERE dictnamespace = 0 OR dictowner = 0 OR dicttemplate = 0; + oid | dictname +-----+---------- +(0 rows) + +SELECT oid, tmplname +FROM pg_ts_template +WHERE tmplnamespace = 0 OR tmpllexize = 0; -- tmplinit is optional + oid | tmplname +-----+---------- +(0 rows) + +SELECT oid, cfgname +FROM pg_ts_config +WHERE cfgnamespace = 0 OR cfgowner = 0 OR cfgparser = 0; + oid | cfgname +-----+--------- +(0 rows) + +SELECT mapcfg, maptokentype, mapseqno +FROM pg_ts_config_map +WHERE mapcfg = 0 OR mapdict = 0; + mapcfg | maptokentype | mapseqno +--------+--------------+---------- +(0 rows) + +-- Look for pg_ts_config_map entries that aren't one of parser's token types +SELECT * FROM + ( SELECT oid AS cfgid, (ts_token_type(cfgparser)).tokid AS tokid + FROM pg_ts_config ) AS tt +RIGHT JOIN pg_ts_config_map AS m + ON (tt.cfgid=m.mapcfg AND tt.tokid=m.maptokentype) +WHERE + tt.cfgid IS NULL OR tt.tokid IS NULL; + cfgid | tokid | mapcfg | maptokentype | mapseqno | mapdict +-------+-------+--------+--------------+----------+--------- +(0 rows) + +-- Load some test data +CREATE TABLE test_tsvector( + t text, + a tsvector +); +\set filename :abs_srcdir '/data/tsearch.data' +COPY test_tsvector FROM :'filename'; +ANALYZE test_tsvector; +-- test basic text search behavior without indexes, then with +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; + count +------- + 494 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; + count +------- + 432 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; + count +------- + 56 +(1 row) + 
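These count(*) probes establish the sequential-scan answers for a fixed set of tsquery predicates; the same predicates are then re-evaluated after building a GiST index (default and with explicit siglen values) and a GIN index, with planner settings forced so that each access method is actually exercised, and the counts must stay identical. A minimal sketch of the plan-forcing pattern, with hypothetical table and column names:

    -- minimal sketch: force an index plan and confirm it with EXPLAIN
    SET enable_seqscan = OFF;
    SET enable_indexscan = ON;
    SET enable_bitmapscan = OFF;
    EXPLAIN (COSTS OFF)
    SELECT count(*) FROM some_docs WHERE body_tsv @@ 'wr | qh'::tsquery;
    RESET enable_seqscan;
    RESET enable_indexscan;
    RESET enable_bitmapscan;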
+SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; + count +------- + 58 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; + count +------- + 452 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; + count +------- + 450 +(1 row) + +create index wowidx on test_tsvector using gist (a); +SET enable_seqscan=OFF; +SET enable_indexscan=ON; +SET enable_bitmapscan=OFF; +explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + QUERY PLAN +------------------------------------------------------- + Aggregate + -> Index Scan using wowidx on test_tsvector + Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) +(3 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; + count +------- + 494 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; + count +------- + 432 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; + count +------- + 56 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; + count +------- + 58 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; + count +------- + 452 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; + count +------- + 450 +(1 row) + +SET enable_indexscan=OFF; +SET enable_bitmapscan=ON; +explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + QUERY PLAN +------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_tsvector + Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) + -> Bitmap Index Scan on wowidx + Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) +(5 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; + count 
+------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; + count +------- + 494 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; + count +------- + 432 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; + count +------- + 56 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; + count +------- + 58 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; + count +------- + 452 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; + count +------- + 450 +(1 row) + +-- Test siglen parameter of GiST tsvector_ops +CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(foo=1)); +ERROR: unrecognized parameter "foo" +CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=0)); +ERROR: value 0 out of bounds for option "siglen" +DETAIL: Valid values are between "1" and "2024". +CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=2048)); +ERROR: value 2048 out of bounds for option "siglen" +DETAIL: Valid values are between "1" and "2024". 
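The errors above and below come from validation of the siglen opclass parameter of GiST tsvector_ops: it must lie between 1 and 2024, may not be given twice, and unrecognized parameters are rejected; when omitted, the default signature length (124 bytes) is used. A minimal sketch of a valid declaration, with hypothetical table and column names:

    -- minimal sketch, hypothetical table "docs" with tsvector column "body_tsv"
    CREATE INDEX docs_body_gist ON docs
        USING gist (body_tsv tsvector_ops (siglen = 256));
    -- omitting siglen uses the default signature length of 124 bytes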
+CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100,foo='bar')); +ERROR: unrecognized parameter "foo" +CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100, siglen = 200)); +ERROR: parameter "siglen" specified more than once +CREATE INDEX wowidx2 ON test_tsvector USING gist (a tsvector_ops(siglen=1)); +\d test_tsvector + Table "public.test_tsvector" + Column | Type | Collation | Nullable | Default +--------+----------+-----------+----------+--------- + t | text | | | + a | tsvector | | | +Indexes: + "wowidx" gist (a) + "wowidx2" gist (a tsvector_ops (siglen='1')) + +DROP INDEX wowidx; +EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + QUERY PLAN +------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_tsvector + Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) + -> Bitmap Index Scan on wowidx2 + Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) +(5 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; + count +------- + 494 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; + count +------- + 432 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; + count +------- + 56 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; + count +------- + 58 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; + count +------- + 452 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; + count +------- + 450 +(1 row) + +DROP INDEX wowidx2; +CREATE INDEX wowidx ON test_tsvector USING gist (a tsvector_ops(siglen=484)); +\d test_tsvector + Table "public.test_tsvector" + Column | Type | Collation | Nullable | Default +--------+----------+-----------+----------+--------- + t | text | | | + a | tsvector | | | +Indexes: + "wowidx" gist (a 
tsvector_ops (siglen='484')) + +EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + QUERY PLAN +------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_tsvector + Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) + -> Bitmap Index Scan on wowidx + Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) +(5 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; + count +------- + 98 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; + count +------- + 494 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; + count +------- + 432 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; + count +------- + 56 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; + count +------- + 58 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; + count +------- + 452 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; + count +------- + 450 +(1 row) + +RESET enable_seqscan; +RESET enable_indexscan; +RESET enable_bitmapscan; +DROP INDEX wowidx; +CREATE INDEX wowidx ON test_tsvector USING gin (a); +SET enable_seqscan=OFF; +-- GIN only supports bitmapscan, so no need to test plain indexscan +explain (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + QUERY PLAN +------------------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_tsvector + Recheck Cond: (a @@ '''wr'' | ''qh'''::tsquery) + -> Bitmap Index Scan on wowidx + Index Cond: (a @@ '''wr'' | ''qh'''::tsquery) +(5 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr&qh'; + count +------- + 17 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq&yt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'eq|yt'; + count +------- + 98 +(1 
row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq&yt)|(wr&qh)'; + count +------- + 23 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '(eq|yt)&(wr|qh)'; + count +------- + 39 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'w:*|q:*'; + count +------- + 494 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ any ('{wr,qh}'); + count +------- + 158 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'no_such_lexeme'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!no_such_lexeme'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'pl <-> yh'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'yh <-> pl'; + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'qe <2> qt'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> yh'; + count +------- + 3 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!pl <-> !yh'; + count +------- + 432 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!yh <-> pl'; + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qe <2> qt'; + count +------- + 6 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(pl <-> yh)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(yh <-> pl)'; + count +------- + 508 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!(qe <2> qt)'; + count +------- + 507 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:A'; + count +------- + 56 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wd:D'; + count +------- + 58 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:A'; + count +------- + 452 +(1 row) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!wd:D'; + count +------- + 450 +(1 row) + +-- Test optimization of non-empty GIN_SEARCH_MODE_ALL queries +EXPLAIN (COSTS OFF) +SELECT count(*) FROM test_tsvector WHERE a @@ '!qh'; + QUERY PLAN +----------------------------------------------------- + Aggregate + -> Bitmap Heap Scan on test_tsvector + Recheck Cond: (a @@ '!''qh'''::tsquery) + -> Bitmap Index Scan on wowidx + Index Cond: (a @@ '!''qh'''::tsquery) +(5 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ '!qh'; + count +------- + 410 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh'; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Bitmap Heap Scan on test_tsvector + Recheck Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery)) + -> Bitmap Index Scan on wowidx + Index Cond: ((a @@ '''wr'''::tsquery) AND (a @@ '!''qh'''::tsquery)) +(5 rows) + +SELECT count(*) FROM test_tsvector WHERE a @@ 'wr' AND a @@ '!qh'; + count +------- + 60 +(1 row) + +RESET enable_seqscan; +INSERT INTO test_tsvector VALUES ('???', 'DFG:1A,2B,6C,10 FGH'); +SELECT * FROM ts_stat('SELECT a FROM test_tsvector') ORDER BY ndoc DESC, nentry DESC, word LIMIT 10; + word | ndoc | nentry +------+------+-------- + qq | 108 | 108 + qt | 102 | 102 + qe | 100 | 101 + qh | 98 | 99 + qw | 98 | 98 + qa | 97 | 97 + ql | 94 | 94 + qs | 94 | 94 + qr | 92 | 93 + qi | 92 | 92 +(10 rows) + +SELECT * FROM ts_stat('SELECT a FROM test_tsvector', 'AB') ORDER BY ndoc DESC, nentry DESC, word; + word | ndoc | nentry +------+------+-------- + DFG | 1 | 2 +(1 row) + +--dictionaries and to_tsvector +SELECT ts_lexize('english_stem', 'skies'); + 
ts_lexize +----------- + {sky} +(1 row) + +SELECT ts_lexize('english_stem', 'identity'); + ts_lexize +----------- + {ident} +(1 row) + +SELECT * FROM ts_token_type('default'); + tokid | alias | description +-------+-----------------+------------------------------------------ + 1 | asciiword | Word, all ASCII + 2 | word | Word, all letters + 3 | numword | Word, letters and digits + 4 | email | Email address + 5 | url | URL + 6 | host | Host + 7 | sfloat | Scientific notation + 8 | version | Version number + 9 | hword_numpart | Hyphenated word part, letters and digits + 10 | hword_part | Hyphenated word part, all letters + 11 | hword_asciipart | Hyphenated word part, all ASCII + 12 | blank | Space symbols + 13 | tag | XML tag + 14 | protocol | Protocol head + 15 | numhword | Hyphenated word, letters and digits + 16 | asciihword | Hyphenated word, all ASCII + 17 | hword | Hyphenated word, all letters + 18 | url_path | URL path + 19 | file | File or path name + 20 | float | Decimal notation + 21 | int | Signed integer + 22 | uint | Unsigned integer + 23 | entity | XML entity +(23 rows) + +SELECT * FROM ts_parse('default', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 +/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 + wow < jqw <> qwerty'); + tokid | token +-------+-------------------------------------- + 22 | 345 + 12 | + 1 | qwe + 12 | @ + 19 | efd.r + 12 | ' + 14 | http:// + 6 | www.com + 12 | / + 14 | http:// + 5 | aew.werc.ewr/?ad=qwe&dw + 6 | aew.werc.ewr + 18 | /?ad=qwe&dw + 12 | + 5 | 1aew.werc.ewr/?ad=qwe&dw + 6 | 1aew.werc.ewr + 18 | /?ad=qwe&dw + 12 | + 6 | 2aew.werc.ewr + 12 | + 14 | http:// + 5 | 3aew.werc.ewr/?ad=qwe&dw + 6 | 3aew.werc.ewr + 18 | /?ad=qwe&dw + 12 | + 14 | http:// + 6 | 4aew.werc.ewr + 12 | + 14 | http:// + 5 | 5aew.werc.ewr:8100/? + 6 | 5aew.werc.ewr:8100 + 18 | /? + 12 | + 1 | ad + 12 | = + 1 | qwe + 12 | & + 1 | dw + 12 | + 5 | 6aew.werc.ewr:8100/?ad=qwe&dw + 6 | 6aew.werc.ewr:8100 + 18 | /?ad=qwe&dw + 12 | + 5 | 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 + 6 | 7aew.werc.ewr:8100 + 18 | /?ad=qwe&dw=%20%32 + 12 | + 7 | +4.0e-10 + 12 | + 1 | qwe + 12 | + 1 | qwe + 12 | + 1 | qwqwe + 12 | + 20 | 234.435 + 12 | + 22 | 455 + 12 | + 20 | 5.005 + 12 | + 4 | teodor@stack.net + 12 | + 4 | teodor@123-stack.net + 12 | + 4 | 123_teodor@stack.net + 12 | + 4 | 123-teodor@stack.net + 12 | + 16 | qwe-wer + 11 | qwe + 12 | - + 11 | wer + 12 | + 1 | asdf + 12 | + 13 | + 1 | qwer + 12 | + 1 | jf + 12 | + 1 | sdjk + 12 | < + 1 | we + 12 | + 1 | hjwer + 12 | + 13 | + 12 | + 3 | ewr1 + 12 | > + 3 | ewri2 + 12 | + 13 | + 12 | + + | + 19 | /usr/local/fff + 12 | + 19 | /awdf/dwqe/4325 + 12 | + 19 | rewt/ewr + 12 | + 1 | wefjn + 12 | + 19 | /wqe-324/ewr + 12 | + 19 | gist.h + 12 | + 19 | gist.h.c + 12 | + 19 | gist.c + 12 | . + 1 | readline + 12 | + 20 | 4.2 + 12 | + 20 | 4.2 + 12 | . + 20 | 4.2 + 12 | , + 1 | readline + 20 | -4.2 + 12 | + 1 | readline + 20 | -4.2 + 12 | . 
+ 22 | 234 + 12 | + + | + 12 | < + 1 | i + 12 | + 13 | + 12 | + 1 | wow + 12 | + 12 | < + 1 | jqw + 12 | + 12 | <> + 1 | qwerty +(139 rows) + +SELECT to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 +/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 234 + wow < jqw <> qwerty'); + to_tsvector +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + '+4.0e-10':28 '-4.2':63,65 '/?':18 '/?ad=qwe&dw':7,10,14,24 '/?ad=qwe&dw=%20%32':27 '/awdf/dwqe/4325':51 '/usr/local/fff':50 '/wqe-324/ewr':54 '123-teodor@stack.net':38 '123_teodor@stack.net':37 '1aew.werc.ewr':9 '1aew.werc.ewr/?ad=qwe&dw':8 '234':66 '234.435':32 '2aew.werc.ewr':11 '345':1 '3aew.werc.ewr':13 '3aew.werc.ewr/?ad=qwe&dw':12 '4.2':59,60,61 '455':33 '4aew.werc.ewr':15 '5.005':34 '5aew.werc.ewr:8100':17 '5aew.werc.ewr:8100/?':16 '6aew.werc.ewr:8100':23 '6aew.werc.ewr:8100/?ad=qwe&dw':22 '7aew.werc.ewr:8100':26 '7aew.werc.ewr:8100/?ad=qwe&dw=%20%32':25 'ad':19 'aew.werc.ewr':6 'aew.werc.ewr/?ad=qwe&dw':5 'asdf':42 'dw':21 'efd.r':3 'ewr1':48 'ewri2':49 'gist.c':57 'gist.h':55 'gist.h.c':56 'hjwer':47 'jf':44 'jqw':69 'qwe':2,20,29,30,40 'qwe-wer':39 'qwer':43 'qwerti':70 'qwqwe':31 'readlin':58,62,64 'rewt/ewr':52 'sdjk':45 'teodor@123-stack.net':36 'teodor@stack.net':35 'wefjn':53 'wer':41 'wow':68 'www.com':4 +(1 row) + +SELECT length(to_tsvector('english', '345 qwe@efd.r '' http://www.com/ http://aew.werc.ewr/?ad=qwe&dw 1aew.werc.ewr/?ad=qwe&dw 2aew.werc.ewr http://3aew.werc.ewr/?ad=qwe&dw http://4aew.werc.ewr http://5aew.werc.ewr:8100/? ad=qwe&dw 6aew.werc.ewr:8100/?ad=qwe&dw 7aew.werc.ewr:8100/?ad=qwe&dw=%20%32 +4.0e-10 qwe qwe qwqwe 234.435 455 5.005 teodor@stack.net teodor@123-stack.net 123_teodor@stack.net 123-teodor@stack.net qwe-wer asdf qwer jf sdjk ewr1> ewri2 +/usr/local/fff /awdf/dwqe/4325 rewt/ewr wefjn /wqe-324/ewr gist.h gist.h.c gist.c. readline 4.2 4.2. 4.2, readline-4.2 readline-4.2. 
234 + wow < jqw <> qwerty')); + length +-------- + 56 +(1 row) + +-- ts_debug +SELECT * from ts_debug('english', 'abc&nm1;def©ghiõjkl'); + alias | description | token | dictionaries | dictionary | lexemes +-----------+-----------------+----------------------------+----------------+--------------+--------- + tag | XML tag | | {} | | + asciiword | Word, all ASCII | abc | {english_stem} | english_stem | {abc} + entity | XML entity | &nm1; | {} | | + asciiword | Word, all ASCII | def | {english_stem} | english_stem | {def} + entity | XML entity | © | {} | | + asciiword | Word, all ASCII | ghi | {english_stem} | english_stem | {ghi} + entity | XML entity | õ | {} | | + asciiword | Word, all ASCII | jkl | {english_stem} | english_stem | {jkl} + tag | XML tag | | {} | | +(9 rows) + +-- check parsing of URLs +SELECT * from ts_debug('english', 'http://www.harewoodsolutions.co.uk/press.aspx'); + alias | description | token | dictionaries | dictionary | lexemes +----------+---------------+----------------------------------------+--------------+------------+------------------------------------------ + protocol | Protocol head | http:// | {} | | + url | URL | www.harewoodsolutions.co.uk/press.aspx | {simple} | simple | {www.harewoodsolutions.co.uk/press.aspx} + host | Host | www.harewoodsolutions.co.uk | {simple} | simple | {www.harewoodsolutions.co.uk} + url_path | URL path | /press.aspx | {simple} | simple | {/press.aspx} + tag | XML tag | | {} | | +(5 rows) + +SELECT * from ts_debug('english', 'http://aew.wer0c.ewr/id?ad=qwe&dw'); + alias | description | token | dictionaries | dictionary | lexemes +----------+---------------+----------------------------+--------------+------------+------------------------------ + protocol | Protocol head | http:// | {} | | + url | URL | aew.wer0c.ewr/id?ad=qwe&dw | {simple} | simple | {aew.wer0c.ewr/id?ad=qwe&dw} + host | Host | aew.wer0c.ewr | {simple} | simple | {aew.wer0c.ewr} + url_path | URL path | /id?ad=qwe&dw | {simple} | simple | {/id?ad=qwe&dw} + tag | XML tag | | {} | | +(5 rows) + +SELECT * from ts_debug('english', 'http://5aew.werc.ewr:8100/?'); + alias | description | token | dictionaries | dictionary | lexemes +----------+---------------+----------------------+--------------+------------+------------------------ + protocol | Protocol head | http:// | {} | | + url | URL | 5aew.werc.ewr:8100/? | {simple} | simple | {5aew.werc.ewr:8100/?} + host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100} + url_path | URL path | /? 
| {simple} | simple | {/?} +(4 rows) + +SELECT * from ts_debug('english', '5aew.werc.ewr:8100/?xx'); + alias | description | token | dictionaries | dictionary | lexemes +----------+-------------+------------------------+--------------+------------+-------------------------- + url | URL | 5aew.werc.ewr:8100/?xx | {simple} | simple | {5aew.werc.ewr:8100/?xx} + host | Host | 5aew.werc.ewr:8100 | {simple} | simple | {5aew.werc.ewr:8100} + url_path | URL path | /?xx | {simple} | simple | {/?xx} +(3 rows) + +SELECT token, alias, + dictionaries, dictionaries is null as dnull, array_dims(dictionaries) as ddims, + lexemes, lexemes is null as lnull, array_dims(lexemes) as ldims +from ts_debug('english', 'a title'); + token | alias | dictionaries | dnull | ddims | lexemes | lnull | ldims +-------+-----------+----------------+-------+-------+---------+-------+------- + a | asciiword | {english_stem} | f | [1:1] | {} | f | + | blank | {} | f | | | t | + title | asciiword | {english_stem} | f | [1:1] | {titl} | f | [1:1] +(3 rows) + +-- to_tsquery +SELECT to_tsquery('english', 'qwe & sKies '); + to_tsquery +--------------- + 'qwe' & 'sky' +(1 row) + +SELECT to_tsquery('simple', 'qwe & sKies '); + to_tsquery +----------------- + 'qwe' & 'skies' +(1 row) + +SELECT to_tsquery('english', '''the wether'':dc & '' sKies '':BC '); + to_tsquery +------------------------ + 'wether':CD & 'sky':BC +(1 row) + +SELECT to_tsquery('english', 'asd&(and|fghj)'); + to_tsquery +---------------- + 'asd' & 'fghj' +(1 row) + +SELECT to_tsquery('english', '(asd&and)|fghj'); + to_tsquery +---------------- + 'asd' | 'fghj' +(1 row) + +SELECT to_tsquery('english', '(asd&!and)|fghj'); + to_tsquery +---------------- + 'asd' | 'fghj' +(1 row) + +SELECT to_tsquery('english', '(the|and&(i&1))&fghj'); + to_tsquery +-------------- + '1' & 'fghj' +(1 row) + +SELECT plainto_tsquery('english', 'the and z 1))& fghj'); + plainto_tsquery +-------------------- + 'z' & '1' & 'fghj' +(1 row) + +SELECT plainto_tsquery('english', 'foo bar') && plainto_tsquery('english', 'asd'); + ?column? +----------------------- + 'foo' & 'bar' & 'asd' +(1 row) + +SELECT plainto_tsquery('english', 'foo bar') || plainto_tsquery('english', 'asd fg'); + ?column? +------------------------------ + 'foo' & 'bar' | 'asd' & 'fg' +(1 row) + +SELECT plainto_tsquery('english', 'foo bar') || !!plainto_tsquery('english', 'asd fg'); + ?column? +----------------------------------- + 'foo' & 'bar' | !( 'asd' & 'fg' ) +(1 row) + +SELECT plainto_tsquery('english', 'foo bar') && 'asd | fg'; + ?column? 
+---------------------------------- + 'foo' & 'bar' & ( 'asd' | 'fg' ) +(1 row) + +-- Check stop word deletion, a and s are stop-words +SELECT to_tsquery('english', '!(a & !b) & c'); + to_tsquery +------------- + !!'b' & 'c' +(1 row) + +SELECT to_tsquery('english', '!(a & !b)'); + to_tsquery +------------ + !!'b' +(1 row) + +SELECT to_tsquery('english', '(1 <-> 2) <-> a'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '(1 <-> a) <-> 2'); + to_tsquery +------------- + '1' <2> '2' +(1 row) + +SELECT to_tsquery('english', '(a <-> 1) <-> 2'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', 'a <-> (1 <-> 2)'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '1 <-> (a <-> 2)'); + to_tsquery +------------- + '1' <2> '2' +(1 row) + +SELECT to_tsquery('english', '1 <-> (2 <-> a)'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '(1 <-> 2) <3> a'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '(1 <-> a) <3> 2'); + to_tsquery +------------- + '1' <4> '2' +(1 row) + +SELECT to_tsquery('english', '(a <-> 1) <3> 2'); + to_tsquery +------------- + '1' <3> '2' +(1 row) + +SELECT to_tsquery('english', 'a <3> (1 <-> 2)'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '1 <3> (a <-> 2)'); + to_tsquery +------------- + '1' <4> '2' +(1 row) + +SELECT to_tsquery('english', '1 <3> (2 <-> a)'); + to_tsquery +------------- + '1' <3> '2' +(1 row) + +SELECT to_tsquery('english', '(1 <3> 2) <-> a'); + to_tsquery +------------- + '1' <3> '2' +(1 row) + +SELECT to_tsquery('english', '(1 <3> a) <-> 2'); + to_tsquery +------------- + '1' <4> '2' +(1 row) + +SELECT to_tsquery('english', '(a <3> 1) <-> 2'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', 'a <-> (1 <3> 2)'); + to_tsquery +------------- + '1' <3> '2' +(1 row) + +SELECT to_tsquery('english', '1 <-> (a <3> 2)'); + to_tsquery +------------- + '1' <4> '2' +(1 row) + +SELECT to_tsquery('english', '1 <-> (2 <3> a)'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '((a <-> 1) <-> 2) <-> s'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '(2 <-> (a <-> 1)) <-> s'); + to_tsquery +------------- + '2' <2> '1' +(1 row) + +SELECT to_tsquery('english', '((1 <-> a) <-> 2) <-> s'); + to_tsquery +------------- + '1' <2> '2' +(1 row) + +SELECT to_tsquery('english', '(2 <-> (1 <-> a)) <-> s'); + to_tsquery +------------- + '2' <-> '1' +(1 row) + +SELECT to_tsquery('english', 's <-> ((a <-> 1) <-> 2)'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', 's <-> (2 <-> (a <-> 1))'); + to_tsquery +------------- + '2' <2> '1' +(1 row) + +SELECT to_tsquery('english', 's <-> ((1 <-> a) <-> 2)'); + to_tsquery +------------- + '1' <2> '2' +(1 row) + +SELECT to_tsquery('english', 's <-> (2 <-> (1 <-> a))'); + to_tsquery +------------- + '2' <-> '1' +(1 row) + +SELECT to_tsquery('english', '((a <-> 1) <-> s) <-> 2'); + to_tsquery +------------- + '1' <2> '2' +(1 row) + +SELECT to_tsquery('english', '(s <-> (a <-> 1)) <-> 2'); + to_tsquery +------------- + '1' <-> '2' +(1 row) + +SELECT to_tsquery('english', '((1 <-> a) <-> s) <-> 2'); + to_tsquery +------------- + '1' <3> '2' +(1 row) + +SELECT to_tsquery('english', '(s <-> (1 <-> a)) <-> 2'); + to_tsquery +------------- + '1' <2> '2' +(1 row) + +SELECT to_tsquery('english', '2 <-> ((a 
<-> 1) <-> s)'); + to_tsquery +------------- + '2' <2> '1' +(1 row) + +SELECT to_tsquery('english', '2 <-> (s <-> (a <-> 1))'); + to_tsquery +------------- + '2' <3> '1' +(1 row) + +SELECT to_tsquery('english', '2 <-> ((1 <-> a) <-> s)'); + to_tsquery +------------- + '2' <-> '1' +(1 row) + +SELECT to_tsquery('english', '2 <-> (s <-> (1 <-> a))'); + to_tsquery +------------- + '2' <2> '1' +(1 row) + +SELECT to_tsquery('english', 'foo <-> (a <-> (the <-> bar))'); + to_tsquery +----------------- + 'foo' <3> 'bar' +(1 row) + +SELECT to_tsquery('english', '((foo <-> a) <-> the) <-> bar'); + to_tsquery +----------------- + 'foo' <3> 'bar' +(1 row) + +SELECT to_tsquery('english', 'foo <-> a <-> the <-> bar'); + to_tsquery +----------------- + 'foo' <3> 'bar' +(1 row) + +SELECT phraseto_tsquery('english', 'PostgreSQL can be extended by the user in many ways'); + phraseto_tsquery +----------------------------------------------------------- + 'postgresql' <3> 'extend' <3> 'user' <2> 'mani' <-> 'way' +(1 row) + +SELECT ts_rank_cd(to_tsvector('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +'), to_tsquery('english', 'paint&water')); + ts_rank_cd +------------ + 0.05 +(1 row) + +SELECT ts_rank_cd(to_tsvector('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +'), to_tsquery('english', 'breath&motion&water')); + ts_rank_cd +------------- + 0.008333334 +(1 row) + +SELECT ts_rank_cd(to_tsvector('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +'), to_tsquery('english', 'ocean')); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(to_tsvector('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +'), to_tsquery('english', 'painted <-> Ship')); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(strip(to_tsvector('both stripped')), + to_tsquery('both & stripped')); + ts_rank_cd +------------ + 0 +(1 row) + +SELECT ts_rank_cd(to_tsvector('unstripped') || strip(to_tsvector('stripped')), + to_tsquery('unstripped & stripped')); + ts_rank_cd +------------ + 0 +(1 row) + +--headline tests +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'paint&water')); + ts_headline +----------------------------------------- + painted Ocean. 
+ + Water, water, every where+ + And all the boards did shrink; + + Water, water, every +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'breath&motion&water')); + ts_headline +---------------------------------- + breath nor motion,+ + As idle as a painted Ship + + Upon a painted Ocean. + + Water, water +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'ocean')); + ts_headline +---------------------------------- + Ocean. + + Water, water, every where + + And all the boards did shrink;+ + Water, water, every where +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'day & drink')); + ts_headline +------------------------------------ + day, + + We stuck, nor breath nor motion,+ + As idle as a painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink; + + Water, water, every where, + + Nor any drop +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'day | drink')); + ts_headline +----------------------------------------------------------- + Day after day, day after day,+ + We stuck, nor breath nor motion, + + As idle as a painted +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'day | !drink')); + ts_headline +----------------------------------------------------------- + Day after day, day after day,+ + We stuck, nor breath nor motion, + + As idle as a painted +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'painted <-> Ship & drink')); + ts_headline +---------------------------------- + painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink;+ + Water, water, every where, + + Nor any drop to drink +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. 
+Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'painted <-> Ship | drink')); + ts_headline +--------------------------------- + painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'painted <-> Ship | !drink')); + ts_headline +--------------------------------- + painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', phraseto_tsquery('english', 'painted Ocean')); + ts_headline +---------------------------------- + painted Ocean. + + Water, water, every where + + And all the boards did shrink;+ + Water, water, every +(1 row) + +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', phraseto_tsquery('english', 'idle as a painted Ship')); + ts_headline +--------------------------------------------- + idle as a painted Ship+ + Upon a painted Ocean. + + Water, water, every where + + And all the boards +(1 row) + +SELECT ts_headline('english', +'Lorem ipsum urna. Nullam nullam ullamcorper urna.', +to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), +'MaxWords=100, MinWords=1'); + ts_headline +------------------------------------------------------------------------------- + Lorem ipsum urna. Nullam nullam ullamcorper urna +(1 row) + +SELECT ts_headline('english', +'Lorem ipsum urna. Nullam nullam ullamcorper urna.', +phraseto_tsquery('english','ullamcorper urna'), +'MaxWords=100, MinWords=5'); + ts_headline +------------------------------------------------------------- + urna. Nullam nullam ullamcorper urna. +(1 row) + +SELECT ts_headline('english', ' + + + +Sea view wow foo bar qq +YES   +ff-bg + + +', +to_tsquery('english', 'sea&foo'), 'HighlightAll=true'); + ts_headline +----------------------------------------------------------------------------- + + + + + + + + + Sea view wow foo bar qq + + YES  + + ff-bg + + + + + + +(1 row) + +SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=2, MinWords=1'); + ts_headline +------------------- + 1 3 +(1 row) + +SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 & 3', 'MaxWords=4, MinWords=1'); + ts_headline +--------------------- + 1 2 3 +(1 row) + +SELECT ts_headline('simple', '1 2 3 1 3'::text, '1 <-> 3', 'MaxWords=4, MinWords=1'); + ts_headline +------------------- + 1 3 +(1 row) + +--Check if headline fragments work +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. 
+Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'ocean'), 'MaxFragments=1'); + ts_headline +------------------------------------ + after day, + + We stuck, nor breath nor motion,+ + As idle as a painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink; + + Water, water, every where, + + Nor any drop +(1 row) + +--Check if more than one fragments are displayed +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2'); + ts_headline +---------------------------------------------- + after day, day after day, + + We stuck, nor breath nor motion, + + As idle as a painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink; + + Water, water, every where ... drop to drink.+ + S. T. Coleridge +(1 row) + +--Fragments when there all query words are not in the document +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'ocean & seahorse'), 'MaxFragments=1'); + ts_headline +------------------------------------ + + + Day after day, day after day, + + We stuck, nor breath nor motion,+ + As idle as +(1 row) + +--FragmentDelimiter option +SELECT ts_headline('english', ' +Day after day, day after day, + We stuck, nor breath nor motion, +As idle as a painted Ship + Upon a painted Ocean. +Water, water, every where + And all the boards did shrink; +Water, water, every where, + Nor any drop to drink. +S. T. Coleridge (1772-1834) +', to_tsquery('english', 'Coleridge & stuck'), 'MaxFragments=2,FragmentDelimiter=***'); + ts_headline +-------------------------------------------- + after day, day after day, + + We stuck, nor breath nor motion, + + As idle as a painted Ship + + Upon a painted Ocean. + + Water, water, every where + + And all the boards did shrink; + + Water, water, every where***drop to drink.+ + S. T. Coleridge +(1 row) + +--Fragments with phrase search +SELECT ts_headline('english', +'Lorem ipsum urna. Nullam nullam ullamcorper urna.', +to_tsquery('english','Lorem') && phraseto_tsquery('english','ullamcorper urna'), +'MaxFragments=100, MaxWords=100, MinWords=1'); + ts_headline +------------------------------------------------------------------------------- + Lorem ipsum urna. 
Nullam nullam ullamcorper urna +(1 row) + +-- Edge cases with empty query +SELECT ts_headline('english', +'', to_tsquery('english', '')); +NOTICE: text-search query doesn't contain lexemes: "" + ts_headline +------------- + +(1 row) + +SELECT ts_headline('english', +'foo bar', to_tsquery('english', '')); +NOTICE: text-search query doesn't contain lexemes: "" + ts_headline +------------- + foo bar +(1 row) + +--Rewrite sub system +CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT); +\set ECHO none +ALTER TABLE test_tsquery ADD COLUMN keyword tsquery; +UPDATE test_tsquery SET keyword = to_tsquery('english', txtkeyword); +ALTER TABLE test_tsquery ADD COLUMN sample tsquery; +UPDATE test_tsquery SET sample = to_tsquery('english', txtsample::text); +SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york'; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york'; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york'; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york'; + count +------- + 3 +(1 row) + +CREATE UNIQUE INDEX bt_tsq ON test_tsquery (keyword); +SET enable_seqscan=OFF; +SELECT COUNT(*) FROM test_tsquery WHERE keyword < 'new <-> york'; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword <= 'new <-> york'; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword = 'new <-> york'; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword >= 'new <-> york'; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM test_tsquery WHERE keyword > 'new <-> york'; + count +------- + 3 +(1 row) + +RESET enable_seqscan; +SELECT ts_rewrite('foo & bar & qq & new & york', 'new & york'::tsquery, 'big & apple | nyc | new & york & city'); + ts_rewrite +------------------------------------------------------------------------------ + 'foo' & 'bar' & 'qq' & ( 'city' & 'new' & 'york' | 'nyc' | 'big' & 'apple' ) +(1 row) + +SELECT ts_rewrite(ts_rewrite('new & !york ', 'york', '!jersey'), + 'jersey', 'mexico'); + ts_rewrite +-------------------- + 'new' & !!'mexico' +(1 row) + +SELECT ts_rewrite('moscow', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +--------------------- + 'moskva' | 'moscow' +(1 row) + +SELECT ts_rewrite('moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +----------------------------------- + 'hotel' & ( 'moskva' | 'moscow' ) +(1 row) + +SELECT ts_rewrite('bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +------------------------------------------------------------------------------------- + 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) +(1 row) + +SELECT ts_rewrite( 'moscow', 'SELECT keyword, sample FROM test_tsquery'); + ts_rewrite +--------------------- + 'moskva' | 'moscow' +(1 row) + +SELECT ts_rewrite( 'moscow & hotel', 'SELECT keyword, sample FROM test_tsquery'); + ts_rewrite +----------------------------------- + 'hotel' & ( 'moskva' | 'moscow' ) +(1 row) + +SELECT ts_rewrite( 'bar & qq & foo & (new <-> york)', 'SELECT keyword, sample FROM test_tsquery'); + ts_rewrite +------------------------------------------------------------------------------------- + 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 
'york' ) +(1 row) + +SELECT ts_rewrite('1 & (2 <-> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +------------- + '2' <-> '4' +(1 row) + +SELECT ts_rewrite('1 & (2 <2> 3)', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +------------------- + '1' & '2' <2> '3' +(1 row) + +SELECT ts_rewrite('5 <-> (1 & (2 <-> 3))', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +------------------------- + '5' <-> ( '2' <-> '4' ) +(1 row) + +SELECT ts_rewrite('5 <-> (6 | 8)', 'SELECT keyword, sample FROM test_tsquery'::text ); + ts_rewrite +----------------------- + '5' <-> ( '6' | '8' ) +(1 row) + +-- Check empty substitution +SELECT ts_rewrite(to_tsquery('5 & (6 | 5)'), to_tsquery('5'), to_tsquery('')); +NOTICE: text-search query doesn't contain lexemes: "" + ts_rewrite +------------ + '6' +(1 row) + +SELECT ts_rewrite(to_tsquery('!5'), to_tsquery('5'), to_tsquery('')); +NOTICE: text-search query doesn't contain lexemes: "" + ts_rewrite +------------ + +(1 row) + +SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; + keyword +------------------ + 'new' <-> 'york' +(1 row) + +SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow'; + keyword +---------- + 'moscow' +(1 row) + +SELECT keyword FROM test_tsquery WHERE keyword <@ 'new'; + keyword +--------- +(0 rows) + +SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow'; + keyword +---------- + 'moscow' +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; + ts_rewrite +--------------------- + 'moskva' | 'moscow' +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; + ts_rewrite +----------------------------------- + 'hotel' & ( 'moskva' | 'moscow' ) +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; + ts_rewrite +------------------------------------------------------------------------------------- + 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; + ts_rewrite +--------------------- + 'moskva' | 'moscow' +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; + ts_rewrite +----------------------------------- + 'hotel' & ( 'moskva' | 'moscow' ) +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; + ts_rewrite +------------------------------------------------------------------------------------- + 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) +(1 row) + +CREATE INDEX qq ON test_tsquery USING gist (keyword tsquery_ops); +SET enable_seqscan=OFF; +SELECT keyword FROM test_tsquery WHERE keyword @> 'new'; + keyword +------------------ + 'new' <-> 'york' +(1 row) + +SELECT keyword FROM test_tsquery WHERE keyword @> 'moscow'; + keyword +---------- + 'moscow' +(1 row) + +SELECT keyword FROM test_tsquery WHERE keyword <@ 'new'; + keyword +--------- +(0 rows) + +SELECT keyword FROM test_tsquery WHERE keyword <@ 'moscow'; + keyword +---------- + 'moscow' +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM 
to_tsquery('english', 'moscow') AS query; + ts_rewrite +--------------------- + 'moskva' | 'moscow' +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; + ts_rewrite +----------------------------------- + 'hotel' & ( 'moskva' | 'moscow' ) +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; + ts_rewrite +------------------------------------------------------------------------------------- + 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow') AS query; + ts_rewrite +--------------------- + 'moskva' | 'moscow' +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'moscow & hotel') AS query; + ts_rewrite +----------------------------------- + 'hotel' & ( 'moskva' | 'moscow' ) +(1 row) + +SELECT ts_rewrite( query, 'SELECT keyword, sample FROM test_tsquery' ) FROM to_tsquery('english', 'bar & qq & foo & (new <-> york)') AS query; + ts_rewrite +------------------------------------------------------------------------------------- + 'citi' & 'foo' & ( 'bar' | 'qq' ) & ( 'nyc' | 'big' <-> 'appl' | 'new' <-> 'york' ) +(1 row) + +SELECT ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); + ts_rewrite +----------------------------------------- + ( 'bar' | 'baz' ) <-> ( 'bar' | 'baz' ) +(1 row) + +SELECT to_tsvector('foo bar') @@ + ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); + ?column? +---------- + f +(1 row) + +SELECT to_tsvector('bar baz') @@ + ts_rewrite(tsquery_phrase('foo', 'foo'), 'foo', 'bar | baz'); + ?column? 
+---------- + t +(1 row) + +RESET enable_seqscan; +--test GUC +SET default_text_search_config=simple; +SELECT to_tsvector('SKIES My booKs'); + to_tsvector +---------------------------- + 'books':3 'my':2 'skies':1 +(1 row) + +SELECT plainto_tsquery('SKIES My booKs'); + plainto_tsquery +-------------------------- + 'skies' & 'my' & 'books' +(1 row) + +SELECT to_tsquery('SKIES & My | booKs'); + to_tsquery +-------------------------- + 'skies' & 'my' | 'books' +(1 row) + +SET default_text_search_config=english; +SELECT to_tsvector('SKIES My booKs'); + to_tsvector +------------------ + 'book':3 'sky':1 +(1 row) + +SELECT plainto_tsquery('SKIES My booKs'); + plainto_tsquery +----------------- + 'sky' & 'book' +(1 row) + +SELECT to_tsquery('SKIES & My | booKs'); + to_tsquery +---------------- + 'sky' | 'book' +(1 row) + +--trigger +CREATE TRIGGER tsvectorupdate +BEFORE UPDATE OR INSERT ON test_tsvector +FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(a, 'pg_catalog.english', t); +SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); + count +------- + 0 +(1 row) + +INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); +SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); + count +------- + 1 +(1 row) + +UPDATE test_tsvector SET t = null WHERE t = '345 qwerty'; +SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); + count +------- + 0 +(1 row) + +INSERT INTO test_tsvector (t) VALUES ('345 qwerty'); +SELECT count(*) FROM test_tsvector WHERE a @@ to_tsquery('345&qwerty'); + count +------- + 1 +(1 row) + +-- Test inlining of immutable constant functions +-- to_tsquery(text) is not immutable, so it won't be inlined +explain (costs off) +select * from test_tsquery, to_tsquery('new') q where txtsample @@ q; + QUERY PLAN +------------------------------------------------ + Nested Loop + Join Filter: (test_tsquery.txtsample @@ q.q) + -> Function Scan on to_tsquery q + -> Seq Scan on test_tsquery +(4 rows) + +-- to_tsquery(regconfig, text) is an immutable function. +-- That allows us to get rid of using function scan and join at all. 
+explain (costs off) +select * from test_tsquery, to_tsquery('english', 'new') q where txtsample @@ q; + QUERY PLAN +--------------------------------------------- + Seq Scan on test_tsquery + Filter: (txtsample @@ '''new'''::tsquery) +(2 rows) + +-- test finding items in GIN's pending list +create temp table pendtest (ts tsvector); +create index pendtest_idx on pendtest using gin(ts); +insert into pendtest values (to_tsvector('Lore ipsam')); +insert into pendtest values (to_tsvector('Lore ipsum')); +select * from pendtest where 'ipsu:*'::tsquery @@ ts; + ts +-------------------- + 'ipsum':2 'lore':1 +(1 row) + +select * from pendtest where 'ipsa:*'::tsquery @@ ts; + ts +-------------------- + 'ipsam':2 'lore':1 +(1 row) + +select * from pendtest where 'ips:*'::tsquery @@ ts; + ts +-------------------- + 'ipsam':2 'lore':1 + 'ipsum':2 'lore':1 +(2 rows) + +select * from pendtest where 'ipt:*'::tsquery @@ ts; + ts +---- +(0 rows) + +select * from pendtest where 'ipi:*'::tsquery @@ ts; + ts +---- +(0 rows) + +--check OP_PHRASE on index +create temp table phrase_index_test(fts tsvector); +insert into phrase_index_test values ('A fat cat has just eaten a rat.'); +insert into phrase_index_test values (to_tsvector('english', 'A fat cat has just eaten a rat.')); +create index phrase_index_test_idx on phrase_index_test using gin(fts); +set enable_seqscan = off; +select * from phrase_index_test where fts @@ phraseto_tsquery('english', 'fat cat'); + fts +----------------------------------- + 'cat':3 'eaten':6 'fat':2 'rat':8 +(1 row) + +set enable_seqscan = on; +-- test websearch_to_tsquery function +select websearch_to_tsquery('simple', 'I have a fat:*ABCD cat'); + websearch_to_tsquery +--------------------------------------------- + 'i' & 'have' & 'a' & 'fat' & 'abcd' & 'cat' +(1 row) + +select websearch_to_tsquery('simple', 'orange:**AABBCCDD'); + websearch_to_tsquery +----------------------- + 'orange' & 'aabbccdd' +(1 row) + +select websearch_to_tsquery('simple', 'fat:A!cat:B|rat:C<'); + websearch_to_tsquery +----------------------------------------- + 'fat' & 'a' & 'cat' & 'b' & 'rat' & 'c' +(1 row) + +select websearch_to_tsquery('simple', 'fat:A : cat:B'); + websearch_to_tsquery +--------------------------- + 'fat' & 'a' & 'cat' & 'b' +(1 row) + +select websearch_to_tsquery('simple', 'fat*rat'); + websearch_to_tsquery +---------------------- + 'fat' <-> 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat-rat'); + websearch_to_tsquery +------------------------------- + 'fat-rat' <-> 'fat' <-> 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat_rat'); + websearch_to_tsquery +---------------------- + 'fat' <-> 'rat' +(1 row) + +-- weights are completely ignored +select websearch_to_tsquery('simple', 'abc : def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc:def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'a:::b'); + websearch_to_tsquery +---------------------- + 'a' & 'b' +(1 row) + +select websearch_to_tsquery('simple', 'abc:d'); + websearch_to_tsquery +---------------------- + 'abc' & 'd' +(1 row) + +select websearch_to_tsquery('simple', ':'); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + +-- these operators are ignored +select websearch_to_tsquery('simple', 'abc & def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' 
+(1 row) + +select websearch_to_tsquery('simple', 'abc | def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc <-> def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('simple', 'abc (pg or class)'); + websearch_to_tsquery +------------------------ + 'abc' & 'pg' | 'class' +(1 row) + +-- NOT is ignored in quotes +select websearch_to_tsquery('english', 'My brand new smartphone'); + websearch_to_tsquery +------------------------------- + 'brand' & 'new' & 'smartphon' +(1 row) + +select websearch_to_tsquery('english', 'My brand "new smartphone"'); + websearch_to_tsquery +--------------------------------- + 'brand' & 'new' <-> 'smartphon' +(1 row) + +select websearch_to_tsquery('english', 'My brand "new -smartphone"'); + websearch_to_tsquery +--------------------------------- + 'brand' & 'new' <-> 'smartphon' +(1 row) + +-- test OR operator +select websearch_to_tsquery('simple', 'cat or rat'); + websearch_to_tsquery +---------------------- + 'cat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'cat OR rat'); + websearch_to_tsquery +---------------------- + 'cat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'cat "OR" rat'); + websearch_to_tsquery +---------------------- + 'cat' & 'or' & 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'cat OR'); + websearch_to_tsquery +---------------------- + 'cat' & 'or' +(1 row) + +select websearch_to_tsquery('simple', 'OR rat'); + websearch_to_tsquery +---------------------- + 'or' & 'rat' +(1 row) + +select websearch_to_tsquery('simple', '"fat cat OR rat"'); + websearch_to_tsquery +------------------------------------ + 'fat' <-> 'cat' <-> 'or' <-> 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat (cat OR rat'); + websearch_to_tsquery +----------------------- + 'fat' & 'cat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'or OR or'); + websearch_to_tsquery +---------------------- + 'or' | 'or' +(1 row) + +-- OR is an operator here ... +select websearch_to_tsquery('simple', '"fat cat"or"fat rat"'); + websearch_to_tsquery +----------------------------------- + 'fat' <-> 'cat' | 'fat' <-> 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or(rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or)rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or&rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or|rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or!rat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat orrat'); + websearch_to_tsquery +---------------------- + 'fat' | 'rat' +(1 row) + +select websearch_to_tsquery('simple', 'fat or '); + websearch_to_tsquery +---------------------- + 'fat' & 'or' +(1 row) + +-- ... 
but not here +select websearch_to_tsquery('simple', 'abc orange'); + websearch_to_tsquery +---------------------- + 'abc' & 'orange' +(1 row) + +select websearch_to_tsquery('simple', 'abc OR1234'); + websearch_to_tsquery +---------------------- + 'abc' & 'or1234' +(1 row) + +select websearch_to_tsquery('simple', 'abc or-abc'); + websearch_to_tsquery +------------------------------------- + 'abc' & 'or-abc' <-> 'or' <-> 'abc' +(1 row) + +select websearch_to_tsquery('simple', 'abc OR_abc'); + websearch_to_tsquery +------------------------ + 'abc' & 'or' <-> 'abc' +(1 row) + +-- test quotes +select websearch_to_tsquery('english', '"pg_class pg'); + websearch_to_tsquery +--------------------------- + 'pg' <-> 'class' <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', 'pg_class pg"'); + websearch_to_tsquery +------------------------- + 'pg' <-> 'class' & 'pg' +(1 row) + +select websearch_to_tsquery('english', '"pg_class pg"'); + websearch_to_tsquery +--------------------------- + 'pg' <-> 'class' <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', '"pg_class : pg"'); + websearch_to_tsquery +--------------------------- + 'pg' <-> 'class' <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', 'abc "pg_class pg"'); + websearch_to_tsquery +----------------------------------- + 'abc' & 'pg' <-> 'class' <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', '"pg_class pg" def'); + websearch_to_tsquery +----------------------------------- + 'pg' <-> 'class' <-> 'pg' & 'def' +(1 row) + +select websearch_to_tsquery('english', 'abc "pg pg_class pg" def'); + websearch_to_tsquery +---------------------------------------------------- + 'abc' & 'pg' <-> 'pg' <-> 'class' <-> 'pg' & 'def' +(1 row) + +select websearch_to_tsquery('english', ' or "pg pg_class pg" or '); + websearch_to_tsquery +------------------------------------ + 'pg' <-> 'pg' <-> 'class' <-> 'pg' +(1 row) + +select websearch_to_tsquery('english', '""pg pg_class pg""'); + websearch_to_tsquery +-------------------------------- + 'pg' & 'pg' <-> 'class' & 'pg' +(1 row) + +select websearch_to_tsquery('english', 'abc """"" def'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('english', 'cat -"fat rat"'); + websearch_to_tsquery +------------------------------ + 'cat' & !( 'fat' <-> 'rat' ) +(1 row) + +select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); + websearch_to_tsquery +---------------------------------------- + 'cat' & !( 'fat' <-> 'rat' ) & 'chees' +(1 row) + +select websearch_to_tsquery('english', 'abc "def -"'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('english', 'abc "def :"'); + websearch_to_tsquery +---------------------- + 'abc' & 'def' +(1 row) + +select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); + websearch_to_tsquery +------------------------------------ + 'fat' <-> 'cat' & 'eaten' & !'rat' +(1 row) + +select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); + websearch_to_tsquery +----------------------------------- + 'fat' <-> 'cat' & 'eaten' | 'rat' +(1 row) + +select websearch_to_tsquery('english', '"A fat cat" has just (+eaten OR -rat)'); + websearch_to_tsquery +------------------------------------ + 'fat' <-> 'cat' & 'eaten' | !'rat' +(1 row) + +select websearch_to_tsquery('english', 'this is ----fine'); + websearch_to_tsquery +---------------------- + !!!!'fine' +(1 row) + +select websearch_to_tsquery('english', '(()) 
)))) this ||| is && -fine, "dear friend" OR good'); + websearch_to_tsquery +---------------------------------------- + !'fine' & 'dear' <-> 'friend' | 'good' +(1 row) + +select websearch_to_tsquery('english', 'an old <-> cat " is fine &&& too'); + websearch_to_tsquery +------------------------ + 'old' & 'cat' & 'fine' +(1 row) + +select websearch_to_tsquery('english', '"A the" OR just on'); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + +select websearch_to_tsquery('english', '"a fat cat" ate a rat'); + websearch_to_tsquery +--------------------------------- + 'fat' <-> 'cat' & 'ate' & 'rat' +(1 row) + +select to_tsvector('english', 'A fat cat ate a rat') @@ + websearch_to_tsquery('english', '"a fat cat" ate a rat'); + ?column? +---------- + t +(1 row) + +select to_tsvector('english', 'A fat grey cat ate a rat') @@ + websearch_to_tsquery('english', '"a fat cat" ate a rat'); + ?column? +---------- + f +(1 row) + +-- cases handled by gettoken_tsvector() +select websearch_to_tsquery(''''); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + +select websearch_to_tsquery('''abc''''def'''); + websearch_to_tsquery +---------------------- + 'abc' <-> 'def' +(1 row) + +select websearch_to_tsquery('\abc'); + websearch_to_tsquery +---------------------- + 'abc' +(1 row) + +select websearch_to_tsquery('\'); +NOTICE: text-search query contains only stop words or doesn't contain lexemes, ignored + websearch_to_tsquery +---------------------- + +(1 row) + diff --git a/src/test/regress/expected/tsrf.out b/src/test/regress/expected/tsrf.out new file mode 100644 index 0000000..d47b5f6 --- /dev/null +++ b/src/test/regress/expected/tsrf.out @@ -0,0 +1,712 @@ +-- +-- tsrf - targetlist set returning function tests +-- +-- simple srf +SELECT generate_series(1, 3); + generate_series +----------------- + 1 + 2 + 3 +(3 rows) + +-- parallel iteration +SELECT generate_series(1, 3), generate_series(3,5); + generate_series | generate_series +-----------------+----------------- + 1 | 3 + 2 | 4 + 3 | 5 +(3 rows) + +-- parallel iteration, different number of rows +SELECT generate_series(1, 2), generate_series(1,4); + generate_series | generate_series +-----------------+----------------- + 1 | 1 + 2 | 2 + | 3 + | 4 +(4 rows) + +-- srf, with SRF argument +SELECT generate_series(1, generate_series(1, 3)); + generate_series +----------------- + 1 + 1 + 2 + 1 + 2 + 3 +(6 rows) + +-- but we've traditionally rejected the same in FROM +SELECT * FROM generate_series(1, generate_series(1, 3)); +ERROR: set-returning functions must appear at top level of FROM +LINE 1: SELECT * FROM generate_series(1, generate_series(1, 3)); + ^ +-- srf, with two SRF arguments +SELECT generate_series(generate_series(1,3), generate_series(2, 4)); + generate_series +----------------- + 1 + 2 + 2 + 3 + 3 + 4 +(6 rows) + +-- check proper nesting of SRFs in different expressions +explain (verbose, costs off) +SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); + QUERY PLAN +-------------------------------------------------------------------------------- + ProjectSet + Output: generate_series(1, (generate_series(1, 3))), (generate_series(2, 4)) + -> ProjectSet + Output: generate_series(1, 3), generate_series(2, 4) + -> Result +(5 rows) + +SELECT generate_series(1, generate_series(1, 3)), generate_series(2, 4); + generate_series | 
generate_series +-----------------+----------------- + 1 | 2 + 1 | 3 + 2 | 3 + 1 | 4 + 2 | 4 + 3 | 4 +(6 rows) + +CREATE TABLE few(id int, dataa text, datab text); +INSERT INTO few VALUES(1, 'a', 'foo'),(2, 'a', 'bar'),(3, 'b', 'bar'); +-- SRF with a provably-dummy relation +explain (verbose, costs off) +SELECT unnest(ARRAY[1, 2]) FROM few WHERE false; + QUERY PLAN +-------------------------------------- + ProjectSet + Output: unnest('{1,2}'::integer[]) + -> Result + One-Time Filter: false +(4 rows) + +SELECT unnest(ARRAY[1, 2]) FROM few WHERE false; + unnest +-------- +(0 rows) + +-- SRF shouldn't prevent upper query from recognizing lower as dummy +explain (verbose, costs off) +SELECT * FROM few f1, + (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss; + QUERY PLAN +------------------------------------------------ + Result + Output: f1.id, f1.dataa, f1.datab, ss.unnest + One-Time Filter: false +(3 rows) + +SELECT * FROM few f1, + (SELECT unnest(ARRAY[1,2]) FROM few f2 WHERE false OFFSET 0) ss; + id | dataa | datab | unnest +----+-------+-------+-------- +(0 rows) + +-- SRF output order of sorting is maintained, if SRF is not referenced +SELECT few.id, generate_series(1,3) g FROM few ORDER BY id DESC; + id | g +----+--- + 3 | 1 + 3 | 2 + 3 | 3 + 2 | 1 + 2 | 2 + 2 | 3 + 1 | 1 + 1 | 2 + 1 | 3 +(9 rows) + +-- but SRFs can be referenced in sort +SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, g DESC; + id | g +----+--- + 1 | 3 + 1 | 2 + 1 | 1 + 2 | 3 + 2 | 2 + 2 | 1 + 3 | 3 + 3 | 2 + 3 | 1 +(9 rows) + +SELECT few.id, generate_series(1,3) g FROM few ORDER BY id, generate_series(1,3) DESC; + id | g +----+--- + 1 | 3 + 1 | 2 + 1 | 1 + 2 | 3 + 2 | 2 + 2 | 1 + 3 | 3 + 3 | 2 + 3 | 1 +(9 rows) + +-- it's weird to have ORDER BYs that increase the number of results +SELECT few.id FROM few ORDER BY id, generate_series(1,3) DESC; + id +---- + 1 + 1 + 1 + 2 + 2 + 2 + 3 + 3 + 3 +(9 rows) + +-- SRFs are computed after aggregation +SET enable_hashagg TO 0; -- stable output order +SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa; + dataa | count | min | max | unnest +-------+-------+-----+-----+-------- + a | 1 | 1 | 1 | 1 + a | 1 | 1 | 1 | 1 + a | 1 | 1 | 1 | 3 +(3 rows) + +-- unless referenced in GROUP BY clause +SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, unnest('{1,1,3}'::int[]); + dataa | count | min | max | unnest +-------+-------+-----+-----+-------- + a | 2 | 1 | 1 | 1 + a | 1 | 1 | 1 | 3 +(2 rows) + +SELECT few.dataa, count(*), min(id), max(id), unnest('{1,1,3}'::int[]) FROM few WHERE few.id = 1 GROUP BY few.dataa, 5; + dataa | count | min | max | unnest +-------+-------+-----+-----+-------- + a | 2 | 1 | 1 | 1 + a | 1 | 1 | 1 | 3 +(2 rows) + +RESET enable_hashagg; +-- check HAVING works when GROUP BY does [not] reference SRF output +SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1 HAVING count(*) > 1; + dataa | generate_series | count +-------+-----------------+------- + a | 1 | 2 +(1 row) + +SELECT dataa, generate_series(1,1), count(*) FROM few GROUP BY 1, 2 HAVING count(*) > 1; + dataa | generate_series | count +-------+-----------------+------- + a | 1 | 2 +(1 row) + +-- it's weird to have GROUP BYs that increase the number of results +SELECT few.dataa, count(*) FROM few WHERE dataa = 'a' GROUP BY few.dataa ORDER BY 2; + dataa | count +-------+------- + a | 2 +(1 row) + +SELECT few.dataa, count(*) FROM few WHERE dataa 
= 'a' GROUP BY few.dataa, unnest('{1,1,3}'::int[]) ORDER BY 2; + dataa | count +-------+------- + a | 2 + a | 4 +(2 rows) + +-- SRFs are not allowed if they'd need to be conditionally executed +SELECT q1, case when q1 > 0 then generate_series(1,3) else 0 end FROM int8_tbl; +ERROR: set-returning functions are not allowed in CASE +LINE 1: SELECT q1, case when q1 > 0 then generate_series(1,3) else 0... + ^ +HINT: You might be able to move the set-returning function into a LATERAL FROM item. +SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl; +ERROR: set-returning functions are not allowed in COALESCE +LINE 1: SELECT q1, coalesce(generate_series(1,3), 0) FROM int8_tbl; + ^ +HINT: You might be able to move the set-returning function into a LATERAL FROM item. +-- SRFs are not allowed in aggregate arguments +SELECT min(generate_series(1, 3)) FROM few; +ERROR: aggregate function calls cannot contain set-returning function calls +LINE 1: SELECT min(generate_series(1, 3)) FROM few; + ^ +HINT: You might be able to move the set-returning function into a LATERAL FROM item. +-- ... unless they're within a sub-select +SELECT sum((3 = ANY(SELECT generate_series(1,4)))::int); + sum +----- + 1 +(1 row) + +SELECT sum((3 = ANY(SELECT lag(x) over(order by x) + FROM generate_series(1,4) x))::int); + sum +----- + 1 +(1 row) + +-- SRFs are not allowed in window function arguments, either +SELECT min(generate_series(1, 3)) OVER() FROM few; +ERROR: window function calls cannot contain set-returning function calls +LINE 1: SELECT min(generate_series(1, 3)) OVER() FROM few; + ^ +HINT: You might be able to move the set-returning function into a LATERAL FROM item. +-- SRFs are normally computed after window functions +SELECT id,lag(id) OVER(), count(*) OVER(), generate_series(1,3) FROM few; + id | lag | count | generate_series +----+-----+-------+----------------- + 1 | | 3 | 1 + 1 | | 3 | 2 + 1 | | 3 | 3 + 2 | 1 | 3 | 1 + 2 | 1 | 3 | 2 + 2 | 1 | 3 | 3 + 3 | 2 | 3 | 1 + 3 | 2 | 3 | 2 + 3 | 2 | 3 | 3 +(9 rows) + +-- unless referencing SRFs +SELECT SUM(count(*)) OVER(PARTITION BY generate_series(1,3) ORDER BY generate_series(1,3)), generate_series(1,3) g FROM few GROUP BY g; + sum | g +-----+--- + 3 | 1 + 3 | 2 + 3 | 3 +(3 rows) + +-- sorting + grouping +SELECT few.dataa, count(*), min(id), max(id), generate_series(1,3) FROM few GROUP BY few.dataa ORDER BY 5, 1; + dataa | count | min | max | generate_series +-------+-------+-----+-----+----------------- + a | 2 | 1 | 2 | 1 + b | 1 | 3 | 3 | 1 + a | 2 | 1 | 2 | 2 + b | 1 | 3 | 3 | 2 + a | 2 | 1 | 2 | 3 + b | 1 | 3 | 3 | 3 +(6 rows) + +-- grouping sets are a bit special, they produce NULLs in columns not actually NULL +set enable_hashagg = false; +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab); + dataa | b | g | count +-------+-----+---+------- + a | bar | 1 | 1 + a | bar | 2 | 1 + a | foo | 1 | 1 + a | foo | 2 | 1 + a | | 1 | 2 + a | | 2 | 2 + b | bar | 1 | 1 + b | bar | 2 | 1 + b | | 1 | 1 + b | | 2 | 1 + | | 1 | 3 + | | 2 | 3 + | bar | 1 | 2 + | bar | 2 | 2 + | foo | 1 | 1 + | foo | 2 | 1 +(16 rows) + +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa; + dataa | b | g | count +-------+-----+---+------- + a | bar | 1 | 1 + a | bar | 2 | 1 + a | foo | 1 | 1 + a | foo | 2 | 1 + a | | 1 | 2 + a | | 2 | 2 + b | bar | 1 | 1 + b | bar | 2 | 1 + b | | 1 | 1 + b | | 2 | 1 + | | 1 | 3 + | | 2 | 3 + | bar | 1 | 2 + | bar | 2 | 2 + | foo | 1 | 1 + | foo | 2 | 1 +(16 
rows) + +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g; + dataa | b | g | count +-------+-----+---+------- + a | bar | 1 | 1 + a | foo | 1 | 1 + a | | 1 | 2 + b | bar | 1 | 1 + b | | 1 | 1 + | | 1 | 3 + | bar | 1 | 2 + | foo | 1 | 1 + | foo | 2 | 1 + a | bar | 2 | 1 + b | | 2 | 1 + a | foo | 2 | 1 + | bar | 2 | 2 + a | | 2 | 2 + | | 2 | 3 + b | bar | 2 | 1 +(16 rows) + +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g); + dataa | b | g | count +-------+-----+---+------- + a | bar | 1 | 1 + a | bar | 2 | 1 + a | bar | | 2 + a | foo | 1 | 1 + a | foo | 2 | 1 + a | foo | | 2 + a | | | 4 + b | bar | 1 | 1 + b | bar | 2 | 1 + b | bar | | 2 + b | | | 2 + | | | 6 + | bar | 1 | 2 + | bar | 2 | 2 + | bar | | 4 + | foo | 1 | 1 + | foo | 2 | 1 + | foo | | 2 + a | | 1 | 2 + b | | 1 | 1 + | | 1 | 3 + a | | 2 | 2 + b | | 2 | 1 + | | 2 | 3 +(24 rows) + +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa; + dataa | b | g | count +-------+-----+---+------- + a | foo | | 2 + a | | | 4 + a | | 2 | 2 + a | bar | 1 | 1 + a | bar | 2 | 1 + a | bar | | 2 + a | foo | 1 | 1 + a | foo | 2 | 1 + a | | 1 | 2 + b | bar | 1 | 1 + b | | | 2 + b | | 1 | 1 + b | bar | 2 | 1 + b | bar | | 2 + b | | 2 | 1 + | | 2 | 3 + | | | 6 + | bar | 1 | 2 + | bar | 2 | 2 + | bar | | 4 + | foo | 1 | 1 + | foo | 2 | 1 + | foo | | 2 + | | 1 | 3 +(24 rows) + +SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g; + dataa | b | g | count +-------+-----+---+------- + a | bar | 1 | 1 + a | foo | 1 | 1 + b | bar | 1 | 1 + | bar | 1 | 2 + | foo | 1 | 1 + a | | 1 | 2 + b | | 1 | 1 + | | 1 | 3 + a | | 2 | 2 + b | | 2 | 1 + | bar | 2 | 2 + | | 2 | 3 + | foo | 2 | 1 + a | bar | 2 | 1 + a | foo | 2 | 1 + b | bar | 2 | 1 + a | | | 4 + b | bar | | 2 + b | | | 2 + | | | 6 + a | foo | | 2 + a | bar | | 2 + | bar | | 4 + | foo | | 2 +(24 rows) + +reset enable_hashagg; +-- case with degenerate ORDER BY +explain (verbose, costs off) +select 'foo' as f, generate_series(1,2) as g from few order by 1; + QUERY PLAN +---------------------------------------------- + ProjectSet + Output: 'foo'::text, generate_series(1, 2) + -> Seq Scan on public.few + Output: id, dataa, datab +(4 rows) + +select 'foo' as f, generate_series(1,2) as g from few order by 1; + f | g +-----+--- + foo | 1 + foo | 2 + foo | 1 + foo | 2 + foo | 1 + foo | 2 +(6 rows) + +-- data modification +CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data; +INSERT INTO fewmore VALUES(generate_series(4,5)); +SELECT * FROM fewmore; + data +------ + 1 + 2 + 3 + 4 + 5 +(5 rows) + +-- SRFs are not allowed in UPDATE (they once were, but it was nonsense) +UPDATE fewmore SET data = generate_series(4,9); +ERROR: set-returning functions are not allowed in UPDATE +LINE 1: UPDATE fewmore SET data = generate_series(4,9); + ^ +-- SRFs are not allowed in RETURNING +INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3); +ERROR: set-returning functions are not allowed in RETURNING +LINE 1: INSERT INTO fewmore VALUES(1) RETURNING generate_series(1,3)... + ^ +-- nor standalone VALUES (but surely this is a bug?) 
+VALUES(1, generate_series(1,2)); +ERROR: set-returning functions are not allowed in VALUES +LINE 1: VALUES(1, generate_series(1,2)); + ^ +-- We allow tSRFs that are not at top level +SELECT int4mul(generate_series(1,2), 10); + int4mul +--------- + 10 + 20 +(2 rows) + +SELECT generate_series(1,3) IS DISTINCT FROM 2; + ?column? +---------- + t + f + t +(3 rows) + +-- but SRFs in function RTEs must be at top level (annoying restriction) +SELECT * FROM int4mul(generate_series(1,2), 10); +ERROR: set-returning functions must appear at top level of FROM +LINE 1: SELECT * FROM int4mul(generate_series(1,2), 10); + ^ +-- DISTINCT ON is evaluated before tSRF evaluation if SRF is not +-- referenced either in ORDER BY or in the DISTINCT ON list. The ORDER +-- BY reference can be implicitly generated, if there's no other ORDER BY. +-- implicit reference (via implicit ORDER) to all columns +SELECT DISTINCT ON (a) a, b, generate_series(1,3) g +FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); + a | b | g +---+---+--- + 1 | 1 | 1 + 3 | 2 | 1 + 5 | 3 | 1 +(3 rows) + +-- unreferenced in DISTINCT ON or ORDER BY +SELECT DISTINCT ON (a) a, b, generate_series(1,3) g +FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) +ORDER BY a, b DESC; + a | b | g +---+---+--- + 1 | 4 | 1 + 1 | 4 | 2 + 1 | 4 | 3 + 3 | 2 | 1 + 3 | 2 | 2 + 3 | 2 | 3 + 5 | 3 | 1 + 5 | 3 | 2 + 5 | 3 | 3 +(9 rows) + +-- referenced in ORDER BY +SELECT DISTINCT ON (a) a, b, generate_series(1,3) g +FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) +ORDER BY a, b DESC, g DESC; + a | b | g +---+---+--- + 1 | 4 | 3 + 3 | 2 | 3 + 5 | 3 | 3 +(3 rows) + +-- referenced in ORDER BY and DISTINCT ON +SELECT DISTINCT ON (a, b, g) a, b, generate_series(1,3) g +FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b) +ORDER BY a, b DESC, g DESC; + a | b | g +---+---+--- + 1 | 4 | 3 + 1 | 4 | 2 + 1 | 4 | 1 + 1 | 1 | 3 + 1 | 1 | 2 + 1 | 1 | 1 + 3 | 2 | 3 + 3 | 2 | 2 + 3 | 2 | 1 + 3 | 1 | 3 + 3 | 1 | 2 + 3 | 1 | 1 + 5 | 3 | 3 + 5 | 3 | 2 + 5 | 3 | 1 + 5 | 1 | 3 + 5 | 1 | 2 + 5 | 1 | 1 +(18 rows) + +-- only SRF mentioned in DISTINCT ON +SELECT DISTINCT ON (g) a, b, generate_series(1,3) g +FROM (VALUES (3, 2), (3,1), (1,1), (1,4), (5,3), (5,1)) AS t(a, b); + a | b | g +---+---+--- + 3 | 2 | 1 + 5 | 1 | 2 + 3 | 1 | 3 +(3 rows) + +-- LIMIT / OFFSET is evaluated after SRF evaluation +SELECT a, generate_series(1,2) FROM (VALUES(1),(2),(3)) r(a) LIMIT 2 OFFSET 2; + a | generate_series +---+----------------- + 2 | 1 + 2 | 2 +(2 rows) + +-- SRFs are not allowed in LIMIT. +SELECT 1 LIMIT generate_series(1,3); +ERROR: set-returning functions are not allowed in LIMIT +LINE 1: SELECT 1 LIMIT generate_series(1,3); + ^ +-- tSRF in correlated subquery, referencing table outside +SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET few.id) FROM few; + generate_series +----------------- + 2 + 3 + +(3 rows) + +-- tSRF in correlated subquery, referencing SRF outside +SELECT (SELECT generate_series(1,3) LIMIT 1 OFFSET g.i) FROM generate_series(0,3) g(i); + generate_series +----------------- + 1 + 2 + 3 + +(4 rows) + +-- Operators can return sets too +CREATE OPERATOR |@| (PROCEDURE = unnest, RIGHTARG = ANYARRAY); +SELECT |@|ARRAY[1,2,3]; + ?column? 
+---------- + 1 + 2 + 3 +(3 rows) + +-- Some fun cases involving duplicate SRF calls +explain (verbose, costs off) +select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; + QUERY PLAN +------------------------------------------------------------------ + Result + Output: (generate_series(1, 3)), ((generate_series(1, 3)) + 1) + -> ProjectSet + Output: generate_series(1, 3) + -> Result +(5 rows) + +select generate_series(1,3) as x, generate_series(1,3) + 1 as xp1; + x | xp1 +---+----- + 1 | 2 + 2 | 3 + 3 | 4 +(3 rows) + +explain (verbose, costs off) +select generate_series(1,3)+1 order by generate_series(1,3); + QUERY PLAN +------------------------------------------------------------------------ + Sort + Output: (((generate_series(1, 3)) + 1)), (generate_series(1, 3)) + Sort Key: (generate_series(1, 3)) + -> Result + Output: ((generate_series(1, 3)) + 1), (generate_series(1, 3)) + -> ProjectSet + Output: generate_series(1, 3) + -> Result +(8 rows) + +select generate_series(1,3)+1 order by generate_series(1,3); + ?column? +---------- + 2 + 3 + 4 +(3 rows) + +-- Check that SRFs of same nesting level run in lockstep +explain (verbose, costs off) +select generate_series(1,3) as x, generate_series(3,6) + 1 as y; + QUERY PLAN +------------------------------------------------------------------ + Result + Output: (generate_series(1, 3)), ((generate_series(3, 6)) + 1) + -> ProjectSet + Output: generate_series(1, 3), generate_series(3, 6) + -> Result +(5 rows) + +select generate_series(1,3) as x, generate_series(3,6) + 1 as y; + x | y +---+--- + 1 | 4 + 2 | 5 + 3 | 6 + | 7 +(4 rows) + +-- Clean up +DROP TABLE few; +DROP TABLE fewmore; diff --git a/src/test/regress/expected/tstypes.out b/src/test/regress/expected/tstypes.out new file mode 100644 index 0000000..4cfc3b9 --- /dev/null +++ b/src/test/regress/expected/tstypes.out @@ -0,0 +1,1444 @@ +-- deal with numeric instability of ts_rank +SET extra_float_digits = 0; +--Base tsvector test +SELECT '1'::tsvector; + tsvector +---------- + '1' +(1 row) + +SELECT '1 '::tsvector; + tsvector +---------- + '1' +(1 row) + +SELECT ' 1'::tsvector; + tsvector +---------- + '1' +(1 row) + +SELECT ' 1 '::tsvector; + tsvector +---------- + '1' +(1 row) + +SELECT '1 2'::tsvector; + tsvector +---------- + '1' '2' +(1 row) + +SELECT '''1 2'''::tsvector; + tsvector +---------- + '1 2' +(1 row) + +SELECT E'''1 \\''2'''::tsvector; + tsvector +---------- + '1 ''2' +(1 row) + +SELECT E'''1 \\''2''3'::tsvector; + tsvector +------------- + '1 ''2' '3' +(1 row) + +SELECT E'''1 \\''2'' 3'::tsvector; + tsvector +------------- + '1 ''2' '3' +(1 row) + +SELECT E'''1 \\''2'' '' 3'' 4 '::tsvector; + tsvector +------------------ + ' 3' '1 ''2' '4' +(1 row) + +SELECT $$'\\as' ab\c ab\\c AB\\\c ab\\\\c$$::tsvector; + tsvector +---------------------------------------- + 'AB\\c' '\\as' 'ab\\\\c' 'ab\\c' 'abc' +(1 row) + +SELECT tsvectorin(tsvectorout($$'\\as' ab\c ab\\c AB\\\c ab\\\\c$$::tsvector)); + tsvectorin +---------------------------------------- + 'AB\\c' '\\as' 'ab\\\\c' 'ab\\c' 'abc' +(1 row) + +SELECT '''w'':4A,3B,2C,1D,5 a:8'; + ?column? +----------------------- + 'w':4A,3B,2C,1D,5 a:8 +(1 row) + +SELECT 'a:3A b:2a'::tsvector || 'ba:1234 a:1B'; + ?column? 
+---------------------------- + 'a':3A,4B 'b':2A 'ba':1237 +(1 row) + +SELECT $$'' '1' '2'$$::tsvector; -- error, empty lexeme is not allowed +ERROR: syntax error in tsvector: "'' '1' '2'" +LINE 1: SELECT $$'' '1' '2'$$::tsvector; + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('foo', 'tsvector'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid($$''$$, 'tsvector'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info($$''$$, 'tsvector'); + message | detail | hint | sql_error_code +--------------------------------+--------+------+---------------- + syntax error in tsvector: "''" | | | 42601 +(1 row) + +--Base tsquery test +SELECT '1'::tsquery; + tsquery +--------- + '1' +(1 row) + +SELECT '1 '::tsquery; + tsquery +--------- + '1' +(1 row) + +SELECT ' 1'::tsquery; + tsquery +--------- + '1' +(1 row) + +SELECT ' 1 '::tsquery; + tsquery +--------- + '1' +(1 row) + +SELECT '''1 2'''::tsquery; + tsquery +--------- + '1 2' +(1 row) + +SELECT E'''1 \\''2'''::tsquery; + tsquery +--------- + '1 ''2' +(1 row) + +SELECT '!1'::tsquery; + tsquery +--------- + !'1' +(1 row) + +SELECT '1|2'::tsquery; + tsquery +----------- + '1' | '2' +(1 row) + +SELECT '1|!2'::tsquery; + tsquery +------------ + '1' | !'2' +(1 row) + +SELECT '!1|2'::tsquery; + tsquery +------------ + !'1' | '2' +(1 row) + +SELECT '!1|!2'::tsquery; + tsquery +------------- + !'1' | !'2' +(1 row) + +SELECT '!(!1|!2)'::tsquery; + tsquery +------------------ + !( !'1' | !'2' ) +(1 row) + +SELECT '!(!1|2)'::tsquery; + tsquery +----------------- + !( !'1' | '2' ) +(1 row) + +SELECT '!(1|!2)'::tsquery; + tsquery +----------------- + !( '1' | !'2' ) +(1 row) + +SELECT '!(1|2)'::tsquery; + tsquery +---------------- + !( '1' | '2' ) +(1 row) + +SELECT '1&2'::tsquery; + tsquery +----------- + '1' & '2' +(1 row) + +SELECT '!1&2'::tsquery; + tsquery +------------ + !'1' & '2' +(1 row) + +SELECT '1&!2'::tsquery; + tsquery +------------ + '1' & !'2' +(1 row) + +SELECT '!1&!2'::tsquery; + tsquery +------------- + !'1' & !'2' +(1 row) + +SELECT '(1&2)'::tsquery; + tsquery +----------- + '1' & '2' +(1 row) + +SELECT '1&(2)'::tsquery; + tsquery +----------- + '1' & '2' +(1 row) + +SELECT '!(1)&2'::tsquery; + tsquery +------------ + !'1' & '2' +(1 row) + +SELECT '!(1&2)'::tsquery; + tsquery +---------------- + !( '1' & '2' ) +(1 row) + +SELECT '1|2&3'::tsquery; + tsquery +----------------- + '1' | '2' & '3' +(1 row) + +SELECT '1|(2&3)'::tsquery; + tsquery +----------------- + '1' | '2' & '3' +(1 row) + +SELECT '(1|2)&3'::tsquery; + tsquery +--------------------- + ( '1' | '2' ) & '3' +(1 row) + +SELECT '1|2&!3'::tsquery; + tsquery +------------------ + '1' | '2' & !'3' +(1 row) + +SELECT '1|!2&3'::tsquery; + tsquery +------------------ + '1' | !'2' & '3' +(1 row) + +SELECT '!1|2&3'::tsquery; + tsquery +------------------ + !'1' | '2' & '3' +(1 row) + +SELECT '!1|(2&3)'::tsquery; + tsquery +------------------ + !'1' | '2' & '3' +(1 row) + +SELECT '!(1|2)&3'::tsquery; + tsquery +---------------------- + !( '1' | '2' ) & '3' +(1 row) + +SELECT '(!1|2)&3'::tsquery; + tsquery +---------------------- + ( !'1' | '2' ) & '3' +(1 row) + +SELECT '1|(2|(4|(5|6)))'::tsquery; + tsquery +----------------------------- + '1' | '2' | '4' | '5' | '6' +(1 row) + +SELECT '1|2|4|5|6'::tsquery; + tsquery +----------------------------- + '1' | '2' | '4' | '5' | '6' +(1 row) + +SELECT '1&(2&(4&(5&6)))'::tsquery; + tsquery +----------------------------- + '1' & '2' & '4' & '5' & '6' +(1 
row) + +SELECT '1&2&4&5&6'::tsquery; + tsquery +----------------------------- + '1' & '2' & '4' & '5' & '6' +(1 row) + +SELECT '1&(2&(4&(5|6)))'::tsquery; + tsquery +--------------------------------- + '1' & '2' & '4' & ( '5' | '6' ) +(1 row) + +SELECT '1&(2&(4&(5|!6)))'::tsquery; + tsquery +---------------------------------- + '1' & '2' & '4' & ( '5' | !'6' ) +(1 row) + +SELECT E'1&(''2''&('' 4''&(\\|5 | ''6 \\'' !|&'')))'::tsquery; + tsquery +------------------------------------------ + '1' & '2' & ' 4' & ( '|5' | '6 '' !|&' ) +(1 row) + +SELECT $$'\\as'$$::tsquery; + tsquery +--------- + '\\as' +(1 row) + +SELECT 'a:* & nbb:*ac | doo:a* | goo'::tsquery; + tsquery +-------------------------------------- + 'a':* & 'nbb':*AC | 'doo':*A | 'goo' +(1 row) + +SELECT '!!b'::tsquery; + tsquery +--------- + !!'b' +(1 row) + +SELECT '!!!b'::tsquery; + tsquery +--------- + !!!'b' +(1 row) + +SELECT '!(!b)'::tsquery; + tsquery +--------- + !!'b' +(1 row) + +SELECT 'a & !!b'::tsquery; + tsquery +------------- + 'a' & !!'b' +(1 row) + +SELECT '!!a & b'::tsquery; + tsquery +------------- + !!'a' & 'b' +(1 row) + +SELECT '!!a & !!b'::tsquery; + tsquery +--------------- + !!'a' & !!'b' +(1 row) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('foo', 'tsquery'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('foo!', 'tsquery'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('foo!', 'tsquery'); + message | detail | hint | sql_error_code +---------------------------------+--------+------+---------------- + syntax error in tsquery: "foo!" | | | 42601 +(1 row) + +SELECT * FROM pg_input_error_info('a <100000> b', 'tsquery'); + message | detail | hint | sql_error_code +---------------------------------------------------------------------------------------+--------+------+---------------- + distance in phrase operator must be an integer value between zero and 16384 inclusive | | | 22023 +(1 row) + +--comparisons +SELECT 'a' < 'b & c'::tsquery as "true"; + true +------ + t +(1 row) + +SELECT 'a' > 'b & c'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT 'a | f' < 'b & c'::tsquery as "false"; + false +------- + t +(1 row) + +SELECT 'a | ff' < 'b & c'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT 'a | f | g' < 'b & c'::tsquery as "false"; + false +------- + f +(1 row) + +--concatenation +SELECT numnode( 'new'::tsquery ); + numnode +--------- + 1 +(1 row) + +SELECT numnode( 'new & york'::tsquery ); + numnode +--------- + 3 +(1 row) + +SELECT numnode( 'new & york | qwery'::tsquery ); + numnode +--------- + 5 +(1 row) + +SELECT 'foo & bar'::tsquery && 'asd'; + ?column? +----------------------- + 'foo' & 'bar' & 'asd' +(1 row) + +SELECT 'foo & bar'::tsquery || 'asd & fg'; + ?column? +------------------------------ + 'foo' & 'bar' | 'asd' & 'fg' +(1 row) + +SELECT 'foo & bar'::tsquery || !!'asd & fg'::tsquery; + ?column? +----------------------------------- + 'foo' & 'bar' | !( 'asd' & 'fg' ) +(1 row) + +SELECT 'foo & bar'::tsquery && 'asd | fg'; + ?column? +---------------------------------- + 'foo' & 'bar' & ( 'asd' | 'fg' ) +(1 row) + +SELECT 'a' <-> 'b & d'::tsquery; + ?column? +----------------------- + 'a' <-> ( 'b' & 'd' ) +(1 row) + +SELECT 'a & g' <-> 'b & d'::tsquery; + ?column? +--------------------------------- + ( 'a' & 'g' ) <-> ( 'b' & 'd' ) +(1 row) + +SELECT 'a & g' <-> 'b | d'::tsquery; + ?column? 
+--------------------------------- + ( 'a' & 'g' ) <-> ( 'b' | 'd' ) +(1 row) + +SELECT 'a & g' <-> 'b <-> d'::tsquery; + ?column? +----------------------------------- + ( 'a' & 'g' ) <-> ( 'b' <-> 'd' ) +(1 row) + +SELECT tsquery_phrase('a <3> g', 'b & d', 10); + tsquery_phrase +-------------------------------- + 'a' <3> 'g' <10> ( 'b' & 'd' ) +(1 row) + +-- tsvector-tsquery operations +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & ca' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & ca:B' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & ca:A' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & ca:C' as "false"; + false +------- + f +(1 row) + +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & ca:CB' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & c:*C' as "false"; + false +------- + f +(1 row) + +SELECT 'a b:89 ca:23A,64b d:34c'::tsvector @@ 'd:AC & c:*CB' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64b cb:80c d:34c'::tsvector @@ 'd:AC & c:*C' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64c cb:80b d:34c'::tsvector @@ 'd:AC & c:*C' as "true"; + true +------ + t +(1 row) + +SELECT 'a b:89 ca:23A,64c cb:80b d:34c'::tsvector @@ 'd:AC & c:*B' as "true"; + true +------ + t +(1 row) + +SELECT 'wa:1D wb:2A'::tsvector @@ 'w:*D & w:*A'::tsquery as "true"; + true +------ + t +(1 row) + +SELECT 'wa:1D wb:2A'::tsvector @@ 'w:*D <-> w:*A'::tsquery as "true"; + true +------ + t +(1 row) + +SELECT 'wa:1A wb:2D'::tsvector @@ 'w:*D <-> w:*A'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT 'wa:1A'::tsvector @@ 'w:*A'::tsquery as "true"; + true +------ + t +(1 row) + +SELECT 'wa:1A'::tsvector @@ 'w:*D'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT 'wa:1A'::tsvector @@ '!w:*A'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT 'wa:1A'::tsvector @@ '!w:*D'::tsquery as "true"; + true +------ + t +(1 row) + +-- historically, a stripped tsvector matches queries ignoring weights: +SELECT strip('wa:1A'::tsvector) @@ 'w:*A'::tsquery as "true"; + true +------ + t +(1 row) + +SELECT strip('wa:1A'::tsvector) @@ 'w:*D'::tsquery as "true"; + true +------ + t +(1 row) + +SELECT strip('wa:1A'::tsvector) @@ '!w:*A'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT strip('wa:1A'::tsvector) @@ '!w:*D'::tsquery as "false"; + false +------- + f +(1 row) + +SELECT 'supernova'::tsvector @@ 'super'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'supeanova supernova'::tsvector @@ 'super'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'supeznova supernova'::tsvector @@ 'super'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'supernova'::tsvector @@ 'super:*'::tsquery AS "true"; + true +------ + t +(1 row) + +SELECT 'supeanova supernova'::tsvector @@ 'super:*'::tsquery AS "true"; + true +------ + t +(1 row) + +SELECT 'supeznova supernova'::tsvector @@ 'super:*'::tsquery AS "true"; + true +------ + t +(1 row) + +--phrase search +SELECT to_tsvector('simple', '1 2 3 1') @@ '1 <-> 2' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 3 1') @@ '1 <2> 2' AS "false"; + false +------- + f +(1 row) + +SELECT to_tsvector('simple', '1 2 3 1') @@ '1 <-> 3' AS "false"; + false +------- + f +(1 row) + +SELECT to_tsvector('simple', '1 2 3 1') @@ '1 <2> 3' AS "true"; + true +------ + t +(1 row) + 
+SELECT to_tsvector('simple', '1 2 1 2') @@ '1 <3> 2' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 11 3') @@ '1 <-> 3' AS "false"; + false +------- + f +(1 row) + +SELECT to_tsvector('simple', '1 2 11 3') @@ '1:* <-> 3' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 3 4') @@ '1 <-> 2 <-> 3' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 3 4') @@ '(1 <-> 2) <-> 3' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 3 4') @@ '1 <-> (2 <-> 3)' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 3 4') @@ '1 <2> (2 <-> 3)' AS "false"; + false +------- + f +(1 row) + +SELECT to_tsvector('simple', '1 2 1 2 3 4') @@ '(1 <-> 2) <-> 3' AS "true"; + true +------ + t +(1 row) + +SELECT to_tsvector('simple', '1 2 1 2 3 4') @@ '1 <-> 2 <-> 3' AS "true"; + true +------ + t +(1 row) + +-- without position data, phrase search does not match +SELECT strip(to_tsvector('simple', '1 2 3 4')) @@ '1 <-> 2 <-> 3' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'q x q y') @@ 'q <-> (x & y)' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'q x') @@ 'q <-> (x | y <-> z)' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'q y') @@ 'q <-> (x | y <-> z)' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'q y z') @@ 'q <-> (x | y <-> z)' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'q y x') @@ 'q <-> (x | y <-> z)' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'q x y') @@ 'q <-> (x | y <-> z)' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'q x') @@ '(x | y <-> z) <-> q' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'x q') @@ '(x | y <-> z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q') @@ '(x | y <-> z) <-> q' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'x y z') @@ '(x | y <-> z) <-> q' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'x y z q') @@ '(x | y <-> z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'y z q') @@ '(x | y <-> z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'y y q') @@ '(x | y <-> z) <-> q' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'y y q') @@ '(!x | y <-> z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q') @@ '(!x | y <-> z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'y y q') @@ '(x | y <-> !z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x q') @@ '(x | y <-> !z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x q') @@ '(!x | y <-> z) <-> q' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'z q') @@ '(!x | y <-> z) <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q') @@ '(!x | y) <-> y <-> q' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'x y q') @@ '(!x | !y) <-> y <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q') @@ '(x | !y) <-> y <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q') @@ '(x | !!z) <-> y <-> q' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x 
y q y') @@ '!x <-> y' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q y') @@ '!x <-> !y' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q y') @@ '!x <-> !!y' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q y') @@ '!(x <-> y)' AS "false"; + false +------- + f +(1 row) + +select to_tsvector('simple', 'x y q y') @@ '!(x <2> y)' AS "true"; + true +------ + t +(1 row) + +select strip(to_tsvector('simple', 'x y q y')) @@ '!x <-> y' AS "false"; + false +------- + f +(1 row) + +select strip(to_tsvector('simple', 'x y q y')) @@ '!x <-> !y' AS "false"; + false +------- + f +(1 row) + +select strip(to_tsvector('simple', 'x y q y')) @@ '!x <-> !!y' AS "false"; + false +------- + f +(1 row) + +select strip(to_tsvector('simple', 'x y q y')) @@ '!(x <-> y)' AS "true"; + true +------ + t +(1 row) + +select strip(to_tsvector('simple', 'x y q y')) @@ '!(x <2> y)' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', 'x y q y') @@ '!foo' AS "true"; + true +------ + t +(1 row) + +select to_tsvector('simple', '') @@ '!foo' AS "true"; + true +------ + t +(1 row) + +--ranking +SELECT ts_rank(' a:1 s:2C d g'::tsvector, 'a | s'); + ts_rank +----------- + 0.0911891 +(1 row) + +SELECT ts_rank(' a:1 sa:2C d g'::tsvector, 'a | s'); + ts_rank +----------- + 0.0303964 +(1 row) + +SELECT ts_rank(' a:1 sa:2C d g'::tsvector, 'a | s:*'); + ts_rank +----------- + 0.0911891 +(1 row) + +SELECT ts_rank(' a:1 sa:2C d g'::tsvector, 'a | sa:*'); + ts_rank +----------- + 0.0911891 +(1 row) + +SELECT ts_rank(' a:1 s:2B d g'::tsvector, 'a | s'); + ts_rank +---------- + 0.151982 +(1 row) + +SELECT ts_rank(' a:1 s:2 d g'::tsvector, 'a | s'); + ts_rank +----------- + 0.0607927 +(1 row) + +SELECT ts_rank(' a:1 s:2C d g'::tsvector, 'a & s'); + ts_rank +---------- + 0.140153 +(1 row) + +SELECT ts_rank(' a:1 s:2B d g'::tsvector, 'a & s'); + ts_rank +---------- + 0.198206 +(1 row) + +SELECT ts_rank(' a:1 s:2 d g'::tsvector, 'a & s'); + ts_rank +----------- + 0.0991032 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2C d g'::tsvector, 'a | s'); + ts_rank_cd +------------ + 0.3 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2C d g'::tsvector, 'a | s'); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2C d g'::tsvector, 'a | s:*'); + ts_rank_cd +------------ + 0.3 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2C d g'::tsvector, 'a | sa:*'); + ts_rank_cd +------------ + 0.3 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:3C sab:2c d g'::tsvector, 'a | sa:*'); + ts_rank_cd +------------ + 0.5 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2B d g'::tsvector, 'a | s'); + ts_rank_cd +------------ + 0.5 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2 d g'::tsvector, 'a | s'); + ts_rank_cd +------------ + 0.2 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2C d g'::tsvector, 'a & s'); + ts_rank_cd +------------ + 0.133333 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2B d g'::tsvector, 'a & s'); + ts_rank_cd +------------ + 0.16 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2 d g'::tsvector, 'a & s'); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2A d g'::tsvector, 'a <-> s'); + ts_rank_cd +------------ + 0.181818 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2C d g'::tsvector, 'a <-> s'); + ts_rank_cd +------------ + 0.133333 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2 d g'::tsvector, 'a <-> s'); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2 d:2A g'::tsvector, 'a <-> s'); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(' a:1 s:2,3A d:2A 
g'::tsvector, 'a <2> s:A'); + ts_rank_cd +------------ + 0.0909091 +(1 row) + +SELECT ts_rank_cd(' a:1 b:2 s:3A d:2A g'::tsvector, 'a <2> s:A'); + ts_rank_cd +------------ + 0.0909091 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2D sb:2A g'::tsvector, 'a <-> s:*'); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2A sb:2D g'::tsvector, 'a <-> s:*'); + ts_rank_cd +------------ + 0.1 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2A sb:2D g'::tsvector, 'a <-> s:* <-> sa:A'); + ts_rank_cd +------------ + 0 +(1 row) + +SELECT ts_rank_cd(' a:1 sa:2A sb:2D g'::tsvector, 'a <-> s:* <-> sa:B'); + ts_rank_cd +------------ + 0 +(1 row) + +SELECT 'a:1 b:2'::tsvector @@ 'a <-> b'::tsquery AS "true"; + true +------ + t +(1 row) + +SELECT 'a:1 b:2'::tsvector @@ 'a <0> b'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'a:1 b:2'::tsvector @@ 'a <1> b'::tsquery AS "true"; + true +------ + t +(1 row) + +SELECT 'a:1 b:2'::tsvector @@ 'a <2> b'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'a:1 b:3'::tsvector @@ 'a <-> b'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'a:1 b:3'::tsvector @@ 'a <0> b'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'a:1 b:3'::tsvector @@ 'a <1> b'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'a:1 b:3'::tsvector @@ 'a <2> b'::tsquery AS "true"; + true +------ + t +(1 row) + +SELECT 'a:1 b:3'::tsvector @@ 'a <3> b'::tsquery AS "false"; + false +------- + f +(1 row) + +SELECT 'a:1 b:3'::tsvector @@ 'a <0> a:*'::tsquery AS "true"; + true +------ + t +(1 row) + +-- tsvector editing operations +SELECT strip('w:12B w:13* w:12,5,6 a:1,3* a:3 w asd:1dc asd'::tsvector); + strip +--------------- + 'a' 'asd' 'w' +(1 row) + +SELECT strip('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); + strip +---------------------------------------------- + 'base' 'hidden' 'rebel' 'spaceship' 'strike' +(1 row) + +SELECT strip('base hidden rebel spaceship strike'::tsvector); + strip +---------------------------------------------- + 'base' 'hidden' 'rebel' 'spaceship' 'strike' +(1 row) + +SELECT ts_delete(to_tsvector('english', 'Rebel spaceships, striking from a hidden base'), 'spaceship'); + ts_delete +------------------------------------------ + 'base':7 'hidden':6 'rebel':1 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'base'); + ts_delete +-------------------------------------------------------------- + 'hidden':6 'rebel':1 'spaceship':2,33A,34B,35C,36 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'bas'); + ts_delete +----------------------------------------------------------------------- + 'base':7 'hidden':6 'rebel':1 'spaceship':2,33A,34B,35C,36 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'bases'); + ts_delete +----------------------------------------------------------------------- + 'base':7 'hidden':6 'rebel':1 'spaceship':2,33A,34B,35C,36 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, 'spaceship'); + ts_delete +------------------------------------------ + 'base':7 'hidden':6 'rebel':1 'strike':3 +(1 row) + +SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, 'spaceship'); + ts_delete +---------------------------------- + 'base' 'hidden' 'rebel' 'strike' +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 
spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceship','rebel']); + ts_delete +-------------------------------- + 'base':7 'hidden':6 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceships','rebel']); + ts_delete +------------------------------------------------------------- + 'base':7 'hidden':6 'spaceship':2,33A,34B,35C,36 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceshi','rebel']); + ts_delete +------------------------------------------------------------- + 'base':7 'hidden':6 'spaceship':2,33A,34B,35C,36 'strike':3 +(1 row) + +SELECT ts_delete('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector, ARRAY['spaceship','leya','rebel']); + ts_delete +-------------------------------- + 'base':7 'hidden':6 'strike':3 +(1 row) + +SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, ARRAY['spaceship','leya','rebel']); + ts_delete +-------------------------- + 'base' 'hidden' 'strike' +(1 row) + +SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, ARRAY['spaceship','leya','rebel','rebel']); + ts_delete +-------------------------- + 'base' 'hidden' 'strike' +(1 row) + +SELECT ts_delete('base hidden rebel spaceship strike'::tsvector, ARRAY['spaceship','leya','rebel', '', NULL]); + ts_delete +-------------------------- + 'base' 'hidden' 'strike' +(1 row) + +SELECT unnest('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); + unnest +--------------------------------------------- + (base,{7},{D}) + (hidden,{6},{D}) + (rebel,{1},{D}) + (spaceship,"{2,33,34,35,36}","{D,A,B,C,D}") + (strike,{3},{D}) +(5 rows) + +SELECT unnest('base hidden rebel spaceship strike'::tsvector); + unnest +--------------- + (base,,) + (hidden,,) + (rebel,,) + (spaceship,,) + (strike,,) +(5 rows) + +SELECT * FROM unnest('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); + lexeme | positions | weights +-----------+-----------------+------------- + base | {7} | {D} + hidden | {6} | {D} + rebel | {1} | {D} + spaceship | {2,33,34,35,36} | {D,A,B,C,D} + strike | {3} | {D} +(5 rows) + +SELECT * FROM unnest('base hidden rebel spaceship strike'::tsvector); + lexeme | positions | weights +-----------+-----------+--------- + base | | + hidden | | + rebel | | + spaceship | | + strike | | +(5 rows) + +SELECT lexeme, positions[1] from unnest('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); + lexeme | positions +-----------+----------- + base | 7 + hidden | 6 + rebel | 1 + spaceship | 2 + strike | 3 +(5 rows) + +SELECT tsvector_to_array('base:7 hidden:6 rebel:1 spaceship:2,33A,34B,35C,36D strike:3'::tsvector); + tsvector_to_array +-------------------------------------- + {base,hidden,rebel,spaceship,strike} +(1 row) + +SELECT tsvector_to_array('base hidden rebel spaceship strike'::tsvector); + tsvector_to_array +-------------------------------------- + {base,hidden,rebel,spaceship,strike} +(1 row) + +SELECT array_to_tsvector(ARRAY['base','hidden','rebel','spaceship','strike']); + array_to_tsvector +---------------------------------------------- + 'base' 'hidden' 'rebel' 'spaceship' 'strike' +(1 row) + +-- null and empty string are disallowed, since we mustn't make an empty lexeme +SELECT array_to_tsvector(ARRAY['base','hidden','rebel','spaceship', NULL]); +ERROR: lexeme array may not contain nulls +SELECT 
array_to_tsvector(ARRAY['base','hidden','rebel','spaceship', '']); +ERROR: lexeme array may not contain empty strings +-- array_to_tsvector must sort and de-dup +SELECT array_to_tsvector(ARRAY['foo','bar','baz','bar']); + array_to_tsvector +------------------- + 'bar' 'baz' 'foo' +(1 row) + +SELECT setweight('w:12B w:13* w:12,5,6 a:1,3* a:3 w asd:1dc asd zxc:81,567,222A'::tsvector, 'c'); + setweight +---------------------------------------------------------- + 'a':1C,3C 'asd':1C 'w':5C,6C,12C,13C 'zxc':81C,222C,567C +(1 row) + +SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c'); + setweight +---------------------------------------------------------- + 'a':1C,3C 'asd':1C 'w':5C,6C,12C,13C 'zxc':81C,222C,567C +(1 row) + +SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c', '{a}'); + setweight +------------------------------------------------------ + 'a':1C,3C 'asd':1C 'w':5,6,12B,13A 'zxc':81,222A,567 +(1 row) + +SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c', '{a}'); + setweight +------------------------------------------------------ + 'a':1C,3C 'asd':1C 'w':5,6,12B,13A 'zxc':81,222A,567 +(1 row) + +SELECT setweight('a:1,3A asd:1C w:5,6,12B,13A zxc:81,222A,567'::tsvector, 'c', '{a,zxc}'); + setweight +-------------------------------------------------------- + 'a':1C,3C 'asd':1C 'w':5,6,12B,13A 'zxc':81C,222C,567C +(1 row) + +SELECT setweight('a asd w:5,6,12B,13A zxc'::tsvector, 'c', ARRAY['a', 'zxc', '', NULL]); + setweight +--------------------------------- + 'a' 'asd' 'w':5,6,12B,13A 'zxc' +(1 row) + +SELECT ts_filter('base:7A empir:17 evil:15 first:11 galact:16 hidden:6A rebel:1A spaceship:2A strike:3A victori:12 won:9'::tsvector, '{a}'); + ts_filter +------------------------------------------------------------- + 'base':7A 'hidden':6A 'rebel':1A 'spaceship':2A 'strike':3A +(1 row) + +SELECT ts_filter('base hidden rebel spaceship strike'::tsvector, '{a}'); + ts_filter +----------- + +(1 row) + +SELECT ts_filter('base hidden rebel spaceship strike'::tsvector, '{a,b,NULL}'); +ERROR: weight array may not contain nulls diff --git a/src/test/regress/expected/tuplesort.out b/src/test/regress/expected/tuplesort.out new file mode 100644 index 0000000..a2efa17 --- /dev/null +++ b/src/test/regress/expected/tuplesort.out @@ -0,0 +1,692 @@ +-- only use parallelism when explicitly intending to do so +SET max_parallel_maintenance_workers = 0; +SET max_parallel_workers = 0; +-- A table with contents that, when sorted, triggers abbreviated +-- key aborts. One easy way to achieve that is to use uuids that all +-- have the same prefix, as abbreviated keys for uuids just use the +-- first sizeof(Datum) bytes. 
+CREATE TEMP TABLE abbrev_abort_uuids ( + id serial not null, + abort_increasing uuid, + abort_decreasing uuid, + noabort_increasing uuid, + noabort_decreasing uuid); +INSERT INTO abbrev_abort_uuids (abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing) + SELECT + ('00000000-0000-0000-0000-'||to_char(g.i, '000000000000FM'))::uuid abort_increasing, + ('00000000-0000-0000-0000-'||to_char(20000 - g.i, '000000000000FM'))::uuid abort_decreasing, + (to_char(g.i % 10009, '00000000FM')||'-0000-0000-0000-'||to_char(g.i, '000000000000FM'))::uuid noabort_increasing, + (to_char(((20000 - g.i) % 10009), '00000000FM')||'-0000-0000-0000-'||to_char(20000 - g.i, '000000000000FM'))::uuid noabort_decreasing + FROM generate_series(0, 20000, 1) g(i); +-- and a few NULLs +INSERT INTO abbrev_abort_uuids(id) VALUES(0); +INSERT INTO abbrev_abort_uuids DEFAULT VALUES; +INSERT INTO abbrev_abort_uuids DEFAULT VALUES; +-- add just a few duplicates +INSERT INTO abbrev_abort_uuids (abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing) + SELECT abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing + FROM abbrev_abort_uuids + WHERE (id < 10 OR id > 19990) AND id % 3 = 0 AND abort_increasing is not null; +---- +-- Check sort node uses of tuplesort wrt. abbreviated keys +---- +-- plain sort triggering abbreviated abort +SELECT abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_increasing OFFSET 20000 - 4; + abort_increasing | abort_decreasing +--------------------------------------+-------------------------------------- + 00000000-0000-0000-0000-000000019992 | 00000000-0000-0000-0000-000000000008 + 00000000-0000-0000-0000-000000019993 | 00000000-0000-0000-0000-000000000007 + 00000000-0000-0000-0000-000000019994 | 00000000-0000-0000-0000-000000000006 + 00000000-0000-0000-0000-000000019994 | 00000000-0000-0000-0000-000000000006 + 00000000-0000-0000-0000-000000019995 | 00000000-0000-0000-0000-000000000005 + 00000000-0000-0000-0000-000000019996 | 00000000-0000-0000-0000-000000000004 + 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 + 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 + 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 + 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 + 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + | + | + | +(15 rows) + +SELECT abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_decreasing NULLS FIRST OFFSET 20000 - 4; + abort_increasing | abort_decreasing +--------------------------------------+-------------------------------------- + 00000000-0000-0000-0000-000000000011 | 00000000-0000-0000-0000-000000019989 + 00000000-0000-0000-0000-000000000010 | 00000000-0000-0000-0000-000000019990 + 00000000-0000-0000-0000-000000000009 | 00000000-0000-0000-0000-000000019991 + 00000000-0000-0000-0000-000000000008 | 00000000-0000-0000-0000-000000019992 + 00000000-0000-0000-0000-000000000008 | 00000000-0000-0000-0000-000000019992 + 00000000-0000-0000-0000-000000000007 | 00000000-0000-0000-0000-000000019993 + 00000000-0000-0000-0000-000000000006 | 00000000-0000-0000-0000-000000019994 + 00000000-0000-0000-0000-000000000005 | 00000000-0000-0000-0000-000000019995 + 00000000-0000-0000-0000-000000000005 | 00000000-0000-0000-0000-000000019995 + 00000000-0000-0000-0000-000000000004 | 
00000000-0000-0000-0000-000000019996 + 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 + 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 + 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 + 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 + 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 +(15 rows) + +-- plain sort not triggering abbreviated abort +SELECT noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_increasing OFFSET 20000 - 4; + noabort_increasing | noabort_decreasing +--------------------------------------+-------------------------------------- + 00009997-0000-0000-0000-000000009997 | 00010003-0000-0000-0000-000000010003 + 00009998-0000-0000-0000-000000009998 | 00010002-0000-0000-0000-000000010002 + 00009999-0000-0000-0000-000000009999 | 00010001-0000-0000-0000-000000010001 + 00010000-0000-0000-0000-000000010000 | 00010000-0000-0000-0000-000000010000 + 00010001-0000-0000-0000-000000010001 | 00009999-0000-0000-0000-000000009999 + 00010002-0000-0000-0000-000000010002 | 00009998-0000-0000-0000-000000009998 + 00010003-0000-0000-0000-000000010003 | 00009997-0000-0000-0000-000000009997 + 00010004-0000-0000-0000-000000010004 | 00009996-0000-0000-0000-000000009996 + 00010005-0000-0000-0000-000000010005 | 00009995-0000-0000-0000-000000009995 + 00010006-0000-0000-0000-000000010006 | 00009994-0000-0000-0000-000000009994 + 00010007-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 + 00010008-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 + | + | + | +(15 rows) + +SELECT noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing NULLS FIRST OFFSET 20000 - 4; + noabort_increasing | noabort_decreasing +--------------------------------------+-------------------------------------- + 00010006-0000-0000-0000-000000010006 | 00009994-0000-0000-0000-000000009994 + 00010005-0000-0000-0000-000000010005 | 00009995-0000-0000-0000-000000009995 + 00010004-0000-0000-0000-000000010004 | 00009996-0000-0000-0000-000000009996 + 00010003-0000-0000-0000-000000010003 | 00009997-0000-0000-0000-000000009997 + 00010002-0000-0000-0000-000000010002 | 00009998-0000-0000-0000-000000009998 + 00010001-0000-0000-0000-000000010001 | 00009999-0000-0000-0000-000000009999 + 00010000-0000-0000-0000-000000010000 | 00010000-0000-0000-0000-000000010000 + 00009999-0000-0000-0000-000000009999 | 00010001-0000-0000-0000-000000010001 + 00009998-0000-0000-0000-000000009998 | 00010002-0000-0000-0000-000000010002 + 00009997-0000-0000-0000-000000009997 | 00010003-0000-0000-0000-000000010003 + 00009996-0000-0000-0000-000000009996 | 00010004-0000-0000-0000-000000010004 + 00009995-0000-0000-0000-000000009995 | 00010005-0000-0000-0000-000000010005 + 00009994-0000-0000-0000-000000009994 | 00010006-0000-0000-0000-000000010006 + 00009993-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 + 00009992-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 +(15 rows) + +-- bounded sort (disables abbreviated keys) +SELECT abort_increasing, noabort_increasing FROM abbrev_abort_uuids ORDER BY abort_increasing LIMIT 5; + abort_increasing | noabort_increasing +--------------------------------------+-------------------------------------- + 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000000000 + 00000000-0000-0000-0000-000000000001 | 00000001-0000-0000-0000-000000000001 + 
00000000-0000-0000-0000-000000000002 | 00000002-0000-0000-0000-000000000002 + 00000000-0000-0000-0000-000000000002 | 00000002-0000-0000-0000-000000000002 + 00000000-0000-0000-0000-000000000003 | 00000003-0000-0000-0000-000000000003 +(5 rows) + +SELECT abort_increasing, noabort_increasing FROM abbrev_abort_uuids ORDER BY noabort_increasing NULLS FIRST LIMIT 5; + abort_increasing | noabort_increasing +--------------------------------------+-------------------------------------- + | + | + | + 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000000000 + 00000000-0000-0000-0000-000000010009 | 00000000-0000-0000-0000-000000010009 +(5 rows) + +---- +-- Check index creation uses of tuplesort wrt. abbreviated keys +---- +-- index creation using abbreviated keys successfully +CREATE INDEX abbrev_abort_uuids__noabort_increasing_idx ON abbrev_abort_uuids (noabort_increasing); +CREATE INDEX abbrev_abort_uuids__noabort_decreasing_idx ON abbrev_abort_uuids (noabort_decreasing); +-- verify +EXPLAIN (COSTS OFF) +SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_increasing LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------------- + Limit + -> Index Scan using abbrev_abort_uuids__noabort_increasing_idx on abbrev_abort_uuids +(2 rows) + +SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_increasing LIMIT 5; + id | noabort_increasing | noabort_decreasing +-------+--------------------------------------+-------------------------------------- + 1 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 + 10010 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 + 2 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 + 10011 | 00000001-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 + 3 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing LIMIT 5; + QUERY PLAN +----------------------------------------------------------------------------------------- + Limit + -> Index Scan using abbrev_abort_uuids__noabort_decreasing_idx on abbrev_abort_uuids +(2 rows) + +SELECT id, noabort_increasing, noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing LIMIT 5; + id | noabort_increasing | noabort_decreasing +-------+--------------------------------------+-------------------------------------- + 20001 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20010 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 9992 | 00009991-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 + 20000 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 + 9991 | 00009990-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 +(5 rows) + +-- index creation using abbreviated keys, hitting abort +CREATE INDEX abbrev_abort_uuids__abort_increasing_idx ON abbrev_abort_uuids (abort_increasing); +CREATE INDEX abbrev_abort_uuids__abort_decreasing_idx ON abbrev_abort_uuids (abort_decreasing); +-- verify +EXPLAIN (COSTS OFF) +SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_increasing LIMIT 5; + QUERY PLAN +--------------------------------------------------------------------------------------- + Limit 
+ -> Index Scan using abbrev_abort_uuids__abort_increasing_idx on abbrev_abort_uuids +(2 rows) + +SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_increasing LIMIT 5; + id | abort_increasing | abort_decreasing +-------+--------------------------------------+-------------------------------------- + 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 + 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 + 3 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 + 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 + 4 | 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_decreasing LIMIT 5; + QUERY PLAN +--------------------------------------------------------------------------------------- + Limit + -> Index Scan using abbrev_abort_uuids__abort_decreasing_idx on abbrev_abort_uuids +(2 rows) + +SELECT id, abort_increasing, abort_decreasing FROM abbrev_abort_uuids ORDER BY abort_decreasing LIMIT 5; + id | abort_increasing | abort_decreasing +-------+--------------------------------------+-------------------------------------- + 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 + 19999 | 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 + 19998 | 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 +(5 rows) + +---- +-- Check CLUSTER uses of tuplesort wrt. 
abbreviated keys +---- +-- when aborting, increasing order +BEGIN; +SET LOCAL enable_indexscan = false; +CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__abort_increasing_idx; +-- head +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 + 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 + 3 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 + 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 + 4 | 00000000-0000-0000-0000-000000000003 | 00000000-0000-0000-0000-000000019997 | 00000003-0000-0000-0000-000000000003 | 00009988-0000-0000-0000-000000019997 +(5 rows) + +-- tail +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid DESC LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 0 | | | | + 20002 | | | | + 20003 | | | | + 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 +(5 rows) + +ROLLBACK; +-- when aborting, decreasing order +BEGIN; +SET LOCAL enable_indexscan = false; +CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__abort_decreasing_idx; +-- head +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 + 19999 | 00000000-0000-0000-0000-000000019998 | 00000000-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 + 20009 | 00000000-0000-0000-0000-000000019997 | 00000000-0000-0000-0000-000000000003 | 00009988-0000-0000-0000-000000019997 | 
00000003-0000-0000-0000-000000000003 +(5 rows) + +-- tail +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid DESC LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 0 | | | | + 20002 | | | | + 20003 | | | | + 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 + 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 +(5 rows) + +ROLLBACK; +-- when not aborting, increasing order +BEGIN; +SET LOCAL enable_indexscan = false; +CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__noabort_increasing_idx; +-- head +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 1 | 00000000-0000-0000-0000-000000000000 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 + 10010 | 00000000-0000-0000-0000-000000010009 | 00000000-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 + 2 | 00000000-0000-0000-0000-000000000001 | 00000000-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 + 10011 | 00000000-0000-0000-0000-000000010010 | 00000000-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 + 20004 | 00000000-0000-0000-0000-000000000002 | 00000000-0000-0000-0000-000000019998 | 00000002-0000-0000-0000-000000000002 | 00009989-0000-0000-0000-000000019998 +(5 rows) + +-- tail +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid DESC LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 0 | | | | + 20002 | | | | + 20003 | | | | + 10009 | 00000000-0000-0000-0000-000000010008 | 00000000-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 + 10008 | 00000000-0000-0000-0000-000000010007 | 00000000-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 +(5 rows) + +ROLLBACK; +-- when no aborting, decreasing order +BEGIN; +SET LOCAL enable_indexscan = false; +CLUSTER abbrev_abort_uuids USING abbrev_abort_uuids__noabort_decreasing_idx; +-- head +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing 
+-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 20010 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 20001 | 00000000-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 | 00009991-0000-0000-0000-000000020000 | 00000000-0000-0000-0000-000000000000 + 9992 | 00000000-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 | 00009991-0000-0000-0000-000000009991 | 00000000-0000-0000-0000-000000010009 + 20000 | 00000000-0000-0000-0000-000000019999 | 00000000-0000-0000-0000-000000000001 | 00009990-0000-0000-0000-000000019999 | 00000001-0000-0000-0000-000000000001 + 9991 | 00000000-0000-0000-0000-000000009990 | 00000000-0000-0000-0000-000000010010 | 00009990-0000-0000-0000-000000009990 | 00000001-0000-0000-0000-000000010010 +(5 rows) + +-- tail +SELECT id, abort_increasing, abort_decreasing, noabort_increasing, noabort_decreasing +FROM abbrev_abort_uuids +ORDER BY ctid DESC LIMIT 5; + id | abort_increasing | abort_decreasing | noabort_increasing | noabort_decreasing +-------+--------------------------------------+--------------------------------------+--------------------------------------+-------------------------------------- + 0 | | | | + 20003 | | | | + 20002 | | | | + 9993 | 00000000-0000-0000-0000-000000009992 | 00000000-0000-0000-0000-000000010008 | 00009992-0000-0000-0000-000000009992 | 00010008-0000-0000-0000-000000010008 + 9994 | 00000000-0000-0000-0000-000000009993 | 00000000-0000-0000-0000-000000010007 | 00009993-0000-0000-0000-000000009993 | 00010007-0000-0000-0000-000000010007 +(5 rows) + +ROLLBACK; +---- +-- test forward and backward scans for in-memory and disk based tuplesort +---- +-- in-memory +BEGIN; +SET LOCAL enable_indexscan = false; +-- unfortunately can't show analyze output confirming sort method, +-- the memory used output wouldn't be stable +EXPLAIN (COSTS OFF) DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; + QUERY PLAN +-------------------------------------- + Sort + Sort Key: noabort_decreasing + -> Seq Scan on abbrev_abort_uuids +(3 rows) + +DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; +-- first and second +FETCH NEXT FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +-- scroll beyond beginning +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +-- scroll beyond end end +FETCH LAST FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing 
+-------------------- +(0 rows) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- +(0 rows) + +COMMIT; +-- disk based +BEGIN; +SET LOCAL enable_indexscan = false; +SET LOCAL work_mem = '100kB'; +-- unfortunately can't show analyze output confirming sort method, +-- the memory used output wouldn't be stable +EXPLAIN (COSTS OFF) DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; + QUERY PLAN +-------------------------------------- + Sort + Sort Key: noabort_decreasing + -> Seq Scan on abbrev_abort_uuids +(3 rows) + +DECLARE c SCROLL CURSOR FOR SELECT noabort_decreasing FROM abbrev_abort_uuids ORDER BY noabort_decreasing; +-- first and second +FETCH NEXT FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +-- scroll beyond beginning +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------------------------- + 00000000-0000-0000-0000-000000000000 +(1 row) + +-- scroll beyond end end +FETCH LAST FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- +(0 rows) + +FETCH BACKWARD FROM c; + noabort_decreasing +-------------------- + +(1 row) + +FETCH NEXT FROM c; + noabort_decreasing +-------------------- +(0 rows) + +COMMIT; +---- +-- test tuplesort using both in-memory and disk sort +--- +-- memory based +SELECT + -- fixed-width by-value datum + (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], + -- fixed-width by-ref datum + (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], + -- variable-width datum + (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], + -- fixed width by-value datum tuplesort + percentile_disc(0.99) WITHIN GROUP (ORDER BY id), + -- ensure state is shared + percentile_disc(0.01) WITHIN GROUP (ORDER BY id), + -- fixed width by-ref datum tuplesort + percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), + -- variable width by-ref datum tuplesort + percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), + -- multi-column tuplesort + rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) +FROM ( + SELECT * FROM abbrev_abort_uuids + UNION ALL + SELECT NULL, NULL, NULL, NULL, NULL) s; + array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank 
+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ + {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 +(1 row) + +-- disk based (see also above) +BEGIN; +SET LOCAL work_mem = '100kB'; +SELECT + (array_agg(id ORDER BY id DESC NULLS FIRST))[0:5], + (array_agg(abort_increasing ORDER BY abort_increasing DESC NULLS LAST))[0:5], + (array_agg(id::text ORDER BY id::text DESC NULLS LAST))[0:5], + percentile_disc(0.99) WITHIN GROUP (ORDER BY id), + percentile_disc(0.01) WITHIN GROUP (ORDER BY id), + percentile_disc(0.8) WITHIN GROUP (ORDER BY abort_increasing), + percentile_disc(0.2) WITHIN GROUP (ORDER BY id::text), + rank('00000000-0000-0000-0000-000000000000', '2', '2') WITHIN GROUP (ORDER BY noabort_increasing, id, id::text) +FROM ( + SELECT * FROM abbrev_abort_uuids + UNION ALL + SELECT NULL, NULL, NULL, NULL, NULL) s; + array_agg | array_agg | array_agg | percentile_disc | percentile_disc | percentile_disc | percentile_disc | rank +--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+----------------------------+-----------------+-----------------+--------------------------------------+-----------------+------ + {NULL,20010,20009,20008,20007} | {00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000020000,00000000-0000-0000-0000-000000019999,00000000-0000-0000-0000-000000019998,00000000-0000-0000-0000-000000019997} | {9999,9998,9997,9996,9995} | 19810 | 200 | 00000000-0000-0000-0000-000000016003 | 136 | 2 +(1 row) + +ROLLBACK; +---- +-- test tuplesort mark/restore +--- +CREATE TEMP TABLE test_mark_restore(col1 int, col2 int, col12 int); +-- need a few duplicates for mark/restore to matter +INSERT INTO test_mark_restore(col1, col2, col12) + SELECT a.i, b.i, a.i * b.i FROM generate_series(1, 500) a(i), generate_series(1, 5) b(i); +BEGIN; +SET LOCAL enable_nestloop = off; +SET LOCAL enable_hashjoin = off; +SET LOCAL enable_material = off; +-- set query into variable once, to avoid repetition of the fairly long query +SELECT $$ + SELECT col12, count(distinct a.col1), count(distinct a.col2), count(distinct b.col1), count(distinct b.col2), count(*) + FROM test_mark_restore a + JOIN test_mark_restore b USING(col12) + GROUP BY 1 + HAVING count(*) > 1 + ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC + LIMIT 10 +$$ AS qry \gset +-- test mark/restore with in-memory sorts +EXPLAIN (COSTS OFF) :qry; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC + -> GroupAggregate + Group Key: a.col12 + Filter: (count(*) > 1) + -> Incremental Sort + Sort Key: 
a.col12 DESC, a.col1 + Presorted Key: a.col12 + -> Merge Join + Merge Cond: (a.col12 = b.col12) + -> Sort + Sort Key: a.col12 DESC + -> Seq Scan on test_mark_restore a + -> Sort + Sort Key: b.col12 DESC + -> Seq Scan on test_mark_restore b +(17 rows) + +:qry; + col12 | count | count | count | count | count +-------+-------+-------+-------+-------+------- + 480 | 5 | 5 | 5 | 5 | 25 + 420 | 5 | 5 | 5 | 5 | 25 + 360 | 5 | 5 | 5 | 5 | 25 + 300 | 5 | 5 | 5 | 5 | 25 + 240 | 5 | 5 | 5 | 5 | 25 + 180 | 5 | 5 | 5 | 5 | 25 + 120 | 5 | 5 | 5 | 5 | 25 + 60 | 5 | 5 | 5 | 5 | 25 + 960 | 4 | 4 | 4 | 4 | 16 + 900 | 4 | 4 | 4 | 4 | 16 +(10 rows) + +-- test mark/restore with on-disk sorts +SET LOCAL work_mem = '100kB'; +EXPLAIN (COSTS OFF) :qry; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: (count(DISTINCT a.col1)) DESC, a.col12 DESC, (count(DISTINCT a.col2)) DESC, (count(DISTINCT b.col1)) DESC, (count(DISTINCT b.col2)) DESC, (count(*)) DESC + -> GroupAggregate + Group Key: a.col12 + Filter: (count(*) > 1) + -> Incremental Sort + Sort Key: a.col12 DESC, a.col1 + Presorted Key: a.col12 + -> Merge Join + Merge Cond: (a.col12 = b.col12) + -> Sort + Sort Key: a.col12 DESC + -> Seq Scan on test_mark_restore a + -> Sort + Sort Key: b.col12 DESC + -> Seq Scan on test_mark_restore b +(17 rows) + +:qry; + col12 | count | count | count | count | count +-------+-------+-------+-------+-------+------- + 480 | 5 | 5 | 5 | 5 | 25 + 420 | 5 | 5 | 5 | 5 | 25 + 360 | 5 | 5 | 5 | 5 | 25 + 300 | 5 | 5 | 5 | 5 | 25 + 240 | 5 | 5 | 5 | 5 | 25 + 180 | 5 | 5 | 5 | 5 | 25 + 120 | 5 | 5 | 5 | 5 | 25 + 60 | 5 | 5 | 5 | 5 | 25 + 960 | 4 | 4 | 4 | 4 | 16 + 900 | 4 | 4 | 4 | 4 | 16 +(10 rows) + +COMMIT; diff --git a/src/test/regress/expected/txid.out b/src/test/regress/expected/txid.out new file mode 100644 index 0000000..95ba66e --- /dev/null +++ b/src/test/regress/expected/txid.out @@ -0,0 +1,327 @@ +-- txid_snapshot data type and related functions +-- Note: these are backward-compatibility functions and types, and have been +-- replaced by new xid8-based variants. See xid.sql. The txid variants will +-- be removed in a future release. 
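-- For reference, a minimal sketch of the xid8-based replacements mentioned
-- above (illustrative only; these assume the functions added in PostgreSQL 13
-- and later, and their results are not shown here):
--   txid_current()                -> pg_current_xact_id()
--   txid_current_if_assigned()    -> pg_current_xact_id_if_assigned()
--   txid_current_snapshot()       -> pg_current_snapshot()
--   txid_snapshot_xmin/xmax/xip() -> pg_snapshot_xmin/xmax/xip()
--   txid_visible_in_snapshot()    -> pg_visible_in_snapshot()
--   txid_status()                 -> pg_xact_status()
-- e.g.
--   SELECT pg_visible_in_snapshot(pg_current_xact_id(), pg_current_snapshot());
--   SELECT pg_xact_status(pg_current_xact_id());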
+-- i/o +select '12:13:'::txid_snapshot; + txid_snapshot +--------------- + 12:13: +(1 row) + +select '12:18:14,16'::txid_snapshot; + txid_snapshot +--------------- + 12:18:14,16 +(1 row) + +select '12:16:14,14'::txid_snapshot; + txid_snapshot +--------------- + 12:16:14 +(1 row) + +-- errors +select '31:12:'::txid_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "31:12:" +LINE 1: select '31:12:'::txid_snapshot; + ^ +select '0:1:'::txid_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "0:1:" +LINE 1: select '0:1:'::txid_snapshot; + ^ +select '12:13:0'::txid_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "12:13:0" +LINE 1: select '12:13:0'::txid_snapshot; + ^ +select '12:16:14,13'::txid_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "12:16:14,13" +LINE 1: select '12:16:14,13'::txid_snapshot; + ^ +create temp table snapshot_test ( + nr integer, + snap txid_snapshot +); +insert into snapshot_test values (1, '12:13:'); +insert into snapshot_test values (2, '12:20:13,15,18'); +insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); +insert into snapshot_test values (4, '100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131'); +select snap from snapshot_test order by nr; + snap +------------------------------------------------------------------------------------------------------------------------------------- + 12:13: + 12:20:13,15,18 + 100001:100009:100005,100007,100008 + 100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131 +(4 rows) + +select txid_snapshot_xmin(snap), + txid_snapshot_xmax(snap), + txid_snapshot_xip(snap) +from snapshot_test order by nr; + txid_snapshot_xmin | txid_snapshot_xmax | txid_snapshot_xip +--------------------+--------------------+------------------- + 12 | 20 | 13 + 12 | 20 | 15 + 12 | 20 | 18 + 100001 | 100009 | 100005 + 100001 | 100009 | 100007 + 100001 | 100009 | 100008 + 100 | 150 | 101 + 100 | 150 | 102 + 100 | 150 | 103 + 100 | 150 | 104 + 100 | 150 | 105 + 100 | 150 | 106 + 100 | 150 | 107 + 100 | 150 | 108 + 100 | 150 | 109 + 100 | 150 | 110 + 100 | 150 | 111 + 100 | 150 | 112 + 100 | 150 | 113 + 100 | 150 | 114 + 100 | 150 | 115 + 100 | 150 | 116 + 100 | 150 | 117 + 100 | 150 | 118 + 100 | 150 | 119 + 100 | 150 | 120 + 100 | 150 | 121 + 100 | 150 | 122 + 100 | 150 | 123 + 100 | 150 | 124 + 100 | 150 | 125 + 100 | 150 | 126 + 100 | 150 | 127 + 100 | 150 | 128 + 100 | 150 | 129 + 100 | 150 | 130 + 100 | 150 | 131 +(37 rows) + +select id, txid_visible_in_snapshot(id, snap) +from snapshot_test, generate_series(11, 21) id +where nr = 2; + id | txid_visible_in_snapshot +----+-------------------------- + 11 | t + 12 | t + 13 | f + 14 | t + 15 | f + 16 | t + 17 | t + 18 | f + 19 | t + 20 | f + 21 | f +(11 rows) + +-- test bsearch +select id, txid_visible_in_snapshot(id, snap) +from snapshot_test, generate_series(90, 160) id +where nr = 4; + id | txid_visible_in_snapshot +-----+-------------------------- + 90 | t + 91 | t + 92 | t + 93 | t + 94 | t + 95 | t + 96 | t + 97 | t + 98 | t + 99 | t + 100 | t + 101 | f + 102 | f + 103 | f + 104 | f + 105 | f + 106 | f + 107 | f + 108 | f + 109 | f + 110 | f + 111 | f + 112 | f + 113 | f + 114 | f + 115 | f + 116 | f + 117 | f + 118 | f + 119 | f + 120 | f + 121 | f + 122 | f + 123 | f + 124 | f + 125 | f + 126 | f + 127 | f + 128 | f + 129 | f + 130 | f + 131 | f + 132 | t + 133 | t + 134 | t + 135 
| t + 136 | t + 137 | t + 138 | t + 139 | t + 140 | t + 141 | t + 142 | t + 143 | t + 144 | t + 145 | t + 146 | t + 147 | t + 148 | t + 149 | t + 150 | f + 151 | f + 152 | f + 153 | f + 154 | f + 155 | f + 156 | f + 157 | f + 158 | f + 159 | f + 160 | f +(71 rows) + +-- test current values also +select txid_current() >= txid_snapshot_xmin(txid_current_snapshot()); + ?column? +---------- + t +(1 row) + +-- we can't assume current is always less than xmax, however +select txid_visible_in_snapshot(txid_current(), txid_current_snapshot()); + txid_visible_in_snapshot +-------------------------- + f +(1 row) + +-- test 64bitness +select txid_snapshot '1000100010001000:1000100010001100:1000100010001012,1000100010001013'; + txid_snapshot +--------------------------------------------------------------------- + 1000100010001000:1000100010001100:1000100010001012,1000100010001013 +(1 row) + +select txid_visible_in_snapshot('1000100010001012', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + txid_visible_in_snapshot +-------------------------- + f +(1 row) + +select txid_visible_in_snapshot('1000100010001015', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + txid_visible_in_snapshot +-------------------------- + t +(1 row) + +-- test 64bit overflow +SELECT txid_snapshot '1:9223372036854775807:3'; + txid_snapshot +------------------------- + 1:9223372036854775807:3 +(1 row) + +SELECT txid_snapshot '1:9223372036854775808:3'; +ERROR: invalid input syntax for type pg_snapshot: "1:9223372036854775808:3" +LINE 1: SELECT txid_snapshot '1:9223372036854775808:3'; + ^ +-- test txid_current_if_assigned +BEGIN; +SELECT txid_current_if_assigned() IS NULL; + ?column? +---------- + t +(1 row) + +SELECT txid_current() \gset +SELECT txid_current_if_assigned() IS NOT DISTINCT FROM BIGINT :'txid_current'; + ?column? 
+---------- + t +(1 row) + +COMMIT; +-- test xid status functions +BEGIN; +SELECT txid_current() AS committed \gset +COMMIT; +BEGIN; +SELECT txid_current() AS rolledback \gset +ROLLBACK; +BEGIN; +SELECT txid_current() AS inprogress \gset +SELECT txid_status(:committed) AS committed; + committed +----------- + committed +(1 row) + +SELECT txid_status(:rolledback) AS rolledback; + rolledback +------------ + aborted +(1 row) + +SELECT txid_status(:inprogress) AS inprogress; + inprogress +------------- + in progress +(1 row) + +SELECT txid_status(1); -- BootstrapTransactionId is always committed + txid_status +------------- + committed +(1 row) + +SELECT txid_status(2); -- FrozenTransactionId is always committed + txid_status +------------- + committed +(1 row) + +SELECT txid_status(3); -- in regress testing FirstNormalTransactionId will always be behind oldestXmin + txid_status +------------- + +(1 row) + +COMMIT; +BEGIN; +CREATE FUNCTION test_future_xid_status(bigint) +RETURNS void +LANGUAGE plpgsql +AS +$$ +BEGIN + PERFORM txid_status($1); + RAISE EXCEPTION 'didn''t ERROR at xid in the future as expected'; +EXCEPTION + WHEN invalid_parameter_value THEN + RAISE NOTICE 'Got expected error for xid in the future'; +END; +$$; +SELECT test_future_xid_status(:inprogress + 10000); +NOTICE: Got expected error for xid in the future + test_future_xid_status +------------------------ + +(1 row) + +ROLLBACK; diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out new file mode 100644 index 0000000..a640cfc --- /dev/null +++ b/src/test/regress/expected/type_sanity.out @@ -0,0 +1,742 @@ +-- +-- TYPE_SANITY +-- Sanity checks for common errors in making type-related system tables: +-- pg_type, pg_class, pg_attribute, pg_range. +-- +-- None of the SELECTs here should ever find any matching entries, +-- so the expected output is easy to maintain ;-). +-- A test failure indicates someone messed up an entry in the system tables. +-- +-- NB: we assume the oidjoins test will have caught any dangling links, +-- that is OID or REGPROC fields that are not zero and do not match some +-- row in the linked-to table. However, if we want to enforce that a link +-- field can't be 0, we have to check it here. +-- **************** pg_type **************** +-- Look for illegal values in pg_type fields. +SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE t1.typnamespace = 0 OR + (t1.typlen <= 0 AND t1.typlen != -1 AND t1.typlen != -2) OR + (t1.typtype not in ('b', 'c', 'd', 'e', 'm', 'p', 'r')) OR + NOT t1.typisdefined OR + (t1.typalign not in ('c', 's', 'i', 'd')) OR + (t1.typstorage not in ('p', 'x', 'e', 'm')); + oid | typname +-----+--------- +(0 rows) + +-- Look for "pass by value" types that can't be passed by value. +SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE t1.typbyval AND + (t1.typlen != 1 OR t1.typalign != 'c') AND + (t1.typlen != 2 OR t1.typalign != 's') AND + (t1.typlen != 4 OR t1.typalign != 'i') AND + (t1.typlen != 8 OR t1.typalign != 'd'); + oid | typname +-----+--------- +(0 rows) + +-- Look for "toastable" types that aren't varlena. +SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE t1.typstorage != 'p' AND + (t1.typbyval OR t1.typlen != -1); + oid | typname +-----+--------- +(0 rows) + +-- Look for complex types that do not have a typrelid entry, +-- or basic types that do. 
+SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE (t1.typtype = 'c' AND t1.typrelid = 0) OR + (t1.typtype != 'c' AND t1.typrelid != 0); + oid | typname +-----+--------- +(0 rows) + +-- Look for types that should have an array type but don't. +-- Generally anything that's not a pseudotype should have an array type. +-- However, we do have a small number of exceptions. +SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE t1.typtype not in ('p') AND t1.typname NOT LIKE E'\\_%' + AND NOT EXISTS + (SELECT 1 FROM pg_type as t2 + WHERE t2.typname = ('_' || t1.typname)::name AND + t2.typelem = t1.oid and t1.typarray = t2.oid) +ORDER BY t1.oid; + oid | typname +------+------------------------------ + 194 | pg_node_tree + 3361 | pg_ndistinct + 3402 | pg_dependencies + 4600 | pg_brin_bloom_summary + 4601 | pg_brin_minmax_multi_summary + 5017 | pg_mcv_list +(6 rows) + +-- Make sure typarray points to a "true" array type of our own base +SELECT t1.oid, t1.typname as basetype, t2.typname as arraytype, + t2.typsubscript +FROM pg_type t1 LEFT JOIN pg_type t2 ON (t1.typarray = t2.oid) +WHERE t1.typarray <> 0 AND + (t2.oid IS NULL OR + t2.typsubscript <> 'array_subscript_handler'::regproc); + oid | basetype | arraytype | typsubscript +-----+----------+-----------+-------------- +(0 rows) + +-- Look for range types that do not have a pg_range entry +SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE t1.typtype = 'r' AND + NOT EXISTS(SELECT 1 FROM pg_range r WHERE rngtypid = t1.oid); + oid | typname +-----+--------- +(0 rows) + +-- Look for range types whose typalign isn't sufficient +SELECT t1.oid, t1.typname, t1.typalign, t2.typname, t2.typalign +FROM pg_type as t1 + LEFT JOIN pg_range as r ON rngtypid = t1.oid + LEFT JOIN pg_type as t2 ON rngsubtype = t2.oid +WHERE t1.typtype = 'r' AND + (t1.typalign != (CASE WHEN t2.typalign = 'd' THEN 'd'::"char" + ELSE 'i'::"char" END) + OR t2.oid IS NULL); + oid | typname | typalign | typname | typalign +-----+---------+----------+---------+---------- +(0 rows) + +-- Text conversion routines must be provided. 
+SELECT t1.oid, t1.typname +FROM pg_type as t1 +WHERE (t1.typinput = 0 OR t1.typoutput = 0); + oid | typname +-----+--------- +(0 rows) + +-- Check for bogus typinput routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typinput = p1.oid AND NOT + ((p1.pronargs = 1 AND p1.proargtypes[0] = 'cstring'::regtype) OR + (p1.pronargs = 2 AND p1.proargtypes[0] = 'cstring'::regtype AND + p1.proargtypes[1] = 'oid'::regtype) OR + (p1.pronargs = 3 AND p1.proargtypes[0] = 'cstring'::regtype AND + p1.proargtypes[1] = 'oid'::regtype AND + p1.proargtypes[2] = 'int4'::regtype)); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- As of 8.0, this check finds refcursor, which is borrowing +-- other types' I/O routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typinput = p1.oid AND t1.typtype in ('b', 'p') AND NOT + (t1.typelem != 0 AND t1.typlen < 0) AND NOT + (p1.prorettype = t1.oid AND NOT p1.proretset) +ORDER BY 1; + oid | typname | oid | proname +------+-----------+-----+--------- + 1790 | refcursor | 46 | textin +(1 row) + +-- Varlena array types will point to array_in +-- Exception as of 8.1: int2vector and oidvector have their own I/O routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typinput = p1.oid AND + (t1.typelem != 0 AND t1.typlen < 0) AND NOT + (p1.oid = 'array_in'::regproc) +ORDER BY 1; + oid | typname | oid | proname +-----+------------+-----+-------------- + 22 | int2vector | 40 | int2vectorin + 30 | oidvector | 54 | oidvectorin +(2 rows) + +-- typinput routines should not be volatile +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typinput = p1.oid AND p1.provolatile NOT IN ('i', 's'); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Composites, domains, enums, multiranges, ranges should all use the same input routines +SELECT DISTINCT typtype, typinput +FROM pg_type AS t1 +WHERE t1.typtype not in ('b', 'p') +ORDER BY 1; + typtype | typinput +---------+--------------- + c | record_in + d | domain_in + e | enum_in + m | multirange_in + r | range_in +(5 rows) + +-- Check for bogus typoutput routines +-- As of 8.0, this check finds refcursor, which is borrowing +-- other types' I/O routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typoutput = p1.oid AND t1.typtype in ('b', 'p') AND NOT + (p1.pronargs = 1 AND + (p1.proargtypes[0] = t1.oid OR + (p1.oid = 'array_out'::regproc AND + t1.typelem != 0 AND t1.typlen = -1))) +ORDER BY 1; + oid | typname | oid | proname +------+-----------+-----+--------- + 1790 | refcursor | 47 | textout +(1 row) + +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typoutput = p1.oid AND NOT + (p1.prorettype = 'cstring'::regtype AND NOT p1.proretset); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- typoutput routines should not be volatile +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typoutput = p1.oid AND p1.provolatile NOT IN ('i', 's'); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Composites, enums, multiranges, ranges should all use the same output routines +SELECT DISTINCT typtype, typoutput +FROM pg_type AS t1 +WHERE t1.typtype not in ('b', 'd', 'p') +ORDER BY 1; + typtype | typoutput 
+---------+---------------- + c | record_out + e | enum_out + m | multirange_out + r | range_out +(4 rows) + +-- Domains should have same typoutput as their base types +SELECT t1.oid, t1.typname, t2.oid, t2.typname +FROM pg_type AS t1 LEFT JOIN pg_type AS t2 ON t1.typbasetype = t2.oid +WHERE t1.typtype = 'd' AND t1.typoutput IS DISTINCT FROM t2.typoutput; + oid | typname | oid | typname +-----+---------+-----+--------- +(0 rows) + +-- Check for bogus typreceive routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typreceive = p1.oid AND NOT + ((p1.pronargs = 1 AND p1.proargtypes[0] = 'internal'::regtype) OR + (p1.pronargs = 2 AND p1.proargtypes[0] = 'internal'::regtype AND + p1.proargtypes[1] = 'oid'::regtype) OR + (p1.pronargs = 3 AND p1.proargtypes[0] = 'internal'::regtype AND + p1.proargtypes[1] = 'oid'::regtype AND + p1.proargtypes[2] = 'int4'::regtype)); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- As of 7.4, this check finds refcursor, which is borrowing +-- other types' I/O routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typreceive = p1.oid AND t1.typtype in ('b', 'p') AND NOT + (t1.typelem != 0 AND t1.typlen < 0) AND NOT + (p1.prorettype = t1.oid AND NOT p1.proretset) +ORDER BY 1; + oid | typname | oid | proname +------+-----------+------+---------- + 1790 | refcursor | 2414 | textrecv +(1 row) + +-- Varlena array types will point to array_recv +-- Exception as of 8.1: int2vector and oidvector have their own I/O routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typreceive = p1.oid AND + (t1.typelem != 0 AND t1.typlen < 0) AND NOT + (p1.oid = 'array_recv'::regproc) +ORDER BY 1; + oid | typname | oid | proname +-----+------------+------+---------------- + 22 | int2vector | 2410 | int2vectorrecv + 30 | oidvector | 2420 | oidvectorrecv +(2 rows) + +-- Suspicious if typreceive doesn't take same number of args as typinput +SELECT t1.oid, t1.typname, p1.oid, p1.proname, p2.oid, p2.proname +FROM pg_type AS t1, pg_proc AS p1, pg_proc AS p2 +WHERE t1.typinput = p1.oid AND t1.typreceive = p2.oid AND + p1.pronargs != p2.pronargs; + oid | typname | oid | proname | oid | proname +-----+---------+-----+---------+-----+--------- +(0 rows) + +-- typreceive routines should not be volatile +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typreceive = p1.oid AND p1.provolatile NOT IN ('i', 's'); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Composites, domains, enums, multiranges, ranges should all use the same receive routines +SELECT DISTINCT typtype, typreceive +FROM pg_type AS t1 +WHERE t1.typtype not in ('b', 'p') +ORDER BY 1; + typtype | typreceive +---------+----------------- + c | record_recv + d | domain_recv + e | enum_recv + m | multirange_recv + r | range_recv +(5 rows) + +-- Check for bogus typsend routines +-- As of 7.4, this check finds refcursor, which is borrowing +-- other types' I/O routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typsend = p1.oid AND t1.typtype in ('b', 'p') AND NOT + (p1.pronargs = 1 AND + (p1.proargtypes[0] = t1.oid OR + (p1.oid = 'array_send'::regproc AND + t1.typelem != 0 AND t1.typlen = -1))) +ORDER BY 1; + oid | typname | oid | proname +------+-----------+------+---------- + 1790 | refcursor | 2415 | textsend +(1 row) + +SELECT t1.oid, t1.typname, 
p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typsend = p1.oid AND NOT + (p1.prorettype = 'bytea'::regtype AND NOT p1.proretset); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- typsend routines should not be volatile +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typsend = p1.oid AND p1.provolatile NOT IN ('i', 's'); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Composites, enums, multiranges, ranges should all use the same send routines +SELECT DISTINCT typtype, typsend +FROM pg_type AS t1 +WHERE t1.typtype not in ('b', 'd', 'p') +ORDER BY 1; + typtype | typsend +---------+----------------- + c | record_send + e | enum_send + m | multirange_send + r | range_send +(4 rows) + +-- Domains should have same typsend as their base types +SELECT t1.oid, t1.typname, t2.oid, t2.typname +FROM pg_type AS t1 LEFT JOIN pg_type AS t2 ON t1.typbasetype = t2.oid +WHERE t1.typtype = 'd' AND t1.typsend IS DISTINCT FROM t2.typsend; + oid | typname | oid | typname +-----+---------+-----+--------- +(0 rows) + +-- Check for bogus typmodin routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typmodin = p1.oid AND NOT + (p1.pronargs = 1 AND + p1.proargtypes[0] = 'cstring[]'::regtype AND + p1.prorettype = 'int4'::regtype AND NOT p1.proretset); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- typmodin routines should not be volatile +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typmodin = p1.oid AND p1.provolatile NOT IN ('i', 's'); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Check for bogus typmodout routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typmodout = p1.oid AND NOT + (p1.pronargs = 1 AND + p1.proargtypes[0] = 'int4'::regtype AND + p1.prorettype = 'cstring'::regtype AND NOT p1.proretset); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- typmodout routines should not be volatile +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typmodout = p1.oid AND p1.provolatile NOT IN ('i', 's'); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- Array types should have same typmodin/out as their element types +SELECT t1.oid, t1.typname, t2.oid, t2.typname +FROM pg_type AS t1, pg_type AS t2 +WHERE t1.typelem = t2.oid AND NOT + (t1.typmodin = t2.typmodin AND t1.typmodout = t2.typmodout); + oid | typname | oid | typname +-----+---------+-----+--------- +(0 rows) + +-- Array types should have same typdelim as their element types +SELECT t1.oid, t1.typname, t2.oid, t2.typname +FROM pg_type AS t1, pg_type AS t2 +WHERE t1.typarray = t2.oid AND NOT (t1.typdelim = t2.typdelim); + oid | typname | oid | typname +-----+---------+-----+--------- +(0 rows) + +-- Look for array types whose typalign isn't sufficient +SELECT t1.oid, t1.typname, t1.typalign, t2.typname, t2.typalign +FROM pg_type AS t1, pg_type AS t2 +WHERE t1.typarray = t2.oid AND + t2.typalign != (CASE WHEN t1.typalign = 'd' THEN 'd'::"char" + ELSE 'i'::"char" END); + oid | typname | typalign | typname | typalign +-----+---------+----------+---------+---------- +(0 rows) + +-- Check for typelem set without a handler +SELECT t1.oid, t1.typname, t1.typelem +FROM pg_type AS t1 +WHERE t1.typelem != 0 AND t1.typsubscript = 0; + 
oid | typname | typelem +-----+---------+--------- +(0 rows) + +-- Check for misuse of standard subscript handlers +SELECT t1.oid, t1.typname, + t1.typelem, t1.typlen, t1.typbyval +FROM pg_type AS t1 +WHERE t1.typsubscript = 'array_subscript_handler'::regproc AND NOT + (t1.typelem != 0 AND t1.typlen = -1 AND NOT t1.typbyval); + oid | typname | typelem | typlen | typbyval +-----+---------+---------+--------+---------- +(0 rows) + +SELECT t1.oid, t1.typname, + t1.typelem, t1.typlen, t1.typbyval +FROM pg_type AS t1 +WHERE t1.typsubscript = 'raw_array_subscript_handler'::regproc AND NOT + (t1.typelem != 0 AND t1.typlen > 0 AND NOT t1.typbyval); + oid | typname | typelem | typlen | typbyval +-----+---------+---------+--------+---------- +(0 rows) + +-- Check for bogus typanalyze routines +SELECT t1.oid, t1.typname, p1.oid, p1.proname +FROM pg_type AS t1, pg_proc AS p1 +WHERE t1.typanalyze = p1.oid AND NOT + (p1.pronargs = 1 AND + p1.proargtypes[0] = 'internal'::regtype AND + p1.prorettype = 'bool'::regtype AND NOT p1.proretset); + oid | typname | oid | proname +-----+---------+-----+--------- +(0 rows) + +-- there does not seem to be a reason to care about volatility of typanalyze +-- domains inherit their base type's typanalyze +SELECT d.oid, d.typname, d.typanalyze, t.oid, t.typname, t.typanalyze +FROM pg_type d JOIN pg_type t ON d.typbasetype = t.oid +WHERE d.typanalyze != t.typanalyze; + oid | typname | typanalyze | oid | typname | typanalyze +-----+---------+------------+-----+---------+------------ +(0 rows) + +-- range_typanalyze should be used for all and only range types +-- (but exclude domains, which we checked above) +SELECT t.oid, t.typname, t.typanalyze +FROM pg_type t LEFT JOIN pg_range r on t.oid = r.rngtypid +WHERE t.typbasetype = 0 AND + (t.typanalyze = 'range_typanalyze'::regproc) != (r.rngtypid IS NOT NULL); + oid | typname | typanalyze +-----+---------+------------ +(0 rows) + +-- array_typanalyze should be used for all and only array types +-- (but exclude domains, which we checked above) +-- As of 9.2 this finds int2vector and oidvector, which are weird anyway +SELECT t.oid, t.typname, t.typanalyze +FROM pg_type t +WHERE t.typbasetype = 0 AND + (t.typanalyze = 'array_typanalyze'::regproc) != + (t.typsubscript = 'array_subscript_handler'::regproc) +ORDER BY 1; + oid | typname | typanalyze +-----+------------+------------ + 22 | int2vector | - + 30 | oidvector | - +(2 rows) + +-- **************** pg_class **************** +-- Look for illegal values in pg_class fields +SELECT c1.oid, c1.relname +FROM pg_class as c1 +WHERE relkind NOT IN ('r', 'i', 'S', 't', 'v', 'm', 'c', 'f', 'p') OR + relpersistence NOT IN ('p', 'u', 't') OR + relreplident NOT IN ('d', 'n', 'f', 'i'); + oid | relname +-----+--------- +(0 rows) + +-- All tables and indexes should have an access method. 
+SELECT c1.oid, c1.relname +FROM pg_class as c1 +WHERE c1.relkind NOT IN ('S', 'v', 'f', 'c') and + c1.relam = 0; + oid | relname +-----+--------- +(0 rows) + +-- Conversely, sequences, views, types shouldn't have them +SELECT c1.oid, c1.relname +FROM pg_class as c1 +WHERE c1.relkind IN ('S', 'v', 'f', 'c') and + c1.relam != 0; + oid | relname +-----+--------- +(0 rows) + +-- Indexes should have AMs of type 'i' +SELECT pc.oid, pc.relname, pa.amname, pa.amtype +FROM pg_class as pc JOIN pg_am AS pa ON (pc.relam = pa.oid) +WHERE pc.relkind IN ('i') and + pa.amtype != 'i'; + oid | relname | amname | amtype +-----+---------+--------+-------- +(0 rows) + +-- Tables, matviews etc should have AMs of type 't' +SELECT pc.oid, pc.relname, pa.amname, pa.amtype +FROM pg_class as pc JOIN pg_am AS pa ON (pc.relam = pa.oid) +WHERE pc.relkind IN ('r', 't', 'm') and + pa.amtype != 't'; + oid | relname | amname | amtype +-----+---------+--------+-------- +(0 rows) + +-- **************** pg_attribute **************** +-- Look for illegal values in pg_attribute fields +SELECT a1.attrelid, a1.attname +FROM pg_attribute as a1 +WHERE a1.attrelid = 0 OR a1.atttypid = 0 OR a1.attnum = 0 OR + a1.attcacheoff != -1 OR a1.attinhcount < 0 OR + (a1.attinhcount = 0 AND NOT a1.attislocal); + attrelid | attname +----------+--------- +(0 rows) + +-- Cross-check attnum against parent relation +SELECT a1.attrelid, a1.attname, c1.oid, c1.relname +FROM pg_attribute AS a1, pg_class AS c1 +WHERE a1.attrelid = c1.oid AND a1.attnum > c1.relnatts; + attrelid | attname | oid | relname +----------+---------+-----+--------- +(0 rows) + +-- Detect missing pg_attribute entries: should have as many non-system +-- attributes as parent relation expects +SELECT c1.oid, c1.relname +FROM pg_class AS c1 +WHERE c1.relnatts != (SELECT count(*) FROM pg_attribute AS a1 + WHERE a1.attrelid = c1.oid AND a1.attnum > 0); + oid | relname +-----+--------- +(0 rows) + +-- Cross-check against pg_type entry +-- NOTE: we allow attstorage to be 'plain' even when typstorage is not; +-- this is mainly for toast tables. +SELECT a1.attrelid, a1.attname, t1.oid, t1.typname +FROM pg_attribute AS a1, pg_type AS t1 +WHERE a1.atttypid = t1.oid AND + (a1.attlen != t1.typlen OR + a1.attalign != t1.typalign OR + a1.attbyval != t1.typbyval OR + (a1.attstorage != t1.typstorage AND a1.attstorage != 'p')); + attrelid | attname | oid | typname +----------+---------+-----+--------- +(0 rows) + +-- **************** pg_range **************** +-- Look for illegal values in pg_range fields. +SELECT r.rngtypid, r.rngsubtype +FROM pg_range as r +WHERE r.rngtypid = 0 OR r.rngsubtype = 0 OR r.rngsubopc = 0; + rngtypid | rngsubtype +----------+------------ +(0 rows) + +-- rngcollation should be specified iff subtype is collatable +SELECT r.rngtypid, r.rngsubtype, r.rngcollation, t.typcollation +FROM pg_range r JOIN pg_type t ON t.oid = r.rngsubtype +WHERE (rngcollation = 0) != (typcollation = 0); + rngtypid | rngsubtype | rngcollation | typcollation +----------+------------+--------------+-------------- +(0 rows) + +-- opclass had better be a btree opclass accepting the subtype. 
+-- We must allow anyarray matches, cf IsBinaryCoercible() +SELECT r.rngtypid, r.rngsubtype, o.opcmethod, o.opcname +FROM pg_range r JOIN pg_opclass o ON o.oid = r.rngsubopc +WHERE o.opcmethod != 403 OR + ((o.opcintype != r.rngsubtype) AND NOT + (o.opcintype = 'pg_catalog.anyarray'::regtype AND + EXISTS(select 1 from pg_catalog.pg_type where + oid = r.rngsubtype and typelem != 0 and + typsubscript = 'array_subscript_handler'::regproc))); + rngtypid | rngsubtype | opcmethod | opcname +----------+------------+-----------+--------- +(0 rows) + +-- canonical function, if any, had better match the range type +SELECT r.rngtypid, r.rngsubtype, p.proname +FROM pg_range r JOIN pg_proc p ON p.oid = r.rngcanonical +WHERE pronargs != 1 OR proargtypes[0] != rngtypid OR prorettype != rngtypid; + rngtypid | rngsubtype | proname +----------+------------+--------- +(0 rows) + +-- subdiff function, if any, had better match the subtype +SELECT r.rngtypid, r.rngsubtype, p.proname +FROM pg_range r JOIN pg_proc p ON p.oid = r.rngsubdiff +WHERE pronargs != 2 + OR proargtypes[0] != rngsubtype OR proargtypes[1] != rngsubtype + OR prorettype != 'pg_catalog.float8'::regtype; + rngtypid | rngsubtype | proname +----------+------------+--------- +(0 rows) + +-- every range should have a valid multirange +SELECT r.rngtypid, r.rngsubtype, r.rngmultitypid +FROM pg_range r +WHERE r.rngmultitypid IS NULL OR r.rngmultitypid = 0; + rngtypid | rngsubtype | rngmultitypid +----------+------------+--------------- +(0 rows) + +-- Create a table that holds all the known in-core data types and leave it +-- around so as pg_upgrade is able to test their binary compatibility. +CREATE TABLE tab_core_types AS SELECT + '(11,12)'::point, + '(1,1),(2,2)'::line, + '((11,11),(12,12))'::lseg, + '((11,11),(13,13))'::box, + '((11,12),(13,13),(14,14))'::path AS openedpath, + '[(11,12),(13,13),(14,14)]'::path AS closedpath, + '((11,12),(13,13),(14,14))'::polygon, + '1,1,1'::circle, + 'today'::date, + 'now'::time, + 'now'::timestamp, + 'now'::timetz, + 'now'::timestamptz, + '12 seconds'::interval, + '{"reason":"because"}'::json, + '{"when":"now"}'::jsonb, + '$.a[*] ? 
(@ > 2)'::jsonpath, + '127.0.0.1'::inet, + '127.0.0.0/8'::cidr, + '00:01:03:86:1c:ba'::macaddr8, + '00:01:03:86:1c:ba'::macaddr, + 2::int2, 4::int4, 8::int8, + 4::float4, '8'::float8, pi()::numeric, + 'foo'::"char", + 'c'::bpchar, + 'abc'::varchar, + 'name'::name, + 'txt'::text, + true::bool, + E'\\xDEADBEEF'::bytea, + B'10001'::bit, + B'10001'::varbit AS varbit, + '12.34'::money, + 'abc'::refcursor, + '1 2'::int2vector, + '1 2'::oidvector, + format('%I=UC/%I', USER, USER)::aclitem AS aclitem, + 'a fat cat sat on a mat and ate a fat rat'::tsvector, + 'fat & rat'::tsquery, + 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'::uuid, + '11'::xid8, + 'pg_class'::regclass, + 'regtype'::regtype type, + 'pg_monitor'::regrole, + 'pg_class'::regclass::oid, + '(1,1)'::tid, '2'::xid, '3'::cid, + '10:20:10,14,15'::txid_snapshot, + '10:20:10,14,15'::pg_snapshot, + '16/B374D848'::pg_lsn, + 1::information_schema.cardinal_number, + 'l'::information_schema.character_data, + 'n'::information_schema.sql_identifier, + 'now'::information_schema.time_stamp, + 'YES'::information_schema.yes_or_no, + '(1,2)'::int4range, '{(1,2)}'::int4multirange, + '(3,4)'::int8range, '{(3,4)}'::int8multirange, + '(3,4)'::numrange, '{(3,4)}'::nummultirange, + '(2020-01-02, 2021-02-03)'::daterange, + '{(2020-01-02, 2021-02-03)}'::datemultirange, + '(2020-01-02 03:04:05, 2021-02-03 06:07:08)'::tsrange, + '{(2020-01-02 03:04:05, 2021-02-03 06:07:08)}'::tsmultirange, + '(2020-01-02 03:04:05, 2021-02-03 06:07:08)'::tstzrange, + '{(2020-01-02 03:04:05, 2021-02-03 06:07:08)}'::tstzmultirange; +-- Sanity check on the previous table, checking that all core types are +-- included in this table. +SELECT oid, typname, typtype, typelem, typarray + FROM pg_type t + WHERE oid < 16384 AND + -- Exclude pseudotypes and composite types. + typtype NOT IN ('p', 'c') AND + -- These reg* types cannot be pg_upgraded, so discard them. + oid != ALL(ARRAY['regproc', 'regprocedure', 'regoper', + 'regoperator', 'regconfig', 'regdictionary', + 'regnamespace', 'regcollation']::regtype[]) AND + -- Discard types that do not accept input values as these cannot be + -- tested easily. + -- Note: XML might be disabled at compile-time. + oid != ALL(ARRAY['gtsvector', 'pg_node_tree', + 'pg_ndistinct', 'pg_dependencies', 'pg_mcv_list', + 'pg_brin_bloom_summary', + 'pg_brin_minmax_multi_summary', 'xml']::regtype[]) AND + -- Discard arrays. + NOT EXISTS (SELECT 1 FROM pg_type u WHERE u.typarray = t.oid) + -- Exclude everything from the table created above. This checks + -- that no in-core types are missing in tab_core_types. 
+ AND NOT EXISTS (SELECT 1 + FROM pg_attribute a + WHERE a.atttypid=t.oid AND + a.attnum > 0 AND + a.attrelid='tab_core_types'::regclass); + oid | typname | typtype | typelem | typarray +-----+---------+---------+---------+---------- +(0 rows) + diff --git a/src/test/regress/expected/typed_table.out b/src/test/regress/expected/typed_table.out new file mode 100644 index 0000000..2e47ecb --- /dev/null +++ b/src/test/regress/expected/typed_table.out @@ -0,0 +1,133 @@ +CREATE TABLE ttable1 OF nothing; +ERROR: type "nothing" does not exist +CREATE TYPE person_type AS (id int, name text); +CREATE TABLE persons OF person_type; +CREATE TABLE IF NOT EXISTS persons OF person_type; +NOTICE: relation "persons" already exists, skipping +SELECT * FROM persons; + id | name +----+------ +(0 rows) + +\d persons + Table "public.persons" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | | + name | text | | | +Typed table of type: person_type + +CREATE FUNCTION get_all_persons() RETURNS SETOF person_type +LANGUAGE SQL +AS $$ + SELECT * FROM persons; +$$; +SELECT * FROM get_all_persons(); + id | name +----+------ +(0 rows) + +-- certain ALTER TABLE operations on typed tables are not allowed +ALTER TABLE persons ADD COLUMN comment text; +ERROR: cannot add column to typed table +ALTER TABLE persons DROP COLUMN name; +ERROR: cannot drop column from typed table +ALTER TABLE persons RENAME COLUMN id TO num; +ERROR: cannot rename column of typed table +ALTER TABLE persons ALTER COLUMN name TYPE varchar; +ERROR: cannot alter column type of typed table +CREATE TABLE stuff (id int); +ALTER TABLE persons INHERIT stuff; +ERROR: cannot change inheritance of typed table +CREATE TABLE personsx OF person_type (myname WITH OPTIONS NOT NULL); -- error +ERROR: column "myname" does not exist +CREATE TABLE persons2 OF person_type ( + id WITH OPTIONS PRIMARY KEY, + UNIQUE (name) +); +\d persons2 + Table "public.persons2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | not null | + name | text | | | +Indexes: + "persons2_pkey" PRIMARY KEY, btree (id) + "persons2_name_key" UNIQUE CONSTRAINT, btree (name) +Typed table of type: person_type + +CREATE TABLE persons3 OF person_type ( + PRIMARY KEY (id), + name WITH OPTIONS DEFAULT '' +); +\d persons3 + Table "public.persons3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+---------- + id | integer | | not null | + name | text | | | ''::text +Indexes: + "persons3_pkey" PRIMARY KEY, btree (id) +Typed table of type: person_type + +CREATE TABLE persons4 OF person_type ( + name WITH OPTIONS NOT NULL, + name WITH OPTIONS DEFAULT '' -- error, specified more than once +); +ERROR: column "name" specified more than once +DROP TYPE person_type RESTRICT; +ERROR: cannot drop type person_type because other objects depend on it +DETAIL: table persons depends on type person_type +function get_all_persons() depends on type person_type +table persons2 depends on type person_type +table persons3 depends on type person_type +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
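-- The dependency chain reported in the error above is recorded in pg_depend;
-- a minimal illustrative query (a sketch, assuming the standard
-- pg_describe_object() helper, not something this test runs) would be roughly:
--   SELECT pg_describe_object(classid, objid, objsubid) AS dependent_object
--   FROM pg_depend
--   WHERE refclassid = 'pg_type'::regclass
--     AND refobjid = 'person_type'::regtype;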
+DROP TYPE person_type CASCADE; +NOTICE: drop cascades to 4 other objects +DETAIL: drop cascades to table persons +drop cascades to function get_all_persons() +drop cascades to table persons2 +drop cascades to table persons3 +CREATE TABLE persons5 OF stuff; -- only CREATE TYPE AS types may be used +ERROR: type stuff is not a composite type +DROP TABLE stuff; +-- implicit casting +CREATE TYPE person_type AS (id int, name text); +CREATE TABLE persons OF person_type; +INSERT INTO persons VALUES (1, 'test'); +CREATE FUNCTION namelen(person_type) RETURNS int LANGUAGE SQL AS $$ SELECT length($1.name) $$; +SELECT id, namelen(persons) FROM persons; + id | namelen +----+--------- + 1 | 4 +(1 row) + +CREATE TABLE persons2 OF person_type ( + id WITH OPTIONS PRIMARY KEY, + UNIQUE (name) +); +\d persons2 + Table "public.persons2" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + id | integer | | not null | + name | text | | | +Indexes: + "persons2_pkey" PRIMARY KEY, btree (id) + "persons2_name_key" UNIQUE CONSTRAINT, btree (name) +Typed table of type: person_type + +CREATE TABLE persons3 OF person_type ( + PRIMARY KEY (id), + name NOT NULL DEFAULT '' +); +\d persons3 + Table "public.persons3" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+---------- + id | integer | | not null | + name | text | | not null | ''::text +Indexes: + "persons3_pkey" PRIMARY KEY, btree (id) +Typed table of type: person_type + diff --git a/src/test/regress/expected/unicode.out b/src/test/regress/expected/unicode.out new file mode 100644 index 0000000..f2713a2 --- /dev/null +++ b/src/test/regress/expected/unicode.out @@ -0,0 +1,89 @@ +SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset +\if :skip_test +\quit +\endif +SELECT U&'\0061\0308bc' <> U&'\00E4bc' COLLATE "C" AS sanity_check; + sanity_check +-------------- + t +(1 row) + +SELECT normalize(''); + normalize +----------- + +(1 row) + +SELECT normalize(U&'\0061\0308\24D1c') = U&'\00E4\24D1c' COLLATE "C" AS test_default; + test_default +-------------- + t +(1 row) + +SELECT normalize(U&'\0061\0308\24D1c', NFC) = U&'\00E4\24D1c' COLLATE "C" AS test_nfc; + test_nfc +---------- + t +(1 row) + +SELECT normalize(U&'\00E4bc', NFC) = U&'\00E4bc' COLLATE "C" AS test_nfc_idem; + test_nfc_idem +--------------- + t +(1 row) + +SELECT normalize(U&'\00E4\24D1c', NFD) = U&'\0061\0308\24D1c' COLLATE "C" AS test_nfd; + test_nfd +---------- + t +(1 row) + +SELECT normalize(U&'\0061\0308\24D1c', NFKC) = U&'\00E4bc' COLLATE "C" AS test_nfkc; + test_nfkc +----------- + t +(1 row) + +SELECT normalize(U&'\00E4\24D1c', NFKD) = U&'\0061\0308bc' COLLATE "C" AS test_nfkd; + test_nfkd +----------- + t +(1 row) + +SELECT "normalize"('abc', 'def'); -- run-time error +ERROR: invalid normalization form: def +SELECT U&'\00E4\24D1c' IS NORMALIZED AS test_default; + test_default +-------------- + t +(1 row) + +SELECT U&'\00E4\24D1c' IS NFC NORMALIZED AS test_nfc; + test_nfc +---------- + t +(1 row) + +SELECT num, val, + val IS NFC NORMALIZED AS NFC, + val IS NFD NORMALIZED AS NFD, + val IS NFKC NORMALIZED AS NFKC, + val IS NFKD NORMALIZED AS NFKD +FROM + (VALUES (1, U&'\00E4bc'), + (2, U&'\0061\0308bc'), + (3, U&'\00E4\24D1c'), + (4, U&'\0061\0308\24D1c'), + (5, '')) vals (num, val) +ORDER BY num; + num | val | nfc | nfd | nfkc | nfkd +-----+-----+-----+-----+------+------ + 1 | äbc | t | f | t | f + 2 | äbc | f | t | f | t + 3 | äⓑc | t | f | f | f + 4 | äⓑc | f | t | f | f + 5 | | t | t | t | t +(5 
rows) + +SELECT is_normalized('abc', 'def'); -- run-time error +ERROR: invalid normalization form: def diff --git a/src/test/regress/expected/unicode_1.out b/src/test/regress/expected/unicode_1.out new file mode 100644 index 0000000..8505c4f --- /dev/null +++ b/src/test/regress/expected/unicode_1.out @@ -0,0 +1,3 @@ +SELECT getdatabaseencoding() <> 'UTF8' AS skip_test \gset +\if :skip_test +\quit diff --git a/src/test/regress/expected/union.out b/src/test/regress/expected/union.out new file mode 100644 index 0000000..e2613d6 --- /dev/null +++ b/src/test/regress/expected/union.out @@ -0,0 +1,1434 @@ +-- +-- UNION (also INTERSECT, EXCEPT) +-- +-- Simple UNION constructs +SELECT 1 AS two UNION SELECT 2 ORDER BY 1; + two +----- + 1 + 2 +(2 rows) + +SELECT 1 AS one UNION SELECT 1 ORDER BY 1; + one +----- + 1 +(1 row) + +SELECT 1 AS two UNION ALL SELECT 2; + two +----- + 1 + 2 +(2 rows) + +SELECT 1 AS two UNION ALL SELECT 1; + two +----- + 1 + 1 +(2 rows) + +SELECT 1 AS three UNION SELECT 2 UNION SELECT 3 ORDER BY 1; + three +------- + 1 + 2 + 3 +(3 rows) + +SELECT 1 AS two UNION SELECT 2 UNION SELECT 2 ORDER BY 1; + two +----- + 1 + 2 +(2 rows) + +SELECT 1 AS three UNION SELECT 2 UNION ALL SELECT 2 ORDER BY 1; + three +------- + 1 + 2 + 2 +(3 rows) + +SELECT 1.1 AS two UNION SELECT 2.2 ORDER BY 1; + two +----- + 1.1 + 2.2 +(2 rows) + +-- Mixed types +SELECT 1.1 AS two UNION SELECT 2 ORDER BY 1; + two +----- + 1.1 + 2 +(2 rows) + +SELECT 1 AS two UNION SELECT 2.2 ORDER BY 1; + two +----- + 1 + 2.2 +(2 rows) + +SELECT 1 AS one UNION SELECT 1.0::float8 ORDER BY 1; + one +----- + 1 +(1 row) + +SELECT 1.1 AS two UNION ALL SELECT 2 ORDER BY 1; + two +----- + 1.1 + 2 +(2 rows) + +SELECT 1.0::float8 AS two UNION ALL SELECT 1 ORDER BY 1; + two +----- + 1 + 1 +(2 rows) + +SELECT 1.1 AS three UNION SELECT 2 UNION SELECT 3 ORDER BY 1; + three +------- + 1.1 + 2 + 3 +(3 rows) + +SELECT 1.1::float8 AS two UNION SELECT 2 UNION SELECT 2.0::float8 ORDER BY 1; + two +----- + 1.1 + 2 +(2 rows) + +SELECT 1.1 AS three UNION SELECT 2 UNION ALL SELECT 2 ORDER BY 1; + three +------- + 1.1 + 2 + 2 +(3 rows) + +SELECT 1.1 AS two UNION (SELECT 2 UNION ALL SELECT 2) ORDER BY 1; + two +----- + 1.1 + 2 +(2 rows) + +-- +-- Try testing from tables... 
+-- +SELECT f1 AS five FROM FLOAT8_TBL +UNION +SELECT f1 FROM FLOAT8_TBL +ORDER BY 1; + five +----------------------- + -1.2345678901234e+200 + -1004.3 + -34.84 + -1.2345678901234e-200 + 0 +(5 rows) + +SELECT f1 AS ten FROM FLOAT8_TBL +UNION ALL +SELECT f1 FROM FLOAT8_TBL; + ten +----------------------- + 0 + -34.84 + -1004.3 + -1.2345678901234e+200 + -1.2345678901234e-200 + 0 + -34.84 + -1004.3 + -1.2345678901234e+200 + -1.2345678901234e-200 +(10 rows) + +SELECT f1 AS nine FROM FLOAT8_TBL +UNION +SELECT f1 FROM INT4_TBL +ORDER BY 1; + nine +----------------------- + -1.2345678901234e+200 + -2147483647 + -123456 + -1004.3 + -34.84 + -1.2345678901234e-200 + 0 + 123456 + 2147483647 +(9 rows) + +SELECT f1 AS ten FROM FLOAT8_TBL +UNION ALL +SELECT f1 FROM INT4_TBL; + ten +----------------------- + 0 + -34.84 + -1004.3 + -1.2345678901234e+200 + -1.2345678901234e-200 + 0 + 123456 + -123456 + 2147483647 + -2147483647 +(10 rows) + +SELECT f1 AS five FROM FLOAT8_TBL + WHERE f1 BETWEEN -1e6 AND 1e6 +UNION +SELECT f1 FROM INT4_TBL + WHERE f1 BETWEEN 0 AND 1000000 +ORDER BY 1; + five +----------------------- + -1004.3 + -34.84 + -1.2345678901234e-200 + 0 + 123456 +(5 rows) + +SELECT CAST(f1 AS char(4)) AS three FROM VARCHAR_TBL +UNION +SELECT f1 FROM CHAR_TBL +ORDER BY 1; + three +------- + a + ab + abcd +(3 rows) + +SELECT f1 AS three FROM VARCHAR_TBL +UNION +SELECT CAST(f1 AS varchar) FROM CHAR_TBL +ORDER BY 1; + three +------- + a + ab + abcd +(3 rows) + +SELECT f1 AS eight FROM VARCHAR_TBL +UNION ALL +SELECT f1 FROM CHAR_TBL; + eight +------- + a + ab + abcd + abcd + a + ab + abcd + abcd +(8 rows) + +SELECT f1 AS five FROM TEXT_TBL +UNION +SELECT f1 FROM VARCHAR_TBL +UNION +SELECT TRIM(TRAILING FROM f1) FROM CHAR_TBL +ORDER BY 1; + five +------------------- + a + ab + abcd + doh! + hi de ho neighbor +(5 rows) + +-- +-- INTERSECT and EXCEPT +-- +SELECT q2 FROM int8_tbl INTERSECT SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------ + 123 + 4567890123456789 +(2 rows) + +SELECT q2 FROM int8_tbl INTERSECT ALL SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------ + 123 + 4567890123456789 + 4567890123456789 +(3 rows) + +SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------- + -4567890123456789 + 456 +(2 rows) + +SELECT q2 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------- + -4567890123456789 + 456 +(2 rows) + +SELECT q2 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q1 FROM int8_tbl ORDER BY 1; + q2 +------------------- + -4567890123456789 + 456 + 4567890123456789 +(3 rows) + +SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY 1; + q1 +---- +(0 rows) + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q2 FROM int8_tbl ORDER BY 1; + q1 +------------------ + 123 + 4567890123456789 +(2 rows) + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT DISTINCT q2 FROM int8_tbl ORDER BY 1; + q1 +------------------ + 123 + 4567890123456789 + 4567890123456789 +(3 rows) + +SELECT q1 FROM int8_tbl EXCEPT ALL SELECT q1 FROM int8_tbl FOR NO KEY UPDATE; +ERROR: FOR NO KEY UPDATE is not allowed with UNION/INTERSECT/EXCEPT +-- nested cases +(SELECT 1,2,3 UNION SELECT 4,5,6) INTERSECT SELECT 4,5,6; + ?column? | ?column? | ?column? +----------+----------+---------- + 4 | 5 | 6 +(1 row) + +(SELECT 1,2,3 UNION SELECT 4,5,6 ORDER BY 1,2) INTERSECT SELECT 4,5,6; + ?column? | ?column? | ?column? +----------+----------+---------- + 4 | 5 | 6 +(1 row) + +(SELECT 1,2,3 UNION SELECT 4,5,6) EXCEPT SELECT 4,5,6; + ?column? | ?column? | ?column? 
+----------+----------+---------- + 1 | 2 | 3 +(1 row) + +(SELECT 1,2,3 UNION SELECT 4,5,6 ORDER BY 1,2) EXCEPT SELECT 4,5,6; + ?column? | ?column? | ?column? +----------+----------+---------- + 1 | 2 | 3 +(1 row) + +-- exercise both hashed and sorted implementations of UNION/INTERSECT/EXCEPT +set enable_hashagg to on; +explain (costs off) +select count(*) from + ( select unique1 from tenk1 union select fivethous from tenk1 ) ss; + QUERY PLAN +---------------------------------------------------------------- + Aggregate + -> HashAggregate + Group Key: tenk1.unique1 + -> Append + -> Index Only Scan using tenk1_unique1 on tenk1 + -> Seq Scan on tenk1 tenk1_1 +(6 rows) + +select count(*) from + ( select unique1 from tenk1 union select fivethous from tenk1 ) ss; + count +------- + 10000 +(1 row) + +explain (costs off) +select count(*) from + ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss; + QUERY PLAN +------------------------------------------------------------------------------------ + Aggregate + -> Subquery Scan on ss + -> HashSetOp Intersect + -> Append + -> Subquery Scan on "*SELECT* 2" + -> Seq Scan on tenk1 + -> Subquery Scan on "*SELECT* 1" + -> Index Only Scan using tenk1_unique1 on tenk1 tenk1_1 +(8 rows) + +select count(*) from + ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss; + count +------- + 5000 +(1 row) + +explain (costs off) +select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10; + QUERY PLAN +------------------------------------------------------------------------ + HashSetOp Except + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Index Only Scan using tenk1_unique1 on tenk1 + -> Subquery Scan on "*SELECT* 2" + -> Index Only Scan using tenk1_unique2 on tenk1 tenk1_1 + Filter: (unique2 <> 10) +(7 rows) + +select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10; + unique1 +--------- + 10 +(1 row) + +set enable_hashagg to off; +explain (costs off) +select count(*) from + ( select unique1 from tenk1 union select fivethous from tenk1 ) ss; + QUERY PLAN +---------------------------------------------------------------------- + Aggregate + -> Unique + -> Sort + Sort Key: tenk1.unique1 + -> Append + -> Index Only Scan using tenk1_unique1 on tenk1 + -> Seq Scan on tenk1 tenk1_1 +(7 rows) + +select count(*) from + ( select unique1 from tenk1 union select fivethous from tenk1 ) ss; + count +------- + 10000 +(1 row) + +explain (costs off) +select count(*) from + ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss; + QUERY PLAN +------------------------------------------------------------------------------------------ + Aggregate + -> Subquery Scan on ss + -> SetOp Intersect + -> Sort + Sort Key: "*SELECT* 2".fivethous + -> Append + -> Subquery Scan on "*SELECT* 2" + -> Seq Scan on tenk1 + -> Subquery Scan on "*SELECT* 1" + -> Index Only Scan using tenk1_unique1 on tenk1 tenk1_1 +(10 rows) + +select count(*) from + ( select unique1 from tenk1 intersect select fivethous from tenk1 ) ss; + count +------- + 5000 +(1 row) + +explain (costs off) +select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10; + QUERY PLAN +------------------------------------------------------------------------------ + SetOp Except + -> Sort + Sort Key: "*SELECT* 1".unique1 + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Index Only Scan using tenk1_unique1 on tenk1 + -> Subquery Scan on "*SELECT* 2" + -> Index Only Scan using tenk1_unique2 on tenk1 tenk1_1 + Filter: (unique2 
<> 10) +(9 rows) + +select unique1 from tenk1 except select unique2 from tenk1 where unique2 != 10; + unique1 +--------- + 10 +(1 row) + +reset enable_hashagg; +-- non-hashable type +set enable_hashagg to on; +explain (costs off) +select x from (values (100::money), (200::money)) _(x) union select x from (values (100::money), (300::money)) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +set enable_hashagg to off; +explain (costs off) +select x from (values (100::money), (200::money)) _(x) union select x from (values (100::money), (300::money)) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +reset enable_hashagg; +-- arrays +set enable_hashagg to on; +explain (costs off) +select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x); + QUERY PLAN +----------------------------------------- + HashAggregate + Group Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(5 rows) + +select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x); + x +------- + {1,4} + {1,2} + {1,3} +(3 rows) + +explain (costs off) +select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x); + QUERY PLAN +----------------------------------------------- + HashSetOp Intersect + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x); + x +------- + {1,2} +(1 row) + +explain (costs off) +select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x); + QUERY PLAN +----------------------------------------------- + HashSetOp Except + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x); + x +------- + {1,3} +(1 row) + +-- non-hashable type +explain (costs off) +select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (array[100::money]), (array[200::money])) _(x) union select x from (values (array[100::money]), (array[300::money])) _(x); + x +----------- + {$100.00} + {$200.00} + {$300.00} +(3 rows) + +set enable_hashagg to off; +explain (costs off) +select x from (values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from 
(values (array[1, 2]), (array[1, 3])) _(x) union select x from (values (array[1, 2]), (array[1, 4])) _(x); + x +------- + {1,2} + {1,3} + {1,4} +(3 rows) + +explain (costs off) +select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x); + QUERY PLAN +----------------------------------------------------- + SetOp Intersect + -> Sort + Sort Key: "*SELECT* 1".x + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(8 rows) + +select x from (values (array[1, 2]), (array[1, 3])) _(x) intersect select x from (values (array[1, 2]), (array[1, 4])) _(x); + x +------- + {1,2} +(1 row) + +explain (costs off) +select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x); + QUERY PLAN +----------------------------------------------------- + SetOp Except + -> Sort + Sort Key: "*SELECT* 1".x + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(8 rows) + +select x from (values (array[1, 2]), (array[1, 3])) _(x) except select x from (values (array[1, 2]), (array[1, 4])) _(x); + x +------- + {1,3} +(1 row) + +reset enable_hashagg; +-- records +set enable_hashagg to on; +explain (costs off) +select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x); + x +------- + (1,2) + (1,3) + (1,4) +(3 rows) + +explain (costs off) +select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x); + QUERY PLAN +----------------------------------------------------- + SetOp Intersect + -> Sort + Sort Key: "*SELECT* 1".x + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(8 rows) + +select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x); + x +------- + (1,2) +(1 row) + +explain (costs off) +select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x); + QUERY PLAN +----------------------------------------------------- + SetOp Except + -> Sort + Sort Key: "*SELECT* 1".x + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(8 rows) + +select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x); + x +------- + (1,3) +(1 row) + +-- non-hashable type +-- With an anonymous row type, the typcache does not report that the +-- type is hashable. (Otherwise, this would fail at execution time.) 
+explain (costs off) +select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (row(100::money)), (row(200::money))) _(x) union select x from (values (row(100::money)), (row(300::money))) _(x); + x +----------- + ($100.00) + ($200.00) + ($300.00) +(3 rows) + +-- With a defined row type, the typcache can inspect the type's fields +-- for hashability. +create type ct1 as (f1 money); +explain (costs off) +select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (row(100::money)::ct1), (row(200::money)::ct1)) _(x) union select x from (values (row(100::money)::ct1), (row(300::money)::ct1)) _(x); + x +----------- + ($100.00) + ($200.00) + ($300.00) +(3 rows) + +drop type ct1; +set enable_hashagg to off; +explain (costs off) +select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x); + QUERY PLAN +----------------------------------------------- + Unique + -> Sort + Sort Key: "*VALUES*".column1 + -> Append + -> Values Scan on "*VALUES*" + -> Values Scan on "*VALUES*_1" +(6 rows) + +select x from (values (row(1, 2)), (row(1, 3))) _(x) union select x from (values (row(1, 2)), (row(1, 4))) _(x); + x +------- + (1,2) + (1,3) + (1,4) +(3 rows) + +explain (costs off) +select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x); + QUERY PLAN +----------------------------------------------------- + SetOp Intersect + -> Sort + Sort Key: "*SELECT* 1".x + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(8 rows) + +select x from (values (row(1, 2)), (row(1, 3))) _(x) intersect select x from (values (row(1, 2)), (row(1, 4))) _(x); + x +------- + (1,2) +(1 row) + +explain (costs off) +select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x); + QUERY PLAN +----------------------------------------------------- + SetOp Except + -> Sort + Sort Key: "*SELECT* 1".x + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Values Scan on "*VALUES*" + -> Subquery Scan on "*SELECT* 2" + -> Values Scan on "*VALUES*_1" +(8 rows) + +select x from (values (row(1, 2)), (row(1, 3))) _(x) except select x from (values (row(1, 2)), (row(1, 4))) _(x); + x +------- + (1,3) +(1 row) + +reset enable_hashagg; +-- +-- Mixed types +-- +SELECT f1 FROM float8_tbl INTERSECT SELECT f1 FROM int4_tbl ORDER BY 1; + f1 +---- + 0 +(1 row) + +SELECT f1 FROM float8_tbl EXCEPT SELECT f1 FROM int4_tbl ORDER BY 1; + f1 +----------------------- + -1.2345678901234e+200 + -1004.3 + -34.84 + -1.2345678901234e-200 +(4 rows) + +-- +-- Operator precedence and (((((extra))))) parentheses +-- +SELECT q1 FROM int8_tbl INTERSECT SELECT q2 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl ORDER BY 1; + q1 +------------------- + -4567890123456789 + 123 + 123 + 456 + 4567890123456789 + 
4567890123456789 + 4567890123456789 +(7 rows) + +SELECT q1 FROM int8_tbl INTERSECT (((SELECT q2 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl))) ORDER BY 1; + q1 +------------------ + 123 + 4567890123456789 +(2 rows) + +(((SELECT q1 FROM int8_tbl INTERSECT SELECT q2 FROM int8_tbl ORDER BY 1))) UNION ALL SELECT q2 FROM int8_tbl; + q1 +------------------- + 123 + 4567890123456789 + 456 + 4567890123456789 + 123 + 4567890123456789 + -4567890123456789 +(7 rows) + +SELECT q1 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1; + q1 +------------------- + -4567890123456789 + 456 +(2 rows) + +SELECT q1 FROM int8_tbl UNION ALL (((SELECT q2 FROM int8_tbl EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1))); + q1 +------------------- + 123 + 123 + 4567890123456789 + 4567890123456789 + 4567890123456789 + -4567890123456789 + 456 +(7 rows) + +(((SELECT q1 FROM int8_tbl UNION ALL SELECT q2 FROM int8_tbl))) EXCEPT SELECT q1 FROM int8_tbl ORDER BY 1; + q1 +------------------- + -4567890123456789 + 456 +(2 rows) + +-- +-- Subqueries with ORDER BY & LIMIT clauses +-- +-- In this syntax, ORDER BY/LIMIT apply to the result of the EXCEPT +SELECT q1,q2 FROM int8_tbl EXCEPT SELECT q2,q1 FROM int8_tbl +ORDER BY q2,q1; + q1 | q2 +------------------+------------------- + 4567890123456789 | -4567890123456789 + 123 | 456 +(2 rows) + +-- This should fail, because q2 isn't a name of an EXCEPT output column +SELECT q1 FROM int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1; +ERROR: column "q2" does not exist +LINE 1: ... int8_tbl EXCEPT SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1... + ^ +DETAIL: There is a column named "q2" in table "*SELECT* 2", but it cannot be referenced from this part of the query. +-- But this should work: +SELECT q1 FROM int8_tbl EXCEPT (((SELECT q2 FROM int8_tbl ORDER BY q2 LIMIT 1))) ORDER BY 1; + q1 +------------------ + 123 + 4567890123456789 +(2 rows) + +-- +-- New syntaxes (7.1) permit new tests +-- +(((((select * from int8_tbl))))); + q1 | q2 +------------------+------------------- + 123 | 456 + 123 | 4567890123456789 + 4567890123456789 | 123 + 4567890123456789 | 4567890123456789 + 4567890123456789 | -4567890123456789 +(5 rows) + +-- +-- Check behavior with empty select list (allowed since 9.4) +-- +select union select; +-- +(1 row) + +select intersect select; +-- +(1 row) + +select except select; +-- +(0 rows) + +-- check hashed implementation +set enable_hashagg = true; +set enable_sort = false; +explain (costs off) +select from generate_series(1,5) union select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------- + HashAggregate + -> Append + -> Function Scan on generate_series + -> Function Scan on generate_series generate_series_1 +(4 rows) + +explain (costs off) +select from generate_series(1,5) intersect select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------------- + HashSetOp Intersect + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Function Scan on generate_series + -> Subquery Scan on "*SELECT* 2" + -> Function Scan on generate_series generate_series_1 +(6 rows) + +select from generate_series(1,5) union select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) union all select from generate_series(1,3); +-- +(8 rows) + +select from generate_series(1,5) intersect select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) intersect all select from generate_series(1,3); +-- +(3 rows) 
+ +select from generate_series(1,5) except select from generate_series(1,3); +-- +(0 rows) + +select from generate_series(1,5) except all select from generate_series(1,3); +-- +(2 rows) + +-- check sorted implementation +set enable_hashagg = false; +set enable_sort = true; +explain (costs off) +select from generate_series(1,5) union select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------- + Unique + -> Append + -> Function Scan on generate_series + -> Function Scan on generate_series generate_series_1 +(4 rows) + +explain (costs off) +select from generate_series(1,5) intersect select from generate_series(1,3); + QUERY PLAN +---------------------------------------------------------------------- + SetOp Intersect + -> Append + -> Subquery Scan on "*SELECT* 1" + -> Function Scan on generate_series + -> Subquery Scan on "*SELECT* 2" + -> Function Scan on generate_series generate_series_1 +(6 rows) + +select from generate_series(1,5) union select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) union all select from generate_series(1,3); +-- +(8 rows) + +select from generate_series(1,5) intersect select from generate_series(1,3); +-- +(1 row) + +select from generate_series(1,5) intersect all select from generate_series(1,3); +-- +(3 rows) + +select from generate_series(1,5) except select from generate_series(1,3); +-- +(0 rows) + +select from generate_series(1,5) except all select from generate_series(1,3); +-- +(2 rows) + +reset enable_hashagg; +reset enable_sort; +-- +-- Check handling of a case with unknown constants. We don't guarantee +-- an undecorated constant will work in all cases, but historically this +-- usage has worked, so test we don't break it. +-- +SELECT a.f1 FROM (SELECT 'test' AS f1 FROM varchar_tbl) a +UNION +SELECT b.f1 FROM (SELECT f1 FROM varchar_tbl) b +ORDER BY 1; + f1 +------ + a + ab + abcd + test +(4 rows) + +-- This should fail, but it should produce an error cursor +SELECT '3.4'::numeric UNION SELECT 'foo'; +ERROR: invalid input syntax for type numeric: "foo" +LINE 1: SELECT '3.4'::numeric UNION SELECT 'foo'; + ^ +-- +-- Test that expression-index constraints can be pushed down through +-- UNION or UNION ALL +-- +CREATE TEMP TABLE t1 (a text, b text); +CREATE INDEX t1_ab_idx on t1 ((a || b)); +CREATE TEMP TABLE t2 (ab text primary key); +INSERT INTO t1 VALUES ('a', 'b'), ('x', 'y'); +INSERT INTO t2 VALUES ('ab'), ('xy'); +set enable_seqscan = off; +set enable_indexscan = on; +set enable_bitmapscan = off; +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION ALL + SELECT * FROM t2) t + WHERE ab = 'ab'; + QUERY PLAN +--------------------------------------------- + Append + -> Index Scan using t1_ab_idx on t1 + Index Cond: ((a || b) = 'ab'::text) + -> Index Only Scan using t2_pkey on t2 + Index Cond: (ab = 'ab'::text) +(5 rows) + +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION + SELECT * FROM t2) t + WHERE ab = 'ab'; + QUERY PLAN +--------------------------------------------------- + HashAggregate + Group Key: ((t1.a || t1.b)) + -> Append + -> Index Scan using t1_ab_idx on t1 + Index Cond: ((a || b) = 'ab'::text) + -> Index Only Scan using t2_pkey on t2 + Index Cond: (ab = 'ab'::text) +(7 rows) + +-- +-- Test that ORDER BY for UNION ALL can be pushed down to inheritance +-- children. 
+-- +CREATE TEMP TABLE t1c (b text, a text); +ALTER TABLE t1c INHERIT t1; +CREATE TEMP TABLE t2c (primary key (ab)) INHERITS (t2); +INSERT INTO t1c VALUES ('v', 'w'), ('c', 'd'), ('m', 'n'), ('e', 'f'); +INSERT INTO t2c VALUES ('vw'), ('cd'), ('mn'), ('ef'); +CREATE INDEX t1c_ab_idx on t1c ((a || b)); +set enable_seqscan = on; +set enable_indexonlyscan = off; +explain (costs off) + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION ALL + SELECT ab FROM t2) t + ORDER BY 1 LIMIT 8; + QUERY PLAN +----------------------------------------------------- + Limit + -> Merge Append + Sort Key: ((t1.a || t1.b)) + -> Index Scan using t1_ab_idx on t1 + -> Index Scan using t1c_ab_idx on t1c t1_1 + -> Index Scan using t2_pkey on t2 + -> Index Scan using t2c_pkey on t2c t2_1 +(7 rows) + + SELECT * FROM + (SELECT a || b AS ab FROM t1 + UNION ALL + SELECT ab FROM t2) t + ORDER BY 1 LIMIT 8; + ab +---- + ab + ab + cd + dc + ef + fe + mn + nm +(8 rows) + +reset enable_seqscan; +reset enable_indexscan; +reset enable_bitmapscan; +-- This simpler variant of the above test has been observed to fail differently +create table events (event_id int primary key); +create table other_events (event_id int primary key); +create table events_child () inherits (events); +explain (costs off) +select event_id + from (select event_id from events + union all + select event_id from other_events) ss + order by event_id; + QUERY PLAN +---------------------------------------------------------- + Merge Append + Sort Key: events.event_id + -> Index Scan using events_pkey on events + -> Sort + Sort Key: events_1.event_id + -> Seq Scan on events_child events_1 + -> Index Scan using other_events_pkey on other_events +(7 rows) + +drop table events_child, events, other_events; +reset enable_indexonlyscan; +-- Test constraint exclusion of UNION ALL subqueries +explain (costs off) + SELECT * FROM + (SELECT 1 AS t, * FROM tenk1 a + UNION ALL + SELECT 2 AS t, * FROM tenk1 b) c + WHERE t = 2; + QUERY PLAN +--------------------- + Seq Scan on tenk1 b +(1 row) + +-- Test that we push quals into UNION sub-selects only when it's safe +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, 2 AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + QUERY PLAN +-------------------------------------------------- + Sort + Sort Key: (2) + -> Unique + -> Sort + Sort Key: (1), (2) + -> Append + -> Result + -> Result + One-Time Filter: false +(9 rows) + +SELECT * FROM + (SELECT 1 AS t, 2 AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + t | x +---+--- + 1 | 2 +(1 row) + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, generate_series(1,10) AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + QUERY PLAN +-------------------------------------------------------- + Sort + Sort Key: ss.x + -> Subquery Scan on ss + Filter: (ss.x < 4) + -> HashAggregate + Group Key: (1), (generate_series(1, 10)) + -> Append + -> ProjectSet + -> Result + -> Result +(10 rows) + +SELECT * FROM + (SELECT 1 AS t, generate_series(1,10) AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x < 4 +ORDER BY x; + t | x +---+--- + 1 | 1 + 1 | 2 + 1 | 3 +(3 rows) + +explain (costs off) +SELECT * FROM + (SELECT 1 AS t, (random()*3)::int AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x > 3 +ORDER BY x; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Sort Key: ss.x + -> Subquery Scan on ss + Filter: (ss.x > 3) + -> Unique + -> Sort + Sort Key: (1), (((random() * '3'::double 
precision))::integer) + -> Append + -> Result + -> Result +(10 rows) + +SELECT * FROM + (SELECT 1 AS t, (random()*3)::int AS x + UNION + SELECT 2 AS t, 4 AS x) ss +WHERE x > 3 +ORDER BY x; + t | x +---+--- + 2 | 4 +(1 row) + +-- Test cases where the native ordering of a sub-select has more pathkeys +-- than the outer query cares about +explain (costs off) +select distinct q1 from + (select distinct * from int8_tbl i81 + union all + select distinct * from int8_tbl i82) ss +where q2 = q2; + QUERY PLAN +---------------------------------------------------------- + Unique + -> Merge Append + Sort Key: "*SELECT* 1".q1 + -> Subquery Scan on "*SELECT* 1" + -> Unique + -> Sort + Sort Key: i81.q1, i81.q2 + -> Seq Scan on int8_tbl i81 + Filter: (q2 IS NOT NULL) + -> Subquery Scan on "*SELECT* 2" + -> Unique + -> Sort + Sort Key: i82.q1, i82.q2 + -> Seq Scan on int8_tbl i82 + Filter: (q2 IS NOT NULL) +(15 rows) + +select distinct q1 from + (select distinct * from int8_tbl i81 + union all + select distinct * from int8_tbl i82) ss +where q2 = q2; + q1 +------------------ + 123 + 4567890123456789 +(2 rows) + +explain (costs off) +select distinct q1 from + (select distinct * from int8_tbl i81 + union all + select distinct * from int8_tbl i82) ss +where -q1 = q2; + QUERY PLAN +-------------------------------------------------------- + Unique + -> Merge Append + Sort Key: "*SELECT* 1".q1 + -> Subquery Scan on "*SELECT* 1" + -> Unique + -> Sort + Sort Key: i81.q1, i81.q2 + -> Seq Scan on int8_tbl i81 + Filter: ((- q1) = q2) + -> Subquery Scan on "*SELECT* 2" + -> Unique + -> Sort + Sort Key: i82.q1, i82.q2 + -> Seq Scan on int8_tbl i82 + Filter: ((- q1) = q2) +(15 rows) + +select distinct q1 from + (select distinct * from int8_tbl i81 + union all + select distinct * from int8_tbl i82) ss +where -q1 = q2; + q1 +------------------ + 4567890123456789 +(1 row) + +-- Test proper handling of parameterized appendrel paths when the +-- potential join qual is expensive +create function expensivefunc(int) returns int +language plpgsql immutable strict cost 10000 +as $$begin return $1; end$$; +create temp table t3 as select generate_series(-1000,1000) as x; +create index t3i on t3 (expensivefunc(x)); +analyze t3; +explain (costs off) +select * from + (select * from t3 a union all select * from t3 b) ss + join int4_tbl on f1 = expensivefunc(x); + QUERY PLAN +------------------------------------------------------------ + Nested Loop + -> Seq Scan on int4_tbl + -> Append + -> Index Scan using t3i on t3 a + Index Cond: (expensivefunc(x) = int4_tbl.f1) + -> Index Scan using t3i on t3 b + Index Cond: (expensivefunc(x) = int4_tbl.f1) +(7 rows) + +select * from + (select * from t3 a union all select * from t3 b) ss + join int4_tbl on f1 = expensivefunc(x); + x | f1 +---+---- + 0 | 0 + 0 | 0 +(2 rows) + +drop table t3; +drop function expensivefunc(int); +-- Test handling of appendrel quals that const-simplify into an AND +explain (costs off) +select * from + (select *, 0 as x from int8_tbl a + union all + select *, 1 as x from int8_tbl b) ss +where (x = 0) or (q1 >= q2 and q1 <= q2); + QUERY PLAN +--------------------------------------------- + Append + -> Seq Scan on int8_tbl a + -> Seq Scan on int8_tbl b + Filter: ((q1 >= q2) AND (q1 <= q2)) +(4 rows) + +select * from + (select *, 0 as x from int8_tbl a + union all + select *, 1 as x from int8_tbl b) ss +where (x = 0) or (q1 >= q2 and q1 <= q2); + q1 | q2 | x +------------------+-------------------+--- + 123 | 456 | 0 + 123 | 4567890123456789 | 0 + 4567890123456789 | 123 | 0 
+ 4567890123456789 | 4567890123456789 | 0 + 4567890123456789 | -4567890123456789 | 0 + 4567890123456789 | 4567890123456789 | 1 +(6 rows) + diff --git a/src/test/regress/expected/updatable_views.out b/src/test/regress/expected/updatable_views.out new file mode 100644 index 0000000..1950e6f --- /dev/null +++ b/src/test/regress/expected/updatable_views.out @@ -0,0 +1,3366 @@ +-- +-- UPDATABLE VIEWS +-- +-- avoid bit-exact output here because operations may not be bit-exact. +SET extra_float_digits = 0; +-- check that non-updatable views and columns are rejected with useful error +-- messages +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); +CREATE VIEW ro_view1 AS SELECT DISTINCT a, b FROM base_tbl; -- DISTINCT not supported +CREATE VIEW ro_view2 AS SELECT a, b FROM base_tbl GROUP BY a, b; -- GROUP BY not supported +CREATE VIEW ro_view3 AS SELECT 1 FROM base_tbl HAVING max(a) > 0; -- HAVING not supported +CREATE VIEW ro_view4 AS SELECT count(*) FROM base_tbl; -- Aggregate functions not supported +CREATE VIEW ro_view5 AS SELECT a, rank() OVER() FROM base_tbl; -- Window functions not supported +CREATE VIEW ro_view6 AS SELECT a, b FROM base_tbl UNION SELECT -a, b FROM base_tbl; -- Set ops not supported +CREATE VIEW ro_view7 AS WITH t AS (SELECT a, b FROM base_tbl) SELECT * FROM t; -- WITH not supported +CREATE VIEW ro_view8 AS SELECT a, b FROM base_tbl ORDER BY a OFFSET 1; -- OFFSET not supported +CREATE VIEW ro_view9 AS SELECT a, b FROM base_tbl ORDER BY a LIMIT 1; -- LIMIT not supported +CREATE VIEW ro_view10 AS SELECT 1 AS a; -- No base relations +CREATE VIEW ro_view11 AS SELECT b1.a, b2.b FROM base_tbl b1, base_tbl b2; -- Multiple base relations +CREATE VIEW ro_view12 AS SELECT * FROM generate_series(1, 10) AS g(a); -- SRF in rangetable +CREATE VIEW ro_view13 AS SELECT a, b FROM (SELECT * FROM base_tbl) AS t; -- Subselect in rangetable +CREATE VIEW rw_view14 AS SELECT ctid, a, b FROM base_tbl; -- System columns may be part of an updatable view +CREATE VIEW rw_view15 AS SELECT a, upper(b) FROM base_tbl; -- Expression/function may be part of an updatable view +CREATE VIEW rw_view16 AS SELECT a, b, a AS aa FROM base_tbl; -- Repeated column may be part of an updatable view +CREATE VIEW ro_view17 AS SELECT * FROM ro_view1; -- Base relation not updatable +CREATE VIEW ro_view18 AS SELECT * FROM (VALUES(1)) AS tmp(a); -- VALUES in rangetable +CREATE SEQUENCE uv_seq; +CREATE VIEW ro_view19 AS SELECT * FROM uv_seq; -- View based on a sequence +CREATE VIEW ro_view20 AS SELECT a, b, generate_series(1, a) g FROM base_tbl; -- SRF in targetlist not supported +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE E'r_\\_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + ro_view1 | NO + ro_view10 | NO + ro_view11 | NO + ro_view12 | NO + ro_view13 | NO + ro_view17 | NO + ro_view18 | NO + ro_view19 | NO + ro_view2 | NO + ro_view20 | NO + ro_view3 | NO + ro_view4 | NO + ro_view5 | NO + ro_view6 | NO + ro_view7 | NO + ro_view8 | NO + ro_view9 | NO + rw_view14 | YES + rw_view15 | YES + rw_view16 | YES +(20 rows) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name LIKE E'r_\\_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + ro_view1 | NO | NO + ro_view10 | NO | NO + ro_view11 | NO | NO + 
ro_view12 | NO | NO + ro_view13 | NO | NO + ro_view17 | NO | NO + ro_view18 | NO | NO + ro_view19 | NO | NO + ro_view2 | NO | NO + ro_view20 | NO | NO + ro_view3 | NO | NO + ro_view4 | NO | NO + ro_view5 | NO | NO + ro_view6 | NO | NO + ro_view7 | NO | NO + ro_view8 | NO | NO + ro_view9 | NO | NO + rw_view14 | YES | YES + rw_view15 | YES | YES + rw_view16 | YES | YES +(20 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE E'r_\\_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + ro_view1 | a | NO + ro_view1 | b | NO + ro_view10 | a | NO + ro_view11 | a | NO + ro_view11 | b | NO + ro_view12 | a | NO + ro_view13 | a | NO + ro_view13 | b | NO + ro_view17 | a | NO + ro_view17 | b | NO + ro_view18 | a | NO + ro_view19 | last_value | NO + ro_view19 | log_cnt | NO + ro_view19 | is_called | NO + ro_view2 | a | NO + ro_view2 | b | NO + ro_view20 | a | NO + ro_view20 | b | NO + ro_view20 | g | NO + ro_view3 | ?column? | NO + ro_view4 | count | NO + ro_view5 | a | NO + ro_view5 | rank | NO + ro_view6 | a | NO + ro_view6 | b | NO + ro_view7 | a | NO + ro_view7 | b | NO + ro_view8 | a | NO + ro_view8 | b | NO + ro_view9 | a | NO + ro_view9 | b | NO + rw_view14 | ctid | NO + rw_view14 | a | YES + rw_view14 | b | YES + rw_view15 | a | YES + rw_view15 | upper | NO + rw_view16 | a | YES + rw_view16 | b | YES + rw_view16 | aa | YES +(39 rows) + +-- Read-only views +DELETE FROM ro_view1; +ERROR: cannot delete from view "ro_view1" +DETAIL: Views containing DISTINCT are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +DELETE FROM ro_view2; +ERROR: cannot delete from view "ro_view2" +DETAIL: Views containing GROUP BY are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +DELETE FROM ro_view3; +ERROR: cannot delete from view "ro_view3" +DETAIL: Views containing HAVING are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +DELETE FROM ro_view4; +ERROR: cannot delete from view "ro_view4" +DETAIL: Views that return aggregate functions are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +DELETE FROM ro_view5; +ERROR: cannot delete from view "ro_view5" +DETAIL: Views that return window functions are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +DELETE FROM ro_view6; +ERROR: cannot delete from view "ro_view6" +DETAIL: Views containing UNION, INTERSECT, or EXCEPT are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +UPDATE ro_view7 SET a=a+1; +ERROR: cannot update view "ro_view7" +DETAIL: Views containing WITH are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +UPDATE ro_view8 SET a=a+1; +ERROR: cannot update view "ro_view8" +DETAIL: Views containing LIMIT or OFFSET are not automatically updatable. 
+HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +UPDATE ro_view9 SET a=a+1; +ERROR: cannot update view "ro_view9" +DETAIL: Views containing LIMIT or OFFSET are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +UPDATE ro_view10 SET a=a+1; +ERROR: cannot update view "ro_view10" +DETAIL: Views that do not select from a single table or view are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +UPDATE ro_view11 SET a=a+1; +ERROR: cannot update view "ro_view11" +DETAIL: Views that do not select from a single table or view are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +UPDATE ro_view12 SET a=a+1; +ERROR: cannot update view "ro_view12" +DETAIL: Views that do not select from a single table or view are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +INSERT INTO ro_view13 VALUES (3, 'Row 3'); +ERROR: cannot insert into view "ro_view13" +DETAIL: Views that do not select from a single table or view are not automatically updatable. +HINT: To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule. +-- Partially updatable view +INSERT INTO rw_view14 VALUES (null, 3, 'Row 3'); -- should fail +ERROR: cannot insert into column "ctid" of view "rw_view14" +DETAIL: View columns that refer to system columns are not updatable. +INSERT INTO rw_view14 (a, b) VALUES (3, 'Row 3'); -- should be OK +UPDATE rw_view14 SET ctid=null WHERE a=3; -- should fail +ERROR: cannot update column "ctid" of view "rw_view14" +DETAIL: View columns that refer to system columns are not updatable. +UPDATE rw_view14 SET b='ROW 3' WHERE a=3; -- should be OK +SELECT * FROM base_tbl; + a | b +----+-------- + -2 | Row -2 + -1 | Row -1 + 0 | Row 0 + 1 | Row 1 + 2 | Row 2 + 3 | ROW 3 +(6 rows) + +DELETE FROM rw_view14 WHERE a=3; -- should be OK +-- Partially updatable view +INSERT INTO rw_view15 VALUES (3, 'ROW 3'); -- should fail +ERROR: cannot insert into column "upper" of view "rw_view15" +DETAIL: View columns that are not columns of their base relation are not updatable. +INSERT INTO rw_view15 (a) VALUES (3); -- should be OK +INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT DO NOTHING; -- succeeds +SELECT * FROM rw_view15; + a | upper +----+------------- + -2 | ROW -2 + -1 | ROW -1 + 0 | ROW 0 + 1 | ROW 1 + 2 | ROW 2 + 3 | UNSPECIFIED +(6 rows) + +INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO NOTHING; -- succeeds +SELECT * FROM rw_view15; + a | upper +----+------------- + -2 | ROW -2 + -1 | ROW -1 + 0 | ROW 0 + 1 | ROW 1 + 2 | ROW 2 + 3 | UNSPECIFIED +(6 rows) + +INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO UPDATE set a = excluded.a; -- succeeds +SELECT * FROM rw_view15; + a | upper +----+------------- + -2 | ROW -2 + -1 | ROW -1 + 0 | ROW 0 + 1 | ROW 1 + 2 | ROW 2 + 3 | UNSPECIFIED +(6 rows) + +INSERT INTO rw_view15 (a) VALUES (3) ON CONFLICT (a) DO UPDATE set upper = 'blarg'; -- fails +ERROR: cannot insert into column "upper" of view "rw_view15" +DETAIL: View columns that are not columns of their base relation are not updatable. 
+SELECT * FROM rw_view15; + a | upper +----+------------- + -2 | ROW -2 + -1 | ROW -1 + 0 | ROW 0 + 1 | ROW 1 + 2 | ROW 2 + 3 | UNSPECIFIED +(6 rows) + +SELECT * FROM rw_view15; + a | upper +----+------------- + -2 | ROW -2 + -1 | ROW -1 + 0 | ROW 0 + 1 | ROW 1 + 2 | ROW 2 + 3 | UNSPECIFIED +(6 rows) + +ALTER VIEW rw_view15 ALTER COLUMN upper SET DEFAULT 'NOT SET'; +INSERT INTO rw_view15 (a) VALUES (4); -- should fail +ERROR: cannot insert into column "upper" of view "rw_view15" +DETAIL: View columns that are not columns of their base relation are not updatable. +UPDATE rw_view15 SET upper='ROW 3' WHERE a=3; -- should fail +ERROR: cannot update column "upper" of view "rw_view15" +DETAIL: View columns that are not columns of their base relation are not updatable. +UPDATE rw_view15 SET upper=DEFAULT WHERE a=3; -- should fail +ERROR: cannot update column "upper" of view "rw_view15" +DETAIL: View columns that are not columns of their base relation are not updatable. +UPDATE rw_view15 SET a=4 WHERE a=3; -- should be OK +SELECT * FROM base_tbl; + a | b +----+------------- + -2 | Row -2 + -1 | Row -1 + 0 | Row 0 + 1 | Row 1 + 2 | Row 2 + 4 | Unspecified +(6 rows) + +DELETE FROM rw_view15 WHERE a=4; -- should be OK +-- Partially updatable view +INSERT INTO rw_view16 VALUES (3, 'Row 3', 3); -- should fail +ERROR: multiple assignments to same column "a" +INSERT INTO rw_view16 (a, b) VALUES (3, 'Row 3'); -- should be OK +UPDATE rw_view16 SET a=3, aa=-3 WHERE a=3; -- should fail +ERROR: multiple assignments to same column "a" +UPDATE rw_view16 SET aa=-3 WHERE a=3; -- should be OK +SELECT * FROM base_tbl; + a | b +----+-------- + -2 | Row -2 + -1 | Row -1 + 0 | Row 0 + 1 | Row 1 + 2 | Row 2 + -3 | Row 3 +(6 rows) + +DELETE FROM rw_view16 WHERE a=-3; -- should be OK +-- Read-only views +INSERT INTO ro_view17 VALUES (3, 'ROW 3'); +ERROR: cannot insert into view "ro_view1" +DETAIL: Views containing DISTINCT are not automatically updatable. +HINT: To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule. +DELETE FROM ro_view18; +ERROR: cannot delete from view "ro_view18" +DETAIL: Views that do not select from a single table or view are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +UPDATE ro_view19 SET last_value=1000; +ERROR: cannot update view "ro_view19" +DETAIL: Views that do not select from a single table or view are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +UPDATE ro_view20 SET b=upper(b); +ERROR: cannot update view "ro_view20" +DETAIL: Views that return set-returning functions are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. 
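The HINT messages above point at the two escape hatches for a view that is not automatically updatable: an unconditional DO INSTEAD rule, or an INSTEAD OF trigger. A minimal sketch of the rule-based route, assuming the ro_view6 definition from the setup above (the rule name and the WHERE condition are illustrative and are not part of this test file or its expected output):

    -- illustrative only; not part of the expected regression output
    CREATE RULE ro_view6_del AS ON DELETE TO ro_view6
        DO INSTEAD DELETE FROM base_tbl WHERE a = OLD.a OR a = -OLD.a;

The trigger-based route (CREATE TRIGGER ... INSTEAD OF ... FOR EACH ROW EXECUTE PROCEDURE a plpgsql function) is exercised for rw_view1 later in this file.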
+-- A view with a conditional INSTEAD rule but no unconditional INSTEAD rules +-- or INSTEAD OF triggers should be non-updatable and generate useful error +-- messages with appropriate detail +CREATE RULE rw_view16_ins_rule AS ON INSERT TO rw_view16 + WHERE NEW.a > 0 DO INSTEAD INSERT INTO base_tbl VALUES (NEW.a, NEW.b); +CREATE RULE rw_view16_upd_rule AS ON UPDATE TO rw_view16 + WHERE OLD.a > 0 DO INSTEAD UPDATE base_tbl SET b=NEW.b WHERE a=OLD.a; +CREATE RULE rw_view16_del_rule AS ON DELETE TO rw_view16 + WHERE OLD.a > 0 DO INSTEAD DELETE FROM base_tbl WHERE a=OLD.a; +INSERT INTO rw_view16 (a, b) VALUES (3, 'Row 3'); -- should fail +ERROR: cannot insert into view "rw_view16" +DETAIL: Views with conditional DO INSTEAD rules are not automatically updatable. +HINT: To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule. +UPDATE rw_view16 SET b='ROW 2' WHERE a=2; -- should fail +ERROR: cannot update view "rw_view16" +DETAIL: Views with conditional DO INSTEAD rules are not automatically updatable. +HINT: To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule. +DELETE FROM rw_view16 WHERE a=2; -- should fail +ERROR: cannot delete from view "rw_view16" +DETAIL: Views with conditional DO INSTEAD rules are not automatically updatable. +HINT: To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule. +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 16 other objects +DETAIL: drop cascades to view ro_view1 +drop cascades to view ro_view17 +drop cascades to view ro_view2 +drop cascades to view ro_view3 +drop cascades to view ro_view4 +drop cascades to view ro_view5 +drop cascades to view ro_view6 +drop cascades to view ro_view7 +drop cascades to view ro_view8 +drop cascades to view ro_view9 +drop cascades to view ro_view11 +drop cascades to view ro_view13 +drop cascades to view rw_view14 +drop cascades to view rw_view15 +drop cascades to view rw_view16 +drop cascades to view ro_view20 +DROP VIEW ro_view10, ro_view12, ro_view18; +DROP SEQUENCE uv_seq CASCADE; +NOTICE: drop cascades to view ro_view19 +-- simple updatable view +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a>0; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name = 'rw_view1'; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | YES +(1 row) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name = 'rw_view1'; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | YES | YES +(1 row) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name = 'rw_view1' + ORDER BY ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | YES + rw_view1 | b | YES +(2 rows) + +INSERT INTO rw_view1 VALUES (3, 'Row 3'); +INSERT INTO rw_view1 (a) VALUES (4); +UPDATE rw_view1 SET a=5 WHERE a=4; +DELETE FROM rw_view1 WHERE b='Row 2'; +SELECT * FROM base_tbl; + a | b +----+------------- + -2 | Row -2 + -1 | Row -1 + 0 | Row 0 + 1 | Row 1 + 3 | Row 3 + 5 | Unspecified +(6 rows) + +EXPLAIN (costs off) UPDATE rw_view1 SET a=6 WHERE a=5; + QUERY PLAN 
+-------------------------------------------------- + Update on base_tbl + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: ((a > 0) AND (a = 5)) +(3 rows) + +EXPLAIN (costs off) DELETE FROM rw_view1 WHERE a=5; + QUERY PLAN +-------------------------------------------------- + Delete on base_tbl + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: ((a > 0) AND (a = 5)) +(3 rows) + +-- it's still updatable if we add a DO ALSO rule +CREATE TABLE base_tbl_hist(ts timestamptz default now(), a int, b text); +CREATE RULE base_tbl_log AS ON INSERT TO rw_view1 DO ALSO + INSERT INTO base_tbl_hist(a,b) VALUES(new.a, new.b); +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name = 'rw_view1'; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | YES | YES +(1 row) + +-- Check behavior with DEFAULTs (bug #17633) +INSERT INTO rw_view1 VALUES (9, DEFAULT), (10, DEFAULT); +SELECT a, b FROM base_tbl_hist; + a | b +----+--- + 9 | + 10 | +(2 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +DROP TABLE base_tbl_hist; +-- view on top of view +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); +CREATE VIEW rw_view1 AS SELECT b AS bb, a AS aa FROM base_tbl WHERE a>0; +CREATE VIEW rw_view2 AS SELECT aa AS aaa, bb AS bbb FROM rw_view1 WHERE aa<10; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name = 'rw_view2'; + table_name | is_insertable_into +------------+-------------------- + rw_view2 | YES +(1 row) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name = 'rw_view2'; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view2 | YES | YES +(1 row) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name = 'rw_view2' + ORDER BY ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view2 | aaa | YES + rw_view2 | bbb | YES +(2 rows) + +INSERT INTO rw_view2 VALUES (3, 'Row 3'); +INSERT INTO rw_view2 (aaa) VALUES (4); +SELECT * FROM rw_view2; + aaa | bbb +-----+------------- + 1 | Row 1 + 2 | Row 2 + 3 | Row 3 + 4 | Unspecified +(4 rows) + +UPDATE rw_view2 SET bbb='Row 4' WHERE aaa=4; +DELETE FROM rw_view2 WHERE aaa=2; +SELECT * FROM rw_view2; + aaa | bbb +-----+------- + 1 | Row 1 + 3 | Row 3 + 4 | Row 4 +(3 rows) + +EXPLAIN (costs off) UPDATE rw_view2 SET aaa=5 WHERE aaa=4; + QUERY PLAN +-------------------------------------------------------- + Update on base_tbl + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: ((a < 10) AND (a > 0) AND (a = 4)) +(3 rows) + +EXPLAIN (costs off) DELETE FROM rw_view2 WHERE aaa=4; + QUERY PLAN +-------------------------------------------------------- + Delete on base_tbl + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: ((a < 10) AND (a > 0) AND (a = 4)) +(3 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- view on top of view with rules +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a>0 OFFSET 0; -- not 
updatable without rules/triggers +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a<10; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | NO + rw_view2 | NO +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | NO | NO + rw_view2 | NO | NO +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +CREATE RULE rw_view1_ins_rule AS ON INSERT TO rw_view1 + DO INSTEAD INSERT INTO base_tbl VALUES (NEW.a, NEW.b) RETURNING *; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | YES + rw_view2 | YES +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | NO | YES + rw_view2 | NO | YES +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +CREATE RULE rw_view1_upd_rule AS ON UPDATE TO rw_view1 + DO INSTEAD UPDATE base_tbl SET b=NEW.b WHERE a=OLD.a RETURNING NEW.*; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | YES + rw_view2 | YES +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | NO | YES + rw_view2 | NO | YES +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +CREATE RULE rw_view1_del_rule AS ON DELETE TO rw_view1 + DO INSTEAD DELETE FROM base_tbl WHERE a=OLD.a RETURNING OLD.*; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | YES + rw_view2 | YES +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into 
+------------+--------------+-------------------- + rw_view1 | YES | YES + rw_view2 | YES | YES +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | YES + rw_view1 | b | YES + rw_view2 | a | YES + rw_view2 | b | YES +(4 rows) + +INSERT INTO rw_view2 VALUES (3, 'Row 3') RETURNING *; + a | b +---+------- + 3 | Row 3 +(1 row) + +UPDATE rw_view2 SET b='Row three' WHERE a=3 RETURNING *; + a | b +---+----------- + 3 | Row three +(1 row) + +SELECT * FROM rw_view2; + a | b +---+----------- + 1 | Row 1 + 2 | Row 2 + 3 | Row three +(3 rows) + +DELETE FROM rw_view2 WHERE a=3 RETURNING *; + a | b +---+----------- + 3 | Row three +(1 row) + +SELECT * FROM rw_view2; + a | b +---+------- + 1 | Row 1 + 2 | Row 2 +(2 rows) + +EXPLAIN (costs off) UPDATE rw_view2 SET a=3 WHERE a=2; + QUERY PLAN +---------------------------------------------------------------- + Update on base_tbl + -> Nested Loop + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: (a = 2) + -> Subquery Scan on rw_view1 + Filter: ((rw_view1.a < 10) AND (rw_view1.a = 2)) + -> Bitmap Heap Scan on base_tbl base_tbl_1 + Recheck Cond: (a > 0) + -> Bitmap Index Scan on base_tbl_pkey + Index Cond: (a > 0) +(10 rows) + +EXPLAIN (costs off) DELETE FROM rw_view2 WHERE a=2; + QUERY PLAN +---------------------------------------------------------------- + Delete on base_tbl + -> Nested Loop + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: (a = 2) + -> Subquery Scan on rw_view1 + Filter: ((rw_view1.a < 10) AND (rw_view1.a = 2)) + -> Bitmap Heap Scan on base_tbl base_tbl_1 + Recheck Cond: (a > 0) + -> Bitmap Index Scan on base_tbl_pkey + Index Cond: (a > 0) +(10 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- view on top of view with triggers +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a>0 OFFSET 0; -- not updatable without rules/triggers +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a<10; +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | NO + rw_view2 | NO +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into, + is_trigger_updatable, is_trigger_deletable, + is_trigger_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +------------+--------------+--------------------+----------------------+----------------------+---------------------------- + rw_view1 | NO | NO | NO | NO | NO + rw_view2 | NO | NO | NO | NO | NO +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +CREATE FUNCTION rw_view1_trig_fn() +RETURNS trigger 
AS +$$ +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO base_tbl VALUES (NEW.a, NEW.b); + RETURN NEW; + ELSIF TG_OP = 'UPDATE' THEN + UPDATE base_tbl SET b=NEW.b WHERE a=OLD.a; + RETURN NEW; + ELSIF TG_OP = 'DELETE' THEN + DELETE FROM base_tbl WHERE a=OLD.a; + RETURN OLD; + END IF; +END; +$$ +LANGUAGE plpgsql; +CREATE TRIGGER rw_view1_ins_trig INSTEAD OF INSERT ON rw_view1 + FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | NO + rw_view2 | NO +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into, + is_trigger_updatable, is_trigger_deletable, + is_trigger_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +------------+--------------+--------------------+----------------------+----------------------+---------------------------- + rw_view1 | NO | NO | NO | NO | YES + rw_view2 | NO | NO | NO | NO | NO +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +CREATE TRIGGER rw_view1_upd_trig INSTEAD OF UPDATE ON rw_view1 + FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | NO + rw_view2 | NO +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into, + is_trigger_updatable, is_trigger_deletable, + is_trigger_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +------------+--------------+--------------------+----------------------+----------------------+---------------------------- + rw_view1 | NO | NO | YES | NO | YES + rw_view2 | NO | NO | NO | NO | NO +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +CREATE TRIGGER rw_view1_del_trig INSTEAD OF DELETE ON rw_view1 + FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | NO + rw_view2 | NO +(2 rows) + +SELECT table_name, is_updatable, is_insertable_into, + is_trigger_updatable, is_trigger_deletable, + is_trigger_insertable_into + FROM information_schema.views + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into 
+------------+--------------+--------------------+----------------------+----------------------+---------------------------- + rw_view1 | NO | NO | YES | YES | YES + rw_view2 | NO | NO | NO | NO | NO +(2 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE 'rw_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | a | NO + rw_view1 | b | NO + rw_view2 | a | NO + rw_view2 | b | NO +(4 rows) + +INSERT INTO rw_view2 VALUES (3, 'Row 3') RETURNING *; + a | b +---+------- + 3 | Row 3 +(1 row) + +UPDATE rw_view2 SET b='Row three' WHERE a=3 RETURNING *; + a | b +---+----------- + 3 | Row three +(1 row) + +SELECT * FROM rw_view2; + a | b +---+----------- + 1 | Row 1 + 2 | Row 2 + 3 | Row three +(3 rows) + +DELETE FROM rw_view2 WHERE a=3 RETURNING *; + a | b +---+----------- + 3 | Row three +(1 row) + +SELECT * FROM rw_view2; + a | b +---+------- + 1 | Row 1 + 2 | Row 2 +(2 rows) + +EXPLAIN (costs off) UPDATE rw_view2 SET a=3 WHERE a=2; + QUERY PLAN +---------------------------------------------------------- + Update on rw_view1 rw_view1_1 + -> Subquery Scan on rw_view1 + Filter: ((rw_view1.a < 10) AND (rw_view1.a = 2)) + -> Bitmap Heap Scan on base_tbl + Recheck Cond: (a > 0) + -> Bitmap Index Scan on base_tbl_pkey + Index Cond: (a > 0) +(7 rows) + +EXPLAIN (costs off) DELETE FROM rw_view2 WHERE a=2; + QUERY PLAN +---------------------------------------------------------- + Delete on rw_view1 rw_view1_1 + -> Subquery Scan on rw_view1 + Filter: ((rw_view1.a < 10) AND (rw_view1.a = 2)) + -> Bitmap Heap Scan on base_tbl + Recheck Cond: (a > 0) + -> Bitmap Index Scan on base_tbl_pkey + Index Cond: (a > 0) +(7 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +DROP FUNCTION rw_view1_trig_fn(); +-- update using whole row from view +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl SELECT i, 'Row ' || i FROM generate_series(-2, 2) g(i); +CREATE VIEW rw_view1 AS SELECT b AS bb, a AS aa FROM base_tbl; +CREATE FUNCTION rw_view1_aa(x rw_view1) + RETURNS int AS $$ SELECT x.aa $$ LANGUAGE sql; +UPDATE rw_view1 v SET bb='Updated row 2' WHERE rw_view1_aa(v)=2 + RETURNING rw_view1_aa(v), v.bb; + rw_view1_aa | bb +-------------+--------------- + 2 | Updated row 2 +(1 row) + +SELECT * FROM base_tbl; + a | b +----+--------------- + -2 | Row -2 + -1 | Row -1 + 0 | Row 0 + 1 | Row 1 + 2 | Updated row 2 +(5 rows) + +EXPLAIN (costs off) +UPDATE rw_view1 v SET bb='Updated row 2' WHERE rw_view1_aa(v)=2 + RETURNING rw_view1_aa(v), v.bb; + QUERY PLAN +-------------------------------------------------- + Update on base_tbl + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: (a = 2) +(3 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to function rw_view1_aa(rw_view1) +-- permissions checks +CREATE USER regress_view_user1; +CREATE USER regress_view_user2; +CREATE USER regress_view_user3; +SET SESSION AUTHORIZATION regress_view_user1; +CREATE TABLE base_tbl(a int, b text, c float); +INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); +CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; +INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2); +GRANT SELECT ON base_tbl TO regress_view_user2; +GRANT SELECT ON rw_view1 TO 
regress_view_user2; +GRANT UPDATE (a,c) ON base_tbl TO regress_view_user2; +GRANT UPDATE (bb,cc) ON rw_view1 TO regress_view_user2; +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_view_user2; +CREATE VIEW rw_view2 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; +SELECT * FROM base_tbl; -- ok + a | b | c +---+-------+--- + 1 | Row 1 | 1 + 2 | Row 2 | 2 +(2 rows) + +SELECT * FROM rw_view1; -- ok + bb | cc | aa +-------+----+---- + Row 1 | 1 | 1 + Row 2 | 2 | 2 +(2 rows) + +SELECT * FROM rw_view2; -- ok + bb | cc | aa +-------+----+---- + Row 1 | 1 | 1 + Row 2 | 2 | 2 +(2 rows) + +INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- not allowed +ERROR: permission denied for table base_tbl +INSERT INTO rw_view1 VALUES ('Row 3', 3.0, 3); -- not allowed +ERROR: permission denied for view rw_view1 +INSERT INTO rw_view2 VALUES ('Row 3', 3.0, 3); -- not allowed +ERROR: permission denied for table base_tbl +UPDATE base_tbl SET a=a, c=c; -- ok +UPDATE base_tbl SET b=b; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET bb=bb, cc=cc; -- ok +UPDATE rw_view1 SET aa=aa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET aa=aa, cc=cc; -- ok +UPDATE rw_view2 SET bb=bb; -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM base_tbl; -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM rw_view1; -- not allowed +ERROR: permission denied for view rw_view1 +DELETE FROM rw_view2; -- not allowed +ERROR: permission denied for table base_tbl +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_view_user1; +GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_view_user2; +INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- ok +INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed +ERROR: permission denied for view rw_view1 +INSERT INTO rw_view2 VALUES ('Row 4', 4.0, 4); -- ok +DELETE FROM base_tbl WHERE a=1; -- ok +DELETE FROM rw_view1 WHERE aa=2; -- not allowed +ERROR: permission denied for view rw_view1 +DELETE FROM rw_view2 WHERE aa=2; -- ok +SELECT * FROM base_tbl; + a | b | c +---+-------+--- + 3 | Row 3 | 3 + 4 | Row 4 | 4 +(2 rows) + +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_view_user1; +REVOKE INSERT, DELETE ON base_tbl FROM regress_view_user2; +GRANT INSERT, DELETE ON rw_view1 TO regress_view_user2; +RESET SESSION AUTHORIZATION; +SET SESSION AUTHORIZATION regress_view_user2; +INSERT INTO base_tbl VALUES (5, 'Row 5', 5.0); -- not allowed +ERROR: permission denied for table base_tbl +INSERT INTO rw_view1 VALUES ('Row 5', 5.0, 5); -- ok +INSERT INTO rw_view2 VALUES ('Row 6', 6.0, 6); -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM base_tbl WHERE a=3; -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM rw_view1 WHERE aa=3; -- ok +DELETE FROM rw_view2 WHERE aa=4; -- not allowed +ERROR: permission denied for table base_tbl +SELECT * FROM base_tbl; + a | b | c +---+-------+--- + 4 | Row 4 | 4 + 5 | Row 5 | 5 +(2 rows) + +RESET SESSION AUTHORIZATION; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- nested-view permissions +CREATE TABLE base_tbl(a int, b text, c float); +INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); +SET SESSION AUTHORIZATION regress_view_user1; +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl; +SELECT * FROM rw_view1; -- not allowed 
+ERROR: permission denied for table base_tbl +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user1; +GRANT SELECT ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +RESET SESSION AUTHORIZATION; +GRANT UPDATE ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; + a | b | c +---+-------+--- + 1 | Row 1 | 1 +(1 row) + +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-----+--- + 1 | foo | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user1; +GRANT UPDATE ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-----+--- + 1 | foo | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; + a | b | c +---+-----+--- + 1 | foo | 1 +(1 row) + +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; +RESET SESSION AUTHORIZATION; +REVOKE UPDATE ON base_tbl FROM regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; + a | b | c +---+-----+--- + 1 | bar | 1 +(1 row) + +SELECT * FROM rw_view1 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET b = 'foo' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; + a | b | c +---+-----+--- + 1 | bar | 1 +(1 row) + +SELECT * FROM rw_view2 FOR UPDATE; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET b = 'bar' WHERE a = 1; -- not allowed +ERROR: permission denied for table base_tbl +RESET SESSION 
AUTHORIZATION; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- security invoker view permissions +SET SESSION AUTHORIZATION regress_view_user1; +CREATE TABLE base_tbl(a int, b text, c float); +INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); +CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; +ALTER VIEW rw_view1 SET (security_invoker = true); +INSERT INTO rw_view1 VALUES ('Row 2', 2.0, 2); +GRANT SELECT ON rw_view1 TO regress_view_user2; +GRANT UPDATE (bb,cc) ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM base_tbl; -- not allowed +ERROR: permission denied for table base_tbl +SELECT * FROM rw_view1; -- not allowed +ERROR: permission denied for table base_tbl +INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- not allowed +ERROR: permission denied for table base_tbl +INSERT INTO rw_view1 VALUES ('Row 3', 3.0, 3); -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE base_tbl SET a=a; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET bb=bb, cc=cc; -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM base_tbl; -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM rw_view1; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user1; +GRANT SELECT ON base_tbl TO regress_view_user2; +GRANT UPDATE (a,c) ON base_tbl TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM base_tbl; -- ok + a | b | c +---+-------+--- + 1 | Row 1 | 1 + 2 | Row 2 | 2 +(2 rows) + +SELECT * FROM rw_view1; -- ok + bb | cc | aa +-------+----+---- + Row 1 | 1 | 1 + Row 2 | 2 | 2 +(2 rows) + +UPDATE base_tbl SET a=a, c=c; -- ok +UPDATE base_tbl SET b=b; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET cc=cc; -- ok +UPDATE rw_view1 SET aa=aa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view1 SET bb=bb; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user1; +GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +INSERT INTO base_tbl VALUES (3, 'Row 3', 3.0); -- ok +INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed +ERROR: permission denied for view rw_view1 +DELETE FROM base_tbl WHERE a=1; -- ok +DELETE FROM rw_view1 WHERE aa=2; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user1; +REVOKE INSERT, DELETE ON base_tbl FROM regress_view_user2; +GRANT INSERT, DELETE ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- not allowed +ERROR: permission denied for table base_tbl +DELETE FROM rw_view1 WHERE aa=2; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user1; +GRANT INSERT, DELETE ON base_tbl TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +INSERT INTO rw_view1 VALUES ('Row 4', 4.0, 4); -- ok +DELETE FROM rw_view1 WHERE aa=2; -- ok +SELECT * FROM base_tbl; -- ok + a | b | c +---+-------+--- + 3 | Row 3 | 3 + 4 | Row 4 | 4 +(2 rows) + +RESET SESSION AUTHORIZATION; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- ordinary view on top of security invoker view permissions +CREATE TABLE base_tbl(a int, b text, c 
float); +INSERT INTO base_tbl VALUES (1, 'Row 1', 1.0); +SET SESSION AUTHORIZATION regress_view_user1; +CREATE VIEW rw_view1 AS SELECT b AS bb, c AS cc, a AS aa FROM base_tbl; +ALTER VIEW rw_view1 SET (security_invoker = true); +SELECT * FROM rw_view1; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET aa=aa; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +CREATE VIEW rw_view2 AS SELECT cc AS ccc, aa AS aaa, bb AS bbb FROM rw_view1; +GRANT SELECT, UPDATE ON rw_view2 TO regress_view_user3; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user1; +GRANT UPDATE (a, b) ON base_tbl TO regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; -- ok + bb | cc | aa +-------+----+---- + Row 1 | 1 | 1 +(1 row) + +UPDATE rw_view1 SET aa=aa, bb=bb; -- ok +UPDATE rw_view1 SET cc=cc; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user3; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +SET SESSION AUTHORIZATION regress_view_user1; +GRANT SELECT ON rw_view1 TO regress_view_user2; +GRANT UPDATE (bb, cc) ON rw_view1 TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user3; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user2; +GRANT UPDATE (a, c) ON base_tbl TO regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- ok + ccc | aaa | bbb +-----+-----+------- + 1 | 1 | Row 1 +(1 row) + +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET ccc=ccc; -- ok +SET SESSION AUTHORIZATION regress_view_user3; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET ccc=ccc; -- not allowed +ERROR: permission denied for table base_tbl +RESET SESSION AUTHORIZATION; +GRANT SELECT ON base_tbl TO regress_view_user3; +GRANT UPDATE (a, c) ON base_tbl TO regress_view_user3; +SET SESSION AUTHORIZATION regress_view_user3; +SELECT * FROM rw_view2; -- ok + ccc | aaa | bbb +-----+-----+------- + 1 | 1 | Row 1 +(1 row) + +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl 
+UPDATE rw_view2 SET ccc=ccc; -- ok +RESET SESSION AUTHORIZATION; +REVOKE SELECT, UPDATE ON base_tbl FROM regress_view_user1; +SET SESSION AUTHORIZATION regress_view_user1; +SELECT * FROM rw_view1; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view1 SET aa=aa; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- ok + ccc | aaa | bbb +-----+-----+------- + 1 | 1 | Row 1 +(1 row) + +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET ccc=ccc; -- ok +SET SESSION AUTHORIZATION regress_view_user3; +SELECT * FROM rw_view2; -- ok + ccc | aaa | bbb +-----+-----+------- + 1 | 1 | Row 1 +(1 row) + +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET ccc=ccc; -- ok +RESET SESSION AUTHORIZATION; +REVOKE SELECT, UPDATE ON base_tbl FROM regress_view_user2; +SET SESSION AUTHORIZATION regress_view_user2; +SELECT * FROM rw_view2; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET ccc=ccc; -- not allowed +ERROR: permission denied for table base_tbl +SET SESSION AUTHORIZATION regress_view_user3; +SELECT * FROM rw_view2; -- ok + ccc | aaa | bbb +-----+-----+------- + 1 | 1 | Row 1 +(1 row) + +UPDATE rw_view2 SET aaa=aaa; -- not allowed +ERROR: permission denied for view rw_view1 +UPDATE rw_view2 SET bbb=bbb; -- not allowed +ERROR: permission denied for table base_tbl +UPDATE rw_view2 SET ccc=ccc; -- ok +RESET SESSION AUTHORIZATION; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +DROP USER regress_view_user1; +DROP USER regress_view_user2; +DROP USER regress_view_user3; +-- column defaults +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified', c serial); +INSERT INTO base_tbl VALUES (1, 'Row 1'); +INSERT INTO base_tbl VALUES (2, 'Row 2'); +INSERT INTO base_tbl VALUES (3); +CREATE VIEW rw_view1 AS SELECT a AS aa, b AS bb FROM base_tbl; +ALTER VIEW rw_view1 ALTER COLUMN bb SET DEFAULT 'View default'; +INSERT INTO rw_view1 VALUES (4, 'Row 4'); +INSERT INTO rw_view1 (aa) VALUES (5); +SELECT * FROM base_tbl; + a | b | c +---+--------------+--- + 1 | Row 1 | 1 + 2 | Row 2 | 2 + 3 | Unspecified | 3 + 4 | Row 4 | 4 + 5 | View default | 5 +(5 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- Table having triggers +CREATE TABLE base_tbl (a int PRIMARY KEY, b text DEFAULT 'Unspecified'); +INSERT INTO base_tbl VALUES (1, 'Row 1'); +INSERT INTO base_tbl VALUES (2, 'Row 2'); +CREATE FUNCTION rw_view1_trig_fn() +RETURNS trigger AS +$$ +BEGIN + IF TG_OP = 'INSERT' THEN + UPDATE base_tbl SET b=NEW.b WHERE a=1; + RETURN NULL; + END IF; + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; +CREATE TRIGGER rw_view1_ins_trig AFTER INSERT ON base_tbl + FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); +CREATE VIEW rw_view1 AS SELECT a AS aa, b AS bb FROM base_tbl; +INSERT INTO rw_view1 VALUES (3, 'Row 3'); +select * from base_tbl; + a | b +---+------- + 2 | Row 2 + 3 | Row 3 + 1 | Row 3 +(3 rows) + 
+DROP VIEW rw_view1; +DROP TRIGGER rw_view1_ins_trig on base_tbl; +DROP FUNCTION rw_view1_trig_fn(); +DROP TABLE base_tbl; +-- view with ORDER BY +CREATE TABLE base_tbl (a int, b int); +INSERT INTO base_tbl VALUES (1,2), (4,5), (3,-3); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl ORDER BY a+b; +SELECT * FROM rw_view1; + a | b +---+---- + 3 | -3 + 1 | 2 + 4 | 5 +(3 rows) + +INSERT INTO rw_view1 VALUES (7,-8); +SELECT * FROM rw_view1; + a | b +---+---- + 7 | -8 + 3 | -3 + 1 | 2 + 4 | 5 +(4 rows) + +EXPLAIN (verbose, costs off) UPDATE rw_view1 SET b = b + 1 RETURNING *; + QUERY PLAN +------------------------------------------------- + Update on public.base_tbl + Output: base_tbl.a, base_tbl.b + -> Seq Scan on public.base_tbl + Output: (base_tbl.b + 1), base_tbl.ctid +(4 rows) + +UPDATE rw_view1 SET b = b + 1 RETURNING *; + a | b +---+---- + 1 | 3 + 4 | 6 + 3 | -2 + 7 | -7 +(4 rows) + +SELECT * FROM rw_view1; + a | b +---+---- + 7 | -7 + 3 | -2 + 1 | 3 + 4 | 6 +(4 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- multiple array-column updates +CREATE TABLE base_tbl (a int, arr int[]); +INSERT INTO base_tbl VALUES (1,ARRAY[2]), (3,ARRAY[4]); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl; +UPDATE rw_view1 SET arr[1] = 42, arr[2] = 77 WHERE a = 3; +SELECT * FROM rw_view1; + a | arr +---+--------- + 1 | {2} + 3 | {42,77} +(2 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- views with updatable and non-updatable columns +CREATE TABLE base_tbl(a float); +INSERT INTO base_tbl SELECT i/10.0 FROM generate_series(1,10) g(i); +CREATE VIEW rw_view1 AS + SELECT ctid, sin(a) s, a, cos(a) c + FROM base_tbl + WHERE a != 0 + ORDER BY abs(a); +INSERT INTO rw_view1 VALUES (null, null, 1.1, null); -- should fail +ERROR: cannot insert into column "ctid" of view "rw_view1" +DETAIL: View columns that refer to system columns are not updatable. +INSERT INTO rw_view1 (s, c, a) VALUES (null, null, 1.1); -- should fail +ERROR: cannot insert into column "s" of view "rw_view1" +DETAIL: View columns that are not columns of their base relation are not updatable. +INSERT INTO rw_view1 (a) VALUES (1.1) RETURNING a, s, c; -- OK + a | s | c +-----+-------------------+------------------- + 1.1 | 0.891207360061435 | 0.453596121425577 +(1 row) + +UPDATE rw_view1 SET s = s WHERE a = 1.1; -- should fail +ERROR: cannot update column "s" of view "rw_view1" +DETAIL: View columns that are not columns of their base relation are not updatable. +UPDATE rw_view1 SET a = 1.05 WHERE a = 1.1 RETURNING s; -- OK + s +------------------- + 0.867423225594017 +(1 row) + +DELETE FROM rw_view1 WHERE a = 1.05; -- OK +CREATE VIEW rw_view2 AS + SELECT s, c, s/c t, a base_a, ctid + FROM rw_view1; +INSERT INTO rw_view2 VALUES (null, null, null, 1.1, null); -- should fail +ERROR: cannot insert into column "t" of view "rw_view2" +DETAIL: View columns that are not columns of their base relation are not updatable. +INSERT INTO rw_view2(s, c, base_a) VALUES (null, null, 1.1); -- should fail +ERROR: cannot insert into column "s" of view "rw_view1" +DETAIL: View columns that are not columns of their base relation are not updatable. +INSERT INTO rw_view2(base_a) VALUES (1.1) RETURNING t; -- OK + t +------------------ + 1.96475965724865 +(1 row) + +UPDATE rw_view2 SET s = s WHERE base_a = 1.1; -- should fail +ERROR: cannot update column "s" of view "rw_view1" +DETAIL: View columns that are not columns of their base relation are not updatable. 
+UPDATE rw_view2 SET t = t WHERE base_a = 1.1; -- should fail +ERROR: cannot update column "t" of view "rw_view2" +DETAIL: View columns that are not columns of their base relation are not updatable. +UPDATE rw_view2 SET base_a = 1.05 WHERE base_a = 1.1; -- OK +DELETE FROM rw_view2 WHERE base_a = 1.05 RETURNING base_a, s, c, t; -- OK + base_a | s | c | t +--------+-------------------+-------------------+------------------ + 1.05 | 0.867423225594017 | 0.497571047891727 | 1.74331530998317 +(1 row) + +CREATE VIEW rw_view3 AS + SELECT s, c, s/c t, ctid + FROM rw_view1; +INSERT INTO rw_view3 VALUES (null, null, null, null); -- should fail +ERROR: cannot insert into column "t" of view "rw_view3" +DETAIL: View columns that are not columns of their base relation are not updatable. +INSERT INTO rw_view3(s) VALUES (null); -- should fail +ERROR: cannot insert into column "s" of view "rw_view1" +DETAIL: View columns that are not columns of their base relation are not updatable. +UPDATE rw_view3 SET s = s; -- should fail +ERROR: cannot update column "s" of view "rw_view1" +DETAIL: View columns that are not columns of their base relation are not updatable. +DELETE FROM rw_view3 WHERE s = sin(0.1); -- should be OK +SELECT * FROM base_tbl ORDER BY a; + a +----- + 0.2 + 0.3 + 0.4 + 0.5 + 0.6 + 0.7 + 0.8 + 0.9 + 1 +(9 rows) + +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name LIKE E'r_\\_view%' + ORDER BY table_name; + table_name | is_insertable_into +------------+-------------------- + rw_view1 | YES + rw_view2 | YES + rw_view3 | NO +(3 rows) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name LIKE E'r_\\_view%' + ORDER BY table_name; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | YES | YES + rw_view2 | YES | YES + rw_view3 | NO | NO +(3 rows) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name LIKE E'r_\\_view%' + ORDER BY table_name, ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | ctid | NO + rw_view1 | s | NO + rw_view1 | a | YES + rw_view1 | c | NO + rw_view2 | s | NO + rw_view2 | c | NO + rw_view2 | t | NO + rw_view2 | base_a | YES + rw_view2 | ctid | NO + rw_view3 | s | NO + rw_view3 | c | NO + rw_view3 | t | NO + rw_view3 | ctid | NO +(13 rows) + +SELECT events & 4 != 0 AS upd, + events & 8 != 0 AS ins, + events & 16 != 0 AS del + FROM pg_catalog.pg_relation_is_updatable('rw_view3'::regclass, false) t(events); + upd | ins | del +-----+-----+----- + f | f | t +(1 row) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +drop cascades to view rw_view3 +-- view on table with GENERATED columns +CREATE TABLE base_tbl (id int, idplus1 int GENERATED ALWAYS AS (id + 1) STORED); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl; +INSERT INTO base_tbl (id) VALUES (1); +INSERT INTO rw_view1 (id) VALUES (2); +INSERT INTO base_tbl (id, idplus1) VALUES (3, DEFAULT); +INSERT INTO rw_view1 (id, idplus1) VALUES (4, DEFAULT); +INSERT INTO base_tbl (id, idplus1) VALUES (5, 6); -- error +ERROR: cannot insert a non-DEFAULT value into column "idplus1" +DETAIL: Column "idplus1" is a generated column. 
+INSERT INTO rw_view1 (id, idplus1) VALUES (6, 7); -- error +ERROR: cannot insert a non-DEFAULT value into column "idplus1" +DETAIL: Column "idplus1" is a generated column. +SELECT * FROM base_tbl; + id | idplus1 +----+--------- + 1 | 2 + 2 | 3 + 3 | 4 + 4 | 5 +(4 rows) + +UPDATE base_tbl SET id = 2000 WHERE id = 2; +UPDATE rw_view1 SET id = 3000 WHERE id = 3; +SELECT * FROM base_tbl; + id | idplus1 +------+--------- + 1 | 2 + 4 | 5 + 2000 | 2001 + 3000 | 3001 +(4 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- inheritance tests +CREATE TABLE base_tbl_parent (a int); +CREATE TABLE base_tbl_child (CHECK (a > 0)) INHERITS (base_tbl_parent); +INSERT INTO base_tbl_parent SELECT * FROM generate_series(-8, -1); +INSERT INTO base_tbl_child SELECT * FROM generate_series(1, 8); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl_parent; +CREATE VIEW rw_view2 AS SELECT * FROM ONLY base_tbl_parent; +SELECT * FROM rw_view1 ORDER BY a; + a +---- + -8 + -7 + -6 + -5 + -4 + -3 + -2 + -1 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +(16 rows) + +SELECT * FROM ONLY rw_view1 ORDER BY a; + a +---- + -8 + -7 + -6 + -5 + -4 + -3 + -2 + -1 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 +(16 rows) + +SELECT * FROM rw_view2 ORDER BY a; + a +---- + -8 + -7 + -6 + -5 + -4 + -3 + -2 + -1 +(8 rows) + +INSERT INTO rw_view1 VALUES (-100), (100); +INSERT INTO rw_view2 VALUES (-200), (200); +UPDATE rw_view1 SET a = a*10 WHERE a IN (-1, 1); -- Should produce -10 and 10 +UPDATE ONLY rw_view1 SET a = a*10 WHERE a IN (-2, 2); -- Should produce -20 and 20 +UPDATE rw_view2 SET a = a*10 WHERE a IN (-3, 3); -- Should produce -30 only +UPDATE ONLY rw_view2 SET a = a*10 WHERE a IN (-4, 4); -- Should produce -40 only +DELETE FROM rw_view1 WHERE a IN (-5, 5); -- Should delete -5 and 5 +DELETE FROM ONLY rw_view1 WHERE a IN (-6, 6); -- Should delete -6 and 6 +DELETE FROM rw_view2 WHERE a IN (-7, 7); -- Should delete -7 only +DELETE FROM ONLY rw_view2 WHERE a IN (-8, 8); -- Should delete -8 only +SELECT * FROM ONLY base_tbl_parent ORDER BY a; + a +------ + -200 + -100 + -40 + -30 + -20 + -10 + 100 + 200 +(8 rows) + +SELECT * FROM base_tbl_child ORDER BY a; + a +---- + 3 + 4 + 7 + 8 + 10 + 20 +(6 rows) + +CREATE TABLE other_tbl_parent (id int); +CREATE TABLE other_tbl_child () INHERITS (other_tbl_parent); +INSERT INTO other_tbl_parent VALUES (7),(200); +INSERT INTO other_tbl_child VALUES (8),(100); +EXPLAIN (costs off) +UPDATE rw_view1 SET a = a + 1000 FROM other_tbl_parent WHERE a = id; + QUERY PLAN +------------------------------------------------------------------------- + Update on base_tbl_parent + Update on base_tbl_parent base_tbl_parent_1 + Update on base_tbl_child base_tbl_parent_2 + -> Merge Join + Merge Cond: (base_tbl_parent.a = other_tbl_parent.id) + -> Sort + Sort Key: base_tbl_parent.a + -> Append + -> Seq Scan on base_tbl_parent base_tbl_parent_1 + -> Seq Scan on base_tbl_child base_tbl_parent_2 + -> Sort + Sort Key: other_tbl_parent.id + -> Append + -> Seq Scan on other_tbl_parent other_tbl_parent_1 + -> Seq Scan on other_tbl_child other_tbl_parent_2 +(15 rows) + +UPDATE rw_view1 SET a = a + 1000 FROM other_tbl_parent WHERE a = id; +SELECT * FROM ONLY base_tbl_parent ORDER BY a; + a +------ + -200 + -100 + -40 + -30 + -20 + -10 + 1100 + 1200 +(8 rows) + +SELECT * FROM base_tbl_child ORDER BY a; + a +------ + 3 + 4 + 10 + 20 + 1007 + 1008 +(6 rows) + +DROP TABLE base_tbl_parent, base_tbl_child CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 
+DROP TABLE other_tbl_parent CASCADE; +NOTICE: drop cascades to table other_tbl_child +-- simple WITH CHECK OPTION +CREATE TABLE base_tbl (a int, b int DEFAULT 10); +INSERT INTO base_tbl VALUES (1,2), (2,3), (1,-1); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a < b + WITH LOCAL CHECK OPTION; +\d+ rw_view1 + View "public.rw_view1" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | + b | integer | | | | plain | +View definition: + SELECT a, + b + FROM base_tbl + WHERE a < b; +Options: check_option=local + +SELECT * FROM information_schema.views WHERE table_name = 'rw_view1'; + table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +---------------+--------------+------------+------------------+--------------+--------------+--------------------+----------------------+----------------------+---------------------------- + regression | public | rw_view1 | SELECT a, +| LOCAL | YES | YES | NO | NO | NO + | | | b +| | | | | | + | | | FROM base_tbl+| | | | | | + | | | WHERE (a < b); | | | | | | +(1 row) + +INSERT INTO rw_view1 VALUES(3,4); -- ok +INSERT INTO rw_view1 VALUES(4,3); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (4, 3). +INSERT INTO rw_view1 VALUES(5,null); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (5, null). +UPDATE rw_view1 SET b = 5 WHERE a = 3; -- ok +UPDATE rw_view1 SET b = -5 WHERE a = 3; -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (3, -5). +INSERT INTO rw_view1(a) VALUES (9); -- ok +INSERT INTO rw_view1(a) VALUES (10); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (10, 10). +SELECT * FROM base_tbl; + a | b +---+---- + 1 | 2 + 2 | 3 + 1 | -1 + 3 | 5 + 9 | 10 +(5 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- WITH LOCAL/CASCADED CHECK OPTION +CREATE TABLE base_tbl (a int); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a > 0; +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a < 10 + WITH CHECK OPTION; -- implicitly cascaded +\d+ rw_view2 + View "public.rw_view2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | +View definition: + SELECT a + FROM rw_view1 + WHERE a < 10; +Options: check_option=cascaded + +SELECT * FROM information_schema.views WHERE table_name = 'rw_view2'; + table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+---------------------------- + regression | public | rw_view2 | SELECT a +| CASCADED | YES | YES | NO | NO | NO + | | | FROM rw_view1 +| | | | | | + | | | WHERE (a < 10); | | | | | | +(1 row) + +INSERT INTO rw_view2 VALUES (-5); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (-5). 
+INSERT INTO rw_view2 VALUES (5); -- ok +INSERT INTO rw_view2 VALUES (15); -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (15). +SELECT * FROM base_tbl; + a +--- + 5 +(1 row) + +UPDATE rw_view2 SET a = a - 10; -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (-5). +UPDATE rw_view2 SET a = a + 10; -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (15). +CREATE OR REPLACE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a < 10 + WITH LOCAL CHECK OPTION; +\d+ rw_view2 + View "public.rw_view2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | +View definition: + SELECT a + FROM rw_view1 + WHERE a < 10; +Options: check_option=local + +SELECT * FROM information_schema.views WHERE table_name = 'rw_view2'; + table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+---------------------------- + regression | public | rw_view2 | SELECT a +| LOCAL | YES | YES | NO | NO | NO + | | | FROM rw_view1 +| | | | | | + | | | WHERE (a < 10); | | | | | | +(1 row) + +INSERT INTO rw_view2 VALUES (-10); -- ok, but not in view +INSERT INTO rw_view2 VALUES (20); -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (20). +SELECT * FROM base_tbl; + a +----- + 5 + -10 +(2 rows) + +ALTER VIEW rw_view1 SET (check_option=here); -- invalid +ERROR: invalid value for enum option "check_option": here +DETAIL: Valid values are "local" and "cascaded". +ALTER VIEW rw_view1 SET (check_option=local); +INSERT INTO rw_view2 VALUES (-20); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (-20). +INSERT INTO rw_view2 VALUES (30); -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (30). 
+ALTER VIEW rw_view2 RESET (check_option); +\d+ rw_view2 + View "public.rw_view2" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+---------+-----------+----------+---------+---------+------------- + a | integer | | | | plain | +View definition: + SELECT a + FROM rw_view1 + WHERE a < 10; + +SELECT * FROM information_schema.views WHERE table_name = 'rw_view2'; + table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+---------------------------- + regression | public | rw_view2 | SELECT a +| NONE | YES | YES | NO | NO | NO + | | | FROM rw_view1 +| | | | | | + | | | WHERE (a < 10); | | | | | | +(1 row) + +INSERT INTO rw_view2 VALUES (30); -- ok, but not in view +SELECT * FROM base_tbl; + a +----- + 5 + -10 + 30 +(3 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- WITH CHECK OPTION with no local view qual +CREATE TABLE base_tbl (a int); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WITH CHECK OPTION; +CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a > 0; +CREATE VIEW rw_view3 AS SELECT * FROM rw_view2 WITH CHECK OPTION; +SELECT * FROM information_schema.views WHERE table_name LIKE E'rw\\_view_' ORDER BY table_name; + table_catalog | table_schema | table_name | view_definition | check_option | is_updatable | is_insertable_into | is_trigger_updatable | is_trigger_deletable | is_trigger_insertable_into +---------------+--------------+------------+-------------------+--------------+--------------+--------------------+----------------------+----------------------+---------------------------- + regression | public | rw_view1 | SELECT a +| CASCADED | YES | YES | NO | NO | NO + | | | FROM base_tbl; | | | | | | + regression | public | rw_view2 | SELECT a +| NONE | YES | YES | NO | NO | NO + | | | FROM rw_view1 +| | | | | | + | | | WHERE (a > 0); | | | | | | + regression | public | rw_view3 | SELECT a +| CASCADED | YES | YES | NO | NO | NO + | | | FROM rw_view2; | | | | | | +(3 rows) + +INSERT INTO rw_view1 VALUES (-1); -- ok +INSERT INTO rw_view1 VALUES (1); -- ok +INSERT INTO rw_view2 VALUES (-2); -- ok, but not in view +INSERT INTO rw_view2 VALUES (2); -- ok +INSERT INTO rw_view3 VALUES (-3); -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (-3). +INSERT INTO rw_view3 VALUES (3); -- ok +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +drop cascades to view rw_view3 +-- WITH CHECK OPTION with scalar array ops +CREATE TABLE base_tbl (a int, b int[]); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a = ANY (b) + WITH CHECK OPTION; +INSERT INTO rw_view1 VALUES (1, ARRAY[1,2,3]); -- ok +INSERT INTO rw_view1 VALUES (10, ARRAY[4,5]); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (10, {4,5}). +UPDATE rw_view1 SET b[2] = -b[2] WHERE a = 1; -- ok +UPDATE rw_view1 SET b[1] = -b[1] WHERE a = 1; -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (1, {-1,-2,3}). 
+PREPARE ins(int, int[]) AS INSERT INTO rw_view1 VALUES($1, $2); +EXECUTE ins(2, ARRAY[1,2,3]); -- ok +EXECUTE ins(10, ARRAY[4,5]); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (10, {4,5}). +DEALLOCATE PREPARE ins; +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- WITH CHECK OPTION with subquery +CREATE TABLE base_tbl (a int); +CREATE TABLE ref_tbl (a int PRIMARY KEY); +INSERT INTO ref_tbl SELECT * FROM generate_series(1,10); +CREATE VIEW rw_view1 AS + SELECT * FROM base_tbl b + WHERE EXISTS(SELECT 1 FROM ref_tbl r WHERE r.a = b.a) + WITH CHECK OPTION; +INSERT INTO rw_view1 VALUES (5); -- ok +INSERT INTO rw_view1 VALUES (15); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (15). +UPDATE rw_view1 SET a = a + 5; -- ok +UPDATE rw_view1 SET a = a + 5; -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (15). +EXPLAIN (costs off) INSERT INTO rw_view1 VALUES (5); + QUERY PLAN +--------------------------------------------------------- + Insert on base_tbl b + -> Result + SubPlan 1 + -> Index Only Scan using ref_tbl_pkey on ref_tbl r + Index Cond: (a = b.a) +(5 rows) + +EXPLAIN (costs off) UPDATE rw_view1 SET a = a + 5; + QUERY PLAN +----------------------------------------------------------- + Update on base_tbl b + -> Hash Join + Hash Cond: (b.a = r.a) + -> Seq Scan on base_tbl b + -> Hash + -> Seq Scan on ref_tbl r + SubPlan 1 + -> Index Only Scan using ref_tbl_pkey on ref_tbl r_1 + Index Cond: (a = b.a) +(9 rows) + +DROP TABLE base_tbl, ref_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- WITH CHECK OPTION with BEFORE trigger on base table +CREATE TABLE base_tbl (a int, b int); +CREATE FUNCTION base_tbl_trig_fn() +RETURNS trigger AS +$$ +BEGIN + NEW.b := 10; + RETURN NEW; +END; +$$ +LANGUAGE plpgsql; +CREATE TRIGGER base_tbl_trig BEFORE INSERT OR UPDATE ON base_tbl + FOR EACH ROW EXECUTE PROCEDURE base_tbl_trig_fn(); +CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a < b WITH CHECK OPTION; +INSERT INTO rw_view1 VALUES (5,0); -- ok +INSERT INTO rw_view1 VALUES (15, 20); -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (15, 10). +UPDATE rw_view1 SET a = 20, b = 30; -- should fail +ERROR: new row violates check option for view "rw_view1" +DETAIL: Failing row contains (20, 10). +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +DROP FUNCTION base_tbl_trig_fn(); +-- WITH LOCAL CHECK OPTION with INSTEAD OF trigger on base view +CREATE TABLE base_tbl (a int, b int); +CREATE VIEW rw_view1 AS SELECT a FROM base_tbl WHERE a < b; +CREATE FUNCTION rw_view1_trig_fn() +RETURNS trigger AS +$$ +BEGIN + IF TG_OP = 'INSERT' THEN + INSERT INTO base_tbl VALUES (NEW.a, 10); + RETURN NEW; + ELSIF TG_OP = 'UPDATE' THEN + UPDATE base_tbl SET a=NEW.a WHERE a=OLD.a; + RETURN NEW; + ELSIF TG_OP = 'DELETE' THEN + DELETE FROM base_tbl WHERE a=OLD.a; + RETURN OLD; + END IF; +END; +$$ +LANGUAGE plpgsql; +CREATE TRIGGER rw_view1_trig + INSTEAD OF INSERT OR UPDATE OR DELETE ON rw_view1 + FOR EACH ROW EXECUTE PROCEDURE rw_view1_trig_fn(); +CREATE VIEW rw_view2 AS + SELECT * FROM rw_view1 WHERE a > 0 WITH LOCAL CHECK OPTION; +INSERT INTO rw_view2 VALUES (-5); -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (-5). 
+INSERT INTO rw_view2 VALUES (5); -- ok +INSERT INTO rw_view2 VALUES (50); -- ok, but not in view +UPDATE rw_view2 SET a = a - 10; -- should fail +ERROR: new row violates check option for view "rw_view2" +DETAIL: Failing row contains (-5). +SELECT * FROM base_tbl; + a | b +----+---- + 5 | 10 + 50 | 10 +(2 rows) + +-- Check option won't cascade down to base view with INSTEAD OF triggers +ALTER VIEW rw_view2 SET (check_option=cascaded); +INSERT INTO rw_view2 VALUES (100); -- ok, but not in view (doesn't fail rw_view1's check) +UPDATE rw_view2 SET a = 200 WHERE a = 5; -- ok, but not in view (doesn't fail rw_view1's check) +SELECT * FROM base_tbl; + a | b +-----+---- + 50 | 10 + 100 | 10 + 200 | 10 +(3 rows) + +-- Neither local nor cascaded check options work with INSTEAD rules +DROP TRIGGER rw_view1_trig ON rw_view1; +CREATE RULE rw_view1_ins_rule AS ON INSERT TO rw_view1 + DO INSTEAD INSERT INTO base_tbl VALUES (NEW.a, 10); +CREATE RULE rw_view1_upd_rule AS ON UPDATE TO rw_view1 + DO INSTEAD UPDATE base_tbl SET a=NEW.a WHERE a=OLD.a; +INSERT INTO rw_view2 VALUES (-10); -- ok, but not in view (doesn't fail rw_view2's check) +INSERT INTO rw_view2 VALUES (5); -- ok +INSERT INTO rw_view2 VALUES (20); -- ok, but not in view (doesn't fail rw_view1's check) +UPDATE rw_view2 SET a = 30 WHERE a = 5; -- ok, but not in view (doesn't fail rw_view1's check) +INSERT INTO rw_view2 VALUES (5); -- ok +UPDATE rw_view2 SET a = -5 WHERE a = 5; -- ok, but not in view (doesn't fail rw_view2's check) +SELECT * FROM base_tbl; + a | b +-----+---- + 50 | 10 + 100 | 10 + 200 | 10 + -10 | 10 + 20 | 10 + 30 | 10 + -5 | 10 +(7 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +DROP FUNCTION rw_view1_trig_fn(); +CREATE TABLE base_tbl (a int); +CREATE VIEW rw_view1 AS SELECT a,10 AS b FROM base_tbl; +CREATE RULE rw_view1_ins_rule AS ON INSERT TO rw_view1 + DO INSTEAD INSERT INTO base_tbl VALUES (NEW.a); +CREATE VIEW rw_view2 AS + SELECT * FROM rw_view1 WHERE a > b WITH LOCAL CHECK OPTION; +INSERT INTO rw_view2 VALUES (2,3); -- ok, but not in view (doesn't fail rw_view2's check) +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- security barrier view +CREATE TABLE base_tbl (person text, visibility text); +INSERT INTO base_tbl VALUES ('Tom', 'public'), + ('Dick', 'private'), + ('Harry', 'public'); +CREATE VIEW rw_view1 AS + SELECT person FROM base_tbl WHERE visibility = 'public'; +CREATE FUNCTION snoop(anyelement) +RETURNS boolean AS +$$ +BEGIN + RAISE NOTICE 'snooped value: %', $1; + RETURN true; +END; +$$ +LANGUAGE plpgsql COST 0.000001; +CREATE OR REPLACE FUNCTION leakproof(anyelement) +RETURNS boolean AS +$$ +BEGIN + RETURN true; +END; +$$ +LANGUAGE plpgsql STRICT IMMUTABLE LEAKPROOF; +SELECT * FROM rw_view1 WHERE snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Dick +NOTICE: snooped value: Harry + person +-------- + Tom + Harry +(2 rows) + +UPDATE rw_view1 SET person=person WHERE snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Dick +NOTICE: snooped value: Harry +DELETE FROM rw_view1 WHERE NOT snoop(person); +NOTICE: snooped value: Dick +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry +ALTER VIEW rw_view1 SET (security_barrier = true); +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name = 'rw_view1'; + table_name | is_insertable_into 
+------------+-------------------- + rw_view1 | YES +(1 row) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name = 'rw_view1'; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view1 | YES | YES +(1 row) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name = 'rw_view1' + ORDER BY ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view1 | person | YES +(1 row) + +SELECT * FROM rw_view1 WHERE snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry + person +-------- + Tom + Harry +(2 rows) + +UPDATE rw_view1 SET person=person WHERE snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry +DELETE FROM rw_view1 WHERE NOT snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry +EXPLAIN (costs off) SELECT * FROM rw_view1 WHERE snoop(person); + QUERY PLAN +----------------------------------------------- + Subquery Scan on rw_view1 + Filter: snoop(rw_view1.person) + -> Seq Scan on base_tbl + Filter: (visibility = 'public'::text) +(4 rows) + +EXPLAIN (costs off) UPDATE rw_view1 SET person=person WHERE snoop(person); + QUERY PLAN +------------------------------------------------------------------- + Update on base_tbl + -> Seq Scan on base_tbl + Filter: ((visibility = 'public'::text) AND snoop(person)) +(3 rows) + +EXPLAIN (costs off) DELETE FROM rw_view1 WHERE NOT snoop(person); + QUERY PLAN +------------------------------------------------------------------------- + Delete on base_tbl + -> Seq Scan on base_tbl + Filter: ((visibility = 'public'::text) AND (NOT snoop(person))) +(3 rows) + +-- security barrier view on top of security barrier view +CREATE VIEW rw_view2 WITH (security_barrier = true) AS + SELECT * FROM rw_view1 WHERE snoop(person); +SELECT table_name, is_insertable_into + FROM information_schema.tables + WHERE table_name = 'rw_view2'; + table_name | is_insertable_into +------------+-------------------- + rw_view2 | YES +(1 row) + +SELECT table_name, is_updatable, is_insertable_into + FROM information_schema.views + WHERE table_name = 'rw_view2'; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + rw_view2 | YES | YES +(1 row) + +SELECT table_name, column_name, is_updatable + FROM information_schema.columns + WHERE table_name = 'rw_view2' + ORDER BY ordinal_position; + table_name | column_name | is_updatable +------------+-------------+-------------- + rw_view2 | person | YES +(1 row) + +SELECT * FROM rw_view2 WHERE snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry +NOTICE: snooped value: Harry + person +-------- + Tom + Harry +(2 rows) + +UPDATE rw_view2 SET person=person WHERE snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry +NOTICE: snooped value: Harry +DELETE FROM rw_view2 WHERE NOT snoop(person); +NOTICE: snooped value: Tom +NOTICE: snooped value: Tom +NOTICE: snooped value: Harry +NOTICE: snooped value: Harry +EXPLAIN (costs off) SELECT * FROM rw_view2 WHERE snoop(person); + QUERY PLAN +----------------------------------------------------- + Subquery Scan on rw_view2 + Filter: snoop(rw_view2.person) + -> Subquery Scan on rw_view1 + Filter: snoop(rw_view1.person) + -> Seq Scan on base_tbl + Filter: (visibility = 'public'::text) +(6 rows) + +EXPLAIN (costs off) UPDATE 
rw_view2 SET person=person WHERE snoop(person); + QUERY PLAN +------------------------------------------------------------------------------------- + Update on base_tbl + -> Seq Scan on base_tbl + Filter: ((visibility = 'public'::text) AND snoop(person) AND snoop(person)) +(3 rows) + +EXPLAIN (costs off) DELETE FROM rw_view2 WHERE NOT snoop(person); + QUERY PLAN +------------------------------------------------------------------------------------------- + Delete on base_tbl + -> Seq Scan on base_tbl + Filter: ((visibility = 'public'::text) AND snoop(person) AND (NOT snoop(person))) +(3 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to view rw_view1 +drop cascades to view rw_view2 +-- security barrier view on top of table with rules +CREATE TABLE base_tbl(id int PRIMARY KEY, data text, deleted boolean); +INSERT INTO base_tbl VALUES (1, 'Row 1', false), (2, 'Row 2', true); +CREATE RULE base_tbl_ins_rule AS ON INSERT TO base_tbl + WHERE EXISTS (SELECT 1 FROM base_tbl t WHERE t.id = new.id) + DO INSTEAD + UPDATE base_tbl SET data = new.data, deleted = false WHERE id = new.id; +CREATE RULE base_tbl_del_rule AS ON DELETE TO base_tbl + DO INSTEAD + UPDATE base_tbl SET deleted = true WHERE id = old.id; +CREATE VIEW rw_view1 WITH (security_barrier=true) AS + SELECT id, data FROM base_tbl WHERE NOT deleted; +SELECT * FROM rw_view1; + id | data +----+------- + 1 | Row 1 +(1 row) + +EXPLAIN (costs off) DELETE FROM rw_view1 WHERE id = 1 AND snoop(data); + QUERY PLAN +------------------------------------------------------------------- + Update on base_tbl base_tbl_1 + -> Nested Loop + -> Index Scan using base_tbl_pkey on base_tbl base_tbl_1 + Index Cond: (id = 1) + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: (id = 1) + Filter: ((NOT deleted) AND snoop(data)) +(7 rows) + +DELETE FROM rw_view1 WHERE id = 1 AND snoop(data); +NOTICE: snooped value: Row 1 +EXPLAIN (costs off) INSERT INTO rw_view1 VALUES (2, 'New row 2'); + QUERY PLAN +----------------------------------------------------------- + Insert on base_tbl + InitPlan 1 (returns $0) + -> Index Only Scan using base_tbl_pkey on base_tbl t + Index Cond: (id = 2) + -> Result + One-Time Filter: ($0 IS NOT TRUE) + + Update on base_tbl + InitPlan 1 (returns $0) + -> Index Only Scan using base_tbl_pkey on base_tbl t + Index Cond: (id = 2) + -> Result + One-Time Filter: $0 + -> Index Scan using base_tbl_pkey on base_tbl + Index Cond: (id = 2) +(15 rows) + +INSERT INTO rw_view1 VALUES (2, 'New row 2'); +SELECT * FROM base_tbl; + id | data | deleted +----+-----------+--------- + 1 | Row 1 | t + 2 | New row 2 | f +(2 rows) + +DROP TABLE base_tbl CASCADE; +NOTICE: drop cascades to view rw_view1 +-- security barrier view based on inheritance set +CREATE TABLE t1 (a int, b float, c text); +CREATE INDEX t1_a_idx ON t1(a); +INSERT INTO t1 +SELECT i,i,'t1' FROM generate_series(1,10) g(i); +ANALYZE t1; +CREATE TABLE t11 (d text) INHERITS (t1); +CREATE INDEX t11_a_idx ON t11(a); +INSERT INTO t11 +SELECT i,i,'t11','t11d' FROM generate_series(1,10) g(i); +ANALYZE t11; +CREATE TABLE t12 (e int[]) INHERITS (t1); +CREATE INDEX t12_a_idx ON t12(a); +INSERT INTO t12 +SELECT i,i,'t12','{1,2}'::int[] FROM generate_series(1,10) g(i); +ANALYZE t12; +CREATE TABLE t111 () INHERITS (t11, t12); +NOTICE: merging multiple inherited definitions of column "a" +NOTICE: merging multiple inherited definitions of column "b" +NOTICE: merging multiple inherited definitions of column "c" +CREATE INDEX t111_a_idx ON t111(a); 
+INSERT INTO t111 +SELECT i,i,'t111','t111d','{1,1,1}'::int[] FROM generate_series(1,10) g(i); +ANALYZE t111; +CREATE VIEW v1 WITH (security_barrier=true) AS +SELECT *, (SELECT d FROM t11 WHERE t11.a = t1.a LIMIT 1) AS d +FROM t1 +WHERE a > 5 AND EXISTS(SELECT 1 FROM t12 WHERE t12.a = t1.a); +SELECT * FROM v1 WHERE a=3; -- should not see anything + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT * FROM v1 WHERE a=8; + a | b | c | d +---+---+------+------ + 8 | 8 | t1 | t11d + 8 | 8 | t11 | t11d + 8 | 8 | t12 | t11d + 8 | 8 | t111 | t11d +(4 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE v1 SET a=100 WHERE snoop(a) AND leakproof(a) AND a < 7 AND a != 6; + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Update on public.t1 + Update on public.t1 t1_1 + Update on public.t11 t1_2 + Update on public.t12 t1_3 + Update on public.t111 t1_4 + -> Result + Output: 100, t1.tableoid, t1.ctid + -> Append + -> Index Scan using t1_a_idx on public.t1 t1_1 + Output: t1_1.tableoid, t1_1.ctid + Index Cond: ((t1_1.a > 5) AND (t1_1.a < 7)) + Filter: ((t1_1.a <> 6) AND (SubPlan 1) AND snoop(t1_1.a) AND leakproof(t1_1.a)) + SubPlan 1 + -> Append + -> Seq Scan on public.t12 t12_1 + Filter: (t12_1.a = t1_1.a) + -> Seq Scan on public.t111 t12_2 + Filter: (t12_2.a = t1_1.a) + -> Index Scan using t11_a_idx on public.t11 t1_2 + Output: t1_2.tableoid, t1_2.ctid + Index Cond: ((t1_2.a > 5) AND (t1_2.a < 7)) + Filter: ((t1_2.a <> 6) AND (SubPlan 1) AND snoop(t1_2.a) AND leakproof(t1_2.a)) + -> Index Scan using t12_a_idx on public.t12 t1_3 + Output: t1_3.tableoid, t1_3.ctid + Index Cond: ((t1_3.a > 5) AND (t1_3.a < 7)) + Filter: ((t1_3.a <> 6) AND (SubPlan 1) AND snoop(t1_3.a) AND leakproof(t1_3.a)) + -> Index Scan using t111_a_idx on public.t111 t1_4 + Output: t1_4.tableoid, t1_4.ctid + Index Cond: ((t1_4.a > 5) AND (t1_4.a < 7)) + Filter: ((t1_4.a <> 6) AND (SubPlan 1) AND snoop(t1_4.a) AND leakproof(t1_4.a)) +(30 rows) + +UPDATE v1 SET a=100 WHERE snoop(a) AND leakproof(a) AND a < 7 AND a != 6; +SELECT * FROM v1 WHERE a=100; -- Nothing should have been changed to 100 + a | b | c | d +---+---+---+--- +(0 rows) + +SELECT * FROM t1 WHERE a=100; -- Nothing should have been changed to 100 + a | b | c +---+---+--- +(0 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +UPDATE v1 SET a=a+1 WHERE snoop(a) AND leakproof(a) AND a = 8; + QUERY PLAN +----------------------------------------------------------------------------------- + Update on public.t1 + Update on public.t1 t1_1 + Update on public.t11 t1_2 + Update on public.t12 t1_3 + Update on public.t111 t1_4 + -> Result + Output: (t1.a + 1), t1.tableoid, t1.ctid + -> Append + -> Index Scan using t1_a_idx on public.t1 t1_1 + Output: t1_1.a, t1_1.tableoid, t1_1.ctid + Index Cond: ((t1_1.a > 5) AND (t1_1.a = 8)) + Filter: ((SubPlan 1) AND snoop(t1_1.a) AND leakproof(t1_1.a)) + SubPlan 1 + -> Append + -> Seq Scan on public.t12 t12_1 + Filter: (t12_1.a = t1_1.a) + -> Seq Scan on public.t111 t12_2 + Filter: (t12_2.a = t1_1.a) + -> Index Scan using t11_a_idx on public.t11 t1_2 + Output: t1_2.a, t1_2.tableoid, t1_2.ctid + Index Cond: ((t1_2.a > 5) AND (t1_2.a = 8)) + Filter: ((SubPlan 1) AND snoop(t1_2.a) AND leakproof(t1_2.a)) + -> Index Scan using t12_a_idx on public.t12 t1_3 + Output: t1_3.a, t1_3.tableoid, t1_3.ctid + Index Cond: ((t1_3.a > 5) AND (t1_3.a = 8)) + Filter: ((SubPlan 1) AND snoop(t1_3.a) AND leakproof(t1_3.a)) + -> Index Scan using t111_a_idx on public.t111 t1_4 + Output: t1_4.a, t1_4.tableoid, t1_4.ctid + 
Index Cond: ((t1_4.a > 5) AND (t1_4.a = 8)) + Filter: ((SubPlan 1) AND snoop(t1_4.a) AND leakproof(t1_4.a)) +(30 rows) + +UPDATE v1 SET a=a+1 WHERE snoop(a) AND leakproof(a) AND a = 8; +NOTICE: snooped value: 8 +NOTICE: snooped value: 8 +NOTICE: snooped value: 8 +NOTICE: snooped value: 8 +SELECT * FROM v1 WHERE b=8; + a | b | c | d +---+---+------+------ + 9 | 8 | t1 | t11d + 9 | 8 | t11 | t11d + 9 | 8 | t12 | t11d + 9 | 8 | t111 | t11d +(4 rows) + +DELETE FROM v1 WHERE snoop(a) AND leakproof(a); -- should not delete everything, just where a>5 +NOTICE: snooped value: 6 +NOTICE: snooped value: 7 +NOTICE: snooped value: 9 +NOTICE: snooped value: 10 +NOTICE: snooped value: 9 +NOTICE: snooped value: 6 +NOTICE: snooped value: 7 +NOTICE: snooped value: 9 +NOTICE: snooped value: 10 +NOTICE: snooped value: 9 +NOTICE: snooped value: 6 +NOTICE: snooped value: 7 +NOTICE: snooped value: 9 +NOTICE: snooped value: 10 +NOTICE: snooped value: 9 +NOTICE: snooped value: 6 +NOTICE: snooped value: 7 +NOTICE: snooped value: 9 +NOTICE: snooped value: 10 +NOTICE: snooped value: 9 +TABLE t1; -- verify all a<=5 are intact + a | b | c +---+---+------ + 1 | 1 | t1 + 2 | 2 | t1 + 3 | 3 | t1 + 4 | 4 | t1 + 5 | 5 | t1 + 1 | 1 | t11 + 2 | 2 | t11 + 3 | 3 | t11 + 4 | 4 | t11 + 5 | 5 | t11 + 1 | 1 | t12 + 2 | 2 | t12 + 3 | 3 | t12 + 4 | 4 | t12 + 5 | 5 | t12 + 1 | 1 | t111 + 2 | 2 | t111 + 3 | 3 | t111 + 4 | 4 | t111 + 5 | 5 | t111 +(20 rows) + +DROP TABLE t1, t11, t12, t111 CASCADE; +NOTICE: drop cascades to view v1 +DROP FUNCTION snoop(anyelement); +DROP FUNCTION leakproof(anyelement); +CREATE TABLE tx1 (a integer); +CREATE TABLE tx2 (b integer); +CREATE TABLE tx3 (c integer); +CREATE VIEW vx1 AS SELECT a FROM tx1 WHERE EXISTS(SELECT 1 FROM tx2 JOIN tx3 ON b=c); +INSERT INTO vx1 values (1); +SELECT * FROM tx1; + a +--- + 1 +(1 row) + +SELECT * FROM vx1; + a +--- +(0 rows) + +DROP VIEW vx1; +DROP TABLE tx1; +DROP TABLE tx2; +DROP TABLE tx3; +CREATE TABLE tx1 (a integer); +CREATE TABLE tx2 (b integer); +CREATE TABLE tx3 (c integer); +CREATE VIEW vx1 AS SELECT a FROM tx1 WHERE EXISTS(SELECT 1 FROM tx2 JOIN tx3 ON b=c); +INSERT INTO vx1 VALUES (1); +INSERT INTO vx1 VALUES (1); +SELECT * FROM tx1; + a +--- + 1 + 1 +(2 rows) + +SELECT * FROM vx1; + a +--- +(0 rows) + +DROP VIEW vx1; +DROP TABLE tx1; +DROP TABLE tx2; +DROP TABLE tx3; +CREATE TABLE tx1 (a integer, b integer); +CREATE TABLE tx2 (b integer, c integer); +CREATE TABLE tx3 (c integer, d integer); +ALTER TABLE tx1 DROP COLUMN b; +ALTER TABLE tx2 DROP COLUMN c; +ALTER TABLE tx3 DROP COLUMN d; +CREATE VIEW vx1 AS SELECT a FROM tx1 WHERE EXISTS(SELECT 1 FROM tx2 JOIN tx3 ON b=c); +INSERT INTO vx1 VALUES (1); +INSERT INTO vx1 VALUES (1); +SELECT * FROM tx1; + a +--- + 1 + 1 +(2 rows) + +SELECT * FROM vx1; + a +--- +(0 rows) + +DROP VIEW vx1; +DROP TABLE tx1; +DROP TABLE tx2; +DROP TABLE tx3; +-- +-- Test handling of vars from correlated subqueries in quals from outer +-- security barrier views, per bug #13988 +-- +CREATE TABLE t1 (a int, b text, c int); +INSERT INTO t1 VALUES (1, 'one', 10); +CREATE TABLE t2 (cc int); +INSERT INTO t2 VALUES (10), (20); +CREATE VIEW v1 WITH (security_barrier = true) AS + SELECT * FROM t1 WHERE (a > 0) + WITH CHECK OPTION; +CREATE VIEW v2 WITH (security_barrier = true) AS + SELECT * FROM v1 WHERE EXISTS (SELECT 1 FROM t2 WHERE t2.cc = v1.c) + WITH CHECK OPTION; +INSERT INTO v2 VALUES (2, 'two', 20); -- ok +INSERT INTO v2 VALUES (-2, 'minus two', 20); -- not allowed +ERROR: new row violates check option for view "v1" +DETAIL: Failing row 
contains (-2, minus two, 20). +INSERT INTO v2 VALUES (3, 'three', 30); -- not allowed +ERROR: new row violates check option for view "v2" +DETAIL: Failing row contains (3, three, 30). +UPDATE v2 SET b = 'ONE' WHERE a = 1; -- ok +UPDATE v2 SET a = -1 WHERE a = 1; -- not allowed +ERROR: new row violates check option for view "v1" +DETAIL: Failing row contains (-1, ONE, 10). +UPDATE v2 SET c = 30 WHERE a = 1; -- not allowed +ERROR: new row violates check option for view "v2" +DETAIL: Failing row contains (1, ONE, 30). +DELETE FROM v2 WHERE a = 2; -- ok +SELECT * FROM v2; + a | b | c +---+-----+---- + 1 | ONE | 10 +(1 row) + +DROP VIEW v2; +DROP VIEW v1; +DROP TABLE t2; +DROP TABLE t1; +-- +-- Test sub-select in nested security barrier views, per bug #17972 +-- +CREATE TABLE t1 (a int); +CREATE VIEW v1 WITH (security_barrier = true) AS + SELECT * FROM t1; +CREATE RULE v1_upd_rule AS ON UPDATE TO v1 DO INSTEAD + UPDATE t1 SET a = NEW.a WHERE a = OLD.a; +CREATE VIEW v2 WITH (security_barrier = true) AS + SELECT * FROM v1 WHERE EXISTS (SELECT 1); +EXPLAIN (COSTS OFF) UPDATE v2 SET a = 1; + QUERY PLAN +--------------------------------------------------- + Update on t1 + InitPlan 1 (returns $0) + -> Result + -> Merge Join + Merge Cond: (t1.a = v1.a) + -> Sort + Sort Key: t1.a + -> Seq Scan on t1 + -> Sort + Sort Key: v1.a + -> Subquery Scan on v1 + -> Result + One-Time Filter: $0 + -> Seq Scan on t1 t1_1 +(14 rows) + +DROP VIEW v2; +DROP VIEW v1; +DROP TABLE t1; +-- +-- Test CREATE OR REPLACE VIEW turning a non-updatable view into an +-- auto-updatable view and adding check options in a single step +-- +CREATE TABLE t1 (a int, b text); +CREATE VIEW v1 AS SELECT null::int AS a; +CREATE OR REPLACE VIEW v1 AS SELECT * FROM t1 WHERE a > 0 WITH CHECK OPTION; +INSERT INTO v1 VALUES (1, 'ok'); -- ok +INSERT INTO v1 VALUES (-1, 'invalid'); -- should fail +ERROR: new row violates check option for view "v1" +DETAIL: Failing row contains (-1, invalid). 
+DROP VIEW v1; +DROP TABLE t1; +-- check that an auto-updatable view on a partitioned table works correctly +create table uv_pt (a int, b int, v varchar) partition by range (a, b); +create table uv_pt1 (b int not null, v varchar, a int not null) partition by range (b); +create table uv_pt11 (like uv_pt1); +alter table uv_pt11 drop a; +alter table uv_pt11 add a int; +alter table uv_pt11 drop a; +alter table uv_pt11 add a int not null; +alter table uv_pt1 attach partition uv_pt11 for values from (2) to (5); +alter table uv_pt attach partition uv_pt1 for values from (1, 2) to (1, 10); +create view uv_ptv as select * from uv_pt; +select events & 4 != 0 AS upd, + events & 8 != 0 AS ins, + events & 16 != 0 AS del + from pg_catalog.pg_relation_is_updatable('uv_pt'::regclass, false) t(events); + upd | ins | del +-----+-----+----- + t | t | t +(1 row) + +select pg_catalog.pg_column_is_updatable('uv_pt'::regclass, 1::smallint, false); + pg_column_is_updatable +------------------------ + t +(1 row) + +select pg_catalog.pg_column_is_updatable('uv_pt'::regclass, 2::smallint, false); + pg_column_is_updatable +------------------------ + t +(1 row) + +select table_name, is_updatable, is_insertable_into + from information_schema.views where table_name = 'uv_ptv'; + table_name | is_updatable | is_insertable_into +------------+--------------+-------------------- + uv_ptv | YES | YES +(1 row) + +select table_name, column_name, is_updatable + from information_schema.columns where table_name = 'uv_ptv' order by column_name; + table_name | column_name | is_updatable +------------+-------------+-------------- + uv_ptv | a | YES + uv_ptv | b | YES + uv_ptv | v | YES +(3 rows) + +insert into uv_ptv values (1, 2); +select tableoid::regclass, * from uv_pt; + tableoid | a | b | v +----------+---+---+--- + uv_pt11 | 1 | 2 | +(1 row) + +create view uv_ptv_wco as select * from uv_pt where a = 0 with check option; +insert into uv_ptv_wco values (1, 2); +ERROR: new row violates check option for view "uv_ptv_wco" +DETAIL: Failing row contains (1, 2, null). +drop view uv_ptv, uv_ptv_wco; +drop table uv_pt, uv_pt1, uv_pt11; +-- check that wholerow vars appearing in WITH CHECK OPTION constraint expressions +-- work fine with partitioned tables +create table wcowrtest (a int) partition by list (a); +create table wcowrtest1 partition of wcowrtest for values in (1); +create view wcowrtest_v as select * from wcowrtest where wcowrtest = '(2)'::wcowrtest with check option; +insert into wcowrtest_v values (1); +ERROR: new row violates check option for view "wcowrtest_v" +DETAIL: Failing row contains (1). +alter table wcowrtest add b text; +create table wcowrtest2 (b text, c int, a int); +alter table wcowrtest2 drop c; +alter table wcowrtest attach partition wcowrtest2 for values in (2); +create table sometable (a int, b text); +insert into sometable values (1, 'a'), (2, 'b'); +create view wcowrtest_v2 as + select * + from wcowrtest r + where r in (select s from sometable s where r.a = s.a) +with check option; +-- WITH CHECK qual will be processed with wcowrtest2's +-- rowtype after tuple-routing +insert into wcowrtest_v2 values (2, 'no such row in sometable'); +ERROR: new row violates check option for view "wcowrtest_v2" +DETAIL: Failing row contains (2, no such row in sometable). +drop view wcowrtest_v, wcowrtest_v2; +drop table wcowrtest, sometable; +-- Check INSERT .. ON CONFLICT DO UPDATE works correctly when the view's +-- columns are named and ordered differently than the underlying table's. 
+create table uv_iocu_tab (a text unique, b float); +insert into uv_iocu_tab values ('xyxyxy', 0); +create view uv_iocu_view as + select b, b+1 as c, a, '2.0'::text as two from uv_iocu_tab; +insert into uv_iocu_view (a, b) values ('xyxyxy', 1) + on conflict (a) do update set b = uv_iocu_view.b; +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 0 +(1 row) + +insert into uv_iocu_view (a, b) values ('xyxyxy', 1) + on conflict (a) do update set b = excluded.b; +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 1 +(1 row) + +-- OK to access view columns that are not present in underlying base +-- relation in the ON CONFLICT portion of the query +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = cast(excluded.two as float); +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 2 +(1 row) + +explain (costs off) +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = excluded.b where excluded.c > 0; + QUERY PLAN +----------------------------------------------------------------------------------- + Insert on uv_iocu_tab + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: uv_iocu_tab_a_key + Conflict Filter: ((excluded.b + '1'::double precision) > '0'::double precision) + -> Result +(5 rows) + +insert into uv_iocu_view (a, b) values ('xyxyxy', 3) + on conflict (a) do update set b = excluded.b where excluded.c > 0; +select * from uv_iocu_tab; + a | b +--------+--- + xyxyxy | 3 +(1 row) + +drop view uv_iocu_view; +drop table uv_iocu_tab; +-- Test whole-row references to the view +create table uv_iocu_tab (a int unique, b text); +create view uv_iocu_view as + select b as bb, a as aa, uv_iocu_tab::text as cc from uv_iocu_tab; +insert into uv_iocu_view (aa,bb) values (1,'x'); +explain (costs off) +insert into uv_iocu_view (aa,bb) values (1,'y') + on conflict (aa) do update set bb = 'Rejected: '||excluded.* + where excluded.aa > 0 + and excluded.bb != '' + and excluded.cc is not null; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Insert on uv_iocu_tab + Conflict Resolution: UPDATE + Conflict Arbiter Indexes: uv_iocu_tab_a_key + Conflict Filter: ((excluded.a > 0) AND (excluded.b <> ''::text) AND ((excluded.*)::text IS NOT NULL)) + -> Result +(5 rows) + +insert into uv_iocu_view (aa,bb) values (1,'y') + on conflict (aa) do update set bb = 'Rejected: '||excluded.* + where excluded.aa > 0 + and excluded.bb != '' + and excluded.cc is not null; +select * from uv_iocu_view; + bb | aa | cc +-------------------------+----+--------------------------------- + Rejected: (y,1,"(1,y)") | 1 | (1,"Rejected: (y,1,""(1,y)"")") +(1 row) + +-- Test omitting a column of the base relation +delete from uv_iocu_view; +insert into uv_iocu_view (aa,bb) values (1,'x'); +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + bb | aa | cc +-----------------------+----+------------------------------- + Rejected: (,1,"(1,)") | 1 | (1,"Rejected: (,1,""(1,)"")") +(1 row) + +alter table uv_iocu_tab alter column b set default 'table default'; +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + bb | aa | cc +-------------------------------------------------------+----+--------------------------------------------------------------------- + Rejected: ("table default",1,"(1,""table default"")") | 1 | 
(1,"Rejected: (""table default"",1,""(1,""""table default"""")"")") +(1 row) + +alter view uv_iocu_view alter column bb set default 'view default'; +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set bb = 'Rejected: '||excluded.*; +select * from uv_iocu_view; + bb | aa | cc +-----------------------------------------------------+----+------------------------------------------------------------------- + Rejected: ("view default",1,"(1,""view default"")") | 1 | (1,"Rejected: (""view default"",1,""(1,""""view default"""")"")") +(1 row) + +-- Should fail to update non-updatable columns +insert into uv_iocu_view (aa) values (1) + on conflict (aa) do update set cc = 'XXX'; +ERROR: cannot insert into column "cc" of view "uv_iocu_view" +DETAIL: View columns that are not columns of their base relation are not updatable. +drop view uv_iocu_view; +drop table uv_iocu_tab; +-- ON CONFLICT DO UPDATE permissions checks +create user regress_view_user1; +create user regress_view_user2; +set session authorization regress_view_user1; +create table base_tbl(a int unique, b text, c float); +insert into base_tbl values (1,'xxx',1.0); +create view rw_view1 as select b as bb, c as cc, a as aa from base_tbl; +grant select (aa,bb) on rw_view1 to regress_view_user2; +grant insert on rw_view1 to regress_view_user2; +grant update (bb) on rw_view1 to regress_view_user2; +set session authorization regress_view_user2; +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = excluded.cc; -- Not allowed +ERROR: permission denied for view rw_view1 +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = rw_view1.cc; -- Not allowed +ERROR: permission denied for view rw_view1 +insert into rw_view1 values ('yyy',2.0,1) + on conflict (aa) do update set bb = excluded.bb; -- OK +insert into rw_view1 values ('zzz',2.0,1) + on conflict (aa) do update set bb = rw_view1.bb||'xxx'; -- OK +insert into rw_view1 values ('zzz',2.0,1) + on conflict (aa) do update set cc = 3.0; -- Not allowed +ERROR: permission denied for view rw_view1 +reset session authorization; +select * from base_tbl; + a | b | c +---+--------+--- + 1 | yyyxxx | 1 +(1 row) + +set session authorization regress_view_user1; +grant select (a,b) on base_tbl to regress_view_user2; +grant insert (a,b) on base_tbl to regress_view_user2; +grant update (a,b) on base_tbl to regress_view_user2; +set session authorization regress_view_user2; +create view rw_view2 as select b as bb, c as cc, a as aa from base_tbl; +insert into rw_view2 (aa,bb) values (1,'xxx') + on conflict (aa) do update set bb = excluded.bb; -- Not allowed +ERROR: permission denied for table base_tbl +create view rw_view3 as select b as bb, a as aa from base_tbl; +insert into rw_view3 (aa,bb) values (1,'xxx') + on conflict (aa) do update set bb = excluded.bb; -- OK +reset session authorization; +select * from base_tbl; + a | b | c +---+-----+--- + 1 | xxx | 1 +(1 row) + +set session authorization regress_view_user2; +create view rw_view4 as select aa, bb, cc FROM rw_view1; +insert into rw_view4 (aa,bb) values (1,'yyy') + on conflict (aa) do update set bb = excluded.bb; -- Not allowed +ERROR: permission denied for view rw_view1 +create view rw_view5 as select aa, bb FROM rw_view1; +insert into rw_view5 (aa,bb) values (1,'yyy') + on conflict (aa) do update set bb = excluded.bb; -- OK +reset session authorization; +select * from base_tbl; + a | b | c +---+-----+--- + 1 | yyy | 1 +(1 row) + +drop view rw_view5; +drop view rw_view4; +drop view 
rw_view3; +drop view rw_view2; +drop view rw_view1; +drop table base_tbl; +drop user regress_view_user1; +drop user regress_view_user2; +-- Test single- and multi-row inserts with table and view defaults. +-- Table defaults should be used, unless overridden by view defaults. +create table base_tab_def (a int, b text default 'Table default', + c text default 'Table default', d text, e text); +create view base_tab_def_view as select * from base_tab_def; +alter view base_tab_def_view alter b set default 'View default'; +alter view base_tab_def_view alter d set default 'View default'; +insert into base_tab_def values (1); +insert into base_tab_def values (2), (3); +insert into base_tab_def values (4, default, default, default, default); +insert into base_tab_def values (5, default, default, default, default), + (6, default, default, default, default); +insert into base_tab_def_view values (11); +insert into base_tab_def_view values (12), (13); +insert into base_tab_def_view values (14, default, default, default, default); +insert into base_tab_def_view values (15, default, default, default, default), + (16, default, default, default, default); +insert into base_tab_def_view values (17), (default); +select * from base_tab_def order by a; + a | b | c | d | e +----+---------------+---------------+--------------+--- + 1 | Table default | Table default | | + 2 | Table default | Table default | | + 3 | Table default | Table default | | + 4 | Table default | Table default | | + 5 | Table default | Table default | | + 6 | Table default | Table default | | + 11 | View default | Table default | View default | + 12 | View default | Table default | View default | + 13 | View default | Table default | View default | + 14 | View default | Table default | View default | + 15 | View default | Table default | View default | + 16 | View default | Table default | View default | + 17 | View default | Table default | View default | + | View default | Table default | View default | +(14 rows) + +-- Adding an INSTEAD OF trigger should cause NULLs to be inserted instead of +-- table defaults, where there are no view defaults. 
+create function base_tab_def_view_instrig_func() returns trigger +as +$$ +begin + insert into base_tab_def values (new.a, new.b, new.c, new.d, new.e); + return new; +end; +$$ +language plpgsql; +create trigger base_tab_def_view_instrig instead of insert on base_tab_def_view + for each row execute function base_tab_def_view_instrig_func(); +truncate base_tab_def; +insert into base_tab_def values (1); +insert into base_tab_def values (2), (3); +insert into base_tab_def values (4, default, default, default, default); +insert into base_tab_def values (5, default, default, default, default), + (6, default, default, default, default); +insert into base_tab_def_view values (11); +insert into base_tab_def_view values (12), (13); +insert into base_tab_def_view values (14, default, default, default, default); +insert into base_tab_def_view values (15, default, default, default, default), + (16, default, default, default, default); +insert into base_tab_def_view values (17), (default); +select * from base_tab_def order by a; + a | b | c | d | e +----+---------------+---------------+--------------+--- + 1 | Table default | Table default | | + 2 | Table default | Table default | | + 3 | Table default | Table default | | + 4 | Table default | Table default | | + 5 | Table default | Table default | | + 6 | Table default | Table default | | + 11 | View default | | View default | + 12 | View default | | View default | + 13 | View default | | View default | + 14 | View default | | View default | + 15 | View default | | View default | + 16 | View default | | View default | + 17 | View default | | View default | + | View default | | View default | +(14 rows) + +-- Using an unconditional DO INSTEAD rule should also cause NULLs to be +-- inserted where there are no view defaults. +drop trigger base_tab_def_view_instrig on base_tab_def_view; +drop function base_tab_def_view_instrig_func; +create rule base_tab_def_view_ins_rule as on insert to base_tab_def_view + do instead insert into base_tab_def values (new.a, new.b, new.c, new.d, new.e); +truncate base_tab_def; +insert into base_tab_def values (1); +insert into base_tab_def values (2), (3); +insert into base_tab_def values (4, default, default, default, default); +insert into base_tab_def values (5, default, default, default, default), + (6, default, default, default, default); +insert into base_tab_def_view values (11); +insert into base_tab_def_view values (12), (13); +insert into base_tab_def_view values (14, default, default, default, default); +insert into base_tab_def_view values (15, default, default, default, default), + (16, default, default, default, default); +insert into base_tab_def_view values (17), (default); +select * from base_tab_def order by a; + a | b | c | d | e +----+---------------+---------------+--------------+--- + 1 | Table default | Table default | | + 2 | Table default | Table default | | + 3 | Table default | Table default | | + 4 | Table default | Table default | | + 5 | Table default | Table default | | + 6 | Table default | Table default | | + 11 | View default | | View default | + 12 | View default | | View default | + 13 | View default | | View default | + 14 | View default | | View default | + 15 | View default | | View default | + 16 | View default | | View default | + 17 | View default | | View default | + | View default | | View default | +(14 rows) + +-- A DO ALSO rule should cause each row to be inserted twice. 
The first +-- insert should behave the same as an auto-updatable view (using table +-- defaults, unless overridden by view defaults). The second insert should +-- behave the same as a rule-updatable view (inserting NULLs where there are +-- no view defaults). +drop rule base_tab_def_view_ins_rule on base_tab_def_view; +create rule base_tab_def_view_ins_rule as on insert to base_tab_def_view + do also insert into base_tab_def values (new.a, new.b, new.c, new.d, new.e); +truncate base_tab_def; +insert into base_tab_def values (1); +insert into base_tab_def values (2), (3); +insert into base_tab_def values (4, default, default, default, default); +insert into base_tab_def values (5, default, default, default, default), + (6, default, default, default, default); +insert into base_tab_def_view values (11); +insert into base_tab_def_view values (12), (13); +insert into base_tab_def_view values (14, default, default, default, default); +insert into base_tab_def_view values (15, default, default, default, default), + (16, default, default, default, default); +insert into base_tab_def_view values (17), (default); +select * from base_tab_def order by a, c NULLS LAST; + a | b | c | d | e +----+---------------+---------------+--------------+--- + 1 | Table default | Table default | | + 2 | Table default | Table default | | + 3 | Table default | Table default | | + 4 | Table default | Table default | | + 5 | Table default | Table default | | + 6 | Table default | Table default | | + 11 | View default | Table default | View default | + 11 | View default | | View default | + 12 | View default | Table default | View default | + 12 | View default | | View default | + 13 | View default | Table default | View default | + 13 | View default | | View default | + 14 | View default | Table default | View default | + 14 | View default | | View default | + 15 | View default | Table default | View default | + 15 | View default | | View default | + 16 | View default | Table default | View default | + 16 | View default | | View default | + 17 | View default | Table default | View default | + 17 | View default | | View default | + | View default | Table default | View default | + | View default | | View default | +(22 rows) + +-- Test a DO ALSO INSERT ... 
SELECT rule +drop rule base_tab_def_view_ins_rule on base_tab_def_view; +create rule base_tab_def_view_ins_rule as on insert to base_tab_def_view + do also insert into base_tab_def (a, b, e) select new.a, new.b, 'xxx'; +truncate base_tab_def; +insert into base_tab_def_view values (1, default, default, default, default); +insert into base_tab_def_view values (2, default, default, default, default), + (3, default, default, default, default); +select * from base_tab_def order by a, e nulls first; + a | b | c | d | e +---+--------------+---------------+--------------+----- + 1 | View default | Table default | View default | + 1 | View default | Table default | | xxx + 2 | View default | Table default | View default | + 2 | View default | Table default | | xxx + 3 | View default | Table default | View default | + 3 | View default | Table default | | xxx +(6 rows) + +drop view base_tab_def_view; +drop table base_tab_def; +-- Test defaults with array assignments +create table base_tab (a serial, b int[], c text, d text default 'Table default'); +create view base_tab_view as select c, a, b from base_tab; +alter view base_tab_view alter column c set default 'View default'; +insert into base_tab_view (b[1], b[2], c, b[5], b[4], a, b[3]) +values (1, 2, default, 5, 4, default, 3), (10, 11, 'C value', 14, 13, 100, 12); +select * from base_tab order by a; + a | b | c | d +-----+------------------+--------------+--------------- + 1 | {1,2,3,4,5} | View default | Table default + 100 | {10,11,12,13,14} | C value | Table default +(2 rows) + +drop view base_tab_view; +drop table base_tab; diff --git a/src/test/regress/expected/update.out b/src/test/regress/expected/update.out new file mode 100644 index 0000000..c809f88 --- /dev/null +++ b/src/test/regress/expected/update.out @@ -0,0 +1,1028 @@ +-- +-- UPDATE syntax tests +-- +CREATE TABLE update_test ( + a INT DEFAULT 10, + b INT, + c TEXT +); +CREATE TABLE upsert_test ( + a INT PRIMARY KEY, + b TEXT +); +INSERT INTO update_test VALUES (5, 10, 'foo'); +INSERT INTO update_test(b, a) VALUES (15, 10); +SELECT * FROM update_test; + a | b | c +----+----+----- + 5 | 10 | foo + 10 | 15 | +(2 rows) + +UPDATE update_test SET a = DEFAULT, b = DEFAULT; +SELECT * FROM update_test; + a | b | c +----+---+----- + 10 | | foo + 10 | | +(2 rows) + +-- aliases for the UPDATE target table +UPDATE update_test AS t SET b = 10 WHERE t.a = 10; +SELECT * FROM update_test; + a | b | c +----+----+----- + 10 | 10 | foo + 10 | 10 | +(2 rows) + +UPDATE update_test t SET b = t.b + 10 WHERE t.a = 10; +SELECT * FROM update_test; + a | b | c +----+----+----- + 10 | 20 | foo + 10 | 20 | +(2 rows) + +-- +-- Test VALUES in FROM +-- +UPDATE update_test SET a=v.i FROM (VALUES(100, 20)) AS v(i, j) + WHERE update_test.b = v.j; +SELECT * FROM update_test; + a | b | c +-----+----+----- + 100 | 20 | foo + 100 | 20 | +(2 rows) + +-- fail, wrong data type: +UPDATE update_test SET a = v.* FROM (VALUES(100, 20)) AS v(i, j) + WHERE update_test.b = v.j; +ERROR: column "a" is of type integer but expression is of type record +LINE 1: UPDATE update_test SET a = v.* FROM (VALUES(100, 20)) AS v(i... + ^ +HINT: You will need to rewrite or cast the expression. 
+-- +-- Test multiple-set-clause syntax +-- +INSERT INTO update_test SELECT a,b+1,c FROM update_test; +SELECT * FROM update_test; + a | b | c +-----+----+----- + 100 | 20 | foo + 100 | 20 | + 100 | 21 | foo + 100 | 21 | +(4 rows) + +UPDATE update_test SET (c,b,a) = ('bugle', b+11, DEFAULT) WHERE c = 'foo'; +SELECT * FROM update_test; + a | b | c +-----+----+------- + 100 | 20 | + 100 | 21 | + 10 | 31 | bugle + 10 | 32 | bugle +(4 rows) + +UPDATE update_test SET (c,b) = ('car', a+b), a = a + 1 WHERE a = 10; +SELECT * FROM update_test; + a | b | c +-----+----+----- + 100 | 20 | + 100 | 21 | + 11 | 41 | car + 11 | 42 | car +(4 rows) + +-- fail, multi assignment to same column: +UPDATE update_test SET (c,b) = ('car', a+b), b = a + 1 WHERE a = 10; +ERROR: multiple assignments to same column "b" +-- uncorrelated sub-select: +UPDATE update_test + SET (b,a) = (select a,b from update_test where b = 41 and c = 'car') + WHERE a = 100 AND b = 20; +SELECT * FROM update_test; + a | b | c +-----+----+----- + 100 | 21 | + 11 | 41 | car + 11 | 42 | car + 41 | 11 | +(4 rows) + +-- correlated sub-select: +UPDATE update_test o + SET (b,a) = (select a+1,b from update_test i + where i.a=o.a and i.b=o.b and i.c is not distinct from o.c); +SELECT * FROM update_test; + a | b | c +----+-----+----- + 21 | 101 | + 41 | 12 | car + 42 | 12 | car + 11 | 42 | +(4 rows) + +-- fail, multiple rows supplied: +UPDATE update_test SET (b,a) = (select a+1,b from update_test); +ERROR: more than one row returned by a subquery used as an expression +-- set to null if no rows supplied: +UPDATE update_test SET (b,a) = (select a+1,b from update_test where a = 1000) + WHERE a = 11; +SELECT * FROM update_test; + a | b | c +----+-----+----- + 21 | 101 | + 41 | 12 | car + 42 | 12 | car + | | +(4 rows) + +-- *-expansion should work in this context: +UPDATE update_test SET (a,b) = ROW(v.*) FROM (VALUES(21, 100)) AS v(i, j) + WHERE update_test.a = v.i; +-- you might expect this to work, but syntactically it's not a RowExpr: +UPDATE update_test SET (a,b) = (v.*) FROM (VALUES(21, 101)) AS v(i, j) + WHERE update_test.a = v.i; +ERROR: source for a multiple-column UPDATE item must be a sub-SELECT or ROW() expression +LINE 1: UPDATE update_test SET (a,b) = (v.*) FROM (VALUES(21, 101)) ... + ^ +-- if an alias for the target table is specified, don't allow references +-- to the original table name +UPDATE update_test AS t SET b = update_test.b + 10 WHERE t.a = 10; +ERROR: invalid reference to FROM-clause entry for table "update_test" +LINE 1: UPDATE update_test AS t SET b = update_test.b + 10 WHERE t.a... + ^ +HINT: Perhaps you meant to reference the table alias "t". +-- Make sure that we can update to a TOASTed value. +UPDATE update_test SET c = repeat('x', 10000) WHERE c = 'car'; +SELECT a, b, char_length(c) FROM update_test; + a | b | char_length +----+-----+------------- + | | + 21 | 100 | + 41 | 12 | 10000 + 42 | 12 | 10000 +(4 rows) + +-- Check multi-assignment with a Result node to handle a one-time filter. 
+EXPLAIN (VERBOSE, COSTS OFF) +UPDATE update_test t + SET (a, b) = (SELECT b, a FROM update_test s WHERE s.a = t.a) + WHERE CURRENT_USER = SESSION_USER; + QUERY PLAN +------------------------------------------------------------- + Update on public.update_test t + -> Result + Output: $1, $2, (SubPlan 1 (returns $1,$2)), t.ctid + One-Time Filter: (CURRENT_USER = SESSION_USER) + -> Seq Scan on public.update_test t + Output: t.a, t.ctid + SubPlan 1 (returns $1,$2) + -> Seq Scan on public.update_test s + Output: s.b, s.a + Filter: (s.a = t.a) +(10 rows) + +UPDATE update_test t + SET (a, b) = (SELECT b, a FROM update_test s WHERE s.a = t.a) + WHERE CURRENT_USER = SESSION_USER; +SELECT a, b, char_length(c) FROM update_test; + a | b | char_length +-----+----+------------- + | | + 100 | 21 | + 12 | 41 | 10000 + 12 | 42 | 10000 +(4 rows) + +-- Test ON CONFLICT DO UPDATE +INSERT INTO upsert_test VALUES(1, 'Boo'), (3, 'Zoo'); +-- uncorrelated sub-select: +WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test + VALUES (1, 'Bar') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *; + a | b +---+----- + 1 | Foo +(1 row) + +-- correlated sub-select: +INSERT INTO upsert_test VALUES (1, 'Baz'), (3, 'Zaz') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT b || ', Correlated', a from upsert_test i WHERE i.a = upsert_test.a) + RETURNING *; + a | b +---+----------------- + 1 | Foo, Correlated + 3 | Zoo, Correlated +(2 rows) + +-- correlated sub-select (EXCLUDED.* alias): +INSERT INTO upsert_test VALUES (1, 'Bat'), (3, 'Zot') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a) + RETURNING *; + a | b +---+--------------------------- + 1 | Foo, Correlated, Excluded + 3 | Zoo, Correlated, Excluded +(2 rows) + +-- ON CONFLICT using system attributes in RETURNING, testing both the +-- inserting and updating paths. See bug report at: +-- https://www.postgresql.org/message-id/73436355-6432-49B1-92ED-1FE4F7E7E100%40finefun.com.au +INSERT INTO upsert_test VALUES (2, 'Beeble') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a) + RETURNING tableoid::regclass, xmin = pg_current_xact_id()::xid AS xmin_correct, xmax = 0 AS xmax_correct; + tableoid | xmin_correct | xmax_correct +-------------+--------------+-------------- + upsert_test | t | t +(1 row) + +-- currently xmax is set after a conflict - that's probably not good, +-- but it seems worthwhile to have to be explicit if that changes. 
+INSERT INTO upsert_test VALUES (2, 'Brox') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT b || ', Excluded', a from upsert_test i WHERE i.a = excluded.a) + RETURNING tableoid::regclass, xmin = pg_current_xact_id()::xid AS xmin_correct, xmax = pg_current_xact_id()::xid AS xmax_correct; + tableoid | xmin_correct | xmax_correct +-------------+--------------+-------------- + upsert_test | t | t +(1 row) + +DROP TABLE update_test; +DROP TABLE upsert_test; +-- Test ON CONFLICT DO UPDATE with partitioned table and non-identical children +CREATE TABLE upsert_test ( + a INT PRIMARY KEY, + b TEXT +) PARTITION BY LIST (a); +CREATE TABLE upsert_test_1 PARTITION OF upsert_test FOR VALUES IN (1); +CREATE TABLE upsert_test_2 (b TEXT, a INT PRIMARY KEY); +ALTER TABLE upsert_test ATTACH PARTITION upsert_test_2 FOR VALUES IN (2); +INSERT INTO upsert_test VALUES(1, 'Boo'), (2, 'Zoo'); +-- uncorrelated sub-select: +WITH aaa AS (SELECT 1 AS a, 'Foo' AS b) INSERT INTO upsert_test + VALUES (1, 'Bar') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT b, a FROM aaa) RETURNING *; + a | b +---+----- + 1 | Foo +(1 row) + +-- correlated sub-select: +WITH aaa AS (SELECT 1 AS ctea, ' Foo' AS cteb) INSERT INTO upsert_test + VALUES (1, 'Bar'), (2, 'Baz') ON CONFLICT(a) + DO UPDATE SET (b, a) = (SELECT upsert_test.b||cteb, upsert_test.a FROM aaa) RETURNING *; + a | b +---+--------- + 1 | Foo Foo + 2 | Zoo Foo +(2 rows) + +DROP TABLE upsert_test; +--------------------------- +-- UPDATE with row movement +--------------------------- +-- When a partitioned table receives an UPDATE to the partitioned key and the +-- new values no longer meet the partition's bound, the row must be moved to +-- the correct partition for the new partition key (if one exists). We must +-- also ensure that updatable views on partitioned tables properly enforce any +-- WITH CHECK OPTION that is defined. The situation with triggers in this case +-- also requires thorough testing as partition key updates causing row +-- movement convert UPDATEs into DELETE+INSERT. +CREATE TABLE range_parted ( + a text, + b bigint, + c numeric, + d int, + e varchar +) PARTITION BY RANGE (a, b); +-- Create partitions intentionally in descending bound order, so as to test +-- that update-row-movement works with the leaf partitions not in bound order. +CREATE TABLE part_b_20_b_30 (e varchar, c numeric, a text, b bigint, d int); +ALTER TABLE range_parted ATTACH PARTITION part_b_20_b_30 FOR VALUES FROM ('b', 20) TO ('b', 30); +CREATE TABLE part_b_10_b_20 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY RANGE (c); +CREATE TABLE part_b_1_b_10 PARTITION OF range_parted FOR VALUES FROM ('b', 1) TO ('b', 10); +ALTER TABLE range_parted ATTACH PARTITION part_b_10_b_20 FOR VALUES FROM ('b', 10) TO ('b', 20); +CREATE TABLE part_a_10_a_20 PARTITION OF range_parted FOR VALUES FROM ('a', 10) TO ('a', 20); +CREATE TABLE part_a_1_a_10 PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('a', 10); +-- Check that partition-key UPDATE works sanely on a partitioned table that +-- does not have any child partitions. +UPDATE part_b_10_b_20 set b = b - 6; +-- Create some more partitions following the above pattern of descending bound +-- order, but let's make the situation a bit more complex by having the +-- attribute numbers of the columns vary from their parent partition. 
+CREATE TABLE part_c_100_200 (e varchar, c numeric, a text, b bigint, d int) PARTITION BY range (abs(d)); +ALTER TABLE part_c_100_200 DROP COLUMN e, DROP COLUMN c, DROP COLUMN a; +ALTER TABLE part_c_100_200 ADD COLUMN c numeric, ADD COLUMN e varchar, ADD COLUMN a text; +ALTER TABLE part_c_100_200 DROP COLUMN b; +ALTER TABLE part_c_100_200 ADD COLUMN b bigint; +CREATE TABLE part_d_1_15 PARTITION OF part_c_100_200 FOR VALUES FROM (1) TO (15); +CREATE TABLE part_d_15_20 PARTITION OF part_c_100_200 FOR VALUES FROM (15) TO (20); +ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_100_200 FOR VALUES FROM (100) TO (200); +CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text); +ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_1_100 FOR VALUES FROM (1) TO (100); +\set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)' +\set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6' +:init_range_parted; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | +(6 rows) + +-- The order of subplans should be in bound order +EXPLAIN (costs off) UPDATE range_parted set c = c - 50 WHERE c > 97; + QUERY PLAN +------------------------------------------------------- + Update on range_parted + Update on part_a_1_a_10 range_parted_1 + Update on part_a_10_a_20 range_parted_2 + Update on part_b_1_b_10 range_parted_3 + Update on part_c_1_100 range_parted_4 + Update on part_d_1_15 range_parted_5 + Update on part_d_15_20 range_parted_6 + Update on part_b_20_b_30 range_parted_7 + -> Append + -> Seq Scan on part_a_1_a_10 range_parted_1 + Filter: (c > '97'::numeric) + -> Seq Scan on part_a_10_a_20 range_parted_2 + Filter: (c > '97'::numeric) + -> Seq Scan on part_b_1_b_10 range_parted_3 + Filter: (c > '97'::numeric) + -> Seq Scan on part_c_1_100 range_parted_4 + Filter: (c > '97'::numeric) + -> Seq Scan on part_d_1_15 range_parted_5 + Filter: (c > '97'::numeric) + -> Seq Scan on part_d_15_20 range_parted_6 + Filter: (c > '97'::numeric) + -> Seq Scan on part_b_20_b_30 range_parted_7 + Filter: (c > '97'::numeric) +(23 rows) + +-- fail, row movement happens only within the partition subtree. +UPDATE part_c_100_200 set c = c - 20, d = c WHERE c = 105; +ERROR: new row for relation "part_c_100_200" violates partition constraint +DETAIL: Failing row contains (105, 85, null, b, 15). +-- fail, no partition key update, so no attempt to move tuple, +-- but "a = 'a'" violates partition constraint enforced by root partition) +UPDATE part_b_10_b_20 set a = 'a'; +ERROR: new row for relation "part_b_10_b_20" violates partition constraint +DETAIL: Failing row contains (null, 96, a, 12, 1). 
+-- ok, partition key update, no constraint violation +UPDATE range_parted set d = d - 10 WHERE d > 10; +-- ok, no partition key update, no constraint violation +UPDATE range_parted set e = d; +-- No row found +UPDATE part_c_1_100 set c = c + 20 WHERE c = 98; +-- ok, row movement +UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a; + c | b | a +-----+----+--- + 116 | 12 | b + 117 | 13 | b + 125 | 15 | b + 125 | 17 | b +(4 rows) + +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+---+--- + part_a_10_a_20 | a | 10 | 200 | 1 | 1 + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_d_1_15 | b | 12 | 116 | 1 | 1 + part_d_1_15 | b | 13 | 117 | 2 | 2 + part_d_1_15 | b | 15 | 125 | 6 | 6 + part_d_1_15 | b | 17 | 125 | 9 | 9 +(6 rows) + +-- fail, row movement happens only within the partition subtree. +UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *; +ERROR: new row for relation "part_b_10_b_20" violates partition constraint +DETAIL: Failing row contains (2, 117, b, 7, 2). +-- ok, row movement, with subset of rows moved into different partition. +UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c; + a | ?column? +---+---------- + a | 204 + b | 124 + b | 134 + b | 136 +(4 rows) + +:show_data; + partname | a | b | c | d | e +---------------+---+----+-----+---+--- + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_a_1_a_10 | a | 4 | 200 | 1 | 1 + part_b_1_b_10 | b | 7 | 117 | 2 | 2 + part_b_1_b_10 | b | 9 | 125 | 6 | 6 + part_d_1_15 | b | 11 | 125 | 9 | 9 + part_d_1_15 | b | 12 | 116 | 1 | 1 +(6 rows) + +-- Common table needed for multiple test scenarios. +CREATE TABLE mintab(c1 int); +INSERT into mintab VALUES (120); +-- update partition key using updatable view. +CREATE VIEW upview AS SELECT * FROM range_parted WHERE (select c > c1 FROM mintab) WITH CHECK OPTION; +-- ok +UPDATE upview set c = 199 WHERE b = 4; +-- fail, check option violation +UPDATE upview set c = 120 WHERE b = 4; +ERROR: new row violates check option for view "upview" +DETAIL: Failing row contains (a, 4, 120, 1, 1). +-- fail, row movement with check option violation +UPDATE upview set a = 'b', b = 15, c = 120 WHERE b = 4; +ERROR: new row violates check option for view "upview" +DETAIL: Failing row contains (b, 15, 120, 1, 1). +-- ok, row movement, check option passes +UPDATE upview set a = 'b', b = 15 WHERE b = 4; +:show_data; + partname | a | b | c | d | e +---------------+---+----+-----+---+--- + part_a_1_a_10 | a | 1 | 1 | 1 | 1 + part_b_1_b_10 | b | 7 | 117 | 2 | 2 + part_b_1_b_10 | b | 9 | 125 | 6 | 6 + part_d_1_15 | b | 11 | 125 | 9 | 9 + part_d_1_15 | b | 12 | 116 | 1 | 1 + part_d_1_15 | b | 15 | 199 | 1 | 1 +(6 rows) + +-- cleanup +DROP VIEW upview; +-- RETURNING having whole-row vars. 
+:init_range_parted; +UPDATE range_parted set c = 95 WHERE a = 'b' and b > 10 and c > 100 returning (range_parted), *; + range_parted | a | b | c | d | e +---------------+---+----+----+----+--- + (b,15,95,16,) | b | 15 | 95 | 16 | + (b,17,95,19,) | b | 17 | 95 | 19 | +(2 rows) + +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_c_1_100 | b | 15 | 95 | 16 | + part_c_1_100 | b | 17 | 95 | 19 | +(6 rows) + +-- Transition tables with update row movement +:init_range_parted; +CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS +$$ + begin + raise notice 'trigger = %, old table = %, new table = %', + TG_NAME, + (select string_agg(old_table::text, ', ' ORDER BY a) FROM old_table), + (select string_agg(new_table::text, ', ' ORDER BY a) FROM new_table); + return null; + end; +$$; +CREATE TRIGGER trans_updatetrig + AFTER UPDATE ON range_parted REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,110,1,), (b,13,98,2,), (b,15,106,16,), (b,17,106,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 13 | 98 | 2 | + part_d_15_20 | b | 15 | 106 | 16 | + part_d_15_20 | b | 17 | 106 | 19 | + part_d_1_15 | b | 12 | 110 | 1 | +(6 rows) + +:init_range_parted; +-- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers +-- should not cause DELETEd rows to be captured twice. Similar thing for +-- INSERT triggers and inserted rows. +CREATE TRIGGER trans_deletetrig + AFTER DELETE ON range_parted REFERENCING OLD TABLE AS old_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +CREATE TRIGGER trans_inserttrig + AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table + FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); +UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,12,96,1,), (b,13,97,2,), (b,15,105,16,), (b,17,105,19,), new table = (b,12,146,1,), (b,13,147,2,), (b,15,155,16,), (b,17,155,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_d_15_20 | b | 15 | 155 | 16 | + part_d_15_20 | b | 17 | 155 | 19 | + part_d_1_15 | b | 12 | 146 | 1 | + part_d_1_15 | b | 13 | 147 | 2 | +(6 rows) + +DROP TRIGGER trans_deletetrig ON range_parted; +DROP TRIGGER trans_inserttrig ON range_parted; +-- Don't drop trans_updatetrig yet. It is required below. +-- Test with transition tuple conversion happening for rows moved into the +-- new partition. This requires a trigger that references transition table +-- (we already have trans_updatetrig). For inserted rows, the conversion +-- is not usually needed, because the original tuple is already compatible with +-- the desired transition tuple format. But conversion happens when there is a +-- BR trigger because the trigger can change the inserted row. 
So install a +-- BR triggers on those child partitions where the rows will be moved. +CREATE FUNCTION func_parted_mod_b() RETURNS trigger AS $$ +BEGIN + NEW.b = NEW.b + 1; + return NEW; +END $$ language plpgsql; +CREATE TRIGGER trig_c1_100 BEFORE UPDATE OR INSERT ON part_c_1_100 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20 + FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); +:init_range_parted; +UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end) WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,13,96,1,), (b,14,97,2,), (b,16,105,16,), (b,18,105,19,), new table = (b,15,110,1,), (b,15,98,2,), (b,17,106,16,), (b,19,106,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 15 | 98 | 2 | + part_d_15_20 | b | 17 | 106 | 16 | + part_d_15_20 | b | 19 | 106 | 19 | + part_d_1_15 | b | 15 | 110 | 1 | +(6 rows) + +:init_range_parted; +UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; +NOTICE: trigger = trans_updatetrig, old table = (b,13,96,1,), (b,14,97,2,), (b,16,105,16,), (b,18,105,19,), new table = (b,15,146,1,), (b,16,147,2,), (b,17,155,16,), (b,19,155,19,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_d_15_20 | b | 17 | 155 | 16 | + part_d_15_20 | b | 19 | 155 | 19 | + part_d_1_15 | b | 15 | 146 | 1 | + part_d_1_15 | b | 16 | 147 | 2 | +(6 rows) + +-- Case where per-partition tuple conversion map array is allocated, but the +-- map is not required for the particular tuple that is routed, thanks to +-- matching table attributes of the partition and the target table. +:init_range_parted; +UPDATE range_parted set b = 15 WHERE b = 1; +NOTICE: trigger = trans_updatetrig, old table = (a,1,1,1,), new table = (a,15,1,1,) +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_10_a_20 | a | 15 | 1 | 1 | + part_c_1_100 | b | 13 | 96 | 1 | + part_c_1_100 | b | 14 | 97 | 2 | + part_d_15_20 | b | 16 | 105 | 16 | + part_d_15_20 | b | 18 | 105 | 19 | +(6 rows) + +DROP TRIGGER trans_updatetrig ON range_parted; +DROP TRIGGER trig_c1_100 ON part_c_1_100; +DROP TRIGGER trig_d1_15 ON part_d_1_15; +DROP TRIGGER trig_d15_20 ON part_d_15_20; +DROP FUNCTION func_parted_mod_b(); +-- RLS policies with update-row-movement +----------------------------------------- +ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY; +CREATE USER regress_range_parted_user; +GRANT ALL ON range_parted, mintab TO regress_range_parted_user; +CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true); +CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0); +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- This should fail with RLS violation error while moving row from +-- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number. 
+UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy for table "range_parted" +RESET SESSION AUTHORIZATION; +-- Create a trigger on part_d_1_15 +CREATE FUNCTION func_d_1_15() RETURNS trigger AS $$ +BEGIN + NEW.c = NEW.c + 1; -- Make even numbers odd, or vice versa + return NEW; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15 + FOR EACH ROW EXECUTE PROCEDURE func_d_1_15(); +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- Here, RLS checks should succeed while moving row from part_a_10_a_20 to +-- part_d_1_15. Even though the UPDATE is setting 'c' to an odd number, the +-- trigger at the destination partition again makes it an even number. +UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; +RESET SESSION AUTHORIZATION; +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- This should fail with RLS violation error. Even though the UPDATE is setting +-- 'c' to an even number, the trigger at the destination partition again makes +-- it an odd number. +UPDATE range_parted set a = 'b', c = 150 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy for table "range_parted" +-- Cleanup +RESET SESSION AUTHORIZATION; +DROP TRIGGER trig_d_1_15 ON part_d_1_15; +DROP FUNCTION func_d_1_15(); +-- Policy expression contains SubPlan +RESET SESSION AUTHORIZATION; +:init_range_parted; +CREATE POLICY policy_range_parted_subplan on range_parted + AS RESTRICTIVE for UPDATE USING (true) + WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab)); +SET SESSION AUTHORIZATION regress_range_parted_user; +-- fail, mintab has row with c1 = 120 +UPDATE range_parted set a = 'b', c = 122 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy "policy_range_parted_subplan" for table "range_parted" +-- ok +UPDATE range_parted set a = 'b', c = 120 WHERE a = 'a' and c = 200; +-- RLS policy expression contains whole row. 
+RESET SESSION AUTHORIZATION; +:init_range_parted; +CREATE POLICY policy_range_parted_wholerow on range_parted AS RESTRICTIVE for UPDATE USING (true) + WITH CHECK (range_parted = row('b', 10, 112, 1, NULL)::range_parted); +SET SESSION AUTHORIZATION regress_range_parted_user; +-- ok, should pass the RLS check +UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200; +RESET SESSION AUTHORIZATION; +:init_range_parted; +SET SESSION AUTHORIZATION regress_range_parted_user; +-- fail, the whole row RLS check should fail +UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200; +ERROR: new row violates row-level security policy "policy_range_parted_wholerow" for table "range_parted" +-- Cleanup +RESET SESSION AUTHORIZATION; +DROP POLICY policy_range_parted ON range_parted; +DROP POLICY policy_range_parted_subplan ON range_parted; +DROP POLICY policy_range_parted_wholerow ON range_parted; +REVOKE ALL ON range_parted, mintab FROM regress_range_parted_user; +DROP USER regress_range_parted_user; +DROP TABLE mintab; +-- statement triggers with update row movement +--------------------------------------------------- +:init_range_parted; +CREATE FUNCTION trigfunc() returns trigger language plpgsql as +$$ + begin + raise notice 'trigger = % fired on table % during %', + TG_NAME, TG_TABLE_NAME, TG_OP; + return null; + end; +$$; +-- Triggers on root partition +CREATE TRIGGER parent_delete_trig + AFTER DELETE ON range_parted for each statement execute procedure trigfunc(); +CREATE TRIGGER parent_update_trig + AFTER UPDATE ON range_parted for each statement execute procedure trigfunc(); +CREATE TRIGGER parent_insert_trig + AFTER INSERT ON range_parted for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_c_1_100 +CREATE TRIGGER c1_delete_trig + AFTER DELETE ON part_c_1_100 for each statement execute procedure trigfunc(); +CREATE TRIGGER c1_update_trig + AFTER UPDATE ON part_c_1_100 for each statement execute procedure trigfunc(); +CREATE TRIGGER c1_insert_trig + AFTER INSERT ON part_c_1_100 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_1_15 +CREATE TRIGGER d1_delete_trig + AFTER DELETE ON part_d_1_15 for each statement execute procedure trigfunc(); +CREATE TRIGGER d1_update_trig + AFTER UPDATE ON part_d_1_15 for each statement execute procedure trigfunc(); +CREATE TRIGGER d1_insert_trig + AFTER INSERT ON part_d_1_15 for each statement execute procedure trigfunc(); +-- Triggers on leaf partition part_d_15_20 +CREATE TRIGGER d15_delete_trig + AFTER DELETE ON part_d_15_20 for each statement execute procedure trigfunc(); +CREATE TRIGGER d15_update_trig + AFTER UPDATE ON part_d_15_20 for each statement execute procedure trigfunc(); +CREATE TRIGGER d15_insert_trig + AFTER INSERT ON part_d_15_20 for each statement execute procedure trigfunc(); +-- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or +-- insert statement triggers should be fired. 
+UPDATE range_parted set c = c - 50 WHERE c > 97; +NOTICE: trigger = parent_update_trig fired on table range_parted during UPDATE +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 150 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_c_1_100 | b | 15 | 55 | 16 | + part_c_1_100 | b | 17 | 55 | 19 | +(6 rows) + +DROP TRIGGER parent_delete_trig ON range_parted; +DROP TRIGGER parent_update_trig ON range_parted; +DROP TRIGGER parent_insert_trig ON range_parted; +DROP TRIGGER c1_delete_trig ON part_c_1_100; +DROP TRIGGER c1_update_trig ON part_c_1_100; +DROP TRIGGER c1_insert_trig ON part_c_1_100; +DROP TRIGGER d1_delete_trig ON part_d_1_15; +DROP TRIGGER d1_update_trig ON part_d_1_15; +DROP TRIGGER d1_insert_trig ON part_d_1_15; +DROP TRIGGER d15_delete_trig ON part_d_15_20; +DROP TRIGGER d15_update_trig ON part_d_15_20; +DROP TRIGGER d15_insert_trig ON part_d_15_20; +-- Creating default partition for range +:init_range_parted; +create table part_def partition of range_parted default; +\d+ part_def + Table "public.part_def" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+-------------------+-----------+----------+---------+----------+--------------+------------- + a | text | | | | extended | | + b | bigint | | | | plain | | + c | numeric | | | | main | | + d | integer | | | | plain | | + e | character varying | | | | extended | | +Partition of: range_parted DEFAULT +Partition constraint: (NOT ((a IS NOT NULL) AND (b IS NOT NULL) AND (((a = 'a'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'a'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '1'::bigint) AND (b < '10'::bigint)) OR ((a = 'b'::text) AND (b >= '10'::bigint) AND (b < '20'::bigint)) OR ((a = 'b'::text) AND (b >= '20'::bigint) AND (b < '30'::bigint))))) + +insert into range_parted values ('c', 9); +-- ok +update part_def set a = 'd' where a = 'c'; +-- fail +update part_def set a = 'a' where a = 'd'; +ERROR: new row for relation "part_def" violates partition constraint +DETAIL: Failing row contains (a, 9, null, null, null). +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Update row movement from non-default to default partition. +-- fail, default partition is not under part_a_10_a_20; +UPDATE part_a_10_a_20 set a = 'ad' WHERE a = 'a'; +ERROR: new row for relation "part_a_10_a_20" violates partition constraint +DETAIL: Failing row contains (ad, 10, 200, 1, null). +-- ok +UPDATE range_parted set a = 'ad' WHERE a = 'a'; +UPDATE range_parted set a = 'bd' WHERE a = 'b'; +:show_data; + partname | a | b | c | d | e +----------+----+----+-----+----+--- + part_def | ad | 1 | 1 | 1 | + part_def | ad | 10 | 200 | 1 | + part_def | bd | 12 | 96 | 1 | + part_def | bd | 13 | 97 | 2 | + part_def | bd | 15 | 105 | 16 | + part_def | bd | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Update row movement from default to non-default partitions. 
+-- ok +UPDATE range_parted set a = 'a' WHERE a = 'ad'; +UPDATE range_parted set a = 'b' WHERE a = 'bd'; +:show_data; + partname | a | b | c | d | e +----------------+---+----+-----+----+--- + part_a_10_a_20 | a | 10 | 200 | 1 | + part_a_1_a_10 | a | 1 | 1 | 1 | + part_c_1_100 | b | 12 | 96 | 1 | + part_c_1_100 | b | 13 | 97 | 2 | + part_d_15_20 | b | 15 | 105 | 16 | + part_d_15_20 | b | 17 | 105 | 19 | + part_def | d | 9 | | | +(7 rows) + +-- Cleanup: range_parted no longer needed. +DROP TABLE range_parted; +CREATE TABLE list_parted ( + a text, + b int +) PARTITION BY list (a); +CREATE TABLE list_part1 PARTITION OF list_parted for VALUES in ('a', 'b'); +CREATE TABLE list_default PARTITION OF list_parted default; +INSERT into list_part1 VALUES ('a', 1); +INSERT into list_default VALUES ('d', 10); +-- fail +UPDATE list_default set a = 'a' WHERE a = 'd'; +ERROR: new row for relation "list_default" violates partition constraint +DETAIL: Failing row contains (a, 10). +-- ok +UPDATE list_default set a = 'x' WHERE a = 'd'; +DROP TABLE list_parted; +-- Test retrieval of system columns with non-consistent partition row types. +-- This is only partially supported, as seen in the results. +create table utrtest (a int, b text) partition by list (a); +create table utr1 (a int check (a in (1)), q text, b text); +create table utr2 (a int check (a in (2)), b text); +alter table utr1 drop column q; +alter table utrtest attach partition utr1 for values in (1); +alter table utrtest attach partition utr2 for values in (2); +insert into utrtest values (1, 'foo') + returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; + a | b | tableoid | xmin_ok +---+-----+----------+--------- + 1 | foo | utr1 | t +(1 row) + +insert into utrtest values (2, 'bar') + returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; -- fails +ERROR: cannot retrieve a system column in this context +insert into utrtest values (2, 'bar') + returning *, tableoid::regclass; + a | b | tableoid +---+-----+---------- + 2 | bar | utr2 +(1 row) + +update utrtest set b = b || b from (values (1), (2)) s(x) where a = s.x + returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; + a | b | x | tableoid | xmin_ok +---+--------+---+----------+--------- + 1 | foofoo | 1 | utr1 | t + 2 | barbar | 2 | utr2 | t +(2 rows) + +update utrtest set a = 3 - a from (values (1), (2)) s(x) where a = s.x + returning *, tableoid::regclass, xmin = pg_current_xact_id()::xid as xmin_ok; -- fails +ERROR: cannot retrieve a system column in this context +update utrtest set a = 3 - a from (values (1), (2)) s(x) where a = s.x + returning *, tableoid::regclass; + a | b | x | tableoid +---+--------+---+---------- + 2 | foofoo | 1 | utr2 + 1 | barbar | 2 | utr1 +(2 rows) + +delete from utrtest + returning *, tableoid::regclass, xmax = pg_current_xact_id()::xid as xmax_ok; + a | b | tableoid | xmax_ok +---+--------+----------+--------- + 1 | barbar | utr1 | t + 2 | foofoo | utr2 | t +(2 rows) + +drop table utrtest; +-------------- +-- Some more update-partition-key test scenarios below. This time use list +-- partitions. 
+-------------- +-- Setup for list partitions +CREATE TABLE list_parted (a numeric, b int, c int8) PARTITION BY list (a); +CREATE TABLE sub_parted PARTITION OF list_parted for VALUES in (1) PARTITION BY list (b); +CREATE TABLE sub_part1(b int, c int8, a numeric); +ALTER TABLE sub_parted ATTACH PARTITION sub_part1 for VALUES in (1); +CREATE TABLE sub_part2(b int, c int8, a numeric); +ALTER TABLE sub_parted ATTACH PARTITION sub_part2 for VALUES in (2); +CREATE TABLE list_part1(a numeric, b int, c int8); +ALTER TABLE list_parted ATTACH PARTITION list_part1 for VALUES in (2,3); +INSERT into list_parted VALUES (2,5,50); +INSERT into list_parted VALUES (3,6,60); +INSERT into sub_parted VALUES (1,1,60); +INSERT into sub_parted VALUES (1,2,10); +-- Test partition constraint violation when intermediate ancestor is used and +-- constraint is inherited from upper root. +UPDATE sub_parted set a = 2 WHERE c = 10; +ERROR: new row for relation "sub_parted" violates partition constraint +DETAIL: Failing row contains (2, 2, 10). +-- Test update-partition-key, where the unpruned partitions do not have their +-- partition keys updated. +SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; + tableoid | a | b | c +------------+---+---+---- + list_part1 | 2 | 5 | 50 +(1 row) + +UPDATE list_parted set b = c + a WHERE a = 2; +SELECT tableoid::regclass::text, * FROM list_parted WHERE a = 2 ORDER BY 1; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 +(1 row) + +-- Test the case where BR UPDATE triggers change the partition key. +CREATE FUNCTION func_parted_mod_b() returns trigger as $$ +BEGIN + NEW.b = 2; -- This is changing partition key column. + return NEW; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER parted_mod_b before update on sub_part1 + for each row execute procedure func_parted_mod_b(); +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part1 | 1 | 1 | 60 + sub_part2 | 1 | 2 | 10 +(4 rows) + +-- This should do the tuple routing even though there is no explicit +-- partition-key update, because there is a trigger on sub_part1. +UPDATE list_parted set c = 70 WHERE b = 1; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part2 | 1 | 2 | 10 + sub_part2 | 1 | 2 | 70 +(4 rows) + +DROP TRIGGER parted_mod_b ON sub_part1; +-- If BR DELETE trigger prevented DELETE from happening, we should also skip +-- the INSERT if that delete is part of UPDATE=>DELETE+INSERT. +CREATE OR REPLACE FUNCTION func_parted_mod_b() returns trigger as $$ +BEGIN + raise notice 'Trigger: Got OLD row %, but returning NULL', OLD; + return NULL; +END $$ LANGUAGE plpgsql; +CREATE TRIGGER trig_skip_delete before delete on sub_part2 + for each row execute procedure func_parted_mod_b(); +UPDATE list_parted set b = 1 WHERE c = 70; +NOTICE: Trigger: Got OLD row (2,70,1), but returning NULL +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part2 | 1 | 2 | 10 + sub_part2 | 1 | 2 | 70 +(4 rows) + +-- Drop the trigger. Now the row should be moved. 
+DROP TRIGGER trig_skip_delete ON sub_part2; +UPDATE list_parted set b = 1 WHERE c = 70; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 + sub_part1 | 1 | 1 | 70 + sub_part2 | 1 | 2 | 10 +(4 rows) + +DROP FUNCTION func_parted_mod_b(); +-- UPDATE partition-key with FROM clause. If join produces multiple output +-- rows for the same row to be modified, we should tuple-route the row only +-- once. There should not be any rows inserted. +CREATE TABLE non_parted (id int); +INSERT into non_parted VALUES (1), (1), (1), (2), (2), (2), (3), (3), (3); +UPDATE list_parted t1 set a = 2 FROM non_parted t2 WHERE t1.a = t2.id and a = 1; +SELECT tableoid::regclass::text, * FROM list_parted ORDER BY 1, 2, 3, 4; + tableoid | a | b | c +------------+---+----+---- + list_part1 | 2 | 1 | 70 + list_part1 | 2 | 2 | 10 + list_part1 | 2 | 52 | 50 + list_part1 | 3 | 6 | 60 +(4 rows) + +DROP TABLE non_parted; +-- Cleanup: list_parted no longer needed. +DROP TABLE list_parted; +-- create custom operator class and hash function, for the same reason +-- explained in alter_table.sql +create or replace function dummy_hashint4(a int4, seed int8) returns int8 as +$$ begin return (a + seed); end; $$ language 'plpgsql' immutable; +create operator class custom_opclass for type int4 using hash as +operator 1 = , function 2 dummy_hashint4(int4, int8); +create table hash_parted ( + a int, + b int +) partition by hash (a custom_opclass, b custom_opclass); +create table hpart1 partition of hash_parted for values with (modulus 2, remainder 1); +create table hpart2 partition of hash_parted for values with (modulus 4, remainder 2); +create table hpart3 partition of hash_parted for values with (modulus 8, remainder 0); +create table hpart4 partition of hash_parted for values with (modulus 8, remainder 4); +insert into hpart1 values (1, 1); +insert into hpart2 values (2, 5); +insert into hpart4 values (3, 4); +-- fail +update hpart1 set a = 3, b=4 where a = 1; +ERROR: new row for relation "hpart1" violates partition constraint +DETAIL: Failing row contains (3, 4). +-- ok, row movement +update hash_parted set b = b - 1 where b = 1; +-- ok +update hash_parted set b = b + 8 where b = 1; +-- cleanup +drop table hash_parted; +drop operator class custom_opclass using hash; +drop function dummy_hashint4(a int4, seed int8); diff --git a/src/test/regress/expected/uuid.out b/src/test/regress/expected/uuid.out new file mode 100644 index 0000000..8e7f219 --- /dev/null +++ b/src/test/regress/expected/uuid.out @@ -0,0 +1,172 @@ +-- regression test for the uuid datatype +-- creating test tables +CREATE TABLE guid1 +( + guid_field UUID, + text_field TEXT DEFAULT(now()) +); +CREATE TABLE guid2 +( + guid_field UUID, + text_field TEXT DEFAULT(now()) +); +-- inserting invalid data tests +-- too long +INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111F'); +ERROR: invalid input syntax for type uuid: "11111111-1111-1111-1111-111111111111F" +LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-111... + ^ +-- too short +INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-1111-11111111111}'); +ERROR: invalid input syntax for type uuid: "{11111111-1111-1111-1111-11111111111}" +LINE 1: INSERT INTO guid1(guid_field) VALUES('{11111111-1111-1111-11... 
+ ^ +-- valid data but invalid format +INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-1111-111111111111'); +ERROR: invalid input syntax for type uuid: "111-11111-1111-1111-1111-111111111111" +LINE 1: INSERT INTO guid1(guid_field) VALUES('111-11111-1111-1111-11... + ^ +INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222 '); +ERROR: invalid input syntax for type uuid: "{22222222-2222-2222-2222-222222222222 " +LINE 1: INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-22... + ^ +-- invalid data +INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G111-111111111111'); +ERROR: invalid input syntax for type uuid: "11111111-1111-1111-G111-111111111111" +LINE 1: INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-G11... + ^ +INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-1111-111111111111'); +ERROR: invalid input syntax for type uuid: "11+11111-1111-1111-1111-111111111111" +LINE 1: INSERT INTO guid1(guid_field) VALUES('11+11111-1111-1111-111... + ^ +-- test non-error-throwing API +SELECT pg_input_is_valid('11', 'uuid'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('11', 'uuid'); + message | detail | hint | sql_error_code +------------------------------------------+--------+------+---------------- + invalid input syntax for type uuid: "11" | | | 22P02 +(1 row) + +--inserting three input formats +INSERT INTO guid1(guid_field) VALUES('11111111-1111-1111-1111-111111111111'); +INSERT INTO guid1(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}'); +INSERT INTO guid1(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e'); +-- retrieving the inserted data +SELECT guid_field FROM guid1; + guid_field +-------------------------------------- + 11111111-1111-1111-1111-111111111111 + 22222222-2222-2222-2222-222222222222 + 3f3e3c3b-3a30-3938-3736-353433a2313e +(3 rows) + +-- ordering test +SELECT guid_field FROM guid1 ORDER BY guid_field ASC; + guid_field +-------------------------------------- + 11111111-1111-1111-1111-111111111111 + 22222222-2222-2222-2222-222222222222 + 3f3e3c3b-3a30-3938-3736-353433a2313e +(3 rows) + +SELECT guid_field FROM guid1 ORDER BY guid_field DESC; + guid_field +-------------------------------------- + 3f3e3c3b-3a30-3938-3736-353433a2313e + 22222222-2222-2222-2222-222222222222 + 11111111-1111-1111-1111-111111111111 +(3 rows) + +-- = operator test +SELECT COUNT(*) FROM guid1 WHERE guid_field = '3f3e3c3b-3a30-3938-3736-353433a2313e'; + count +------- + 1 +(1 row) + +-- <> operator test +SELECT COUNT(*) FROM guid1 WHERE guid_field <> '11111111111111111111111111111111'; + count +------- + 2 +(1 row) + +-- < operator test +SELECT COUNT(*) FROM guid1 WHERE guid_field < '22222222-2222-2222-2222-222222222222'; + count +------- + 1 +(1 row) + +-- <= operator test +SELECT COUNT(*) FROM guid1 WHERE guid_field <= '22222222-2222-2222-2222-222222222222'; + count +------- + 2 +(1 row) + +-- > operator test +SELECT COUNT(*) FROM guid1 WHERE guid_field > '22222222-2222-2222-2222-222222222222'; + count +------- + 1 +(1 row) + +-- >= operator test +SELECT COUNT(*) FROM guid1 WHERE guid_field >= '22222222-2222-2222-2222-222222222222'; + count +------- + 2 +(1 row) + +-- btree and hash index creation test +CREATE INDEX guid1_btree ON guid1 USING BTREE (guid_field); +CREATE INDEX guid1_hash ON guid1 USING HASH (guid_field); +-- unique index test +CREATE UNIQUE INDEX guid1_unique_BTREE ON guid1 USING BTREE (guid_field); +-- should fail +INSERT INTO guid1(guid_field) 
VALUES('11111111-1111-1111-1111-111111111111'); +ERROR: duplicate key value violates unique constraint "guid1_unique_btree" +DETAIL: Key (guid_field)=(11111111-1111-1111-1111-111111111111) already exists. +-- check to see whether the new indexes are actually there +SELECT count(*) FROM pg_class WHERE relkind='i' AND relname LIKE 'guid%'; + count +------- + 3 +(1 row) + +-- populating the test tables with additional records +INSERT INTO guid1(guid_field) VALUES('44444444-4444-4444-4444-444444444444'); +INSERT INTO guid2(guid_field) VALUES('11111111-1111-1111-1111-111111111111'); +INSERT INTO guid2(guid_field) VALUES('{22222222-2222-2222-2222-222222222222}'); +INSERT INTO guid2(guid_field) VALUES('3f3e3c3b3a3039383736353433a2313e'); +-- join test +SELECT COUNT(*) FROM guid1 g1 INNER JOIN guid2 g2 ON g1.guid_field = g2.guid_field; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM guid1 g1 LEFT JOIN guid2 g2 ON g1.guid_field = g2.guid_field WHERE g2.guid_field IS NULL; + count +------- + 1 +(1 row) + +-- generation test +TRUNCATE guid1; +INSERT INTO guid1 (guid_field) VALUES (gen_random_uuid()); +INSERT INTO guid1 (guid_field) VALUES (gen_random_uuid()); +SELECT count(DISTINCT guid_field) FROM guid1; + count +------- + 2 +(1 row) + +-- clean up +DROP TABLE guid1, guid2 CASCADE; diff --git a/src/test/regress/expected/vacuum.out b/src/test/regress/expected/vacuum.out new file mode 100644 index 0000000..4def90b --- /dev/null +++ b/src/test/regress/expected/vacuum.out @@ -0,0 +1,510 @@ +-- +-- VACUUM +-- +CREATE TABLE vactst (i INT); +INSERT INTO vactst VALUES (1); +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst VALUES (0); +SELECT count(*) FROM vactst; + count +------- + 2049 +(1 row) + +DELETE FROM vactst WHERE i != 0; +SELECT * FROM vactst; + i +--- + 0 +(1 row) + +VACUUM FULL vactst; +UPDATE vactst SET i = i + 1; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst SELECT * FROM vactst; +INSERT INTO vactst VALUES (0); +SELECT count(*) FROM vactst; + count +------- + 2049 +(1 row) + +DELETE FROM vactst WHERE i != 0; +VACUUM (FULL) vactst; +DELETE FROM vactst; +SELECT * FROM vactst; + i +--- +(0 rows) + +VACUUM (FULL, FREEZE) vactst; +VACUUM (ANALYZE, FULL) vactst; +CREATE TABLE vaccluster (i INT PRIMARY KEY); +ALTER TABLE vaccluster CLUSTER ON vaccluster_pkey; +CLUSTER vaccluster; +CREATE FUNCTION do_analyze() RETURNS VOID VOLATILE LANGUAGE SQL + AS 'ANALYZE pg_am'; +CREATE FUNCTION wrap_do_analyze(c INT) RETURNS INT IMMUTABLE LANGUAGE SQL + AS 'SELECT $1 FROM do_analyze()'; +CREATE INDEX ON vaccluster(wrap_do_analyze(i)); +INSERT INTO vaccluster VALUES (1), (2); +ANALYZE vaccluster; +ERROR: ANALYZE cannot be executed from VACUUM or ANALYZE +CONTEXT: SQL function "do_analyze" statement 
1 +SQL function "wrap_do_analyze" statement 1 +-- Test ANALYZE in transaction, where the transaction surrounding +-- analyze performed modifications. This tests for the bug at +-- https://postgr.es/m/c7988239-d42c-ddc4-41db-171b23b35e4f%40ssinger.info +-- (which hopefully is unlikely to be reintroduced), but also seems +-- independently worthwhile to cover. +INSERT INTO vactst SELECT generate_series(1, 300); +DELETE FROM vactst WHERE i % 7 = 0; -- delete a few rows outside +BEGIN; +INSERT INTO vactst SELECT generate_series(301, 400); +DELETE FROM vactst WHERE i % 5 <> 0; -- delete a few rows inside +ANALYZE vactst; +COMMIT; +VACUUM FULL pg_am; +VACUUM FULL pg_class; +VACUUM FULL pg_database; +VACUUM FULL vaccluster; +ERROR: ANALYZE cannot be executed from VACUUM or ANALYZE +CONTEXT: SQL function "do_analyze" statement 1 +SQL function "wrap_do_analyze" statement 1 +VACUUM FULL vactst; +VACUUM (DISABLE_PAGE_SKIPPING) vaccluster; +-- PARALLEL option +CREATE TABLE pvactst (i INT, a INT[], p POINT) with (autovacuum_enabled = off); +INSERT INTO pvactst SELECT i, array[1,2,3], point(i, i+1) FROM generate_series(1,1000) i; +CREATE INDEX btree_pvactst ON pvactst USING btree (i); +CREATE INDEX hash_pvactst ON pvactst USING hash (i); +CREATE INDEX brin_pvactst ON pvactst USING brin (i); +CREATE INDEX gin_pvactst ON pvactst USING gin (a); +CREATE INDEX gist_pvactst ON pvactst USING gist (p); +CREATE INDEX spgist_pvactst ON pvactst USING spgist (p); +-- VACUUM invokes parallel index cleanup +SET min_parallel_index_scan_size to 0; +VACUUM (PARALLEL 2) pvactst; +-- VACUUM invokes parallel bulk-deletion +UPDATE pvactst SET i = i WHERE i < 1000; +VACUUM (PARALLEL 2) pvactst; +UPDATE pvactst SET i = i WHERE i < 1000; +VACUUM (PARALLEL 0) pvactst; -- disable parallel vacuum +VACUUM (PARALLEL -1) pvactst; -- error +ERROR: parallel workers for vacuum must be between 0 and 1024 +LINE 1: VACUUM (PARALLEL -1) pvactst; + ^ +VACUUM (PARALLEL 2, INDEX_CLEANUP FALSE) pvactst; +VACUUM (PARALLEL 2, FULL TRUE) pvactst; -- error, cannot use both PARALLEL and FULL +ERROR: VACUUM FULL cannot be performed in parallel +VACUUM (PARALLEL) pvactst; -- error, cannot use PARALLEL option without parallel degree +ERROR: parallel option requires a value between 0 and 1024 +LINE 1: VACUUM (PARALLEL) pvactst; + ^ +-- Test different combinations of parallel and full options for temporary tables +CREATE TEMPORARY TABLE tmp (a int PRIMARY KEY); +CREATE INDEX tmp_idx1 ON tmp (a); +VACUUM (PARALLEL 1, FULL FALSE) tmp; -- parallel vacuum disabled for temp tables +WARNING: disabling parallel option of vacuum on "tmp" --- cannot vacuum temporary tables in parallel +VACUUM (PARALLEL 0, FULL TRUE) tmp; -- can specify parallel disabled (even though that's implied by FULL) +RESET min_parallel_index_scan_size; +DROP TABLE pvactst; +-- INDEX_CLEANUP option +CREATE TABLE no_index_cleanup (i INT PRIMARY KEY, t TEXT); +-- Use uncompressed data stored in toast. +CREATE INDEX no_index_cleanup_idx ON no_index_cleanup(t); +ALTER TABLE no_index_cleanup ALTER COLUMN t SET STORAGE EXTERNAL; +INSERT INTO no_index_cleanup(i, t) VALUES (generate_series(1,30), + repeat('1234567890',269)); +-- index cleanup option is ignored if VACUUM FULL +VACUUM (INDEX_CLEANUP TRUE, FULL TRUE) no_index_cleanup; +VACUUM (FULL TRUE) no_index_cleanup; +-- Toast inherits the value from its parent table. +ALTER TABLE no_index_cleanup SET (vacuum_index_cleanup = false); +DELETE FROM no_index_cleanup WHERE i < 15; +-- Nothing is cleaned up. 
+VACUUM no_index_cleanup; +-- Both parent relation and toast are cleaned up. +ALTER TABLE no_index_cleanup SET (vacuum_index_cleanup = true); +VACUUM no_index_cleanup; +ALTER TABLE no_index_cleanup SET (vacuum_index_cleanup = auto); +VACUUM no_index_cleanup; +-- Parameter is set for both the parent table and its toast relation. +INSERT INTO no_index_cleanup(i, t) VALUES (generate_series(31,60), + repeat('1234567890',269)); +DELETE FROM no_index_cleanup WHERE i < 45; +-- Only toast index is cleaned up. +ALTER TABLE no_index_cleanup SET (vacuum_index_cleanup = off, + toast.vacuum_index_cleanup = yes); +VACUUM no_index_cleanup; +-- Only parent is cleaned up. +ALTER TABLE no_index_cleanup SET (vacuum_index_cleanup = true, + toast.vacuum_index_cleanup = false); +VACUUM no_index_cleanup; +-- Test some extra relations. +VACUUM (INDEX_CLEANUP FALSE) vaccluster; +VACUUM (INDEX_CLEANUP AUTO) vactst; -- index cleanup option is ignored if no indexes +VACUUM (INDEX_CLEANUP FALSE, FREEZE TRUE) vaccluster; +-- TRUNCATE option +CREATE TEMP TABLE vac_truncate_test(i INT NOT NULL, j text) + WITH (vacuum_truncate=true, autovacuum_enabled=false); +INSERT INTO vac_truncate_test VALUES (1, NULL), (NULL, NULL); +ERROR: null value in column "i" of relation "vac_truncate_test" violates not-null constraint +DETAIL: Failing row contains (null, null). +VACUUM (TRUNCATE FALSE, DISABLE_PAGE_SKIPPING) vac_truncate_test; +SELECT pg_relation_size('vac_truncate_test') > 0; + ?column? +---------- + t +(1 row) + +VACUUM (DISABLE_PAGE_SKIPPING) vac_truncate_test; +SELECT pg_relation_size('vac_truncate_test') = 0; + ?column? +---------- + t +(1 row) + +VACUUM (TRUNCATE FALSE, FULL TRUE) vac_truncate_test; +DROP TABLE vac_truncate_test; +-- partitioned table +CREATE TABLE vacparted (a int, b char) PARTITION BY LIST (a); +CREATE TABLE vacparted1 PARTITION OF vacparted FOR VALUES IN (1); +INSERT INTO vacparted VALUES (1, 'a'); +UPDATE vacparted SET b = 'b'; +VACUUM (ANALYZE) vacparted; +VACUUM (FULL) vacparted; +VACUUM (FREEZE) vacparted; +-- check behavior with duplicate column mentions +VACUUM ANALYZE vacparted(a,b,a); +ERROR: column "a" of relation "vacparted" appears more than once +ANALYZE vacparted(a,b,b); +ERROR: column "b" of relation "vacparted" appears more than once +-- partitioned table with index +CREATE TABLE vacparted_i (a int primary key, b varchar(100)) + PARTITION BY HASH (a); +CREATE TABLE vacparted_i1 PARTITION OF vacparted_i + FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE vacparted_i2 PARTITION OF vacparted_i + FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO vacparted_i SELECT i, 'test_'|| i from generate_series(1,10) i; +VACUUM (ANALYZE) vacparted_i; +VACUUM (FULL) vacparted_i; +VACUUM (FREEZE) vacparted_i; +SELECT relname, relhasindex FROM pg_class + WHERE relname LIKE 'vacparted_i%' AND relkind IN ('p','r') + ORDER BY relname; + relname | relhasindex +--------------+------------- + vacparted_i | t + vacparted_i1 | t + vacparted_i2 | t +(3 rows) + +DROP TABLE vacparted_i; +-- multiple tables specified +VACUUM vaccluster, vactst; +VACUUM vacparted, does_not_exist; +ERROR: relation "does_not_exist" does not exist +VACUUM (FREEZE) vacparted, vaccluster, vactst; +VACUUM (FREEZE) does_not_exist, vaccluster; +ERROR: relation "does_not_exist" does not exist +VACUUM ANALYZE vactst, vacparted (a); +VACUUM ANALYZE vactst (does_not_exist), vacparted (b); +ERROR: column "does_not_exist" of relation "vactst" does not exist +VACUUM FULL vacparted, vactst; +VACUUM FULL vactst, vacparted (a, b), vaccluster 
(i); +ERROR: ANALYZE option must be specified when a column list is provided +ANALYZE vactst, vacparted; +ANALYZE vacparted (b), vactst; +ANALYZE vactst, does_not_exist, vacparted; +ERROR: relation "does_not_exist" does not exist +ANALYZE vactst (i), vacparted (does_not_exist); +ERROR: column "does_not_exist" of relation "vacparted" does not exist +ANALYZE vactst, vactst; +BEGIN; -- ANALYZE behaves differently inside a transaction block +ANALYZE vactst, vactst; +COMMIT; +-- parenthesized syntax for ANALYZE +ANALYZE (VERBOSE) does_not_exist; +ERROR: relation "does_not_exist" does not exist +ANALYZE (nonexistent-arg) does_not_exist; +ERROR: syntax error at or near "arg" +LINE 1: ANALYZE (nonexistent-arg) does_not_exist; + ^ +ANALYZE (nonexistentarg) does_not_exit; +ERROR: unrecognized ANALYZE option "nonexistentarg" +LINE 1: ANALYZE (nonexistentarg) does_not_exit; + ^ +-- ensure argument order independence, and that SKIP_LOCKED on non-existing +-- relation still errors out. Suppress WARNING messages caused by concurrent +-- autovacuums. +SET client_min_messages TO 'ERROR'; +ANALYZE (SKIP_LOCKED, VERBOSE) does_not_exist; +ERROR: relation "does_not_exist" does not exist +ANALYZE (VERBOSE, SKIP_LOCKED) does_not_exist; +ERROR: relation "does_not_exist" does not exist +-- SKIP_LOCKED option +VACUUM (SKIP_LOCKED) vactst; +VACUUM (SKIP_LOCKED, FULL) vactst; +ANALYZE (SKIP_LOCKED) vactst; +RESET client_min_messages; +-- ensure VACUUM and ANALYZE don't have a problem with serializable +SET default_transaction_isolation = serializable; +VACUUM vactst; +ANALYZE vactst; +RESET default_transaction_isolation; +BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; +ANALYZE vactst; +COMMIT; +-- PROCESS_TOAST option +CREATE TABLE vac_option_tab (a INT, t TEXT); +INSERT INTO vac_option_tab SELECT a, 't' || a FROM generate_series(1, 10) AS a; +ALTER TABLE vac_option_tab ALTER COLUMN t SET STORAGE EXTERNAL; +-- Check the number of vacuums done on table vac_option_tab and on its +-- toast relation, to check that PROCESS_TOAST and PROCESS_MAIN work on +-- what they should. +CREATE VIEW vac_option_tab_counts AS + SELECT CASE WHEN c.relname IS NULL + THEN 'main' ELSE 'toast' END as rel, + s.vacuum_count + FROM pg_stat_all_tables s + LEFT JOIN pg_class c ON s.relid = c.reltoastrelid + WHERE c.relname = 'vac_option_tab' OR s.relname = 'vac_option_tab' + ORDER BY rel; +VACUUM (PROCESS_TOAST TRUE) vac_option_tab; +SELECT * FROM vac_option_tab_counts; + rel | vacuum_count +-------+-------------- + main | 1 + toast | 1 +(2 rows) + +VACUUM (PROCESS_TOAST FALSE) vac_option_tab; +SELECT * FROM vac_option_tab_counts; + rel | vacuum_count +-------+-------------- + main | 2 + toast | 1 +(2 rows) + +VACUUM (PROCESS_TOAST FALSE, FULL) vac_option_tab; -- error +ERROR: PROCESS_TOAST required with VACUUM FULL +-- PROCESS_MAIN option +-- Only the toast table is processed. +VACUUM (PROCESS_MAIN FALSE) vac_option_tab; +SELECT * FROM vac_option_tab_counts; + rel | vacuum_count +-------+-------------- + main | 2 + toast | 2 +(2 rows) + +-- Nothing is processed. +VACUUM (PROCESS_MAIN FALSE, PROCESS_TOAST FALSE) vac_option_tab; +SELECT * FROM vac_option_tab_counts; + rel | vacuum_count +-------+-------------- + main | 2 + toast | 2 +(2 rows) + +-- Check if the filenodes nodes have been updated as wanted after FULL. 
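The queries below capture the relfilenode of the main table and of its TOAST relation with \gset, then re-check them after the VACUUM (PROCESS_MAIN FALSE, FULL) run. Outside the regression suite, the same kind of check can be written with pg_relation_filenode(), which returns a relation's current filenode; a rewrite by VACUUM FULL assigns a new one. A small sketch against a hypothetical table t:

SELECT pg_relation_filenode('t') AS old_filenode \gset
VACUUM FULL t;
-- expected result: true, because the FULL pass rewrote the table under a new filenode
SELECT pg_relation_filenode('t') <> :old_filenode AS rewritten;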
+SELECT relfilenode AS main_filenode FROM pg_class + WHERE relname = 'vac_option_tab' \gset +SELECT t.relfilenode AS toast_filenode FROM pg_class c, pg_class t + WHERE c.reltoastrelid = t.oid AND c.relname = 'vac_option_tab' \gset +-- Only the toast relation is processed. +VACUUM (PROCESS_MAIN FALSE, FULL) vac_option_tab; +SELECT relfilenode = :main_filenode AS is_same_main_filenode + FROM pg_class WHERE relname = 'vac_option_tab'; + is_same_main_filenode +----------------------- + t +(1 row) + +SELECT t.relfilenode = :toast_filenode AS is_same_toast_filenode + FROM pg_class c, pg_class t + WHERE c.reltoastrelid = t.oid AND c.relname = 'vac_option_tab'; + is_same_toast_filenode +------------------------ + f +(1 row) + +-- BUFFER_USAGE_LIMIT option +VACUUM (BUFFER_USAGE_LIMIT '512 kB') vac_option_tab; +ANALYZE (BUFFER_USAGE_LIMIT '512 kB') vac_option_tab; +-- try disabling the buffer usage limit +VACUUM (BUFFER_USAGE_LIMIT 0) vac_option_tab; +ANALYZE (BUFFER_USAGE_LIMIT 0) vac_option_tab; +-- value exceeds max size error +VACUUM (BUFFER_USAGE_LIMIT 16777220) vac_option_tab; +ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB +-- value is less than min size error +VACUUM (BUFFER_USAGE_LIMIT 120) vac_option_tab; +ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB +-- integer overflow error +VACUUM (BUFFER_USAGE_LIMIT 10000000000) vac_option_tab; +ERROR: BUFFER_USAGE_LIMIT option must be 0 or between 128 kB and 16777216 kB +HINT: Value exceeds integer range. +-- incompatible with VACUUM FULL error +VACUUM (BUFFER_USAGE_LIMIT '512 kB', FULL) vac_option_tab; +ERROR: BUFFER_USAGE_LIMIT cannot be specified for VACUUM FULL +-- SKIP_DATABASE_STATS option +VACUUM (SKIP_DATABASE_STATS) vactst; +-- ONLY_DATABASE_STATS option +VACUUM (ONLY_DATABASE_STATS); +VACUUM (ONLY_DATABASE_STATS) vactst; -- error +ERROR: ONLY_DATABASE_STATS cannot be specified with a list of tables +DROP VIEW vac_option_tab_counts; +DROP TABLE vac_option_tab; +DROP TABLE vaccluster; +DROP TABLE vactst; +DROP TABLE vacparted; +DROP TABLE no_index_cleanup; +-- relation ownership, WARNING logs generated as all are skipped. +CREATE TABLE vacowned (a int); +CREATE TABLE vacowned_parted (a int) PARTITION BY LIST (a); +CREATE TABLE vacowned_part1 PARTITION OF vacowned_parted FOR VALUES IN (1); +CREATE TABLE vacowned_part2 PARTITION OF vacowned_parted FOR VALUES IN (2); +CREATE ROLE regress_vacuum; +SET ROLE regress_vacuum; +-- Simple table +VACUUM vacowned; +WARNING: permission denied to vacuum "vacowned", skipping it +ANALYZE vacowned; +WARNING: permission denied to analyze "vacowned", skipping it +VACUUM (ANALYZE) vacowned; +WARNING: permission denied to vacuum "vacowned", skipping it +-- Catalog +VACUUM pg_catalog.pg_class; +WARNING: permission denied to vacuum "pg_class", skipping it +ANALYZE pg_catalog.pg_class; +WARNING: permission denied to analyze "pg_class", skipping it +VACUUM (ANALYZE) pg_catalog.pg_class; +WARNING: permission denied to vacuum "pg_class", skipping it +-- Shared catalog +VACUUM pg_catalog.pg_authid; +WARNING: permission denied to vacuum "pg_authid", skipping it +ANALYZE pg_catalog.pg_authid; +WARNING: permission denied to analyze "pg_authid", skipping it +VACUUM (ANALYZE) pg_catalog.pg_authid; +WARNING: permission denied to vacuum "pg_authid", skipping it +-- Partitioned table and its partitions, nothing owned by other user. +-- Relations are not listed in a single command to test ownership +-- independently. 
+VACUUM vacowned_parted; +WARNING: permission denied to vacuum "vacowned_parted", skipping it +WARNING: permission denied to vacuum "vacowned_part1", skipping it +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM vacowned_part1; +WARNING: permission denied to vacuum "vacowned_part1", skipping it +VACUUM vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +ANALYZE vacowned_parted; +WARNING: permission denied to analyze "vacowned_parted", skipping it +WARNING: permission denied to analyze "vacowned_part1", skipping it +WARNING: permission denied to analyze "vacowned_part2", skipping it +ANALYZE vacowned_part1; +WARNING: permission denied to analyze "vacowned_part1", skipping it +ANALYZE vacowned_part2; +WARNING: permission denied to analyze "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_parted; +WARNING: permission denied to vacuum "vacowned_parted", skipping it +WARNING: permission denied to vacuum "vacowned_part1", skipping it +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_part1; +WARNING: permission denied to vacuum "vacowned_part1", skipping it +VACUUM (ANALYZE) vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +RESET ROLE; +-- Partitioned table and one partition owned by other user. +ALTER TABLE vacowned_parted OWNER TO regress_vacuum; +ALTER TABLE vacowned_part1 OWNER TO regress_vacuum; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM vacowned_part1; +VACUUM vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +ANALYZE vacowned_parted; +WARNING: permission denied to analyze "vacowned_part2", skipping it +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +WARNING: permission denied to analyze "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_parted; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +RESET ROLE; +-- Only one partition owned by other user. +ALTER TABLE vacowned_parted OWNER TO CURRENT_USER; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +WARNING: permission denied to vacuum "vacowned_parted", skipping it +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM vacowned_part1; +VACUUM vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +ANALYZE vacowned_parted; +WARNING: permission denied to analyze "vacowned_parted", skipping it +WARNING: permission denied to analyze "vacowned_part2", skipping it +ANALYZE vacowned_part1; +ANALYZE vacowned_part2; +WARNING: permission denied to analyze "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_parted; +WARNING: permission denied to vacuum "vacowned_parted", skipping it +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_part1; +VACUUM (ANALYZE) vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +RESET ROLE; +-- Only partitioned table owned by other user. 
+ALTER TABLE vacowned_parted OWNER TO regress_vacuum; +ALTER TABLE vacowned_part1 OWNER TO CURRENT_USER; +SET ROLE regress_vacuum; +VACUUM vacowned_parted; +WARNING: permission denied to vacuum "vacowned_part1", skipping it +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM vacowned_part1; +WARNING: permission denied to vacuum "vacowned_part1", skipping it +VACUUM vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +ANALYZE vacowned_parted; +WARNING: permission denied to analyze "vacowned_part1", skipping it +WARNING: permission denied to analyze "vacowned_part2", skipping it +ANALYZE vacowned_part1; +WARNING: permission denied to analyze "vacowned_part1", skipping it +ANALYZE vacowned_part2; +WARNING: permission denied to analyze "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_parted; +WARNING: permission denied to vacuum "vacowned_part1", skipping it +WARNING: permission denied to vacuum "vacowned_part2", skipping it +VACUUM (ANALYZE) vacowned_part1; +WARNING: permission denied to vacuum "vacowned_part1", skipping it +VACUUM (ANALYZE) vacowned_part2; +WARNING: permission denied to vacuum "vacowned_part2", skipping it +RESET ROLE; +DROP TABLE vacowned; +DROP TABLE vacowned_parted; +DROP ROLE regress_vacuum; diff --git a/src/test/regress/expected/vacuum_parallel.out b/src/test/regress/expected/vacuum_parallel.out new file mode 100644 index 0000000..ddf0ee5 --- /dev/null +++ b/src/test/regress/expected/vacuum_parallel.out @@ -0,0 +1,49 @@ +SET max_parallel_maintenance_workers TO 4; +SET min_parallel_index_scan_size TO '128kB'; +-- Bug #17245: Make sure that we don't totally fail to VACUUM individual indexes that +-- happen to be below min_parallel_index_scan_size during parallel VACUUM: +CREATE TABLE parallel_vacuum_table (a int) WITH (autovacuum_enabled = off); +INSERT INTO parallel_vacuum_table SELECT i from generate_series(1, 10000) i; +-- Parallel VACUUM will never be used unless there are at least two indexes +-- that exceed min_parallel_index_scan_size. Create two such indexes, and +-- a third index that is smaller than min_parallel_index_scan_size. 
+CREATE INDEX regular_sized_index ON parallel_vacuum_table(a); +CREATE INDEX typically_sized_index ON parallel_vacuum_table(a); +-- Note: vacuum_in_leader_small_index can apply deduplication, making it ~3x +-- smaller than the other indexes +CREATE INDEX vacuum_in_leader_small_index ON parallel_vacuum_table((1)); +-- Verify (as best we can) that the cost model for parallel VACUUM +-- will make our VACUUM run in parallel, while always leaving it up to the +-- parallel leader to handle the vacuum_in_leader_small_index index: +SELECT EXISTS ( +SELECT 1 +FROM pg_class +WHERE oid = 'vacuum_in_leader_small_index'::regclass AND + pg_relation_size(oid) < + pg_size_bytes(current_setting('min_parallel_index_scan_size')) +) as leader_will_handle_small_index; + leader_will_handle_small_index +-------------------------------- + t +(1 row) + +SELECT count(*) as trigger_parallel_vacuum_nindexes +FROM pg_class +WHERE oid in ('regular_sized_index'::regclass, 'typically_sized_index'::regclass) AND + pg_relation_size(oid) >= + pg_size_bytes(current_setting('min_parallel_index_scan_size')); + trigger_parallel_vacuum_nindexes +---------------------------------- + 2 +(1 row) + +-- Parallel VACUUM with B-Tree page deletions, ambulkdelete calls: +DELETE FROM parallel_vacuum_table; +VACUUM (PARALLEL 4, INDEX_CLEANUP ON) parallel_vacuum_table; +-- Since vacuum_in_leader_small_index uses deduplication, we expect an +-- assertion failure with bug #17245 (in the absence of bugfix): +INSERT INTO parallel_vacuum_table SELECT i FROM generate_series(1, 10000) i; +RESET max_parallel_maintenance_workers; +RESET min_parallel_index_scan_size; +-- Deliberately don't drop table, to get further coverage from tools like +-- pg_amcheck in some testing scenarios diff --git a/src/test/regress/expected/varchar.out b/src/test/regress/expected/varchar.out new file mode 100644 index 0000000..2886352 --- /dev/null +++ b/src/test/regress/expected/varchar.out @@ -0,0 +1,132 @@ +-- +-- VARCHAR +-- +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1)); +INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); +INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); +-- any of the following three input formats are acceptable +INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); +INSERT INTO VARCHAR_TBL (f1) VALUES (2); +INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); +-- zero-length char +INSERT INTO VARCHAR_TBL (f1) VALUES (''); +-- try varchar's of greater than 1 length +INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); +ERROR: value too long for type character varying(1) +INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); +SELECT * FROM VARCHAR_TBL; + f1 +---- + a + A + 1 + 2 + 3 + + c +(7 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 <> 'a'; + f1 +---- + A + 1 + 2 + 3 + + c +(6 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 = 'a'; + f1 +---- + a +(1 row) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 < 'a'; + f1 +---- + A + 1 + 2 + 3 + +(5 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 <= 'a'; + f1 +---- + a + A + 1 + 2 + 3 + +(6 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 > 'a'; + f1 +---- + c +(1 row) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 >= 'a'; + f1 +---- + a + c +(2 rows) + +DROP TABLE VARCHAR_TBL; +-- +-- Now test longer arrays of char +-- +-- This varchar_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. 
+-- +INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde'); +ERROR: value too long for type character varying(4) +SELECT * FROM VARCHAR_TBL; + f1 +------ + a + ab + abcd + abcd +(4 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('abcd ', 'varchar(4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('abcde', 'varchar(4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('abcde', 'varchar(4)'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + value too long for type character varying(4) | | | 22001 +(1 row) + diff --git a/src/test/regress/expected/varchar_1.out b/src/test/regress/expected/varchar_1.out new file mode 100644 index 0000000..7cb74c7 --- /dev/null +++ b/src/test/regress/expected/varchar_1.out @@ -0,0 +1,132 @@ +-- +-- VARCHAR +-- +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1)); +INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); +INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); +-- any of the following three input formats are acceptable +INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); +INSERT INTO VARCHAR_TBL (f1) VALUES (2); +INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); +-- zero-length char +INSERT INTO VARCHAR_TBL (f1) VALUES (''); +-- try varchar's of greater than 1 length +INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); +ERROR: value too long for type character varying(1) +INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); +SELECT * FROM VARCHAR_TBL; + f1 +---- + a + A + 1 + 2 + 3 + + c +(7 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 <> 'a'; + f1 +---- + A + 1 + 2 + 3 + + c +(6 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 = 'a'; + f1 +---- + a +(1 row) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 < 'a'; + f1 +---- + 1 + 2 + 3 + +(4 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 <= 'a'; + f1 +---- + a + 1 + 2 + 3 + +(5 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 > 'a'; + f1 +---- + A + c +(2 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 >= 'a'; + f1 +---- + a + A + c +(3 rows) + +DROP TABLE VARCHAR_TBL; +-- +-- Now test longer arrays of char +-- +-- This varchar_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. 
+-- +INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde'); +ERROR: value too long for type character varying(4) +SELECT * FROM VARCHAR_TBL; + f1 +------ + a + ab + abcd + abcd +(4 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('abcd ', 'varchar(4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('abcde', 'varchar(4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('abcde', 'varchar(4)'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + value too long for type character varying(4) | | | 22001 +(1 row) + diff --git a/src/test/regress/expected/varchar_2.out b/src/test/regress/expected/varchar_2.out new file mode 100644 index 0000000..9b154c6 --- /dev/null +++ b/src/test/regress/expected/varchar_2.out @@ -0,0 +1,132 @@ +-- +-- VARCHAR +-- +-- +-- Build a table for testing +-- (This temporarily hides the table created in test_setup.sql) +-- +CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1)); +INSERT INTO VARCHAR_TBL (f1) VALUES ('a'); +INSERT INTO VARCHAR_TBL (f1) VALUES ('A'); +-- any of the following three input formats are acceptable +INSERT INTO VARCHAR_TBL (f1) VALUES ('1'); +INSERT INTO VARCHAR_TBL (f1) VALUES (2); +INSERT INTO VARCHAR_TBL (f1) VALUES ('3'); +-- zero-length char +INSERT INTO VARCHAR_TBL (f1) VALUES (''); +-- try varchar's of greater than 1 length +INSERT INTO VARCHAR_TBL (f1) VALUES ('cd'); +ERROR: value too long for type character varying(1) +INSERT INTO VARCHAR_TBL (f1) VALUES ('c '); +SELECT * FROM VARCHAR_TBL; + f1 +---- + a + A + 1 + 2 + 3 + + c +(7 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 <> 'a'; + f1 +---- + A + 1 + 2 + 3 + + c +(6 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 = 'a'; + f1 +---- + a +(1 row) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 < 'a'; + f1 +---- + +(1 row) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 <= 'a'; + f1 +---- + a + +(2 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 > 'a'; + f1 +---- + A + 1 + 2 + 3 + c +(5 rows) + +SELECT c.* + FROM VARCHAR_TBL c + WHERE c.f1 >= 'a'; + f1 +---- + a + A + 1 + 2 + 3 + c +(6 rows) + +DROP TABLE VARCHAR_TBL; +-- +-- Now test longer arrays of char +-- +-- This varchar_tbl was already created and filled in test_setup.sql. +-- Here we just try to insert bad values. 
+-- +INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde'); +ERROR: value too long for type character varying(4) +SELECT * FROM VARCHAR_TBL; + f1 +------ + a + ab + abcd + abcd +(4 rows) + +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('abcd ', 'varchar(4)'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('abcde', 'varchar(4)'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('abcde', 'varchar(4)'); + message | detail | hint | sql_error_code +----------------------------------------------+--------+------+---------------- + value too long for type character varying(4) | | | 22001 +(1 row) + diff --git a/src/test/regress/expected/window.out b/src/test/regress/expected/window.out new file mode 100644 index 0000000..8612788 --- /dev/null +++ b/src/test/regress/expected/window.out @@ -0,0 +1,4878 @@ +-- +-- WINDOW FUNCTIONS +-- +CREATE TEMPORARY TABLE empsalary ( + depname varchar, + empno bigint, + salary int, + enroll_date date +); +INSERT INTO empsalary VALUES +('develop', 10, 5200, '2007-08-01'), +('sales', 1, 5000, '2006-10-01'), +('personnel', 5, 3500, '2007-12-10'), +('sales', 4, 4800, '2007-08-08'), +('personnel', 2, 3900, '2006-12-23'), +('develop', 7, 4200, '2008-01-01'), +('develop', 9, 4500, '2008-01-01'), +('sales', 3, 4800, '2007-08-01'), +('develop', 8, 6000, '2006-10-01'), +('develop', 11, 5200, '2007-08-15'); +SELECT depname, empno, salary, sum(salary) OVER (PARTITION BY depname) FROM empsalary ORDER BY depname, salary; + depname | empno | salary | sum +-----------+-------+--------+------- + develop | 7 | 4200 | 25100 + develop | 9 | 4500 | 25100 + develop | 11 | 5200 | 25100 + develop | 10 | 5200 | 25100 + develop | 8 | 6000 | 25100 + personnel | 5 | 3500 | 7400 + personnel | 2 | 3900 | 7400 + sales | 3 | 4800 | 14600 + sales | 4 | 4800 | 14600 + sales | 1 | 5000 | 14600 +(10 rows) + +SELECT depname, empno, salary, rank() OVER (PARTITION BY depname ORDER BY salary) FROM empsalary; + depname | empno | salary | rank +-----------+-------+--------+------ + develop | 7 | 4200 | 1 + develop | 9 | 4500 | 2 + develop | 11 | 5200 | 3 + develop | 10 | 5200 | 3 + develop | 8 | 6000 | 5 + personnel | 5 | 3500 | 1 + personnel | 2 | 3900 | 2 + sales | 3 | 4800 | 1 + sales | 4 | 4800 | 1 + sales | 1 | 5000 | 3 +(10 rows) + +-- with GROUP BY +SELECT four, ten, SUM(SUM(four)) OVER (PARTITION BY four), AVG(ten) FROM tenk1 +GROUP BY four, ten ORDER BY four, ten; + four | ten | sum | avg +------+-----+------+------------------------ + 0 | 0 | 0 | 0.00000000000000000000 + 0 | 2 | 0 | 2.0000000000000000 + 0 | 4 | 0 | 4.0000000000000000 + 0 | 6 | 0 | 6.0000000000000000 + 0 | 8 | 0 | 8.0000000000000000 + 1 | 1 | 2500 | 1.00000000000000000000 + 1 | 3 | 2500 | 3.0000000000000000 + 1 | 5 | 2500 | 5.0000000000000000 + 1 | 7 | 2500 | 7.0000000000000000 + 1 | 9 | 2500 | 9.0000000000000000 + 2 | 0 | 5000 | 0.00000000000000000000 + 2 | 2 | 5000 | 2.0000000000000000 + 2 | 4 | 5000 | 4.0000000000000000 + 2 | 6 | 5000 | 6.0000000000000000 + 2 | 8 | 5000 | 8.0000000000000000 + 3 | 1 | 7500 | 1.00000000000000000000 + 3 | 3 | 7500 | 3.0000000000000000 + 3 | 5 | 7500 | 5.0000000000000000 + 3 | 7 | 7500 | 7.0000000000000000 + 3 | 9 | 7500 | 9.0000000000000000 +(20 rows) + +SELECT depname, empno, salary, sum(salary) OVER w FROM empsalary WINDOW w AS (PARTITION BY depname); + depname | empno | salary | sum +-----------+-------+--------+------- + develop | 11 | 5200 | 25100 + develop | 7 | 4200 | 25100 + develop | 9 | 4500 | 
25100 + develop | 8 | 6000 | 25100 + develop | 10 | 5200 | 25100 + personnel | 5 | 3500 | 7400 + personnel | 2 | 3900 | 7400 + sales | 3 | 4800 | 14600 + sales | 1 | 5000 | 14600 + sales | 4 | 4800 | 14600 +(10 rows) + +SELECT depname, empno, salary, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary) ORDER BY rank() OVER w; + depname | empno | salary | rank +-----------+-------+--------+------ + develop | 7 | 4200 | 1 + personnel | 5 | 3500 | 1 + sales | 3 | 4800 | 1 + sales | 4 | 4800 | 1 + personnel | 2 | 3900 | 2 + develop | 9 | 4500 | 2 + sales | 1 | 5000 | 3 + develop | 11 | 5200 | 3 + develop | 10 | 5200 | 3 + develop | 8 | 6000 | 5 +(10 rows) + +-- empty window specification +SELECT COUNT(*) OVER () FROM tenk1 WHERE unique2 < 10; + count +------- + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 +(10 rows) + +SELECT COUNT(*) OVER w FROM tenk1 WHERE unique2 < 10 WINDOW w AS (); + count +------- + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 +(10 rows) + +-- no window operation +SELECT four FROM tenk1 WHERE FALSE WINDOW w AS (PARTITION BY ten); + four +------ +(0 rows) + +-- cumulative aggregate +SELECT sum(four) OVER (PARTITION BY ten ORDER BY unique2) AS sum_1, ten, four FROM tenk1 WHERE unique2 < 10; + sum_1 | ten | four +-------+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 2 | 0 | 2 + 3 | 1 | 3 + 4 | 1 | 1 + 5 | 1 | 1 + 3 | 3 | 3 + 0 | 4 | 0 + 1 | 7 | 1 + 1 | 9 | 1 +(10 rows) + +SELECT row_number() OVER (ORDER BY unique2) FROM tenk1 WHERE unique2 < 10; + row_number +------------ + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +SELECT rank() OVER (PARTITION BY four ORDER BY ten) AS rank_1, ten, four FROM tenk1 WHERE unique2 < 10; + rank_1 | ten | four +--------+-----+------ + 1 | 0 | 0 + 1 | 0 | 0 + 3 | 4 | 0 + 1 | 1 | 1 + 1 | 1 | 1 + 3 | 7 | 1 + 4 | 9 | 1 + 1 | 0 | 2 + 1 | 1 | 3 + 2 | 3 | 3 +(10 rows) + +SELECT dense_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + dense_rank | ten | four +------------+-----+------ + 1 | 0 | 0 + 1 | 0 | 0 + 2 | 4 | 0 + 1 | 1 | 1 + 1 | 1 | 1 + 2 | 7 | 1 + 3 | 9 | 1 + 1 | 0 | 2 + 1 | 1 | 3 + 2 | 3 | 3 +(10 rows) + +SELECT percent_rank() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + percent_rank | ten | four +--------------------+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 1 | 4 | 0 + 0 | 1 | 1 + 0 | 1 | 1 + 0.6666666666666666 | 7 | 1 + 1 | 9 | 1 + 0 | 0 | 2 + 0 | 1 | 3 + 1 | 3 | 3 +(10 rows) + +SELECT cume_dist() OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + cume_dist | ten | four +--------------------+-----+------ + 0.6666666666666666 | 0 | 0 + 0.6666666666666666 | 0 | 0 + 1 | 4 | 0 + 0.5 | 1 | 1 + 0.5 | 1 | 1 + 0.75 | 7 | 1 + 1 | 9 | 1 + 1 | 0 | 2 + 0.5 | 1 | 3 + 1 | 3 | 3 +(10 rows) + +SELECT ntile(3) OVER (ORDER BY ten, four), ten, four FROM tenk1 WHERE unique2 < 10; + ntile | ten | four +-------+-----+------ + 1 | 0 | 0 + 1 | 0 | 0 + 1 | 0 | 2 + 1 | 1 | 1 + 2 | 1 | 1 + 2 | 1 | 3 + 2 | 3 | 3 + 3 | 4 | 0 + 3 | 7 | 1 + 3 | 9 | 1 +(10 rows) + +SELECT ntile(NULL) OVER (ORDER BY ten, four), ten, four FROM tenk1 LIMIT 2; + ntile | ten | four +-------+-----+------ + | 0 | 0 + | 0 | 0 +(2 rows) + +SELECT lag(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + lag | ten | four +-----+-----+------ + | 0 | 0 + 0 | 0 | 0 + 0 | 4 | 0 + | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 7 | 9 | 1 + | 0 | 2 + | 1 | 3 + 1 | 3 | 3 +(10 rows) + +SELECT lag(ten, four) OVER (PARTITION BY four ORDER BY ten), 
ten, four FROM tenk1 WHERE unique2 < 10; + lag | ten | four +-----+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 4 | 4 | 0 + | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 7 | 9 | 1 + | 0 | 2 + | 1 | 3 + | 3 | 3 +(10 rows) + +SELECT lag(ten, four, 0) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + lag | ten | four +-----+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 4 | 4 | 0 + 0 | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 7 | 9 | 1 + 0 | 0 | 2 + 0 | 1 | 3 + 0 | 3 | 3 +(10 rows) + +SELECT lag(ten, four, 0.7) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten; + lag | ten | four +-----+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 4 | 4 | 0 + 0.7 | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 7 | 9 | 1 + 0.7 | 0 | 2 + 0.7 | 1 | 3 + 0.7 | 3 | 3 +(10 rows) + +SELECT lead(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + lead | ten | four +------+-----+------ + 0 | 0 | 0 + 4 | 0 | 0 + | 4 | 0 + 1 | 1 | 1 + 7 | 1 | 1 + 9 | 7 | 1 + | 9 | 1 + | 0 | 2 + 3 | 1 | 3 + | 3 | 3 +(10 rows) + +SELECT lead(ten * 2, 1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + lead | ten | four +------+-----+------ + 0 | 0 | 0 + 8 | 0 | 0 + | 4 | 0 + 2 | 1 | 1 + 14 | 1 | 1 + 18 | 7 | 1 + | 9 | 1 + | 0 | 2 + 6 | 1 | 3 + | 3 | 3 +(10 rows) + +SELECT lead(ten * 2, 1, -1) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + lead | ten | four +------+-----+------ + 0 | 0 | 0 + 8 | 0 | 0 + -1 | 4 | 0 + 2 | 1 | 1 + 14 | 1 | 1 + 18 | 7 | 1 + -1 | 9 | 1 + -1 | 0 | 2 + 6 | 1 | 3 + -1 | 3 | 3 +(10 rows) + +SELECT lead(ten * 2, 1, -1.4) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten; + lead | ten | four +------+-----+------ + 0 | 0 | 0 + 8 | 0 | 0 + -1.4 | 4 | 0 + 2 | 1 | 1 + 14 | 1 | 1 + 18 | 7 | 1 + -1.4 | 9 | 1 + -1.4 | 0 | 2 + 6 | 1 | 3 + -1.4 | 3 | 3 +(10 rows) + +SELECT first_value(ten) OVER (PARTITION BY four ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + first_value | ten | four +-------------+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 0 | 4 | 0 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 1 | 9 | 1 + 0 | 0 | 2 + 1 | 1 | 3 + 1 | 3 | 3 +(10 rows) + +-- last_value returns the last row of the frame, which is CURRENT ROW in ORDER BY window. 
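This default-frame point is what produces the somewhat surprising results in the next few queries: with only ORDER BY, the implicit frame is RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW, so last_value() reports the end of the current peer group rather than the end of the partition. A tiny illustration (a VALUES list only, not part of the test file) of how an explicit frame changes that:

SELECT v,
       last_value(v) OVER (ORDER BY v) AS default_frame,
       last_value(v) OVER (ORDER BY v
                           ROWS BETWEEN UNBOUNDED PRECEDING
                                    AND UNBOUNDED FOLLOWING) AS whole_partition
FROM (VALUES (1), (2), (3)) AS t(v);
-- default_frame yields 1, 2, 3 row by row, while whole_partition is 3 for every row.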
+SELECT last_value(four) OVER (ORDER BY ten), ten, four FROM tenk1 WHERE unique2 < 10; + last_value | ten | four +------------+-----+------ + 0 | 0 | 0 + 0 | 0 | 2 + 0 | 0 | 0 + 1 | 1 | 1 + 1 | 1 | 3 + 1 | 1 | 1 + 3 | 3 | 3 + 0 | 4 | 0 + 1 | 7 | 1 + 1 | 9 | 1 +(10 rows) + +SELECT last_value(ten) OVER (PARTITION BY four), ten, four FROM + (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s + ORDER BY four, ten; + last_value | ten | four +------------+-----+------ + 4 | 0 | 0 + 4 | 0 | 0 + 4 | 4 | 0 + 9 | 1 | 1 + 9 | 1 | 1 + 9 | 7 | 1 + 9 | 9 | 1 + 0 | 0 | 2 + 3 | 1 | 3 + 3 | 3 | 3 +(10 rows) + +SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four + FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s; + nth_value | ten | four +-----------+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 0 | 4 | 0 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 1 | 9 | 1 + | 0 | 2 + | 1 | 3 + | 3 | 3 +(10 rows) + +SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER (PARTITION BY two ORDER BY ten) AS wsum +FROM tenk1 GROUP BY ten, two; + ten | two | gsum | wsum +-----+-----+-------+-------- + 0 | 0 | 45000 | 45000 + 2 | 0 | 47000 | 92000 + 4 | 0 | 49000 | 141000 + 6 | 0 | 51000 | 192000 + 8 | 0 | 53000 | 245000 + 1 | 1 | 46000 | 46000 + 3 | 1 | 48000 | 94000 + 5 | 1 | 50000 | 144000 + 7 | 1 | 52000 | 196000 + 9 | 1 | 54000 | 250000 +(10 rows) + +SELECT count(*) OVER (PARTITION BY four), four FROM (SELECT * FROM tenk1 WHERE two = 1)s WHERE unique2 < 10; + count | four +-------+------ + 4 | 1 + 4 | 1 + 4 | 1 + 4 | 1 + 2 | 3 + 2 | 3 +(6 rows) + +SELECT (count(*) OVER (PARTITION BY four ORDER BY ten) + + sum(hundred) OVER (PARTITION BY four ORDER BY ten))::varchar AS cntsum + FROM tenk1 WHERE unique2 < 10; + cntsum +-------- + 22 + 22 + 87 + 24 + 24 + 82 + 92 + 51 + 92 + 136 +(10 rows) + +-- opexpr with different windows evaluation. 
+SELECT * FROM( + SELECT count(*) OVER (PARTITION BY four ORDER BY ten) + + sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS total, + count(*) OVER (PARTITION BY four ORDER BY ten) AS fourcount, + sum(hundred) OVER (PARTITION BY two ORDER BY ten) AS twosum + FROM tenk1 +)sub +WHERE total <> fourcount + twosum; + total | fourcount | twosum +-------+-----------+-------- +(0 rows) + +SELECT avg(four) OVER (PARTITION BY four ORDER BY thousand / 100) FROM tenk1 WHERE unique2 < 10; + avg +------------------------ + 0.00000000000000000000 + 0.00000000000000000000 + 0.00000000000000000000 + 1.00000000000000000000 + 1.00000000000000000000 + 1.00000000000000000000 + 1.00000000000000000000 + 2.0000000000000000 + 3.0000000000000000 + 3.0000000000000000 +(10 rows) + +SELECT ten, two, sum(hundred) AS gsum, sum(sum(hundred)) OVER win AS wsum +FROM tenk1 GROUP BY ten, two WINDOW win AS (PARTITION BY two ORDER BY ten); + ten | two | gsum | wsum +-----+-----+-------+-------- + 0 | 0 | 45000 | 45000 + 2 | 0 | 47000 | 92000 + 4 | 0 | 49000 | 141000 + 6 | 0 | 51000 | 192000 + 8 | 0 | 53000 | 245000 + 1 | 1 | 46000 | 46000 + 3 | 1 | 48000 | 94000 + 5 | 1 | 50000 | 144000 + 7 | 1 | 52000 | 196000 + 9 | 1 | 54000 | 250000 +(10 rows) + +-- more than one window with GROUP BY +SELECT sum(salary), + row_number() OVER (ORDER BY depname), + sum(sum(salary)) OVER (ORDER BY depname DESC) +FROM empsalary GROUP BY depname; + sum | row_number | sum +-------+------------+------- + 25100 | 1 | 47100 + 7400 | 2 | 22000 + 14600 | 3 | 14600 +(3 rows) + +-- identical windows with different names +SELECT sum(salary) OVER w1, count(*) OVER w2 +FROM empsalary WINDOW w1 AS (ORDER BY salary), w2 AS (ORDER BY salary); + sum | count +-------+------- + 3500 | 1 + 7400 | 2 + 11600 | 3 + 16100 | 4 + 25700 | 6 + 25700 | 6 + 30700 | 7 + 41100 | 9 + 41100 | 9 + 47100 | 10 +(10 rows) + +-- subplan +SELECT lead(ten, (SELECT two FROM tenk1 WHERE s.unique2 = unique2)) OVER (PARTITION BY four ORDER BY ten) +FROM tenk1 s WHERE unique2 < 10; + lead +------ + 0 + 0 + 4 + 1 + 7 + 9 + + 0 + 3 + +(10 rows) + +-- empty table +SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 WHERE FALSE)s; + count +------- +(0 rows) + +-- mixture of agg/wfunc in the same window +SELECT sum(salary) OVER w, rank() OVER w FROM empsalary WINDOW w AS (PARTITION BY depname ORDER BY salary DESC); + sum | rank +-------+------ + 6000 | 1 + 16400 | 2 + 16400 | 2 + 20900 | 4 + 25100 | 5 + 3900 | 1 + 7400 | 2 + 5000 | 1 + 14600 | 2 + 14600 | 2 +(10 rows) + +-- strict aggs +SELECT empno, depname, salary, bonus, depadj, MIN(bonus) OVER (ORDER BY empno), MAX(depadj) OVER () FROM( + SELECT *, + CASE WHEN enroll_date < '2008-01-01' THEN 2008 - extract(YEAR FROM enroll_date) END * 500 AS bonus, + CASE WHEN + AVG(salary) OVER (PARTITION BY depname) < salary + THEN 200 END AS depadj FROM empsalary +)s; + empno | depname | salary | bonus | depadj | min | max +-------+-----------+--------+-------+--------+------+----- + 1 | sales | 5000 | 1000 | 200 | 1000 | 200 + 2 | personnel | 3900 | 1000 | 200 | 1000 | 200 + 3 | sales | 4800 | 500 | | 500 | 200 + 4 | sales | 4800 | 500 | | 500 | 200 + 5 | personnel | 3500 | 500 | | 500 | 200 + 7 | develop | 4200 | | | 500 | 200 + 8 | develop | 6000 | 1000 | 200 | 500 | 200 + 9 | develop | 4500 | | | 500 | 200 + 10 | develop | 5200 | 500 | 200 | 500 | 200 + 11 | develop | 5200 | 500 | 200 | 500 | 200 +(10 rows) + +-- window function over ungrouped agg over empty row set (bug before 9.1) +SELECT SUM(COUNT(f1)) OVER () FROM int4_tbl 
WHERE f1=42; + sum +----- + 0 +(1 row) + +-- window function with ORDER BY an expression involving aggregates (9.1 bug) +select ten, + sum(unique1) + sum(unique2) as res, + rank() over (order by sum(unique1) + sum(unique2)) as rank +from tenk1 +group by ten order by ten; + ten | res | rank +-----+----------+------ + 0 | 9976146 | 4 + 1 | 10114187 | 9 + 2 | 10059554 | 8 + 3 | 9878541 | 1 + 4 | 9881005 | 2 + 5 | 9981670 | 5 + 6 | 9947099 | 3 + 7 | 10120309 | 10 + 8 | 9991305 | 6 + 9 | 10040184 | 7 +(10 rows) + +-- window and aggregate with GROUP BY expression (9.2 bug) +explain (costs off) +select first_value(max(x)) over (), y + from (select unique1 as x, ten+four as y from tenk1) ss + group by y; + QUERY PLAN +--------------------------------------------- + WindowAgg + -> HashAggregate + Group Key: (tenk1.ten + tenk1.four) + -> Seq Scan on tenk1 +(4 rows) + +-- window functions returning pass-by-ref values from different rows +select x, lag(x, 1) over (order by x), lead(x, 3) over (order by x) +from (select x::numeric as x from generate_series(1,10) x); + x | lag | lead +----+-----+------ + 1 | | 4 + 2 | 1 | 5 + 3 | 2 | 6 + 4 | 3 | 7 + 5 | 4 | 8 + 6 | 5 | 9 + 7 | 6 | 10 + 8 | 7 | + 9 | 8 | + 10 | 9 | +(10 rows) + +-- test non-default frame specifications +SELECT four, ten, + sum(ten) over (partition by four order by ten), + last_value(ten) over (partition by four order by ten) +FROM (select distinct ten, four from tenk1) ss; + four | ten | sum | last_value +------+-----+-----+------------ + 0 | 0 | 0 | 0 + 0 | 2 | 2 | 2 + 0 | 4 | 6 | 4 + 0 | 6 | 12 | 6 + 0 | 8 | 20 | 8 + 1 | 1 | 1 | 1 + 1 | 3 | 4 | 3 + 1 | 5 | 9 | 5 + 1 | 7 | 16 | 7 + 1 | 9 | 25 | 9 + 2 | 0 | 0 | 0 + 2 | 2 | 2 | 2 + 2 | 4 | 6 | 4 + 2 | 6 | 12 | 6 + 2 | 8 | 20 | 8 + 3 | 1 | 1 | 1 + 3 | 3 | 4 | 3 + 3 | 5 | 9 | 5 + 3 | 7 | 16 | 7 + 3 | 9 | 25 | 9 +(20 rows) + +SELECT four, ten, + sum(ten) over (partition by four order by ten range between unbounded preceding and current row), + last_value(ten) over (partition by four order by ten range between unbounded preceding and current row) +FROM (select distinct ten, four from tenk1) ss; + four | ten | sum | last_value +------+-----+-----+------------ + 0 | 0 | 0 | 0 + 0 | 2 | 2 | 2 + 0 | 4 | 6 | 4 + 0 | 6 | 12 | 6 + 0 | 8 | 20 | 8 + 1 | 1 | 1 | 1 + 1 | 3 | 4 | 3 + 1 | 5 | 9 | 5 + 1 | 7 | 16 | 7 + 1 | 9 | 25 | 9 + 2 | 0 | 0 | 0 + 2 | 2 | 2 | 2 + 2 | 4 | 6 | 4 + 2 | 6 | 12 | 6 + 2 | 8 | 20 | 8 + 3 | 1 | 1 | 1 + 3 | 3 | 4 | 3 + 3 | 5 | 9 | 5 + 3 | 7 | 16 | 7 + 3 | 9 | 25 | 9 +(20 rows) + +SELECT four, ten, + sum(ten) over (partition by four order by ten range between unbounded preceding and unbounded following), + last_value(ten) over (partition by four order by ten range between unbounded preceding and unbounded following) +FROM (select distinct ten, four from tenk1) ss; + four | ten | sum | last_value +------+-----+-----+------------ + 0 | 0 | 20 | 8 + 0 | 2 | 20 | 8 + 0 | 4 | 20 | 8 + 0 | 6 | 20 | 8 + 0 | 8 | 20 | 8 + 1 | 1 | 25 | 9 + 1 | 3 | 25 | 9 + 1 | 5 | 25 | 9 + 1 | 7 | 25 | 9 + 1 | 9 | 25 | 9 + 2 | 0 | 20 | 8 + 2 | 2 | 20 | 8 + 2 | 4 | 20 | 8 + 2 | 6 | 20 | 8 + 2 | 8 | 20 | 8 + 3 | 1 | 25 | 9 + 3 | 3 | 25 | 9 + 3 | 5 | 25 | 9 + 3 | 7 | 25 | 9 + 3 | 9 | 25 | 9 +(20 rows) + +SELECT four, ten/4 as two, + sum(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row), + last_value(ten/4) over (partition by four order by ten/4 range between unbounded preceding and current row) +FROM (select distinct ten, four from tenk1) ss; + four | two | sum | 
last_value +------+-----+-----+------------ + 0 | 0 | 0 | 0 + 0 | 0 | 0 | 0 + 0 | 1 | 2 | 1 + 0 | 1 | 2 | 1 + 0 | 2 | 4 | 2 + 1 | 0 | 0 | 0 + 1 | 0 | 0 | 0 + 1 | 1 | 2 | 1 + 1 | 1 | 2 | 1 + 1 | 2 | 4 | 2 + 2 | 0 | 0 | 0 + 2 | 0 | 0 | 0 + 2 | 1 | 2 | 1 + 2 | 1 | 2 | 1 + 2 | 2 | 4 | 2 + 3 | 0 | 0 | 0 + 3 | 0 | 0 | 0 + 3 | 1 | 2 | 1 + 3 | 1 | 2 | 1 + 3 | 2 | 4 | 2 +(20 rows) + +SELECT four, ten/4 as two, + sum(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row), + last_value(ten/4) over (partition by four order by ten/4 rows between unbounded preceding and current row) +FROM (select distinct ten, four from tenk1) ss; + four | two | sum | last_value +------+-----+-----+------------ + 0 | 0 | 0 | 0 + 0 | 0 | 0 | 0 + 0 | 1 | 1 | 1 + 0 | 1 | 2 | 1 + 0 | 2 | 4 | 2 + 1 | 0 | 0 | 0 + 1 | 0 | 0 | 0 + 1 | 1 | 1 | 1 + 1 | 1 | 2 | 1 + 1 | 2 | 4 | 2 + 2 | 0 | 0 | 0 + 2 | 0 | 0 | 0 + 2 | 1 | 1 | 1 + 2 | 1 | 2 | 1 + 2 | 2 | 4 | 2 + 3 | 0 | 0 | 0 + 3 | 0 | 0 | 0 + 3 | 1 | 1 | 1 + 3 | 1 | 2 | 1 + 3 | 2 | 4 | 2 +(20 rows) + +SELECT sum(unique1) over (order by four range between current row and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 33 | 5 | 1 + 33 | 9 | 1 + 33 | 1 | 1 + 18 | 6 | 2 + 18 | 2 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (rows between current row and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 4 | 0 + 41 | 2 | 2 + 39 | 1 | 1 + 38 | 6 | 2 + 32 | 9 | 1 + 23 | 8 | 0 + 15 | 5 | 1 + 10 | 3 | 3 + 7 | 7 | 3 + 0 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 7 | 4 | 0 + 13 | 2 | 2 + 22 | 1 | 1 + 26 | 6 | 2 + 29 | 9 | 1 + 31 | 8 | 0 + 32 | 5 | 1 + 23 | 3 | 3 + 15 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude no others), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 7 | 4 | 0 + 13 | 2 | 2 + 22 | 1 | 1 + 26 | 6 | 2 + 29 | 9 | 1 + 31 | 8 | 0 + 32 | 5 | 1 + 23 | 3 | 3 + 15 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 3 | 4 | 0 + 11 | 2 | 2 + 21 | 1 | 1 + 20 | 6 | 2 + 20 | 9 | 1 + 23 | 8 | 0 + 27 | 5 | 1 + 20 | 3 | 3 + 8 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 4 | 0 + | 2 | 2 + | 1 | 1 + | 6 | 2 + | 9 | 1 + | 8 | 0 + | 5 | 1 + | 3 | 3 + | 7 | 3 + | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 4 | 4 | 0 + 2 | 2 | 2 + 1 | 1 | 1 + 6 | 6 | 2 + 9 | 9 | 1 + 8 | 8 | 0 + 5 | 5 | 1 + 3 | 3 | 3 + 7 | 7 | 3 + 0 | 0 | 0 +(10 rows) + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + first_value | unique1 | four +-------------+---------+------ + 8 | 0 | 0 + 4 | 8 | 0 + 5 | 4 | 0 + 9 | 5 | 1 + 1 | 9 | 1 + 
6 | 1 | 1 + 2 | 6 | 2 + 3 | 2 | 2 + 7 | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + first_value | unique1 | four +-------------+---------+------ + | 0 | 0 + 5 | 8 | 0 + 5 | 4 | 0 + | 5 | 1 + 6 | 9 | 1 + 6 | 1 | 1 + 3 | 6 | 2 + 3 | 2 | 2 + | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT first_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + first_value | unique1 | four +-------------+---------+------ + 0 | 0 | 0 + 8 | 8 | 0 + 4 | 4 | 0 + 5 | 5 | 1 + 9 | 9 | 1 + 1 | 1 | 1 + 6 | 6 | 2 + 2 | 2 | 2 + 3 | 3 | 3 + 7 | 7 | 3 +(10 rows) + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + last_value | unique1 | four +------------+---------+------ + 4 | 0 | 0 + 5 | 8 | 0 + 9 | 4 | 0 + 1 | 5 | 1 + 6 | 9 | 1 + 2 | 1 | 1 + 3 | 6 | 2 + 7 | 2 | 2 + 7 | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + last_value | unique1 | four +------------+---------+------ + | 0 | 0 + 5 | 8 | 0 + 9 | 4 | 0 + | 5 | 1 + 6 | 9 | 1 + 2 | 1 | 1 + 3 | 6 | 2 + 7 | 2 | 2 + | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT last_value(unique1) over (ORDER BY four rows between current row and 2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + last_value | unique1 | four +------------+---------+------ + 0 | 0 | 0 + 5 | 8 | 0 + 9 | 4 | 0 + 5 | 5 | 1 + 6 | 9 | 1 + 2 | 1 | 1 + 3 | 6 | 2 + 7 | 2 | 2 + 3 | 3 | 3 + 7 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (rows between 2 preceding and 1 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 4 | 0 + 4 | 2 | 2 + 6 | 1 | 1 + 3 | 6 | 2 + 7 | 9 | 1 + 15 | 8 | 0 + 17 | 5 | 1 + 13 | 3 | 3 + 8 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between 1 following and 3 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 9 | 4 | 0 + 16 | 2 | 2 + 23 | 1 | 1 + 22 | 6 | 2 + 16 | 9 | 1 + 15 | 8 | 0 + 10 | 5 | 1 + 7 | 3 | 3 + 0 | 7 | 3 + | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between unbounded preceding and 1 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 6 | 4 | 0 + 7 | 2 | 2 + 13 | 1 | 1 + 22 | 6 | 2 + 30 | 9 | 1 + 35 | 8 | 0 + 38 | 5 | 1 + 45 | 3 | 3 + 45 | 7 | 3 + 45 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (w range between current row and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 33 | 5 | 1 + 33 | 9 | 1 + 33 | 1 | 1 + 18 | 6 | 2 + 18 | 2 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + 12 | 0 | 0 + 4 | 8 | 0 + 8 | 4 | 0 + 22 | 5 | 1 + 18 | 9 | 1 + 26 | 1 | 1 + 29 | 6 | 2 + 33 | 2 | 2 + 42 | 3 | 3 + 38 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude group), + unique1, four +FROM tenk1 WHERE 
unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 35 | 3 | 3 + 35 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (w range between unbounded preceding and current row exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); + sum | unique1 | four +-----+---------+------ + 0 | 0 | 0 + 8 | 8 | 0 + 4 | 4 | 0 + 17 | 5 | 1 + 21 | 9 | 1 + 13 | 1 | 1 + 33 | 6 | 2 + 29 | 2 | 2 + 38 | 3 | 3 + 42 | 7 | 3 +(10 rows) + +SELECT first_value(unique1) over w, + nth_value(unique1, 2) over w AS nth_2, + last_value(unique1) over w, unique1, four +FROM tenk1 WHERE unique1 < 10 +WINDOW w AS (order by four range between current row and unbounded following); + first_value | nth_2 | last_value | unique1 | four +-------------+-------+------------+---------+------ + 0 | 8 | 7 | 0 | 0 + 0 | 8 | 7 | 8 | 0 + 0 | 8 | 7 | 4 | 0 + 5 | 9 | 7 | 5 | 1 + 5 | 9 | 7 | 9 | 1 + 5 | 9 | 7 | 1 | 1 + 6 | 2 | 7 | 6 | 2 + 6 | 2 | 7 | 2 | 2 + 3 | 7 | 7 | 3 | 3 + 3 | 7 | 7 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over + (order by unique1 + rows (SELECT unique1 FROM tenk1 ORDER BY unique1 LIMIT 1) + 1 PRECEDING), + unique1 +FROM tenk1 WHERE unique1 < 10; + sum | unique1 +-----+--------- + 0 | 0 + 1 | 1 + 3 | 2 + 5 | 3 + 7 | 4 + 9 | 5 + 11 | 6 + 13 | 7 + 15 | 8 + 17 | 9 +(10 rows) + +CREATE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following) as sum_rows + FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +----------------------------------------------------------------------------------- + SELECT i, + + sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude current row) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 + 5 | 10 + 6 | 12 + 7 | 14 + 8 | 16 + 9 | 18 + 10 | 9 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +------------------------------------------------------------------------------------------------------- + SELECT i, + + sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE CURRENT ROW) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude group) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 2 + 2 | 4 + 3 | 6 + 4 | 8 + 5 | 10 + 6 | 12 + 7 | 14 + 8 | 16 + 9 | 18 + 10 | 9 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +------------------------------------------------------------------------------------------------- + SELECT i, + + sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE GROUP) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude ties) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 
1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +------------------------------------------------------------------------------------------------ + SELECT i, + + sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING EXCLUDE TIES) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i rows between 1 preceding and 1 following + exclude no others) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +----------------------------------------------------------------------------------- + SELECT i, + + sum(i) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +CREATE OR REPLACE TEMP VIEW v_window AS + SELECT i, sum(i) over (order by i groups between 1 preceding and 1 following) as sum_rows FROM generate_series(1, 10) i; +SELECT * FROM v_window; + i | sum_rows +----+---------- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 12 + 5 | 15 + 6 | 18 + 7 | 21 + 8 | 24 + 9 | 27 + 10 | 19 +(10 rows) + +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +------------------------------------------------------------------------------------- + SELECT i, + + sum(i) OVER (ORDER BY i GROUPS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS sum_rows+ + FROM generate_series(1, 10) i(i); +(1 row) + +DROP VIEW v_window; +CREATE TEMP VIEW v_window AS + SELECT i, min(i) over (order by i range between '1 day' preceding and '10 days' following) as min_i + FROM generate_series(now(), now()+'100 days'::interval, '1 hour') i; +SELECT pg_get_viewdef('v_window'); + pg_get_viewdef +----------------------------------------------------------------------------------------------------------------------- + SELECT i, + + min(i) OVER (ORDER BY i RANGE BETWEEN '@ 1 day'::interval PRECEDING AND '@ 10 days'::interval FOLLOWING) AS min_i+ + FROM generate_series(now(), (now() + '@ 100 days'::interval), '@ 1 hour'::interval) i(i); +(1 row) + +-- RANGE offset PRECEDING/FOLLOWING tests +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four desc range between 2::int8 preceding and 1::int2 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 3 | 3 + | 7 | 3 + 10 | 6 | 2 + 10 | 2 | 2 + 18 | 9 | 1 + 18 | 5 | 1 + 18 | 1 | 1 + 23 | 0 | 0 + 23 | 8 | 0 + 23 | 4 | 0 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude no others), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 
12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 1::int2 preceding exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude ties), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 33 | 0 | 0 + 41 | 8 | 0 + 37 | 4 | 0 + 35 | 5 | 1 + 39 | 9 | 1 + 31 | 1 | 1 + 43 | 6 | 2 + 39 | 2 | 2 + 26 | 3 | 3 + 30 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four range between 2::int8 preceding and 6::int2 following exclude group), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 33 | 0 | 0 + 33 | 8 | 0 + 33 | 4 | 0 + 30 | 5 | 1 + 30 | 9 | 1 + 30 | 1 | 1 + 37 | 6 | 2 + 37 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 4 | 0 | 0 + 12 | 4 | 0 + 12 | 8 | 0 + 6 | 1 | 1 + 15 | 5 | 1 + 14 | 9 | 1 + 8 | 2 | 2 + 8 | 6 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (partition by four order by unique1 range between 5::int8 preceding and 6::int2 following + exclude current row),unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 4 | 0 | 0 + 8 | 4 | 0 + 4 | 8 | 0 + 5 | 1 | 1 + 10 | 5 | 1 + 5 | 9 | 1 + 6 | 2 | 2 + 2 | 6 | 2 + 7 | 3 | 3 + 3 | 7 | 3 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 34900 | 5000 | 10-01-2006 + 34900 | 6000 | 10-01-2006 + 38400 | 3900 | 12-23-2006 + 47100 | 4800 | 08-01-2007 + 47100 | 5200 | 08-01-2007 + 47100 | 4800 | 08-08-2007 + 47100 | 5200 | 08-15-2007 + 36100 | 3500 | 12-10-2007 + 32200 | 4500 | 01-01-2008 + 32200 | 4200 | 01-01-2008 +(10 rows) + +select sum(salary) over (order by enroll_date desc range between '1 year'::interval preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 32200 | 4200 | 01-01-2008 + 32200 | 4500 | 01-01-2008 + 36100 | 3500 | 12-10-2007 + 47100 | 5200 | 08-15-2007 + 47100 | 4800 | 08-08-2007 + 47100 | 4800 | 08-01-2007 + 47100 | 5200 | 08-01-2007 + 38400 | 3900 | 12-23-2006 + 34900 | 5000 | 10-01-2006 + 34900 | 6000 | 10-01-2006 +(10 rows) + +select sum(salary) over (order by enroll_date desc range between '1 year'::interval following and '1 year'::interval following), + salary, enroll_date from empsalary; + sum | salary | enroll_date +-----+--------+------------- + | 4200 | 01-01-2008 + | 4500 | 01-01-2008 + | 3500 | 12-10-2007 + | 5200 | 08-15-2007 + | 
4800 | 08-08-2007 + | 4800 | 08-01-2007 + | 5200 | 08-01-2007 + | 3900 | 12-23-2006 + | 5000 | 10-01-2006 + | 6000 | 10-01-2006 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude current row), salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 29900 | 5000 | 10-01-2006 + 28900 | 6000 | 10-01-2006 + 34500 | 3900 | 12-23-2006 + 42300 | 4800 | 08-01-2007 + 41900 | 5200 | 08-01-2007 + 42300 | 4800 | 08-08-2007 + 41900 | 5200 | 08-15-2007 + 32600 | 3500 | 12-10-2007 + 27700 | 4500 | 01-01-2008 + 28000 | 4200 | 01-01-2008 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude group), salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 23900 | 5000 | 10-01-2006 + 23900 | 6000 | 10-01-2006 + 34500 | 3900 | 12-23-2006 + 37100 | 4800 | 08-01-2007 + 37100 | 5200 | 08-01-2007 + 42300 | 4800 | 08-08-2007 + 41900 | 5200 | 08-15-2007 + 32600 | 3500 | 12-10-2007 + 23500 | 4500 | 01-01-2008 + 23500 | 4200 | 01-01-2008 +(10 rows) + +select sum(salary) over (order by enroll_date range between '1 year'::interval preceding and '1 year'::interval following + exclude ties), salary, enroll_date from empsalary; + sum | salary | enroll_date +-------+--------+------------- + 28900 | 5000 | 10-01-2006 + 29900 | 6000 | 10-01-2006 + 38400 | 3900 | 12-23-2006 + 41900 | 4800 | 08-01-2007 + 42300 | 5200 | 08-01-2007 + 47100 | 4800 | 08-08-2007 + 47100 | 5200 | 08-15-2007 + 36100 | 3500 | 12-10-2007 + 28000 | 4500 | 01-01-2008 + 27700 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by salary range between 1000 preceding and 1000 following), + lead(salary) over(order by salary range between 1000 preceding and 1000 following), + nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following), + salary from empsalary; + first_value | lead | nth_value | salary +-------------+------+-----------+-------- + 3500 | 3900 | 3500 | 3500 + 3500 | 4200 | 3500 | 3900 + 3500 | 4500 | 3500 | 4200 + 3500 | 4800 | 3500 | 4500 + 3900 | 4800 | 3900 | 4800 + 3900 | 5000 | 3900 | 4800 + 4200 | 5200 | 4200 | 5000 + 4200 | 5200 | 4200 | 5200 + 4200 | 6000 | 4200 | 5200 + 5000 | | 5000 | 6000 +(10 rows) + +select last_value(salary) over(order by salary range between 1000 preceding and 1000 following), + lag(salary) over(order by salary range between 1000 preceding and 1000 following), + salary from empsalary; + last_value | lag | salary +------------+------+-------- + 4500 | | 3500 + 4800 | 3500 | 3900 + 5200 | 3900 | 4200 + 5200 | 4200 | 4500 + 5200 | 4500 | 4800 + 5200 | 4800 | 4800 + 6000 | 4800 | 5000 + 6000 | 5000 | 5200 + 6000 | 5200 | 5200 + 6000 | 5200 | 6000 +(10 rows) + +select first_value(salary) over(order by salary range between 1000 following and 3000 following + exclude current row), + lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties), + nth_value(salary, 1) over(order by salary range between 1000 following and 3000 following + exclude ties), + salary from empsalary; + first_value | lead | nth_value | salary +-------------+------+-----------+-------- + 4500 | 3900 | 4500 | 3500 + 5000 | 4200 | 5000 | 3900 + 5200 | 4500 | 5200 | 4200 + 6000 | 4800 | 6000 | 4500 + 6000 | 4800 | 6000 | 4800 + 6000 | 5000 | 6000 | 4800 + 6000 | 5200 | 6000 | 5000 + | 5200 | | 5200 + 
| 6000 | | 5200 + | | | 6000 +(10 rows) + +select last_value(salary) over(order by salary range between 1000 following and 3000 following + exclude group), + lag(salary) over(order by salary range between 1000 following and 3000 following exclude group), + salary from empsalary; + last_value | lag | salary +------------+------+-------- + 6000 | | 3500 + 6000 | 3500 | 3900 + 6000 | 3900 | 4200 + 6000 | 4200 | 4500 + 6000 | 4500 | 4800 + 6000 | 4800 | 4800 + 6000 | 4800 | 5000 + | 5000 | 5200 + | 5200 | 5200 + | 5200 | 6000 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 5000 | 5200 | 5000 | 10-01-2006 + 6000 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 4200 | 4500 | 01-01-2008 + 5000 | 4200 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude ties), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 5000 | 5200 | 5000 | 10-01-2006 + 6000 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 4500 | 4500 | 01-01-2008 + 5000 | 4200 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude group), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude group), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 3900 | 5200 | 5000 | 10-01-2006 + 3900 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 3500 | 4500 | 01-01-2008 + 5000 | 3500 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude current row), + last_value(salary) over(order by enroll_date range between unbounded preceding and '1 year'::interval following + exclude current row), + salary, enroll_date from empsalary; + first_value | last_value | salary | enroll_date +-------------+------------+--------+------------- + 6000 | 5200 | 5000 | 10-01-2006 + 5000 | 5200 | 6000 | 10-01-2006 + 5000 | 3500 | 3900 | 12-23-2006 + 5000 | 4200 | 4800 | 08-01-2007 + 5000 | 4200 | 5200 | 08-01-2007 + 5000 | 4200 | 4800 | 08-08-2007 + 5000 | 4200 | 5200 | 08-15-2007 + 5000 | 4200 | 3500 | 12-10-2007 + 5000 | 4200 | 4500 | 01-01-2008 + 5000 | 4500 | 
4200 | 01-01-2008 +(10 rows) + +-- RANGE offset PRECEDING/FOLLOWING with null values +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x asc nulls first range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + | 42 | 42 | 43 + | 43 | 42 | 43 + 1 | 1 | 1 | 3 + 2 | 2 | 1 | 4 + 3 | 3 | 1 | 5 + 4 | 4 | 2 | 5 + 5 | 5 | 3 | 5 +(7 rows) + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x asc nulls last range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + 1 | 1 | 1 | 3 + 2 | 2 | 1 | 4 + 3 | 3 | 1 | 5 + 4 | 4 | 2 | 5 + 5 | 5 | 3 | 5 + | 42 | 42 | 43 + | 43 | 42 | 43 +(7 rows) + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x desc nulls first range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + | 43 | 43 | 42 + | 42 | 43 | 42 + 5 | 5 | 5 | 3 + 4 | 4 | 5 | 2 + 3 | 3 | 5 | 1 + 2 | 2 | 4 | 1 + 1 | 1 | 3 | 1 +(7 rows) + +select x, y, + first_value(y) over w, + last_value(y) over w +from + (select x, x as y from generate_series(1,5) as x + union all select null, 42 + union all select null, 43) ss +window w as + (order by x desc nulls last range between 2 preceding and 2 following); + x | y | first_value | last_value +---+----+-------------+------------ + 5 | 5 | 5 | 3 + 4 | 4 | 5 | 2 + 3 | 3 | 5 | 1 + 2 | 2 | 4 | 1 + 1 | 1 | 3 | 1 + | 42 | 42 | 43 + | 43 | 42 | 43 +(7 rows) + +-- There is a syntactic ambiguity in the SQL standard. Since +-- UNBOUNDED is a non-reserved word, it could be the name of a +-- function parameter and be used as an expression. There is a +-- grammar hack to resolve such cases as the keyword. The following +-- tests record this behavior. +CREATE FUNCTION unbounded_syntax_test1a(x int) RETURNS TABLE (a int, b int, c int) +LANGUAGE SQL +BEGIN ATOMIC + SELECT sum(unique1) over (rows between x preceding and x following), + unique1, four + FROM tenk1 WHERE unique1 < 10; +END; +CREATE FUNCTION unbounded_syntax_test1b(x int) RETURNS TABLE (a int, b int, c int) +LANGUAGE SQL +AS $$ + SELECT sum(unique1) over (rows between x preceding and x following), + unique1, four + FROM tenk1 WHERE unique1 < 10; +$$; +-- These will apply the argument to the window specification inside the function. 
+SELECT * FROM unbounded_syntax_test1a(2); + a | b | c +----+---+--- + 7 | 4 | 0 + 13 | 2 | 2 + 22 | 1 | 1 + 26 | 6 | 2 + 29 | 9 | 1 + 31 | 8 | 0 + 32 | 5 | 1 + 23 | 3 | 3 + 15 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +SELECT * FROM unbounded_syntax_test1b(2); + a | b | c +----+---+--- + 7 | 4 | 0 + 13 | 2 | 2 + 22 | 1 | 1 + 26 | 6 | 2 + 29 | 9 | 1 + 31 | 8 | 0 + 32 | 5 | 1 + 23 | 3 | 3 + 15 | 7 | 3 + 10 | 0 | 0 +(10 rows) + +CREATE FUNCTION unbounded_syntax_test2a(unbounded int) RETURNS TABLE (a int, b int, c int) +LANGUAGE SQL +BEGIN ATOMIC + SELECT sum(unique1) over (rows between unbounded preceding and unbounded following), + unique1, four + FROM tenk1 WHERE unique1 < 10; +END; +CREATE FUNCTION unbounded_syntax_test2b(unbounded int) RETURNS TABLE (a int, b int, c int) +LANGUAGE SQL +AS $$ + SELECT sum(unique1) over (rows between unbounded preceding and unbounded following), + unique1, four + FROM tenk1 WHERE unique1 < 10; +$$; +-- These will not apply the argument but instead treat UNBOUNDED as a keyword. +SELECT * FROM unbounded_syntax_test2a(2); + a | b | c +----+---+--- + 45 | 4 | 0 + 45 | 2 | 2 + 45 | 1 | 1 + 45 | 6 | 2 + 45 | 9 | 1 + 45 | 8 | 0 + 45 | 5 | 1 + 45 | 3 | 3 + 45 | 7 | 3 + 45 | 0 | 0 +(10 rows) + +SELECT * FROM unbounded_syntax_test2b(2); + a | b | c +----+---+--- + 45 | 4 | 0 + 45 | 2 | 2 + 45 | 1 | 1 + 45 | 6 | 2 + 45 | 9 | 1 + 45 | 8 | 0 + 45 | 5 | 1 + 45 | 3 | 3 + 45 | 7 | 3 + 45 | 0 | 0 +(10 rows) + +DROP FUNCTION unbounded_syntax_test1a, unbounded_syntax_test1b, + unbounded_syntax_test2a, unbounded_syntax_test2b; +-- Other tests with token UNBOUNDED in potentially problematic position +CREATE FUNCTION unbounded(x int) RETURNS int LANGUAGE SQL IMMUTABLE RETURN x; +SELECT sum(unique1) over (rows between 1 preceding and 1 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 6 | 4 | 0 + 7 | 2 | 2 + 9 | 1 | 1 + 16 | 6 | 2 + 23 | 9 | 1 + 22 | 8 | 0 + 16 | 5 | 1 + 15 | 3 | 3 + 10 | 7 | 3 + 7 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between unbounded(1) preceding and unbounded(1) following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 6 | 4 | 0 + 7 | 2 | 2 + 9 | 1 | 1 + 16 | 6 | 2 + 23 | 9 | 1 + 22 | 8 | 0 + 16 | 5 | 1 + 15 | 3 | 3 + 10 | 7 | 3 + 7 | 0 | 0 +(10 rows) + +SELECT sum(unique1) over (rows between unbounded.x preceding and unbounded.x following), + unique1, four +FROM tenk1, (values (1)) as unbounded(x) WHERE unique1 < 10; +ERROR: argument of ROWS must not contain variables +LINE 1: SELECT sum(unique1) over (rows between unbounded.x preceding... 
+ ^ +DROP FUNCTION unbounded; +-- Check overflow behavior for various integer sizes +select x, last_value(x) over (order by x::smallint range between current row and 2147450884 following) +from generate_series(32764, 32766) x; + x | last_value +-------+------------ + 32764 | 32766 + 32765 | 32766 + 32766 | 32766 +(3 rows) + +select x, last_value(x) over (order by x::smallint desc range between current row and 2147450885 following) +from generate_series(-32766, -32764) x; + x | last_value +--------+------------ + -32764 | -32766 + -32765 | -32766 + -32766 | -32766 +(3 rows) + +select x, last_value(x) over (order by x range between current row and 4 following) +from generate_series(2147483644, 2147483646) x; + x | last_value +------------+------------ + 2147483644 | 2147483646 + 2147483645 | 2147483646 + 2147483646 | 2147483646 +(3 rows) + +select x, last_value(x) over (order by x desc range between current row and 5 following) +from generate_series(-2147483646, -2147483644) x; + x | last_value +-------------+------------- + -2147483644 | -2147483646 + -2147483645 | -2147483646 + -2147483646 | -2147483646 +(3 rows) + +select x, last_value(x) over (order by x range between current row and 4 following) +from generate_series(9223372036854775804, 9223372036854775806) x; + x | last_value +---------------------+--------------------- + 9223372036854775804 | 9223372036854775806 + 9223372036854775805 | 9223372036854775806 + 9223372036854775806 | 9223372036854775806 +(3 rows) + +select x, last_value(x) over (order by x desc range between current row and 5 following) +from generate_series(-9223372036854775806, -9223372036854775804) x; + x | last_value +----------------------+---------------------- + -9223372036854775804 | -9223372036854775806 + -9223372036854775805 | -9223372036854775806 + -9223372036854775806 | -9223372036854775806 +(3 rows) + +-- Test in_range for other numeric datatypes +create temp table numerics( + id int, + f_float4 float4, + f_float8 float8, + f_numeric numeric +); +insert into numerics values +(0, '-infinity', '-infinity', '-infinity'), +(1, -3, -3, -3), +(2, -1, -1, -1), +(3, 0, 0, 0), +(4, 1.1, 1.1, 1.1), +(5, 1.12, 1.12, 1.12), +(6, 2, 2, 2), +(7, 100, 100, 100), +(8, 'infinity', 'infinity', 'infinity'), +(9, 'NaN', 'NaN', 'NaN'); +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1 preceding and 1 following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 3 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1 preceding and 1.1::float4 following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 4 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 'inf' preceding and 'inf' following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 0 | 8 + 2 | -1 | 0 | 8 + 3 | 0 | 0 | 8 + 4 | 1.1 | 0 | 8 + 5 | 1.12 | 
0 | 8 + 6 | 2 | 0 | 8 + 7 | 100 | 0 | 8 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 'inf' preceding and 'inf' preceding); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 0 | 0 + 2 | -1 | 0 | 0 + 3 | 0 | 0 | 0 + 4 | 1.1 | 0 | 0 + 5 | 1.12 | 0 | 0 + 6 | 2 | 0 | 0 + 7 | 100 | 0 | 0 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 'inf' following and 'inf' following); + id | f_float4 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 8 | 8 + 2 | -1 | 8 | 8 + 3 | 0 | 8 | 8 + 4 | 1.1 | 8 | 8 + 5 | 1.12 | 8 | 8 + 6 | 2 | 8 | 8 + 7 | 100 | 8 | 8 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float4, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float4 range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed +ERROR: invalid preceding or following size in window function +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1 preceding and 1 following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 3 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1 preceding and 1.1::float8 following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 4 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 'inf' preceding and 'inf' following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 0 | 8 + 2 | -1 | 0 | 8 + 3 | 0 | 0 | 8 + 4 | 1.1 | 0 | 8 + 5 | 1.12 | 0 | 8 + 6 | 2 | 0 | 8 + 7 | 100 | 0 | 8 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 'inf' preceding and 'inf' preceding); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 0 | 0 + 2 | -1 | 0 | 0 + 3 | 0 | 0 | 0 + 4 | 1.1 | 0 | 0 + 5 | 1.12 | 0 | 0 + 6 | 2 | 0 | 0 + 7 | 100 | 0 | 0 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 'inf' following and 'inf' following); + id | f_float8 | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 8 | 8 + 2 | -1 | 8 | 8 + 3 | 0 | 8 | 8 + 4 | 1.1 | 8 | 8 + 5 | 1.12 | 8 | 8 + 6 | 2 | 8 | 8 + 7 | 100 | 8 | 8 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, 
f_float8, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_float8 range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed +ERROR: invalid preceding or following size in window function +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1 following); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 3 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1.1::numeric following); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 1 | 1 + 2 | -1 | 2 | 3 + 3 | 0 | 2 | 4 + 4 | 1.1 | 4 | 6 + 5 | 1.12 | 4 | 6 + 6 | 2 | 4 | 6 + 7 | 100 | 7 | 7 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1 preceding and 1.1::float8 following); -- currently unsupported +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type numeric and offset type double precision +LINE 4: 1 preceding and 1.1::float8 following); + ^ +HINT: Cast the offset value to an appropriate type. +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 'inf' preceding and 'inf' following); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 0 | 8 + 2 | -1 | 0 | 8 + 3 | 0 | 0 | 8 + 4 | 1.1 | 0 | 8 + 5 | 1.12 | 0 | 8 + 6 | 2 | 0 | 8 + 7 | 100 | 0 | 8 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 'inf' preceding and 'inf' preceding); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 0 + 1 | -3 | 0 | 0 + 2 | -1 | 0 | 0 + 3 | 0 | 0 | 0 + 4 | 1.1 | 0 | 0 + 5 | 1.12 | 0 | 0 + 6 | 2 | 0 | 0 + 7 | 100 | 0 | 0 + 8 | Infinity | 0 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 'inf' following and 'inf' following); + id | f_numeric | first_value | last_value +----+-----------+-------------+------------ + 0 | -Infinity | 0 | 8 + 1 | -3 | 8 | 8 + 2 | -1 | 8 | 8 + 3 | 0 | 8 | 8 + 4 | 1.1 | 8 | 8 + 5 | 1.12 | 8 | 8 + 6 | 2 | 8 | 8 + 7 | 100 | 8 | 8 + 8 | Infinity | 8 | 8 + 9 | NaN | 9 | 9 +(10 rows) + +select id, f_numeric, first_value(id) over w, last_value(id) over w +from numerics +window w as (order by f_numeric range between + 1.1 preceding and 'NaN' following); -- error, NaN disallowed +ERROR: invalid preceding or following size in window function +-- Test in_range for other datetime datatypes +create temp table datetimes( + id int, + f_time time, + f_timetz timetz, + f_interval interval, + f_timestamptz timestamptz, + f_timestamp timestamp +); +insert into datetimes values +(1, '11:00', '11:00 BST', '1 year', '2000-10-19 10:23:54+01', '2000-10-19 10:23:54'), +(2, '12:00', '12:00 
BST', '2 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), +(3, '13:00', '13:00 BST', '3 years', '2001-10-19 10:23:54+01', '2001-10-19 10:23:54'), +(4, '14:00', '14:00 BST', '4 years', '2002-10-19 10:23:54+01', '2002-10-19 10:23:54'), +(5, '15:00', '15:00 BST', '5 years', '2003-10-19 10:23:54+01', '2003-10-19 10:23:54'), +(6, '15:00', '15:00 BST', '5 years', '2004-10-19 10:23:54+01', '2004-10-19 10:23:54'), +(7, '17:00', '17:00 BST', '7 years', '2005-10-19 10:23:54+01', '2005-10-19 10:23:54'), +(8, '18:00', '18:00 BST', '8 years', '2006-10-19 10:23:54+01', '2006-10-19 10:23:54'), +(9, '19:00', '19:00 BST', '9 years', '2007-10-19 10:23:54+01', '2007-10-19 10:23:54'), +(10, '20:00', '20:00 BST', '10 years', '2008-10-19 10:23:54+01', '2008-10-19 10:23:54'); +select id, f_time, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_time range between + '70 min'::interval preceding and '2 hours'::interval following); + id | f_time | first_value | last_value +----+----------+-------------+------------ + 1 | 11:00:00 | 1 | 3 + 2 | 12:00:00 | 1 | 4 + 3 | 13:00:00 | 2 | 6 + 4 | 14:00:00 | 3 | 6 + 5 | 15:00:00 | 4 | 7 + 6 | 15:00:00 | 4 | 7 + 7 | 17:00:00 | 7 | 9 + 8 | 18:00:00 | 7 | 10 + 9 | 19:00:00 | 8 | 10 + 10 | 20:00:00 | 9 | 10 +(10 rows) + +select id, f_time, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_time desc range between + '70 min' preceding and '2 hours' following); + id | f_time | first_value | last_value +----+----------+-------------+------------ + 10 | 20:00:00 | 10 | 8 + 9 | 19:00:00 | 10 | 7 + 8 | 18:00:00 | 9 | 7 + 7 | 17:00:00 | 8 | 5 + 6 | 15:00:00 | 6 | 3 + 5 | 15:00:00 | 6 | 3 + 4 | 14:00:00 | 6 | 2 + 3 | 13:00:00 | 4 | 1 + 2 | 12:00:00 | 3 | 1 + 1 | 11:00:00 | 2 | 1 +(10 rows) + +select id, f_timetz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timetz range between + '70 min'::interval preceding and '2 hours'::interval following); + id | f_timetz | first_value | last_value +----+-------------+-------------+------------ + 1 | 11:00:00+01 | 1 | 3 + 2 | 12:00:00+01 | 1 | 4 + 3 | 13:00:00+01 | 2 | 6 + 4 | 14:00:00+01 | 3 | 6 + 5 | 15:00:00+01 | 4 | 7 + 6 | 15:00:00+01 | 4 | 7 + 7 | 17:00:00+01 | 7 | 9 + 8 | 18:00:00+01 | 7 | 10 + 9 | 19:00:00+01 | 8 | 10 + 10 | 20:00:00+01 | 9 | 10 +(10 rows) + +select id, f_timetz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timetz desc range between + '70 min' preceding and '2 hours' following); + id | f_timetz | first_value | last_value +----+-------------+-------------+------------ + 10 | 20:00:00+01 | 10 | 8 + 9 | 19:00:00+01 | 10 | 7 + 8 | 18:00:00+01 | 9 | 7 + 7 | 17:00:00+01 | 8 | 5 + 6 | 15:00:00+01 | 6 | 3 + 5 | 15:00:00+01 | 6 | 3 + 4 | 14:00:00+01 | 6 | 2 + 3 | 13:00:00+01 | 4 | 1 + 2 | 12:00:00+01 | 3 | 1 + 1 | 11:00:00+01 | 2 | 1 +(10 rows) + +select id, f_interval, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_interval range between + '1 year'::interval preceding and '1 year'::interval following); + id | f_interval | first_value | last_value +----+------------+-------------+------------ + 1 | @ 1 year | 1 | 2 + 2 | @ 2 years | 1 | 3 + 3 | @ 3 years | 2 | 4 + 4 | @ 4 years | 3 | 6 + 5 | @ 5 years | 4 | 6 + 6 | @ 5 years | 4 | 6 + 7 | @ 7 years | 7 | 8 + 8 | @ 8 years | 7 | 9 + 9 | @ 9 years | 8 | 10 + 10 | @ 10 years | 9 | 10 +(10 rows) + +select id, f_interval, first_value(id) over w, last_value(id) over w +from datetimes 
+window w as (order by f_interval desc range between + '1 year' preceding and '1 year' following); + id | f_interval | first_value | last_value +----+------------+-------------+------------ + 10 | @ 10 years | 10 | 9 + 9 | @ 9 years | 10 | 8 + 8 | @ 8 years | 9 | 7 + 7 | @ 7 years | 8 | 7 + 6 | @ 5 years | 6 | 4 + 5 | @ 5 years | 6 | 4 + 4 | @ 4 years | 6 | 3 + 3 | @ 3 years | 4 | 2 + 2 | @ 2 years | 3 | 1 + 1 | @ 1 year | 2 | 1 +(10 rows) + +select id, f_timestamptz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamptz range between + '1 year'::interval preceding and '1 year'::interval following); + id | f_timestamptz | first_value | last_value +----+------------------------------+-------------+------------ + 1 | Thu Oct 19 02:23:54 2000 PDT | 1 | 3 + 2 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 + 3 | Fri Oct 19 02:23:54 2001 PDT | 1 | 4 + 4 | Sat Oct 19 02:23:54 2002 PDT | 2 | 5 + 5 | Sun Oct 19 02:23:54 2003 PDT | 4 | 6 + 6 | Tue Oct 19 02:23:54 2004 PDT | 5 | 7 + 7 | Wed Oct 19 02:23:54 2005 PDT | 6 | 8 + 8 | Thu Oct 19 02:23:54 2006 PDT | 7 | 9 + 9 | Fri Oct 19 02:23:54 2007 PDT | 8 | 10 + 10 | Sun Oct 19 02:23:54 2008 PDT | 9 | 10 +(10 rows) + +select id, f_timestamptz, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamptz desc range between + '1 year' preceding and '1 year' following); + id | f_timestamptz | first_value | last_value +----+------------------------------+-------------+------------ + 10 | Sun Oct 19 02:23:54 2008 PDT | 10 | 9 + 9 | Fri Oct 19 02:23:54 2007 PDT | 10 | 8 + 8 | Thu Oct 19 02:23:54 2006 PDT | 9 | 7 + 7 | Wed Oct 19 02:23:54 2005 PDT | 8 | 6 + 6 | Tue Oct 19 02:23:54 2004 PDT | 7 | 5 + 5 | Sun Oct 19 02:23:54 2003 PDT | 6 | 4 + 4 | Sat Oct 19 02:23:54 2002 PDT | 5 | 2 + 3 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 + 2 | Fri Oct 19 02:23:54 2001 PDT | 4 | 1 + 1 | Thu Oct 19 02:23:54 2000 PDT | 3 | 1 +(10 rows) + +select id, f_timestamp, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamp range between + '1 year'::interval preceding and '1 year'::interval following); + id | f_timestamp | first_value | last_value +----+--------------------------+-------------+------------ + 1 | Thu Oct 19 10:23:54 2000 | 1 | 3 + 2 | Fri Oct 19 10:23:54 2001 | 1 | 4 + 3 | Fri Oct 19 10:23:54 2001 | 1 | 4 + 4 | Sat Oct 19 10:23:54 2002 | 2 | 5 + 5 | Sun Oct 19 10:23:54 2003 | 4 | 6 + 6 | Tue Oct 19 10:23:54 2004 | 5 | 7 + 7 | Wed Oct 19 10:23:54 2005 | 6 | 8 + 8 | Thu Oct 19 10:23:54 2006 | 7 | 9 + 9 | Fri Oct 19 10:23:54 2007 | 8 | 10 + 10 | Sun Oct 19 10:23:54 2008 | 9 | 10 +(10 rows) + +select id, f_timestamp, first_value(id) over w, last_value(id) over w +from datetimes +window w as (order by f_timestamp desc range between + '1 year' preceding and '1 year' following); + id | f_timestamp | first_value | last_value +----+--------------------------+-------------+------------ + 10 | Sun Oct 19 10:23:54 2008 | 10 | 9 + 9 | Fri Oct 19 10:23:54 2007 | 10 | 8 + 8 | Thu Oct 19 10:23:54 2006 | 9 | 7 + 7 | Wed Oct 19 10:23:54 2005 | 8 | 6 + 6 | Tue Oct 19 10:23:54 2004 | 7 | 5 + 5 | Sun Oct 19 10:23:54 2003 | 6 | 4 + 4 | Sat Oct 19 10:23:54 2002 | 5 | 2 + 3 | Fri Oct 19 10:23:54 2001 | 4 | 1 + 2 | Fri Oct 19 10:23:54 2001 | 4 | 1 + 1 | Thu Oct 19 10:23:54 2000 | 3 | 1 +(10 rows) + +-- RANGE offset PRECEDING/FOLLOWING error cases +select sum(salary) over (order by enroll_date, salary range between '1 year'::interval preceding and '2 years'::interval following + 
exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column +LINE 1: select sum(salary) over (order by enroll_date, salary range ... + ^ +select sum(salary) over (range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column +LINE 1: select sum(salary) over (range between '1 year'::interval pr... + ^ +select sum(salary) over (order by depname range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type text +LINE 1: ... sum(salary) over (order by depname range between '1 year'::... + ^ +select max(enroll_date) over (order by enroll_date range between 1 preceding and 2 following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type date and offset type integer +LINE 1: ...ll_date) over (order by enroll_date range between 1 precedin... + ^ +HINT: Cast the offset value to an appropriate type. +select max(enroll_date) over (order by salary range between -1 preceding and 2 following + exclude ties), salary, enroll_date from empsalary; +ERROR: invalid preceding or following size in window function +select max(enroll_date) over (order by salary range between 1 preceding and -2 following + exclude ties), salary, enroll_date from empsalary; +ERROR: invalid preceding or following size in window function +select max(enroll_date) over (order by salary range between '1 year'::interval preceding and '2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: RANGE with offset PRECEDING/FOLLOWING is not supported for column type integer and offset type interval +LINE 1: ...(enroll_date) over (order by salary range between '1 year'::... + ^ +HINT: Cast the offset value to an appropriate type. 
+select max(enroll_date) over (order by enroll_date range between '1 year'::interval preceding and '-2 years'::interval following + exclude ties), salary, enroll_date from empsalary; +ERROR: invalid preceding or following size in window function +-- GROUPS tests +SELECT sum(unique1) over (order by four groups between unbounded preceding and current row), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 12 | 0 | 0 + 12 | 8 | 0 + 12 | 4 | 0 + 27 | 5 | 1 + 27 | 9 | 1 + 27 | 1 | 1 + 35 | 6 | 2 + 35 | 2 | 2 + 45 | 3 | 3 + 45 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between unbounded preceding and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 45 | 5 | 1 + 45 | 9 | 1 + 45 | 1 | 1 + 45 | 6 | 2 + 45 | 2 | 2 + 45 | 3 | 3 + 45 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between current row and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 33 | 5 | 1 + 33 | 9 | 1 + 33 | 1 | 1 + 18 | 6 | 2 + 18 | 2 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 1 preceding and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 45 | 0 | 0 + 45 | 8 | 0 + 45 | 4 | 0 + 45 | 5 | 1 + 45 | 9 | 1 + 45 | 1 | 1 + 33 | 6 | 2 + 33 | 2 | 2 + 18 | 3 | 3 + 18 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 1 following and unbounded following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 33 | 0 | 0 + 33 | 8 | 0 + 33 | 4 | 0 + 18 | 5 | 1 + 18 | 9 | 1 + 18 | 1 | 1 + 10 | 6 | 2 + 10 | 2 | 2 + | 3 | 3 + | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between unbounded preceding and 2 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 35 | 0 | 0 + 35 | 8 | 0 + 35 | 4 | 0 + 45 | 5 | 1 + 45 | 9 | 1 + 45 | 1 | 1 + 45 | 6 | 2 + 45 | 2 | 2 + 45 | 3 | 3 + 45 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 preceding), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + | 0 | 0 + | 8 | 0 + | 4 | 0 + 12 | 5 | 1 + 12 | 9 | 1 + 12 | 1 | 1 + 27 | 6 | 2 + 27 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 27 | 0 | 0 + 27 | 8 | 0 + 27 | 4 | 0 + 35 | 5 | 1 + 35 | 9 | 1 + 35 | 1 | 1 + 45 | 6 | 2 + 45 | 2 | 2 + 33 | 3 | 3 + 33 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 0 preceding and 0 following), + unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 12 | 0 | 0 + 12 | 8 | 0 + 12 | 4 | 0 + 15 | 5 | 1 + 15 | 9 | 1 + 15 | 1 | 1 + 8 | 6 | 2 + 8 | 2 | 2 + 10 | 3 | 3 + 10 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude current row), unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 27 | 0 | 0 + 19 | 8 | 0 + 23 | 4 | 0 + 30 | 5 | 1 + 26 | 9 | 1 + 34 | 1 | 1 + 39 | 6 | 2 + 43 | 2 | 2 + 30 | 3 | 3 + 26 | 7 | 3 +(10 
rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude group), unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 15 | 0 | 0 + 15 | 8 | 0 + 15 | 4 | 0 + 20 | 5 | 1 + 20 | 9 | 1 + 20 | 1 | 1 + 37 | 6 | 2 + 37 | 2 | 2 + 23 | 3 | 3 + 23 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (order by four groups between 2 preceding and 1 following + exclude ties), unique1, four +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four +-----+---------+------ + 15 | 0 | 0 + 23 | 8 | 0 + 19 | 4 | 0 + 25 | 5 | 1 + 29 | 9 | 1 + 21 | 1 | 1 + 43 | 6 | 2 + 39 | 2 | 2 + 26 | 3 | 3 + 30 | 7 | 3 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following),unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + 0 | 0 | 0 | 0 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 4 | 0 | 4 + 5 | 5 | 1 | 5 + 6 | 6 | 2 | 6 + 7 | 7 | 3 | 7 + 8 | 8 | 0 | 8 + 9 | 9 | 1 | 9 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude current row), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + | 0 | 0 | 0 + | 1 | 1 | 1 + | 2 | 2 | 2 + | 3 | 3 | 3 + | 4 | 0 | 4 + | 5 | 1 | 5 + | 6 | 2 | 6 + | 7 | 3 | 7 + | 8 | 0 | 8 + | 9 | 1 | 9 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude group), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + | 0 | 0 | 0 + | 1 | 1 | 1 + | 2 | 2 | 2 + | 3 | 3 | 3 + | 4 | 0 | 4 + | 5 | 1 | 5 + | 6 | 2 | 6 + | 7 | 3 | 7 + | 8 | 0 | 8 + | 9 | 1 | 9 +(10 rows) + +SELECT sum(unique1) over (partition by ten + order by four groups between 0 preceding and 0 following exclude ties), unique1, four, ten +FROM tenk1 WHERE unique1 < 10; + sum | unique1 | four | ten +-----+---------+------+----- + 0 | 0 | 0 | 0 + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 2 + 3 | 3 | 3 | 3 + 4 | 4 | 0 | 4 + 5 | 5 | 1 | 5 + 6 | 6 | 2 | 6 + 7 | 7 | 3 | 7 + 8 | 8 | 0 | 8 + 9 | 9 | 1 | 9 +(10 rows) + +select first_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), + lead(salary) over(order by enroll_date groups between 1 preceding and 1 following), + nth_value(salary, 1) over(order by enroll_date groups between 1 preceding and 1 following), + salary, enroll_date from empsalary; + first_value | lead | nth_value | salary | enroll_date +-------------+------+-----------+--------+------------- + 5000 | 6000 | 5000 | 5000 | 10-01-2006 + 5000 | 3900 | 5000 | 6000 | 10-01-2006 + 5000 | 4800 | 5000 | 3900 | 12-23-2006 + 3900 | 5200 | 3900 | 4800 | 08-01-2007 + 3900 | 4800 | 3900 | 5200 | 08-01-2007 + 4800 | 5200 | 4800 | 4800 | 08-08-2007 + 4800 | 3500 | 4800 | 5200 | 08-15-2007 + 5200 | 4500 | 5200 | 3500 | 12-10-2007 + 3500 | 4200 | 3500 | 4500 | 01-01-2008 + 3500 | | 3500 | 4200 | 01-01-2008 +(10 rows) + +select last_value(salary) over(order by enroll_date groups between 1 preceding and 1 following), + lag(salary) over(order by enroll_date groups between 1 preceding and 1 following), + salary, enroll_date from empsalary; + last_value | lag | salary | enroll_date +------------+------+--------+------------- + 3900 | | 5000 | 10-01-2006 + 3900 | 5000 | 6000 | 10-01-2006 + 5200 | 6000 | 3900 | 12-23-2006 + 4800 | 3900 | 4800 | 08-01-2007 + 4800 | 4800 | 5200 | 
08-01-2007 + 5200 | 5200 | 4800 | 08-08-2007 + 3500 | 4800 | 5200 | 08-15-2007 + 4200 | 5200 | 3500 | 12-10-2007 + 4200 | 3500 | 4500 | 01-01-2008 + 4200 | 4500 | 4200 | 01-01-2008 +(10 rows) + +select first_value(salary) over(order by enroll_date groups between 1 following and 3 following + exclude current row), + lead(salary) over(order by enroll_date groups between 1 following and 3 following exclude ties), + nth_value(salary, 1) over(order by enroll_date groups between 1 following and 3 following + exclude ties), + salary, enroll_date from empsalary; + first_value | lead | nth_value | salary | enroll_date +-------------+------+-----------+--------+------------- + 3900 | 6000 | 3900 | 5000 | 10-01-2006 + 3900 | 3900 | 3900 | 6000 | 10-01-2006 + 4800 | 4800 | 4800 | 3900 | 12-23-2006 + 4800 | 5200 | 4800 | 4800 | 08-01-2007 + 4800 | 4800 | 4800 | 5200 | 08-01-2007 + 5200 | 5200 | 5200 | 4800 | 08-08-2007 + 3500 | 3500 | 3500 | 5200 | 08-15-2007 + 4500 | 4500 | 4500 | 3500 | 12-10-2007 + | 4200 | | 4500 | 01-01-2008 + | | | 4200 | 01-01-2008 +(10 rows) + +select last_value(salary) over(order by enroll_date groups between 1 following and 3 following + exclude group), + lag(salary) over(order by enroll_date groups between 1 following and 3 following exclude group), + salary, enroll_date from empsalary; + last_value | lag | salary | enroll_date +------------+------+--------+------------- + 4800 | | 5000 | 10-01-2006 + 4800 | 5000 | 6000 | 10-01-2006 + 5200 | 6000 | 3900 | 12-23-2006 + 3500 | 3900 | 4800 | 08-01-2007 + 3500 | 4800 | 5200 | 08-01-2007 + 4200 | 5200 | 4800 | 08-08-2007 + 4200 | 4800 | 5200 | 08-15-2007 + 4200 | 5200 | 3500 | 12-10-2007 + | 3500 | 4500 | 01-01-2008 + | 4500 | 4200 | 01-01-2008 +(10 rows) + +-- Show differences in offset interpretation between ROWS, RANGE, and GROUPS +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); + x | sum +----+----- + 1 | 4 + 3 | 9 + 5 | 15 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 68 +(18 rows) + +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); + x | sum +----+----- + 1 | 1 + 3 | 3 + 5 | 5 + 7 | 7 + 9 | 9 + 11 | 11 + 13 | 13 + 15 | 15 + 17 | 17 + 19 | 19 + 21 | 21 + 23 | 23 + 25 | 25 + 27 | 27 + 29 | 29 + 31 | 31 + 33 | 33 + 35 | 35 +(18 rows) + +WITH cte (x) AS ( + SELECT * FROM generate_series(1, 35, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); + x | sum +----+----- + 1 | 4 + 3 | 9 + 5 | 15 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 68 +(18 rows) + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x rows between 1 preceding and 1 following); + x | sum +----+----- + 1 | 2 + 1 | 3 + 1 | 7 + 5 | 13 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 105 + 37 | 111 + 39 | 117 + 41 | 123 + 43 | 129 + 45 | 135 + 47 | 141 + 49 | 96 +(26 rows) + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all 
+ SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x range between 1 preceding and 1 following); + x | sum +----+----- + 1 | 3 + 1 | 3 + 1 | 3 + 5 | 5 + 7 | 7 + 9 | 9 + 11 | 11 + 13 | 13 + 15 | 15 + 17 | 17 + 19 | 19 + 21 | 21 + 23 | 23 + 25 | 25 + 27 | 27 + 29 | 29 + 31 | 31 + 33 | 33 + 35 | 35 + 37 | 37 + 39 | 39 + 41 | 41 + 43 | 43 + 45 | 45 + 47 | 47 + 49 | 49 +(26 rows) + +WITH cte (x) AS ( + select 1 union all select 1 union all select 1 union all + SELECT * FROM generate_series(5, 49, 2) +) +SELECT x, (sum(x) over w) +FROM cte +WINDOW w AS (ORDER BY x groups between 1 preceding and 1 following); + x | sum +----+----- + 1 | 8 + 1 | 8 + 1 | 8 + 5 | 15 + 7 | 21 + 9 | 27 + 11 | 33 + 13 | 39 + 15 | 45 + 17 | 51 + 19 | 57 + 21 | 63 + 23 | 69 + 25 | 75 + 27 | 81 + 29 | 87 + 31 | 93 + 33 | 99 + 35 | 105 + 37 | 111 + 39 | 117 + 41 | 123 + 43 | 129 + 45 | 135 + 47 | 141 + 49 | 96 +(26 rows) + +-- with UNION +SELECT count(*) OVER (PARTITION BY four) FROM (SELECT * FROM tenk1 UNION ALL SELECT * FROM tenk2)s LIMIT 0; + count +------- +(0 rows) + +-- check some degenerate cases +create temp table t1 (f1 int, f2 int8); +insert into t1 values (1,1),(1,2),(2,2); +select f1, sum(f1) over (partition by f1 + range between 1 preceding and 1 following) +from t1 where f1 = f2; -- error, must have order by +ERROR: RANGE with offset PRECEDING/FOLLOWING requires exactly one ORDER BY column +LINE 1: select f1, sum(f1) over (partition by f1 + ^ +explain (costs off) +select f1, sum(f1) over (partition by f1 order by f2 + range between 1 preceding and 1 following) +from t1 where f1 = f2; + QUERY PLAN +--------------------------------- + WindowAgg + -> Sort + Sort Key: f1 + -> Seq Scan on t1 + Filter: (f1 = f2) +(5 rows) + +select f1, sum(f1) over (partition by f1 order by f2 + range between 1 preceding and 1 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | 1 + 2 | 2 +(2 rows) + +select f1, sum(f1) over (partition by f1, f1 order by f2 + range between 2 preceding and 1 preceding) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +select f1, sum(f1) over (partition by f1, f2 order by f2 + range between 1 following and 2 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +select f1, sum(f1) over (partition by f1 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; -- error, must have order by +ERROR: GROUPS mode requires an ORDER BY clause +LINE 1: select f1, sum(f1) over (partition by f1 + ^ +explain (costs off) +select f1, sum(f1) over (partition by f1 order by f2 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; + QUERY PLAN +--------------------------------- + WindowAgg + -> Sort + Sort Key: f1 + -> Seq Scan on t1 + Filter: (f1 = f2) +(5 rows) + +select f1, sum(f1) over (partition by f1 order by f2 + groups between 1 preceding and 1 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | 1 + 2 | 2 +(2 rows) + +select f1, sum(f1) over (partition by f1, f1 order by f2 + groups between 2 preceding and 1 preceding) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +select f1, sum(f1) over (partition by f1, f2 order by f2 + groups between 1 following and 2 following) +from t1 where f1 = f2; + f1 | sum +----+----- + 1 | + 2 | +(2 rows) + +-- ordering by a non-integer constant is allowed +SELECT rank() OVER (ORDER BY length('abc')); + rank +------ + 1 +(1 row) + +-- can't order by another window function +SELECT rank() OVER (ORDER BY 
rank() OVER (ORDER BY random())); +ERROR: window functions are not allowed in window definitions +LINE 1: SELECT rank() OVER (ORDER BY rank() OVER (ORDER BY random())... + ^ +-- some other errors +SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10; +ERROR: window functions are not allowed in WHERE +LINE 1: SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY sa... + ^ +SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVER (ORDER BY salary) < 10; +ERROR: window functions are not allowed in JOIN conditions +LINE 1: SELECT * FROM empsalary INNER JOIN tenk1 ON row_number() OVE... + ^ +SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GROUP BY 1; +ERROR: window functions are not allowed in GROUP BY +LINE 1: SELECT rank() OVER (ORDER BY 1), count(*) FROM empsalary GRO... + ^ +SELECT * FROM rank() OVER (ORDER BY random()); +ERROR: syntax error at or near "ORDER" +LINE 1: SELECT * FROM rank() OVER (ORDER BY random()); + ^ +DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10; +ERROR: window functions are not allowed in WHERE +LINE 1: DELETE FROM empsalary WHERE (rank() OVER (ORDER BY random())... + ^ +DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random()); +ERROR: window functions are not allowed in RETURNING +LINE 1: DELETE FROM empsalary RETURNING rank() OVER (ORDER BY random... + ^ +SELECT count(*) OVER w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY unique1); +ERROR: window "w" is already defined +LINE 1: ...w FROM tenk1 WINDOW w AS (ORDER BY unique1), w AS (ORDER BY ... + ^ +SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1; +ERROR: syntax error at or near "ORDER" +LINE 1: SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM te... + ^ +SELECT count() OVER () FROM tenk1; +ERROR: count(*) must be used to call a parameterless aggregate function +LINE 1: SELECT count() OVER () FROM tenk1; + ^ +SELECT generate_series(1, 100) OVER () FROM empsalary; +ERROR: OVER specified, but generate_series is not a window function nor an aggregate function +LINE 1: SELECT generate_series(1, 100) OVER () FROM empsalary; + ^ +SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1; +ERROR: argument of ntile must be greater than zero +SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; +ERROR: argument of nth_value must be greater than zero +-- filter +SELECT sum(salary), row_number() OVER (ORDER BY depname), sum( + sum(salary) FILTER (WHERE enroll_date > '2007-01-01') +) FILTER (WHERE depname <> 'sales') OVER (ORDER BY depname DESC) AS "filtered_sum", + depname +FROM empsalary GROUP BY depname; + sum | row_number | filtered_sum | depname +-------+------------+--------------+----------- + 25100 | 1 | 22600 | develop + 7400 | 2 | 3500 | personnel + 14600 | 3 | | sales +(3 rows) + +-- +-- Test SupportRequestOptimizeWindowClause's ability to de-duplicate +-- WindowClauses +-- +-- Ensure WindowClause frameOptions are changed so that only a single +-- WindowAgg exists in the plan. 
+EXPLAIN (COSTS OFF) +SELECT + empno, + depname, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, + rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN + UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, + dense_rank() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN + CURRENT ROW AND CURRENT ROW) drnk, + ntile(10) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN + CURRENT ROW AND UNBOUNDED FOLLOWING) nt, + percent_rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN + CURRENT ROW AND UNBOUNDED FOLLOWING) pr, + cume_dist() OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN + CURRENT ROW AND UNBOUNDED FOLLOWING) cd +FROM empsalary; + QUERY PLAN +---------------------------------------- + WindowAgg + -> Sort + Sort Key: depname, enroll_date + -> Seq Scan on empsalary +(4 rows) + +-- Ensure WindowFuncs which cannot support their WindowClause's frameOptions +-- being changed are untouched +EXPLAIN (COSTS OFF, VERBOSE) +SELECT + empno, + depname, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, + rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN + UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, + count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN + CURRENT ROW AND CURRENT ROW) cnt +FROM empsalary; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + WindowAgg + Output: empno, depname, (row_number() OVER (?)), (rank() OVER (?)), count(*) OVER (?), enroll_date + -> WindowAgg + Output: depname, enroll_date, empno, row_number() OVER (?), rank() OVER (?) + -> Sort + Output: depname, enroll_date, empno + Sort Key: empsalary.depname, empsalary.enroll_date + -> Seq Scan on pg_temp.empsalary + Output: depname, enroll_date, empno +(9 rows) + +-- Ensure the above query gives us the expected results +SELECT + empno, + depname, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) rn, + rank() OVER (PARTITION BY depname ORDER BY enroll_date ROWS BETWEEN + UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) rnk, + count(*) OVER (PARTITION BY depname ORDER BY enroll_date RANGE BETWEEN + CURRENT ROW AND CURRENT ROW) cnt +FROM empsalary; + empno | depname | rn | rnk | cnt +-------+-----------+----+-----+----- + 8 | develop | 1 | 1 | 1 + 10 | develop | 2 | 2 | 1 + 11 | develop | 3 | 3 | 1 + 9 | develop | 4 | 4 | 2 + 7 | develop | 5 | 4 | 2 + 2 | personnel | 1 | 1 | 1 + 5 | personnel | 2 | 2 | 1 + 1 | sales | 1 | 1 | 1 + 3 | sales | 2 | 2 | 1 + 4 | sales | 3 | 3 | 1 +(10 rows) + +-- Test pushdown of quals into a subquery containing window functions +-- pushdown is safe because all PARTITION BY clauses include depname: +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY depname) depsalary, + min(salary) OVER (PARTITION BY depname || 'A', depname) depminsalary + FROM empsalary) emp +WHERE depname = 'sales'; + QUERY PLAN +-------------------------------------------------------------------------- + Subquery Scan on emp + -> WindowAgg + -> WindowAgg + -> Sort + Sort Key: (((empsalary.depname)::text || 'A'::text)) + -> Seq Scan on empsalary + Filter: ((depname)::text = 'sales'::text) +(7 rows) + +-- pushdown is unsafe because there's a PARTITION BY clause without depname: +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY enroll_date) enroll_salary, + min(salary) OVER (PARTITION BY depname) depminsalary + FROM empsalary) emp +WHERE 
depname = 'sales'; + QUERY PLAN +------------------------------------------------------- + Subquery Scan on emp + Filter: ((emp.depname)::text = 'sales'::text) + -> WindowAgg + -> Sort + Sort Key: empsalary.enroll_date + -> WindowAgg + -> Sort + Sort Key: empsalary.depname + -> Seq Scan on empsalary +(9 rows) + +-- Test window function run conditions are properly pushed down into the +-- WindowAgg +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + row_number() OVER (ORDER BY empno) rn + FROM empsalary) emp +WHERE rn < 3; + QUERY PLAN +---------------------------------------------- + WindowAgg + Run Condition: (row_number() OVER (?) < 3) + -> Sort + Sort Key: empsalary.empno + -> Seq Scan on empsalary +(5 rows) + +-- The following 3 statements should result the same result. +SELECT * FROM + (SELECT empno, + row_number() OVER (ORDER BY empno) rn + FROM empsalary) emp +WHERE rn < 3; + empno | rn +-------+---- + 1 | 1 + 2 | 2 +(2 rows) + +SELECT * FROM + (SELECT empno, + row_number() OVER (ORDER BY empno) rn + FROM empsalary) emp +WHERE 3 > rn; + empno | rn +-------+---- + 1 | 1 + 2 | 2 +(2 rows) + +SELECT * FROM + (SELECT empno, + row_number() OVER (ORDER BY empno) rn + FROM empsalary) emp +WHERE 2 >= rn; + empno | rn +-------+---- + 1 | 1 + 2 | 2 +(2 rows) + +-- Ensure r <= 3 is pushed down into the run condition of the window agg +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + rank() OVER (ORDER BY salary DESC) r + FROM empsalary) emp +WHERE r <= 3; + QUERY PLAN +----------------------------------------- + WindowAgg + Run Condition: (rank() OVER (?) <= 3) + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(5 rows) + +SELECT * FROM + (SELECT empno, + salary, + rank() OVER (ORDER BY salary DESC) r + FROM empsalary) emp +WHERE r <= 3; + empno | salary | r +-------+--------+--- + 8 | 6000 | 1 + 10 | 5200 | 2 + 11 | 5200 | 2 +(3 rows) + +-- Ensure dr = 1 is converted to dr <= 1 to get all rows leading up to dr = 1 +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + dense_rank() OVER (ORDER BY salary DESC) dr + FROM empsalary) emp +WHERE dr = 1; + QUERY PLAN +----------------------------------------------------- + Subquery Scan on emp + Filter: (emp.dr = 1) + -> WindowAgg + Run Condition: (dense_rank() OVER (?) <= 1) + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(7 rows) + +SELECT * FROM + (SELECT empno, + salary, + dense_rank() OVER (ORDER BY salary DESC) dr + FROM empsalary) emp +WHERE dr = 1; + empno | salary | dr +-------+--------+---- + 8 | 6000 | 1 +(1 row) + +-- Check COUNT() and COUNT(*) +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER (ORDER BY salary DESC) c + FROM empsalary) emp +WHERE c <= 3; + QUERY PLAN +------------------------------------------- + WindowAgg + Run Condition: (count(*) OVER (?) <= 3) + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(5 rows) + +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER (ORDER BY salary DESC) c + FROM empsalary) emp +WHERE c <= 3; + empno | salary | c +-------+--------+--- + 8 | 6000 | 1 + 10 | 5200 | 3 + 11 | 5200 | 3 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(empno) OVER (ORDER BY salary DESC) c + FROM empsalary) emp +WHERE c <= 3; + QUERY PLAN +--------------------------------------------------------- + WindowAgg + Run Condition: (count(empsalary.empno) OVER (?) 
<= 3) + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(5 rows) + +SELECT * FROM + (SELECT empno, + salary, + count(empno) OVER (ORDER BY salary DESC) c + FROM empsalary) emp +WHERE c <= 3; + empno | salary | c +-------+--------+--- + 8 | 6000 | 1 + 10 | 5200 | 3 + 11 | 5200 | 3 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c + FROM empsalary) emp +WHERE c >= 3; + QUERY PLAN +------------------------------------------- + WindowAgg + Run Condition: (count(*) OVER (?) >= 3) + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER () c + FROM empsalary) emp +WHERE 11 <= c; + QUERY PLAN +-------------------------------------------- + WindowAgg + Run Condition: (11 <= count(*) OVER (?)) + -> Seq Scan on empsalary +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER (ORDER BY salary DESC) c, + dense_rank() OVER (ORDER BY salary DESC) dr + FROM empsalary) emp +WHERE dr = 1; + QUERY PLAN +----------------------------------------------------- + Subquery Scan on emp + Filter: (emp.dr = 1) + -> WindowAgg + Run Condition: (dense_rank() OVER (?) <= 1) + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(7 rows) + +-- Ensure we get a run condition when there's a PARTITION BY clause +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + depname, + row_number() OVER (PARTITION BY depname ORDER BY empno) rn + FROM empsalary) emp +WHERE rn < 3; + QUERY PLAN +------------------------------------------------------ + WindowAgg + Run Condition: (row_number() OVER (?) < 3) + -> Sort + Sort Key: empsalary.depname, empsalary.empno + -> Seq Scan on empsalary +(5 rows) + +-- and ensure we get the correct results from the above plan +SELECT * FROM + (SELECT empno, + depname, + row_number() OVER (PARTITION BY depname ORDER BY empno) rn + FROM empsalary) emp +WHERE rn < 3; + empno | depname | rn +-------+-----------+---- + 7 | develop | 1 + 8 | develop | 2 + 2 | personnel | 1 + 5 | personnel | 2 + 1 | sales | 1 + 3 | sales | 2 +(6 rows) + +-- ensure that "unused" subquery columns are not removed when the column only +-- exists in the run condition +EXPLAIN (COSTS OFF) +SELECT empno, depname FROM + (SELECT empno, + depname, + row_number() OVER (PARTITION BY depname ORDER BY empno) rn + FROM empsalary) emp +WHERE rn < 3; + QUERY PLAN +------------------------------------------------------------ + Subquery Scan on emp + -> WindowAgg + Run Condition: (row_number() OVER (?) < 3) + -> Sort + Sort Key: empsalary.depname, empsalary.empno + -> Seq Scan on empsalary +(6 rows) + +-- likewise with count(empno) instead of row_number() +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + depname, + salary, + count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c + FROM empsalary) emp +WHERE c <= 3; + QUERY PLAN +------------------------------------------------------------ + WindowAgg + Run Condition: (count(empsalary.empno) OVER (?) <= 3) + -> Sort + Sort Key: empsalary.depname, empsalary.salary DESC + -> Seq Scan on empsalary +(5 rows) + +-- and again, check the results are what we expect. 
+SELECT * FROM + (SELECT empno, + depname, + salary, + count(empno) OVER (PARTITION BY depname ORDER BY salary DESC) c + FROM empsalary) emp +WHERE c <= 3; + empno | depname | salary | c +-------+-----------+--------+--- + 8 | develop | 6000 | 1 + 10 | develop | 5200 | 3 + 11 | develop | 5200 | 3 + 2 | personnel | 3900 | 1 + 5 | personnel | 3500 | 2 + 1 | sales | 5000 | 1 + 4 | sales | 4800 | 3 + 3 | sales | 4800 | 3 +(8 rows) + +-- Ensure we get the correct run condition when the window function is both +-- monotonically increasing and decreasing. +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + depname, + salary, + count(empno) OVER () c + FROM empsalary) emp +WHERE c = 1; + QUERY PLAN +-------------------------------------------------------- + WindowAgg + Run Condition: (count(empsalary.empno) OVER (?) = 1) + -> Seq Scan on empsalary +(3 rows) + +-- Some more complex cases with multiple window clauses +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT *, + count(salary) OVER (PARTITION BY depname || '') c1, -- w1 + row_number() OVER (PARTITION BY depname) rn, -- w2 + count(*) OVER (PARTITION BY depname) c2, -- w2 + count(*) OVER (PARTITION BY '' || depname) c3, -- w3 + ntile(2) OVER (PARTITION BY depname) nt -- w2 + FROM empsalary +) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Subquery Scan on e + -> WindowAgg + Filter: (((row_number() OVER (?)) <= 1) AND ((ntile(2) OVER (?)) < 2)) + Run Condition: (count(empsalary.salary) OVER (?) <= 3) + -> Sort + Sort Key: (((empsalary.depname)::text || ''::text)) + -> WindowAgg + Run Condition: ((row_number() OVER (?) <= 1) AND (ntile(2) OVER (?) < 2)) + -> Sort + Sort Key: empsalary.depname + -> WindowAgg + -> Sort + Sort Key: ((''::text || (empsalary.depname)::text)) + -> Seq Scan on empsalary +(14 rows) + +-- Ensure we correctly filter out all of the run conditions from each window +SELECT * FROM + (SELECT *, + count(salary) OVER (PARTITION BY depname || '') c1, -- w1 + row_number() OVER (PARTITION BY depname) rn, -- w2 + count(*) OVER (PARTITION BY depname) c2, -- w2 + count(*) OVER (PARTITION BY '' || depname) c3, -- w3 + ntile(2) OVER (PARTITION BY depname) nt -- w2 + FROM empsalary +) e WHERE rn <= 1 AND c1 <= 3 AND nt < 2; + depname | empno | salary | enroll_date | c1 | rn | c2 | c3 | nt +-----------+-------+--------+-------------+----+----+----+----+---- + personnel | 5 | 3500 | 12-10-2007 | 2 | 1 | 2 | 2 | 1 + sales | 3 | 4800 | 08-01-2007 | 3 | 1 | 3 | 3 | 1 +(2 rows) + +-- Ensure we remove references to reduced outer joins as nulling rels in run +-- conditions +EXPLAIN (COSTS OFF) +SELECT 1 FROM + (SELECT ntile(e2.salary) OVER (PARTITION BY e1.depname) AS c + FROM empsalary e1 LEFT JOIN empsalary e2 ON TRUE + WHERE e1.empno = e2.empno) s +WHERE s.c = 1; + QUERY PLAN +--------------------------------------------------------- + Subquery Scan on s + Filter: (s.c = 1) + -> WindowAgg + Run Condition: (ntile(e2.salary) OVER (?) <= 1) + -> Sort + Sort Key: e1.depname + -> Merge Join + Merge Cond: (e1.empno = e2.empno) + -> Sort + Sort Key: e1.empno + -> Seq Scan on empsalary e1 + -> Sort + Sort Key: e2.empno + -> Seq Scan on empsalary e2 +(14 rows) + +-- Tests to ensure we don't push down the run condition when it's not valid to +-- do so. 
+-- Ensure we don't push down when the frame options show that the window +-- function is not monotonically increasing +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER (ORDER BY salary DESC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) c + FROM empsalary) emp +WHERE c <= 3; + QUERY PLAN +----------------------------------------------- + Subquery Scan on emp + Filter: (emp.c <= 3) + -> WindowAgg + -> Sort + Sort Key: empsalary.salary DESC + -> Seq Scan on empsalary +(6 rows) + +-- Ensure we don't push down when the window function's monotonic properties +-- don't match that of the clauses. +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(*) OVER (ORDER BY salary) c + FROM empsalary) emp +WHERE 3 <= c; + QUERY PLAN +------------------------------------------ + Subquery Scan on emp + Filter: (3 <= emp.c) + -> WindowAgg + -> Sort + Sort Key: empsalary.salary + -> Seq Scan on empsalary +(6 rows) + +-- Ensure we don't use a run condition when there's a volatile function in the +-- WindowFunc +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count(random()) OVER (ORDER BY empno DESC) c + FROM empsalary) emp +WHERE c = 1; + QUERY PLAN +---------------------------------------------- + Subquery Scan on emp + Filter: (emp.c = 1) + -> WindowAgg + -> Sort + Sort Key: empsalary.empno DESC + -> Seq Scan on empsalary +(6 rows) + +-- Ensure we don't use a run condition when the WindowFunc contains subplans +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT empno, + salary, + count((SELECT 1)) OVER (ORDER BY empno DESC) c + FROM empsalary) emp +WHERE c = 1; + QUERY PLAN +---------------------------------------------- + Subquery Scan on emp + Filter: (emp.c = 1) + -> WindowAgg + InitPlan 1 (returns $0) + -> Result + -> Sort + Sort Key: empsalary.empno DESC + -> Seq Scan on empsalary +(8 rows) + +-- Test Sort node collapsing +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname, empno order by enroll_date) depminsalary + FROM empsalary) emp +WHERE depname = 'sales'; + QUERY PLAN +---------------------------------------------------------------------- + Subquery Scan on emp + -> WindowAgg + -> WindowAgg + -> Sort + Sort Key: empsalary.empno, empsalary.enroll_date + -> Seq Scan on empsalary + Filter: ((depname)::text = 'sales'::text) +(7 rows) + +-- Ensure that the evaluation order of the WindowAggs results in the WindowAgg +-- with the same sort order that's required by the ORDER BY is evaluated last. +EXPLAIN (COSTS OFF) +SELECT empno, + enroll_date, + depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary +FROM empsalary +ORDER BY depname, empno; + QUERY PLAN +---------------------------------------------------- + WindowAgg + -> Incremental Sort + Sort Key: depname, empno + Presorted Key: depname + -> WindowAgg + -> Sort + Sort Key: depname, enroll_date + -> Seq Scan on empsalary +(8 rows) + +-- As above, but with an adjusted ORDER BY to ensure the above plan didn't +-- perform only 2 sorts by accident. 
+EXPLAIN (COSTS OFF) +SELECT empno, + enroll_date, + depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary +FROM empsalary +ORDER BY depname, enroll_date; + QUERY PLAN +----------------------------------------------- + WindowAgg + -> Incremental Sort + Sort Key: depname, enroll_date + Presorted Key: depname + -> WindowAgg + -> Sort + Sort Key: depname, empno + -> Seq Scan on empsalary +(8 rows) + +SET enable_hashagg TO off; +-- Ensure we don't get a sort for both DISTINCT and ORDER BY. We expect the +-- sort for the DISTINCT to provide presorted input for the ORDER BY. +EXPLAIN (COSTS OFF) +SELECT DISTINCT + empno, + enroll_date, + depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary +FROM empsalary +ORDER BY depname, enroll_date; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Unique + -> Incremental Sort + Sort Key: depname, enroll_date, empno, (sum(salary) OVER (?)), (min(salary) OVER (?)) + Presorted Key: depname, enroll_date + -> WindowAgg + -> Incremental Sort + Sort Key: depname, enroll_date + Presorted Key: depname + -> WindowAgg + -> Sort + Sort Key: depname, empno + -> Seq Scan on empsalary +(12 rows) + +-- As above but adjust the ORDER BY clause to help ensure the plan with the +-- minimum amount of sorting wasn't a fluke. +EXPLAIN (COSTS OFF) +SELECT DISTINCT + empno, + enroll_date, + depname, + sum(salary) OVER (PARTITION BY depname order by empno) depsalary, + min(salary) OVER (PARTITION BY depname order by enroll_date) depminsalary +FROM empsalary +ORDER BY depname, empno; + QUERY PLAN +----------------------------------------------------------------------------------------------- + Unique + -> Incremental Sort + Sort Key: depname, empno, enroll_date, (sum(salary) OVER (?)), (min(salary) OVER (?)) + Presorted Key: depname, empno + -> WindowAgg + -> Incremental Sort + Sort Key: depname, empno + Presorted Key: depname + -> WindowAgg + -> Sort + Sort Key: depname, enroll_date + -> Seq Scan on empsalary +(12 rows) + +RESET enable_hashagg; +-- Test Sort node reordering +EXPLAIN (COSTS OFF) +SELECT + lead(1) OVER (PARTITION BY depname ORDER BY salary, enroll_date), + lag(1) OVER (PARTITION BY depname ORDER BY salary,enroll_date,empno) +FROM empsalary; + QUERY PLAN +------------------------------------------------------------- + WindowAgg + -> WindowAgg + -> Sort + Sort Key: depname, salary, enroll_date, empno + -> Seq Scan on empsalary +(5 rows) + +-- Test incremental sorting +EXPLAIN (COSTS OFF) +SELECT * FROM + (SELECT depname, + empno, + salary, + enroll_date, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) AS first_emp, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp + FROM empsalary) emp +WHERE first_emp = 1 OR last_emp = 1; + QUERY PLAN +----------------------------------------------------------------------------------- + Subquery Scan on emp + Filter: ((emp.first_emp = 1) OR (emp.last_emp = 1)) + -> WindowAgg + -> Incremental Sort + Sort Key: empsalary.depname, empsalary.enroll_date + Presorted Key: empsalary.depname + -> WindowAgg + -> Sort + Sort Key: empsalary.depname, empsalary.enroll_date DESC + -> Seq Scan on empsalary +(10 rows) + +SELECT * FROM + (SELECT depname, + empno, + salary, + enroll_date, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date) 
AS first_emp, + row_number() OVER (PARTITION BY depname ORDER BY enroll_date DESC) AS last_emp + FROM empsalary) emp +WHERE first_emp = 1 OR last_emp = 1; + depname | empno | salary | enroll_date | first_emp | last_emp +-----------+-------+--------+-------------+-----------+---------- + develop | 8 | 6000 | 10-01-2006 | 1 | 5 + develop | 7 | 4200 | 01-01-2008 | 5 | 1 + personnel | 2 | 3900 | 12-23-2006 | 1 | 2 + personnel | 5 | 3500 | 12-10-2007 | 2 | 1 + sales | 1 | 5000 | 10-01-2006 | 1 | 3 + sales | 4 | 4800 | 08-08-2007 | 3 | 1 +(6 rows) + +-- cleanup +DROP TABLE empsalary; +-- test user-defined window function with named args and default args +CREATE FUNCTION nth_value_def(val anyelement, n integer = 1) RETURNS anyelement + LANGUAGE internal WINDOW IMMUTABLE STRICT AS 'window_nth_value'; +SELECT nth_value_def(n := 2, val := ten) OVER (PARTITION BY four), ten, four + FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s; + nth_value_def | ten | four +---------------+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 0 | 4 | 0 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 1 | 9 | 1 + | 0 | 2 + 3 | 1 | 3 + 3 | 3 | 3 +(10 rows) + +SELECT nth_value_def(ten) OVER (PARTITION BY four), ten, four + FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten) s; + nth_value_def | ten | four +---------------+-----+------ + 0 | 0 | 0 + 0 | 0 | 0 + 0 | 4 | 0 + 1 | 1 | 1 + 1 | 1 | 1 + 1 | 7 | 1 + 1 | 9 | 1 + 0 | 0 | 2 + 1 | 1 | 3 + 1 | 3 | 3 +(10 rows) + +-- +-- Test the basic moving-aggregate machinery +-- +-- create aggregates that record the series of transform calls (these are +-- intentionally not true inverses) +CREATE FUNCTION logging_sfunc_nonstrict(text, anyelement) RETURNS text AS +$$ SELECT COALESCE($1, '') || '*' || quote_nullable($2) $$ +LANGUAGE SQL IMMUTABLE; +CREATE FUNCTION logging_msfunc_nonstrict(text, anyelement) RETURNS text AS +$$ SELECT COALESCE($1, '') || '+' || quote_nullable($2) $$ +LANGUAGE SQL IMMUTABLE; +CREATE FUNCTION logging_minvfunc_nonstrict(text, anyelement) RETURNS text AS +$$ SELECT $1 || '-' || quote_nullable($2) $$ +LANGUAGE SQL IMMUTABLE; +CREATE AGGREGATE logging_agg_nonstrict (anyelement) +( + stype = text, + sfunc = logging_sfunc_nonstrict, + mstype = text, + msfunc = logging_msfunc_nonstrict, + minvfunc = logging_minvfunc_nonstrict +); +CREATE AGGREGATE logging_agg_nonstrict_initcond (anyelement) +( + stype = text, + sfunc = logging_sfunc_nonstrict, + mstype = text, + msfunc = logging_msfunc_nonstrict, + minvfunc = logging_minvfunc_nonstrict, + initcond = 'I', + minitcond = 'MI' +); +CREATE FUNCTION logging_sfunc_strict(text, anyelement) RETURNS text AS +$$ SELECT $1 || '*' || quote_nullable($2) $$ +LANGUAGE SQL STRICT IMMUTABLE; +CREATE FUNCTION logging_msfunc_strict(text, anyelement) RETURNS text AS +$$ SELECT $1 || '+' || quote_nullable($2) $$ +LANGUAGE SQL STRICT IMMUTABLE; +CREATE FUNCTION logging_minvfunc_strict(text, anyelement) RETURNS text AS +$$ SELECT $1 || '-' || quote_nullable($2) $$ +LANGUAGE SQL STRICT IMMUTABLE; +CREATE AGGREGATE logging_agg_strict (text) +( + stype = text, + sfunc = logging_sfunc_strict, + mstype = text, + msfunc = logging_msfunc_strict, + minvfunc = logging_minvfunc_strict +); +CREATE AGGREGATE logging_agg_strict_initcond (anyelement) +( + stype = text, + sfunc = logging_sfunc_strict, + mstype = text, + msfunc = logging_msfunc_strict, + minvfunc = logging_minvfunc_strict, + initcond = 'I', + minitcond = 'MI' +); +-- test strict and non-strict cases +SELECT + p::text || ',' || i::text || ':' || COALESCE(v::text, 'NULL') AS 
row, + logging_agg_nonstrict(v) over wnd as nstrict, + logging_agg_nonstrict_initcond(v) over wnd as nstrict_init, + logging_agg_strict(v::text) over wnd as strict, + logging_agg_strict_initcond(v) over wnd as strict_init +FROM (VALUES + (1, 1, NULL), + (1, 2, 'a'), + (1, 3, 'b'), + (1, 4, NULL), + (1, 5, NULL), + (1, 6, 'c'), + (2, 1, NULL), + (2, 2, 'x'), + (3, 1, 'z') +) AS t(p, i, v) +WINDOW wnd AS (PARTITION BY P ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) +ORDER BY p, i; + row | nstrict | nstrict_init | strict | strict_init +----------+-----------------------------------------------+-------------------------------------------------+-----------+---------------- + 1,1:NULL | +NULL | MI+NULL | | MI + 1,2:a | +NULL+'a' | MI+NULL+'a' | a | MI+'a' + 1,3:b | +NULL+'a'-NULL+'b' | MI+NULL+'a'-NULL+'b' | a+'b' | MI+'a'+'b' + 1,4:NULL | +NULL+'a'-NULL+'b'-'a'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL | a+'b'-'a' | MI+'a'+'b'-'a' + 1,5:NULL | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL | | MI + 1,6:c | +NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | MI+NULL+'a'-NULL+'b'-'a'+NULL-'b'+NULL-NULL+'c' | c | MI+'c' + 2,1:NULL | +NULL | MI+NULL | | MI + 2,2:x | +NULL+'x' | MI+NULL+'x' | x | MI+'x' + 3,1:z | +'z' | MI+'z' | z | MI+'z' +(9 rows) + +-- and again, but with filter +SELECT + p::text || ',' || i::text || ':' || + CASE WHEN f THEN COALESCE(v::text, 'NULL') ELSE '-' END as row, + logging_agg_nonstrict(v) filter(where f) over wnd as nstrict_filt, + logging_agg_nonstrict_initcond(v) filter(where f) over wnd as nstrict_init_filt, + logging_agg_strict(v::text) filter(where f) over wnd as strict_filt, + logging_agg_strict_initcond(v) filter(where f) over wnd as strict_init_filt +FROM (VALUES + (1, 1, true, NULL), + (1, 2, false, 'a'), + (1, 3, true, 'b'), + (1, 4, false, NULL), + (1, 5, false, NULL), + (1, 6, false, 'c'), + (2, 1, false, NULL), + (2, 2, true, 'x'), + (3, 1, true, 'z') +) AS t(p, i, f, v) +WINDOW wnd AS (PARTITION BY p ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) +ORDER BY p, i; + row | nstrict_filt | nstrict_init_filt | strict_filt | strict_init_filt +----------+--------------+-------------------+-------------+------------------ + 1,1:NULL | +NULL | MI+NULL | | MI + 1,2:- | +NULL | MI+NULL | | MI + 1,3:b | +'b' | MI+'b' | b | MI+'b' + 1,4:- | +'b' | MI+'b' | b | MI+'b' + 1,5:- | | MI | | MI + 1,6:- | | MI | | MI + 2,1:- | | MI | | MI + 2,2:x | +'x' | MI+'x' | x | MI+'x' + 3,1:z | +'z' | MI+'z' | z | MI+'z' +(9 rows) + +-- test that volatile arguments disable moving-aggregate mode +SELECT + i::text || ':' || COALESCE(v::text, 'NULL') as row, + logging_agg_strict(v::text) + over wnd as inverse, + logging_agg_strict(v::text || CASE WHEN random() < 0 then '?' 
ELSE '' END) + over wnd as noinverse +FROM (VALUES + (1, 'a'), + (2, 'b'), + (3, 'c') +) AS t(i, v) +WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) +ORDER BY i; + row | inverse | noinverse +-----+---------------+----------- + 1:a | a | a + 2:b | a+'b' | a*'b' + 3:c | a+'b'-'a'+'c' | b*'c' +(3 rows) + +SELECT + i::text || ':' || COALESCE(v::text, 'NULL') as row, + logging_agg_strict(v::text) filter(where true) + over wnd as inverse, + logging_agg_strict(v::text) filter(where random() >= 0) + over wnd as noinverse +FROM (VALUES + (1, 'a'), + (2, 'b'), + (3, 'c') +) AS t(i, v) +WINDOW wnd AS (ORDER BY i ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) +ORDER BY i; + row | inverse | noinverse +-----+---------------+----------- + 1:a | a | a + 2:b | a+'b' | a*'b' + 3:c | a+'b'-'a'+'c' | b*'c' +(3 rows) + +-- test that non-overlapping windows don't use inverse transitions +SELECT + logging_agg_strict(v::text) OVER wnd +FROM (VALUES + (1, 'a'), + (2, 'b'), + (3, 'c') +) AS t(i, v) +WINDOW wnd AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW) +ORDER BY i; + logging_agg_strict +-------------------- + a + b + c +(3 rows) + +-- test that returning NULL from the inverse transition functions +-- restarts the aggregation from scratch. The second aggregate is supposed +-- to test cases where only some aggregates restart, the third one checks +-- that one aggregate restarting doesn't cause others to restart. +CREATE FUNCTION sum_int_randrestart_minvfunc(int4, int4) RETURNS int4 AS +$$ SELECT CASE WHEN random() < 0.2 THEN NULL ELSE $1 - $2 END $$ +LANGUAGE SQL STRICT; +CREATE AGGREGATE sum_int_randomrestart (int4) +( + stype = int4, + sfunc = int4pl, + mstype = int4, + msfunc = int4pl, + minvfunc = sum_int_randrestart_minvfunc +); +WITH +vs AS ( + SELECT i, (random() * 100)::int4 AS v + FROM generate_series(1, 100) AS i +), +sum_following AS ( + SELECT i, SUM(v) OVER + (ORDER BY i DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS s + FROM vs +) +SELECT DISTINCT + sum_following.s = sum_int_randomrestart(v) OVER fwd AS eq1, + -sum_following.s = sum_int_randomrestart(-v) OVER fwd AS eq2, + 100*3+(vs.i-1)*3 = length(logging_agg_nonstrict(''::text) OVER fwd) AS eq3 +FROM vs +JOIN sum_following ON sum_following.i = vs.i +WINDOW fwd AS ( + ORDER BY vs.i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING +); + eq1 | eq2 | eq3 +-----+-----+----- + t | t | t +(1 row) + +-- +-- Test various built-in aggregates that have moving-aggregate support +-- +-- test inverse transition functions handle NULLs properly +SELECT i,AVG(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | avg +---+-------------------- + 1 | 1.5000000000000000 + 2 | 2.0000000000000000 + 3 | + 4 | +(4 rows) + +SELECT i,AVG(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | avg +---+-------------------- + 1 | 1.5000000000000000 + 2 | 2.0000000000000000 + 3 | + 4 | +(4 rows) + +SELECT i,AVG(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | avg +---+-------------------- + 1 | 1.5000000000000000 + 2 | 2.0000000000000000 + 3 | + 4 | +(4 rows) + +SELECT i,AVG(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1.5),(2,2.5),(3,NULL),(4,NULL)) t(i,v); + i | avg +---+-------------------- + 1 | 2.0000000000000000 + 2 | 2.5000000000000000 + 
3 | + 4 | +(4 rows) + +SELECT i,AVG(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v); + i | avg +---+------------ + 1 | @ 1.5 secs + 2 | @ 2 secs + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+----- + 1 | 3 + 2 | 2 + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+----- + 1 | 3 + 2 | 2 + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+----- + 1 | 3 + 2 | 2 + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::money) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,'1.10'),(2,'2.20'),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+------- + 1 | $3.30 + 2 | $2.20 + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::interval) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,'1 sec'),(2,'2 sec'),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+---------- + 1 | @ 3 secs + 2 | @ 2 secs + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1.1),(2,2.2),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+----- + 1 | 3.3 + 2 | 2.2 + 3 | + 4 | +(4 rows) + +SELECT SUM(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1.01),(2,2),(3,3)) v(i,n); + sum +------ + 6.01 + 5 + 3 +(3 rows) + +SELECT i,COUNT(v) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | count +---+------- + 1 | 2 + 2 | 1 + 3 | 0 + 4 | 0 +(4 rows) + +SELECT i,COUNT(*) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | count +---+------- + 1 | 4 + 2 | 3 + 3 | 2 + 4 | 1 +(4 rows) + +SELECT VAR_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_pop +----------------------- + 21704.000000000000 + 13868.750000000000 + 11266.666666666667 + 4225.0000000000000000 + 0 +(5 rows) + +SELECT VAR_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_pop +----------------------- + 21704.000000000000 + 13868.750000000000 + 11266.666666666667 + 4225.0000000000000000 + 0 +(5 rows) + +SELECT VAR_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_pop +----------------------- + 21704.000000000000 + 13868.750000000000 + 11266.666666666667 + 4225.0000000000000000 + 0 +(5 rows) + +SELECT VAR_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_pop +----------------------- + 21704.000000000000 + 13868.750000000000 + 11266.666666666667 + 4225.0000000000000000 + 0 +(5 rows) + +SELECT VAR_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_samp +----------------------- + 27130.000000000000 + 
18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VAR_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_samp +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VAR_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_samp +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VAR_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + var_samp +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VARIANCE(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + variance +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VARIANCE(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + variance +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VARIANCE(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + variance +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT VARIANCE(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + variance +----------------------- + 27130.000000000000 + 18491.666666666667 + 16900.000000000000 + 8450.0000000000000000 + +(5 rows) + +SELECT STDDEV_POP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_pop +--------------------- + 147.322774885623 + 147.322774885623 + 117.765657133139 + 106.144555520604 + 65.0000000000000000 + 0 +(6 rows) + +SELECT STDDEV_POP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_pop +--------------------- + 147.322774885623 + 147.322774885623 + 117.765657133139 + 106.144555520604 + 65.0000000000000000 + 0 +(6 rows) + +SELECT STDDEV_POP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_pop +--------------------- + 147.322774885623 + 147.322774885623 + 117.765657133139 + 106.144555520604 + 65.0000000000000000 + 0 +(6 rows) + +SELECT STDDEV_POP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_pop +--------------------- + 147.322774885623 + 147.322774885623 + 117.765657133139 + 106.144555520604 + 65.0000000000000000 + 0 +(6 rows) + +SELECT STDDEV_SAMP(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM 
(VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_samp +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV_SAMP(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_samp +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV_SAMP(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_samp +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV_SAMP(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(1,NULL),(2,600),(3,470),(4,170),(5,430),(6,300)) r(i,n); + stddev_samp +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV(n::bigint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + stddev +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV(n::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + stddev +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV(n::smallint) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + stddev +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +SELECT STDDEV(n::numeric) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) + FROM (VALUES(0,NULL),(1,600),(2,470),(3,170),(4,430),(5,300)) r(i,n); + stddev +--------------------- + 164.711869639076 + 164.711869639076 + 135.984067694222 + 130.000000000000 + 91.9238815542511782 + +(6 rows) + +-- test that inverse transition functions work with various frame options +SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND CURRENT ROW) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+----- + 1 | 1 + 2 | 2 + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,NULL),(4,NULL)) t(i,v); + i | sum +---+----- + 1 | 3 + 2 | 2 + 3 | + 4 | +(4 rows) + +SELECT i,SUM(v::int) OVER (ORDER BY i ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) + FROM (VALUES(1,1),(2,2),(3,3),(4,4)) t(i,v); + i | sum +---+----- + 1 | 3 + 2 | 6 + 3 | 9 + 4 | 7 +(4 rows) + +-- ensure aggregate over numeric properly recovers from NaN values +SELECT a, b, + SUM(b) OVER(ORDER BY A ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) +FROM (VALUES(1,1::numeric),(2,2),(3,'NaN'),(4,3),(5,4)) t(a,b); + a | b | sum +---+-----+----- + 1 | 1 | 1 + 2 | 2 | 3 + 3 | NaN | NaN + 4 | 3 | NaN + 5 | 4 | 7 +(5 rows) + +-- It might be tempting for someone to add an inverse trans function for +-- float and double precision. 
This should not be done as it can give incorrect +-- results. This test should fail if anyone ever does this without thinking too +-- hard about it. +SELECT to_char(SUM(n::float8) OVER (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING),'999999999999999999999D9') + FROM (VALUES(1,1e20),(2,1)) n(i,n); + to_char +-------------------------- + 100000000000000000000 + 1.0 +(2 rows) + +SELECT i, b, bool_and(b) OVER w, bool_or(b) OVER w + FROM (VALUES (1,true), (2,true), (3,false), (4,false), (5,true)) v(i,b) + WINDOW w AS (ORDER BY i ROWS BETWEEN CURRENT ROW AND 1 FOLLOWING); + i | b | bool_and | bool_or +---+---+----------+--------- + 1 | t | t | t + 2 | t | f | t + 3 | f | f | f + 4 | f | f | t + 5 | t | t | t +(5 rows) + +-- Tests for problems with failure to walk or mutate expressions +-- within window frame clauses. +-- test walker (fails with collation error if expressions are not walked) +SELECT array_agg(i) OVER w + FROM generate_series(1,5) i +WINDOW w AS (ORDER BY i ROWS BETWEEN (('foo' < 'foobar')::integer) PRECEDING AND CURRENT ROW); + array_agg +----------- + {1} + {1,2} + {2,3} + {3,4} + {4,5} +(5 rows) + +-- test mutator (fails when inlined if expressions are not mutated) +CREATE FUNCTION pg_temp.f(group_size BIGINT) RETURNS SETOF integer[] +AS $$ + SELECT array_agg(s) OVER w + FROM generate_series(1,5) s + WINDOW w AS (ORDER BY s ROWS BETWEEN CURRENT ROW AND GROUP_SIZE FOLLOWING) +$$ LANGUAGE SQL STABLE; +EXPLAIN (costs off) SELECT * FROM pg_temp.f(2); + QUERY PLAN +------------------------------------------------------ + Subquery Scan on f + -> WindowAgg + -> Sort + Sort Key: s.s + -> Function Scan on generate_series s +(5 rows) + +SELECT * FROM pg_temp.f(2); + f +--------- + {1,2,3} + {2,3,4} + {3,4,5} + {4,5} + {5} +(5 rows) + diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out new file mode 100644 index 0000000..a01efa5 --- /dev/null +++ b/src/test/regress/expected/with.out @@ -0,0 +1,3532 @@ +-- +-- Tests for common table expressions (WITH query, ... SELECT ...) +-- +-- Basic WITH +WITH q1(x,y) AS (SELECT 1,2) +SELECT * FROM q1, q1 AS q2; + x | y | x | y +---+---+---+--- + 1 | 2 | 1 | 2 +(1 row) + +-- Multiple uses are evaluated only once +SELECT count(*) FROM ( + WITH q1(x) AS (SELECT random() FROM generate_series(1, 5)) + SELECT * FROM q1 + UNION + SELECT * FROM q1 +) ss; + count +------- + 5 +(1 row) + +-- WITH RECURSIVE +-- sum of 1..100 +WITH RECURSIVE t(n) AS ( + VALUES (1) +UNION ALL + SELECT n+1 FROM t WHERE n < 100 +) +SELECT sum(n) FROM t; + sum +------ + 5050 +(1 row) + +WITH RECURSIVE t(n) AS ( + SELECT (VALUES(1)) +UNION ALL + SELECT n+1 FROM t WHERE n < 5 +) +SELECT * FROM t; + n +--- + 1 + 2 + 3 + 4 + 5 +(5 rows) + +-- UNION DISTINCT requires hashable type +WITH RECURSIVE t(n) AS ( + VALUES (1::money) +UNION + SELECT n+1::money FROM t WHERE n < 100::money +) +SELECT sum(n) FROM t; +ERROR: could not implement recursive UNION +DETAIL: All column datatypes must be hashable. 
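For comparison, the money failure above comes from the duplicate elimination that a recursive UNION performs, which needs a hash opclass for every output column. A minimal sketch (not part of the regression suite) of the same recursion over a hashable type such as numeric:

WITH RECURSIVE t(n) AS (
    VALUES (1::numeric)
UNION
    SELECT n + 1 FROM t WHERE n < 100
)
SELECT sum(n) FROM t;
-- numeric provides the hash support money lacks, so the recursive UNION is
-- implementable and the query returns 5050
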
+-- recursive view +CREATE RECURSIVE VIEW nums (n) AS + VALUES (1) +UNION ALL + SELECT n+1 FROM nums WHERE n < 5; +SELECT * FROM nums; + n +--- + 1 + 2 + 3 + 4 + 5 +(5 rows) + +CREATE OR REPLACE RECURSIVE VIEW nums (n) AS + VALUES (1) +UNION ALL + SELECT n+1 FROM nums WHERE n < 6; +SELECT * FROM nums; + n +--- + 1 + 2 + 3 + 4 + 5 + 6 +(6 rows) + +-- This is an infinite loop with UNION ALL, but not with UNION +WITH RECURSIVE t(n) AS ( + SELECT 1 +UNION + SELECT 10-n FROM t) +SELECT * FROM t; + n +--- + 1 + 9 +(2 rows) + +-- This'd be an infinite loop, but outside query reads only as much as needed +WITH RECURSIVE t(n) AS ( + VALUES (1) +UNION ALL + SELECT n+1 FROM t) +SELECT * FROM t LIMIT 10; + n +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +-- UNION case should have same property +WITH RECURSIVE t(n) AS ( + SELECT 1 +UNION + SELECT n+1 FROM t) +SELECT * FROM t LIMIT 10; + n +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +-- Test behavior with an unknown-type literal in the WITH +WITH q AS (SELECT 'foo' AS x) +SELECT x, pg_typeof(x) FROM q; + x | pg_typeof +-----+----------- + foo | text +(1 row) + +WITH RECURSIVE t(n) AS ( + SELECT 'foo' +UNION ALL + SELECT n || ' bar' FROM t WHERE length(n) < 20 +) +SELECT n, pg_typeof(n) FROM t; + n | pg_typeof +-------------------------+----------- + foo | text + foo bar | text + foo bar bar | text + foo bar bar bar | text + foo bar bar bar bar | text + foo bar bar bar bar bar | text +(6 rows) + +-- In a perfect world, this would work and resolve the literal as int ... +-- but for now, we have to be content with resolving to text too soon. +WITH RECURSIVE t(n) AS ( + SELECT '7' +UNION ALL + SELECT n+1 FROM t WHERE n < 10 +) +SELECT n, pg_typeof(n) FROM t; +ERROR: operator does not exist: text + integer +LINE 4: SELECT n+1 FROM t WHERE n < 10 + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. 
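(Illustrative sketch, not part of the expected output above: per the HINT in the preceding error, the failure can be avoided by casting the literal in the non-recursive term so it is never resolved as text.)

WITH RECURSIVE t(n) AS (
    SELECT '7'::int                    -- cast the unknown-type literal up front
UNION ALL
    SELECT n+1 FROM t WHERE n < 10
)
SELECT n, pg_typeof(n) FROM t;         -- should return 7 through 10, each reported as integer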
+-- Deeply nested WITH caused a list-munging problem in v13 +-- Detection of cross-references and self-references +WITH RECURSIVE w1(c1) AS + (WITH w2(c2) AS + (WITH w3(c3) AS + (WITH w4(c4) AS + (WITH w5(c5) AS + (WITH RECURSIVE w6(c6) AS + (WITH w6(c6) AS + (WITH w8(c8) AS + (SELECT 1) + SELECT * FROM w8) + SELECT * FROM w6) + SELECT * FROM w6) + SELECT * FROM w5) + SELECT * FROM w4) + SELECT * FROM w3) + SELECT * FROM w2) +SELECT * FROM w1; + c1 +---- + 1 +(1 row) + +-- Detection of invalid self-references +WITH RECURSIVE outermost(x) AS ( + SELECT 1 + UNION (WITH innermost1 AS ( + SELECT 2 + UNION (WITH innermost2 AS ( + SELECT 3 + UNION (WITH innermost3 AS ( + SELECT 4 + UNION (WITH innermost4 AS ( + SELECT 5 + UNION (WITH innermost5 AS ( + SELECT 6 + UNION (WITH innermost6 AS + (SELECT 7) + SELECT * FROM innermost6)) + SELECT * FROM innermost5)) + SELECT * FROM innermost4)) + SELECT * FROM innermost3)) + SELECT * FROM innermost2)) + SELECT * FROM outermost + UNION SELECT * FROM innermost1) + ) + SELECT * FROM outermost ORDER BY 1; + x +--- + 1 + 2 + 3 + 4 + 5 + 6 + 7 +(7 rows) + +-- +-- Some examples with a tree +-- +-- department structure represented here is as follows: +-- +-- ROOT-+->A-+->B-+->C +-- | | +-- | +->D-+->F +-- +->E-+->G +CREATE TEMP TABLE department ( + id INTEGER PRIMARY KEY, -- department ID + parent_department INTEGER REFERENCES department, -- upper department ID + name TEXT -- department name +); +INSERT INTO department VALUES (0, NULL, 'ROOT'); +INSERT INTO department VALUES (1, 0, 'A'); +INSERT INTO department VALUES (2, 1, 'B'); +INSERT INTO department VALUES (3, 2, 'C'); +INSERT INTO department VALUES (4, 2, 'D'); +INSERT INTO department VALUES (5, 0, 'E'); +INSERT INTO department VALUES (6, 4, 'F'); +INSERT INTO department VALUES (7, 5, 'G'); +-- extract all departments under 'A'. Result should be A, B, C, D and F +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT name as root_name, * FROM department WHERE name = 'A' + UNION ALL + -- recursive term + SELECT sd.root_name, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment ORDER BY name; + root_name | id | parent_department | name +-----------+----+-------------------+------ + A | 1 | 0 | A + A | 2 | 1 | B + A | 3 | 2 | C + A | 4 | 2 | D + A | 6 | 4 | F +(5 rows) + +-- extract all departments under 'A' with "level" number +WITH RECURSIVE subdepartment(level, id, parent_department, name) AS +( + -- non recursive term + SELECT 1, * FROM department WHERE name = 'A' + UNION ALL + -- recursive term + SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment ORDER BY name; + level | id | parent_department | name +-------+----+-------------------+------ + 1 | 1 | 0 | A + 2 | 2 | 1 | B + 3 | 3 | 2 | C + 3 | 4 | 2 | D + 4 | 6 | 4 | F +(5 rows) + +-- extract all departments under 'A' with "level" number. 
+-- Only shows level 2 or more +WITH RECURSIVE subdepartment(level, id, parent_department, name) AS +( + -- non recursive term + SELECT 1, * FROM department WHERE name = 'A' + UNION ALL + -- recursive term + SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment WHERE level >= 2 ORDER BY name; + level | id | parent_department | name +-------+----+-------------------+------ + 2 | 2 | 1 | B + 3 | 3 | 2 | C + 3 | 4 | 2 | D + 4 | 6 | 4 | F +(4 rows) + +-- "RECURSIVE" is ignored if the query has no self-reference +WITH RECURSIVE subdepartment AS +( + -- note lack of recursive UNION structure + SELECT * FROM department WHERE name = 'A' +) +SELECT * FROM subdepartment ORDER BY name; + id | parent_department | name +----+-------------------+------ + 1 | 0 | A +(1 row) + +-- inside subqueries +SELECT count(*) FROM ( + WITH RECURSIVE t(n) AS ( + SELECT 1 UNION ALL SELECT n + 1 FROM t WHERE n < 500 + ) + SELECT * FROM t) AS t WHERE n < ( + SELECT count(*) FROM ( + WITH RECURSIVE t(n) AS ( + SELECT 1 UNION ALL SELECT n + 1 FROM t WHERE n < 100 + ) + SELECT * FROM t WHERE n < 50000 + ) AS t WHERE n < 100); + count +------- + 98 +(1 row) + +-- use same CTE twice at different subquery levels +WITH q1(x,y) AS ( + SELECT hundred, sum(ten) FROM tenk1 GROUP BY hundred + ) +SELECT count(*) FROM q1 WHERE y > (SELECT sum(y)/100 FROM q1 qsub); + count +------- + 50 +(1 row) + +-- via a VIEW +CREATE TEMPORARY VIEW vsubdepartment AS + WITH RECURSIVE subdepartment AS + ( + -- non recursive term + SELECT * FROM department WHERE name = 'A' + UNION ALL + -- recursive term + SELECT d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id + ) + SELECT * FROM subdepartment; +SELECT * FROM vsubdepartment ORDER BY name; + id | parent_department | name +----+-------------------+------ + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 6 | 4 | F +(5 rows) + +-- Check reverse listing +SELECT pg_get_viewdef('vsubdepartment'::regclass); + pg_get_viewdef +----------------------------------------------- + WITH RECURSIVE subdepartment AS ( + + SELECT department.id, + + department.parent_department, + + department.name + + FROM department + + WHERE (department.name = 'A'::text)+ + UNION ALL + + SELECT d.id, + + d.parent_department, + + d.name + + FROM department d, + + subdepartment sd + + WHERE (d.parent_department = sd.id)+ + ) + + SELECT id, + + parent_department, + + name + + FROM subdepartment; +(1 row) + +SELECT pg_get_viewdef('vsubdepartment'::regclass, true); + pg_get_viewdef +--------------------------------------------- + WITH RECURSIVE subdepartment AS ( + + SELECT department.id, + + department.parent_department, + + department.name + + FROM department + + WHERE department.name = 'A'::text+ + UNION ALL + + SELECT d.id, + + d.parent_department, + + d.name + + FROM department d, + + subdepartment sd + + WHERE d.parent_department = sd.id+ + ) + + SELECT id, + + parent_department, + + name + + FROM subdepartment; +(1 row) + +-- Another reverse-listing example +CREATE VIEW sums_1_100 AS +WITH RECURSIVE t(n) AS ( + VALUES (1) +UNION ALL + SELECT n+1 FROM t WHERE n < 100 +) +SELECT sum(n) FROM t; +\d+ sums_1_100 + View "public.sums_1_100" + Column | Type | Collation | Nullable | Default | Storage | Description +--------+--------+-----------+----------+---------+---------+------------- + sum | bigint | | | | plain | +View definition: + WITH RECURSIVE t(n) AS ( + VALUES (1) + UNION ALL + SELECT t_1.n + 1 + FROM t t_1 + WHERE 
t_1.n < 100 + ) + SELECT sum(n) AS sum + FROM t; + +-- corner case in which sub-WITH gets initialized first +with recursive q as ( + select * from department + union all + (with x as (select * from q) + select * from x) + ) +select * from q limit 24; + id | parent_department | name +----+-------------------+------ + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G +(24 rows) + +with recursive q as ( + select * from department + union all + (with recursive x as ( + select * from department + union all + (select * from q union all select * from x) + ) + select * from x) + ) +select * from q limit 32; + id | parent_department | name +----+-------------------+------ + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G + 0 | | ROOT + 1 | 0 | A + 2 | 1 | B + 3 | 2 | C + 4 | 2 | D + 5 | 0 | E + 6 | 4 | F + 7 | 5 | G +(32 rows) + +-- recursive term has sub-UNION +WITH RECURSIVE t(i,j) AS ( + VALUES (1,2) + UNION ALL + SELECT t2.i, t.j+1 FROM + (SELECT 2 AS i UNION ALL SELECT 3 AS i) AS t2 + JOIN t ON (t2.i = t.i+1)) + SELECT * FROM t; + i | j +---+--- + 1 | 2 + 2 | 3 + 3 | 4 +(3 rows) + +-- +-- different tree example +-- +CREATE TEMPORARY TABLE tree( + id INTEGER PRIMARY KEY, + parent_id INTEGER REFERENCES tree(id) +); +INSERT INTO tree +VALUES (1, NULL), (2, 1), (3,1), (4,2), (5,2), (6,2), (7,3), (8,3), + (9,4), (10,4), (11,7), (12,7), (13,7), (14, 9), (15,11), (16,11); +-- +-- get all paths from "second level" nodes to leaf nodes +-- +WITH RECURSIVE t(id, path) AS ( + VALUES(1,ARRAY[]::integer[]) +UNION ALL + SELECT tree.id, t.path || tree.id + FROM tree JOIN t ON (tree.parent_id = t.id) +) +SELECT t1.*, t2.* FROM t AS t1 JOIN t AS t2 ON + (t1.path[1] = t2.path[1] AND + array_upper(t1.path,1) = 1 AND + array_upper(t2.path,1) > 1) + ORDER BY t1.id, t2.id; + id | path | id | path +----+------+----+------------- + 2 | {2} | 4 | {2,4} + 2 | {2} | 5 | {2,5} + 2 | {2} | 6 | {2,6} + 2 | {2} | 9 | {2,4,9} + 2 | {2} | 10 | {2,4,10} + 2 | {2} | 14 | {2,4,9,14} + 3 | {3} | 7 | {3,7} + 3 | {3} | 8 | {3,8} + 3 | {3} | 11 | {3,7,11} + 3 | {3} | 12 | {3,7,12} + 3 | {3} | 13 | {3,7,13} + 3 | {3} | 15 | {3,7,11,15} + 3 | {3} | 16 | {3,7,11,16} +(13 rows) + +-- just count 'em +WITH RECURSIVE t(id, path) AS ( + VALUES(1,ARRAY[]::integer[]) +UNION ALL + SELECT tree.id, t.path || tree.id + FROM tree JOIN t ON (tree.parent_id = t.id) +) +SELECT t1.id, count(t2.*) FROM t AS t1 JOIN t AS t2 ON + (t1.path[1] = t2.path[1] AND + array_upper(t1.path,1) = 1 AND + array_upper(t2.path,1) > 1) + GROUP BY t1.id + ORDER BY t1.id; + id | count +----+------- + 2 | 6 + 3 | 7 +(2 rows) + +-- this variant tickled a whole-row-variable bug in 8.4devel +WITH RECURSIVE t(id, path) AS ( + VALUES(1,ARRAY[]::integer[]) +UNION ALL + SELECT tree.id, t.path || tree.id + FROM tree JOIN t ON (tree.parent_id = t.id) +) +SELECT t1.id, t2.path, t2 FROM t AS t1 JOIN t AS t2 ON +(t1.id=t2.id); + id | path | t2 +----+-------------+-------------------- + 1 | {} | (1,{}) + 2 | {2} | (2,{2}) + 3 | {3} | (3,{3}) + 4 | {2,4} | (4,"{2,4}") + 5 | {2,5} | (5,"{2,5}") + 6 | {2,6} | 
(6,"{2,6}") + 7 | {3,7} | (7,"{3,7}") + 8 | {3,8} | (8,"{3,8}") + 9 | {2,4,9} | (9,"{2,4,9}") + 10 | {2,4,10} | (10,"{2,4,10}") + 11 | {3,7,11} | (11,"{3,7,11}") + 12 | {3,7,12} | (12,"{3,7,12}") + 13 | {3,7,13} | (13,"{3,7,13}") + 14 | {2,4,9,14} | (14,"{2,4,9,14}") + 15 | {3,7,11,15} | (15,"{3,7,11,15}") + 16 | {3,7,11,16} | (16,"{3,7,11,16}") +(16 rows) + +-- SEARCH clause +create temp table graph0( f int, t int, label text ); +insert into graph0 values + (1, 2, 'arc 1 -> 2'), + (1, 3, 'arc 1 -> 3'), + (2, 3, 'arc 2 -> 3'), + (1, 4, 'arc 1 -> 4'), + (4, 5, 'arc 4 -> 5'); +explain (verbose, costs off) +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by f, t set seq +select * from search_graph order by seq; + QUERY PLAN +---------------------------------------------------------------------------------------------- + Sort + Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq + Sort Key: search_graph.seq + CTE search_graph + -> Recursive Union + -> Seq Scan on pg_temp.graph0 g + Output: g.f, g.t, g.label, ARRAY[ROW(g.f, g.t)] + -> Merge Join + Output: g_1.f, g_1.t, g_1.label, array_cat(sg.seq, ARRAY[ROW(g_1.f, g_1.t)]) + Merge Cond: (g_1.f = sg.t) + -> Sort + Output: g_1.f, g_1.t, g_1.label + Sort Key: g_1.f + -> Seq Scan on pg_temp.graph0 g_1 + Output: g_1.f, g_1.t, g_1.label + -> Sort + Output: sg.seq, sg.t + Sort Key: sg.t + -> WorkTable Scan on search_graph sg + Output: sg.seq, sg.t + -> CTE Scan on search_graph + Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq +(22 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by f, t set seq +select * from search_graph order by seq; + f | t | label | seq +---+---+------------+------------------- + 1 | 2 | arc 1 -> 2 | {"(1,2)"} + 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | {"(1,3)"} + 1 | 4 | arc 1 -> 4 | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} + 2 | 3 | arc 2 -> 3 | {"(2,3)"} + 4 | 5 | arc 4 -> 5 | {"(4,5)"} +(7 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union distinct + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by f, t set seq +select * from search_graph order by seq; + f | t | label | seq +---+---+------------+------------------- + 1 | 2 | arc 1 -> 2 | {"(1,2)"} + 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | {"(1,3)"} + 1 | 4 | arc 1 -> 4 | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} + 2 | 3 | arc 2 -> 3 | {"(2,3)"} + 4 | 5 | arc 4 -> 5 | {"(4,5)"} +(7 rows) + +explain (verbose, costs off) +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search breadth first by f, t set seq +select * from search_graph order by seq; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Sort + Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq + Sort Key: search_graph.seq + CTE search_graph + -> Recursive Union + -> Seq Scan on pg_temp.graph0 g + Output: g.f, g.t, g.label, ROW('0'::bigint, g.f, g.t) + -> Merge Join + Output: g_1.f, g_1.t, g_1.label, ROW(int8inc((sg.seq)."*DEPTH*"), g_1.f, g_1.t) + Merge Cond: (g_1.f = sg.t) + -> Sort + Output: 
g_1.f, g_1.t, g_1.label + Sort Key: g_1.f + -> Seq Scan on pg_temp.graph0 g_1 + Output: g_1.f, g_1.t, g_1.label + -> Sort + Output: sg.seq, sg.t + Sort Key: sg.t + -> WorkTable Scan on search_graph sg + Output: sg.seq, sg.t + -> CTE Scan on search_graph + Output: search_graph.f, search_graph.t, search_graph.label, search_graph.seq +(22 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search breadth first by f, t set seq +select * from search_graph order by seq; + f | t | label | seq +---+---+------------+--------- + 1 | 2 | arc 1 -> 2 | (0,1,2) + 1 | 3 | arc 1 -> 3 | (0,1,3) + 1 | 4 | arc 1 -> 4 | (0,1,4) + 2 | 3 | arc 2 -> 3 | (0,2,3) + 4 | 5 | arc 4 -> 5 | (0,4,5) + 2 | 3 | arc 2 -> 3 | (1,2,3) + 4 | 5 | arc 4 -> 5 | (1,4,5) +(7 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union distinct + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search breadth first by f, t set seq +select * from search_graph order by seq; + f | t | label | seq +---+---+------------+--------- + 1 | 2 | arc 1 -> 2 | (0,1,2) + 1 | 3 | arc 1 -> 3 | (0,1,3) + 1 | 4 | arc 1 -> 4 | (0,1,4) + 2 | 3 | arc 2 -> 3 | (0,2,3) + 4 | 5 | arc 4 -> 5 | (0,4,5) + 2 | 3 | arc 2 -> 3 | (1,2,3) + 4 | 5 | arc 4 -> 5 | (1,4,5) +(7 rows) + +-- a constant initial value causes issues for EXPLAIN +explain (verbose, costs off) +with recursive test as ( + select 1 as x + union all + select x + 1 + from test +) search depth first by x set y +select * from test limit 5; + QUERY PLAN +----------------------------------------------------------------------------------------- + Limit + Output: test.x, test.y + CTE test + -> Recursive Union + -> Result + Output: 1, '{(1)}'::record[] + -> WorkTable Scan on test test_1 + Output: (test_1.x + 1), array_cat(test_1.y, ARRAY[ROW((test_1.x + 1))]) + -> CTE Scan on test + Output: test.x, test.y +(10 rows) + +with recursive test as ( + select 1 as x + union all + select x + 1 + from test +) search depth first by x set y +select * from test limit 5; + x | y +---+----------------------- + 1 | {(1)} + 2 | {(1),(2)} + 3 | {(1),(2),(3)} + 4 | {(1),(2),(3),(4)} + 5 | {(1),(2),(3),(4),(5)} +(5 rows) + +explain (verbose, costs off) +with recursive test as ( + select 1 as x + union all + select x + 1 + from test +) search breadth first by x set y +select * from test limit 5; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Limit + Output: test.x, test.y + CTE test + -> Recursive Union + -> Result + Output: 1, '(0,1)'::record + -> WorkTable Scan on test test_1 + Output: (test_1.x + 1), ROW(int8inc((test_1.y)."*DEPTH*"), (test_1.x + 1)) + -> CTE Scan on test + Output: test.x, test.y +(10 rows) + +with recursive test as ( + select 1 as x + union all + select x + 1 + from test +) search breadth first by x set y +select * from test limit 5; + x | y +---+------- + 1 | (0,1) + 2 | (1,2) + 3 | (2,3) + 4 | (3,4) + 5 | (4,5) +(5 rows) + +-- various syntax errors +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by foo, tar set seq +select * from search_graph; +ERROR: search column "foo" not in WITH query column list +LINE 7: ) search depth first by foo, tar set seq + ^ +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, 
search_graph sg + where g.f = sg.t +) search depth first by f, t set label +select * from search_graph; +ERROR: search sequence column name "label" already used in WITH query column list +LINE 7: ) search depth first by f, t set label + ^ +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by f, t, f set seq +select * from search_graph; +ERROR: search column "f" specified more than once +LINE 7: ) search depth first by f, t, f set seq + ^ +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by f, t set seq +select * from search_graph order by seq; +ERROR: with a SEARCH or CYCLE clause, the left side of the UNION must be a SELECT +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + (select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t) +) search depth first by f, t set seq +select * from search_graph order by seq; +ERROR: with a SEARCH or CYCLE clause, the right side of the UNION must be a SELECT +-- check that we distinguish same CTE name used at different levels +-- (this case could be supported, perhaps, but it isn't today) +with recursive x(col) as ( + select 1 + union + (with x as (select * from x) + select * from x) +) search depth first by col set seq +select * from x; +ERROR: with a SEARCH or CYCLE clause, the recursive reference to WITH query "x" must be at the top level of its right-hand SELECT +-- test ruleutils and view expansion +create temp view v_search as +with recursive search_graph(f, t, label) as ( + select * from graph0 g + union all + select g.* + from graph0 g, search_graph sg + where g.f = sg.t +) search depth first by f, t set seq +select f, t, label from search_graph; +select pg_get_viewdef('v_search'); + pg_get_viewdef +------------------------------------------------ + WITH RECURSIVE search_graph(f, t, label) AS (+ + SELECT g.f, + + g.t, + + g.label + + FROM graph0 g + + UNION ALL + + SELECT g.f, + + g.t, + + g.label + + FROM graph0 g, + + search_graph sg + + WHERE (g.f = sg.t) + + ) SEARCH DEPTH FIRST BY f, t SET seq + + SELECT f, + + t, + + label + + FROM search_graph; +(1 row) + +select * from v_search; + f | t | label +---+---+------------ + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 2 | 3 | arc 2 -> 3 + 1 | 4 | arc 1 -> 4 + 4 | 5 | arc 4 -> 5 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 +(7 rows) + +-- +-- test cycle detection +-- +create temp table graph( f int, t int, label text ); +insert into graph values + (1, 2, 'arc 1 -> 2'), + (1, 3, 'arc 1 -> 3'), + (2, 3, 'arc 2 -> 3'), + (1, 4, 'arc 1 -> 4'), + (4, 5, 'arc 4 -> 5'), + (5, 1, 'arc 5 -> 1'); +with recursive search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g + union all + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) + from graph g, search_graph sg + where g.f = sg.t and not is_cycle +) +select * from search_graph; + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} + 1 | 3 | 
arc 1 -> 3 | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} +(25 rows) + +-- UNION DISTINCT exercises row type hashing support +with recursive search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g + union distinct + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) + from graph g, search_graph sg + where g.f = sg.t and not is_cycle +) +select * from search_graph; + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} +(25 rows) + +-- ordering by the path column has same effect as SEARCH DEPTH FIRST +with recursive search_graph(f, t, label, is_cycle, path) as ( + select *, false, array[row(g.f, g.t)] from graph g + union all + select g.*, row(g.f, g.t) = any(path), path || row(g.f, g.t) + from graph g, search_graph sg + where g.f = sg.t and not is_cycle +) +select * from search_graph order by path; + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 2 | 3 | arc 2 -> 3 | f | 
{"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} + 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} + 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} + 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} +(25 rows) + +-- CYCLE clause +explain (verbose, costs off) +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle using path +select * from search_graph; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CTE Scan on search_graph + Output: search_graph.f, search_graph.t, search_graph.label, search_graph.is_cycle, search_graph.path + CTE search_graph + -> Recursive Union + -> Seq Scan on pg_temp.graph g + Output: g.f, g.t, g.label, false, ARRAY[ROW(g.f, g.t)] + -> Merge Join + Output: g_1.f, g_1.t, g_1.label, CASE WHEN (ROW(g_1.f, g_1.t) = ANY (sg.path)) THEN true ELSE false END, array_cat(sg.path, ARRAY[ROW(g_1.f, g_1.t)]) + Merge Cond: (g_1.f = sg.t) + -> Sort + Output: g_1.f, g_1.t, g_1.label + Sort Key: g_1.f + -> Seq Scan on pg_temp.graph g_1 + Output: g_1.f, g_1.t, g_1.label + -> Sort + Output: sg.path, sg.t + Sort Key: sg.t + -> WorkTable Scan on search_graph sg + Output: sg.path, sg.t + Filter: (NOT sg.is_cycle) +(20 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle using path +select * from search_graph; + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | f | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | f | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | f | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | 
arc 5 -> 1 | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} +(25 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph g + union distinct + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle to 'Y' default 'N' using path +select * from search_graph; + f | t | label | is_cycle | path +---+---+------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | N | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | N | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | N | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | N | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | N | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | N | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | N | {"(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | N | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | N | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | N | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | N | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | N | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | N | {"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | N | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | N | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | N | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | N | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | N | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | N | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | Y | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | N | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | Y | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | Y | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | N | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} +(25 rows) + +explain (verbose, costs off) +with recursive test as ( + select 0 as x + union all + select (x + 1) % 10 + from test +) cycle x set is_cycle using path +select * from test; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + CTE Scan on test + Output: test.x, test.is_cycle, test.path + CTE test + -> Recursive Union + -> Result + Output: 0, false, '{(0)}'::record[] + -> WorkTable Scan on test test_1 + Output: ((test_1.x + 1) % 10), CASE WHEN (ROW(((test_1.x + 1) % 10)) = ANY (test_1.path)) THEN true ELSE false END, array_cat(test_1.path, ARRAY[ROW(((test_1.x + 1) % 10))]) + Filter: (NOT test_1.is_cycle) +(9 rows) + +with recursive test as ( + select 0 as x + union all + select (x + 1) % 10 + from test +) cycle x set is_cycle using path +select * from test; + x | is_cycle | path +---+----------+----------------------------------------------- + 0 | f | {(0)} + 1 | f | {(0),(1)} + 2 | f | {(0),(1),(2)} + 3 | f | {(0),(1),(2),(3)} + 4 | f | {(0),(1),(2),(3),(4)} + 5 | f | {(0),(1),(2),(3),(4),(5)} + 6 | f | {(0),(1),(2),(3),(4),(5),(6)} + 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)} + 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)} + 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)} + 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)} +(11 rows) + +with recursive test as ( + select 0 as x + union all + select (x + 1) % 10 + from test + where not is_cycle -- redundant, but legal +) cycle x set is_cycle using path +select * from test; + x | is_cycle | path +---+----------+----------------------------------------------- + 0 | f | {(0)} + 1 | f | {(0),(1)} + 2 | f | {(0),(1),(2)} + 3 | f | {(0),(1),(2),(3)} + 4 | f | {(0),(1),(2),(3),(4)} + 
5 | f | {(0),(1),(2),(3),(4),(5)} + 6 | f | {(0),(1),(2),(3),(4),(5),(6)} + 7 | f | {(0),(1),(2),(3),(4),(5),(6),(7)} + 8 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8)} + 9 | f | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9)} + 0 | t | {(0),(1),(2),(3),(4),(5),(6),(7),(8),(9),(0)} +(11 rows) + +-- multiple CTEs +with recursive +graph(f, t, label) as ( + values (1, 2, 'arc 1 -> 2'), + (1, 3, 'arc 1 -> 3'), + (2, 3, 'arc 2 -> 3'), + (1, 4, 'arc 1 -> 4'), + (4, 5, 'arc 4 -> 5'), + (5, 1, 'arc 5 -> 1') +), +search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle to true default false using path +select f, t, label from search_graph; + f | t | label +---+---+------------ + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 2 | 3 | arc 2 -> 3 + 1 | 4 | arc 1 -> 4 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 4 | arc 1 -> 4 + 1 | 3 | arc 1 -> 3 + 1 | 2 | arc 1 -> 2 + 5 | 1 | arc 5 -> 1 + 1 | 4 | arc 1 -> 4 + 1 | 3 | arc 1 -> 3 + 1 | 2 | arc 1 -> 2 + 4 | 5 | arc 4 -> 5 + 2 | 3 | arc 2 -> 3 + 1 | 4 | arc 1 -> 4 + 1 | 3 | arc 1 -> 3 + 1 | 2 | arc 1 -> 2 + 4 | 5 | arc 4 -> 5 + 2 | 3 | arc 2 -> 3 + 5 | 1 | arc 5 -> 1 + 2 | 3 | arc 2 -> 3 +(25 rows) + +-- star expansion +with recursive a as ( + select 1 as b + union all + select * from a +) cycle b set c using p +select * from a; + b | c | p +---+---+----------- + 1 | f | {(1)} + 1 | t | {(1),(1)} +(2 rows) + +-- search+cycle +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) search depth first by f, t set seq + cycle f, t set is_cycle using path +select * from search_graph; + f | t | label | seq | is_cycle | path +---+---+------------+-------------------------------------------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | {"(1,2)"} | f | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | {"(1,3)"} | f | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | {"(2,3)"} | f | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | {"(1,4)"} | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | {"(4,5)"} | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | {"(5,1)"} | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | {"(5,1)","(1,2)"} | f | {"(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | {"(5,1)","(1,3)"} | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | {"(5,1)","(1,4)"} | f | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | {"(1,2)","(2,3)"} | f | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | {"(1,4)","(4,5)"} | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | {"(4,5)","(5,1)"} | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | {"(4,5)","(5,1)","(1,2)"} | f | {"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | {"(4,5)","(5,1)","(1,3)"} | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | {"(4,5)","(5,1)","(1,4)"} | f | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | {"(5,1)","(1,2)","(2,3)"} | f | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | {"(5,1)","(1,4)","(4,5)"} | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | {"(1,4)","(4,5)","(5,1)"} | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | {"(1,4)","(4,5)","(5,1)","(1,2)"} | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,3)"} | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | {"(1,4)","(4,5)","(5,1)","(1,4)"} | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | {"(4,5)","(5,1)","(1,2)","(2,3)"} | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | 
{"(4,5)","(5,1)","(1,4)","(4,5)"} | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | {"(5,1)","(1,4)","(4,5)","(5,1)"} | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} +(25 rows) + +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) search breadth first by f, t set seq + cycle f, t set is_cycle using path +select * from search_graph; + f | t | label | seq | is_cycle | path +---+---+------------+---------+----------+------------------------------------------- + 1 | 2 | arc 1 -> 2 | (0,1,2) | f | {"(1,2)"} + 1 | 3 | arc 1 -> 3 | (0,1,3) | f | {"(1,3)"} + 2 | 3 | arc 2 -> 3 | (0,2,3) | f | {"(2,3)"} + 1 | 4 | arc 1 -> 4 | (0,1,4) | f | {"(1,4)"} + 4 | 5 | arc 4 -> 5 | (0,4,5) | f | {"(4,5)"} + 5 | 1 | arc 5 -> 1 | (0,5,1) | f | {"(5,1)"} + 1 | 2 | arc 1 -> 2 | (1,1,2) | f | {"(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | (1,1,3) | f | {"(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | (1,1,4) | f | {"(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | (1,2,3) | f | {"(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | (1,4,5) | f | {"(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | (1,5,1) | f | {"(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | (2,1,2) | f | {"(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | (2,1,3) | f | {"(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | (2,1,4) | f | {"(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | (2,2,3) | f | {"(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | (2,4,5) | f | {"(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | (2,5,1) | f | {"(1,4)","(4,5)","(5,1)"} + 1 | 2 | arc 1 -> 2 | (3,1,2) | f | {"(1,4)","(4,5)","(5,1)","(1,2)"} + 1 | 3 | arc 1 -> 3 | (3,1,3) | f | {"(1,4)","(4,5)","(5,1)","(1,3)"} + 1 | 4 | arc 1 -> 4 | (3,1,4) | t | {"(1,4)","(4,5)","(5,1)","(1,4)"} + 2 | 3 | arc 2 -> 3 | (3,2,3) | f | {"(4,5)","(5,1)","(1,2)","(2,3)"} + 4 | 5 | arc 4 -> 5 | (3,4,5) | t | {"(4,5)","(5,1)","(1,4)","(4,5)"} + 5 | 1 | arc 5 -> 1 | (3,5,1) | t | {"(5,1)","(1,4)","(4,5)","(5,1)"} + 2 | 3 | arc 2 -> 3 | (4,2,3) | f | {"(1,4)","(4,5)","(5,1)","(1,2)","(2,3)"} +(25 rows) + +-- various syntax errors +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle foo, tar set is_cycle using path +select * from search_graph; +ERROR: cycle column "foo" not in WITH query column list +LINE 7: ) cycle foo, tar set is_cycle using path + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle to true default 55 using path +select * from search_graph; +ERROR: CYCLE types boolean and integer cannot be matched +LINE 7: ) cycle f, t set is_cycle to true default 55 using path + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle to point '(1,1)' default point '(0,0)' using path +select * from search_graph; +ERROR: could not identify an equality operator for type point +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set label to true default false using path +select * from search_graph; +ERROR: cycle mark column name "label" already used in WITH query column list +LINE 7: ) cycle f, t set 
label to true default false using path + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle to true default false using label +select * from search_graph; +ERROR: cycle path column name "label" already used in WITH query column list +LINE 7: ) cycle f, t set is_cycle to true default false using label + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set foo to true default false using foo +select * from search_graph; +ERROR: cycle mark column name and cycle path column name are the same +LINE 7: ) cycle f, t set foo to true default false using foo + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t, f set is_cycle to true default false using path +select * from search_graph; +ERROR: cycle column "f" specified more than once +LINE 7: ) cycle f, t, f set is_cycle to true default false using pat... + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) search depth first by f, t set foo + cycle f, t set foo to true default false using path +select * from search_graph; +ERROR: search sequence column name and cycle mark column name are the same +LINE 7: ) search depth first by f, t set foo + ^ +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) search depth first by f, t set foo + cycle f, t set is_cycle to true default false using foo +select * from search_graph; +ERROR: search sequence column name and cycle path column name are the same +LINE 7: ) search depth first by f, t set foo + ^ +-- test ruleutils and view expansion +create temp view v_cycle1 as +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle using path +select f, t, label from search_graph; +create temp view v_cycle2 as +with recursive search_graph(f, t, label) as ( + select * from graph g + union all + select g.* + from graph g, search_graph sg + where g.f = sg.t +) cycle f, t set is_cycle to 'Y' default 'N' using path +select f, t, label from search_graph; +select pg_get_viewdef('v_cycle1'); + pg_get_viewdef +------------------------------------------------ + WITH RECURSIVE search_graph(f, t, label) AS (+ + SELECT g.f, + + g.t, + + g.label + + FROM graph g + + UNION ALL + + SELECT g.f, + + g.t, + + g.label + + FROM graph g, + + search_graph sg + + WHERE (g.f = sg.t) + + ) CYCLE f, t SET is_cycle USING path + + SELECT f, + + t, + + label + + FROM search_graph; +(1 row) + +select pg_get_viewdef('v_cycle2'); + pg_get_viewdef +----------------------------------------------------------------------------- + WITH RECURSIVE search_graph(f, t, label) AS ( + + SELECT g.f, + + g.t, + + g.label + + FROM graph g + + UNION ALL + + SELECT g.f, + + g.t, + + g.label + + FROM graph g, + + search_graph sg + + WHERE (g.f = sg.t) + + ) CYCLE f, t SET is_cycle TO 'Y'::text DEFAULT 'N'::text USING path+ + SELECT f, + + t, + + label + + FROM search_graph; +(1 row) + +select * from v_cycle1; + f | t | label +---+---+------------ + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 2 | 3 | arc 2 -> 3 + 1 
| 4 | arc 1 -> 4 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 1 | 4 | arc 1 -> 4 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 1 | 4 | arc 1 -> 4 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 1 | 4 | arc 1 -> 4 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 2 | 3 | arc 2 -> 3 +(25 rows) + +select * from v_cycle2; + f | t | label +---+---+------------ + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 2 | 3 | arc 2 -> 3 + 1 | 4 | arc 1 -> 4 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 1 | 4 | arc 1 -> 4 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 1 | 4 | arc 1 -> 4 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 1 | 2 | arc 1 -> 2 + 1 | 3 | arc 1 -> 3 + 1 | 4 | arc 1 -> 4 + 2 | 3 | arc 2 -> 3 + 4 | 5 | arc 4 -> 5 + 5 | 1 | arc 5 -> 1 + 2 | 3 | arc 2 -> 3 +(25 rows) + +-- +-- test multiple WITH queries +-- +WITH RECURSIVE + y (id) AS (VALUES (1)), + x (id) AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5) +SELECT * FROM x; + id +---- + 1 + 2 + 3 + 4 + 5 +(5 rows) + +-- forward reference OK +WITH RECURSIVE + x(id) AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5), + y(id) AS (values (1)) + SELECT * FROM x; + id +---- + 1 + 2 + 3 + 4 + 5 +(5 rows) + +WITH RECURSIVE + x(id) AS + (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 5), + y(id) AS + (VALUES (1) UNION ALL SELECT id+1 FROM y WHERE id < 10) + SELECT y.*, x.* FROM y LEFT JOIN x USING (id); + id | id +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | + 7 | + 8 | + 9 | + 10 | +(10 rows) + +WITH RECURSIVE + x(id) AS + (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 5), + y(id) AS + (VALUES (1) UNION ALL SELECT id+1 FROM x WHERE id < 10) + SELECT y.*, x.* FROM y LEFT JOIN x USING (id); + id | id +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | +(6 rows) + +WITH RECURSIVE + x(id) AS + (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 3 ), + y(id) AS + (SELECT * FROM x UNION ALL SELECT * FROM x), + z(id) AS + (SELECT * FROM x UNION ALL SELECT id+1 FROM z WHERE id < 10) + SELECT * FROM z; + id +---- + 1 + 2 + 3 + 2 + 3 + 4 + 3 + 4 + 5 + 4 + 5 + 6 + 5 + 6 + 7 + 6 + 7 + 8 + 7 + 8 + 9 + 8 + 9 + 10 + 9 + 10 + 10 +(27 rows) + +WITH RECURSIVE + x(id) AS + (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 3 ), + y(id) AS + (SELECT * FROM x UNION ALL SELECT * FROM x), + z(id) AS + (SELECT * FROM y UNION ALL SELECT id+1 FROM z WHERE id < 10) + SELECT * FROM z; + id +---- + 1 + 2 + 3 + 1 + 2 + 3 + 2 + 3 + 4 + 2 + 3 + 4 + 3 + 4 + 5 + 3 + 4 + 5 + 4 + 5 + 6 + 4 + 5 + 6 + 5 + 6 + 7 + 5 + 6 + 7 + 6 + 7 + 8 + 6 + 7 + 8 + 7 + 8 + 9 + 7 + 8 + 9 + 8 + 9 + 10 + 8 + 9 + 10 + 9 + 10 + 9 + 10 + 10 + 10 +(54 rows) + +-- +-- Test WITH attached to a data-modifying statement +-- +CREATE TEMPORARY TABLE y (a INTEGER); +INSERT INTO y SELECT generate_series(1, 10); +WITH t AS ( + SELECT a FROM y +) +INSERT INTO y +SELECT a+20 FROM t RETURNING *; + a +---- + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 +(10 rows) + +SELECT * FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 21 + 22 + 23 + 24 + 25 + 26 + 27 + 28 + 29 + 30 +(20 rows) + +WITH t AS ( + SELECT a FROM y +) +UPDATE y SET a = y.a-10 FROM t WHERE y.a > 20 AND t.a = y.a RETURNING y.a; + a +---- + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 +(10 rows) + +SELECT * 
FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 +(20 rows) + +WITH RECURSIVE t(a) AS ( + SELECT 11 + UNION ALL + SELECT a+1 FROM t WHERE a < 50 +) +DELETE FROM y USING t WHERE t.a = y.a RETURNING y.a; + a +---- + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 +(10 rows) + +SELECT * FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(10 rows) + +DROP TABLE y; +-- +-- error cases +-- +WITH x(n, b) AS (SELECT 1) +SELECT * FROM x; +ERROR: WITH query "x" has 1 columns available but 2 columns specified +LINE 1: WITH x(n, b) AS (SELECT 1) + ^ +-- INTERSECT +WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT SELECT n+1 FROM x) + SELECT * FROM x; +ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term +LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT SELECT n+1 FROM x... + ^ +WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT ALL SELECT n+1 FROM x) + SELECT * FROM x; +ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term +LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 INTERSECT ALL SELECT n+1 FR... + ^ +-- EXCEPT +WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT SELECT n+1 FROM x) + SELECT * FROM x; +ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term +LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT SELECT n+1 FROM x) + ^ +WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT ALL SELECT n+1 FROM x) + SELECT * FROM x; +ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term +LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 EXCEPT ALL SELECT n+1 FROM ... + ^ +-- no non-recursive term +WITH RECURSIVE x(n) AS (SELECT n FROM x) + SELECT * FROM x; +ERROR: recursive query "x" does not have the form non-recursive-term UNION [ALL] recursive-term +LINE 1: WITH RECURSIVE x(n) AS (SELECT n FROM x) + ^ +-- recursive term in the left hand side (strictly speaking, should allow this) +WITH RECURSIVE x(n) AS (SELECT n FROM x UNION ALL SELECT 1) + SELECT * FROM x; +ERROR: recursive reference to query "x" must not appear within its non-recursive term +LINE 1: WITH RECURSIVE x(n) AS (SELECT n FROM x UNION ALL SELECT 1) + ^ +CREATE TEMPORARY TABLE y (a INTEGER); +INSERT INTO y SELECT generate_series(1, 10); +-- LEFT JOIN +WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1 + UNION ALL + SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10) +SELECT * FROM x; +ERROR: recursive reference to query "x" must not appear within an outer join +LINE 3: SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10) + ^ +-- RIGHT JOIN +WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1 + UNION ALL + SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10) +SELECT * FROM x; +ERROR: recursive reference to query "x" must not appear within an outer join +LINE 3: SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10) + ^ +-- FULL JOIN +WITH RECURSIVE x(n) AS (SELECT a FROM y WHERE a = 1 + UNION ALL + SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10) +SELECT * FROM x; +ERROR: recursive reference to query "x" must not appear within an outer join +LINE 3: SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10) + ^ +-- subquery +WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x + WHERE n IN (SELECT * FROM x)) + SELECT * FROM x; +ERROR: recursive reference to query "x" must not appear within a subquery +LINE 2: WHERE n IN (SELECT * FROM x)) + ^ +-- aggregate functions +WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT 
count(*) FROM x) + SELECT * FROM x; +ERROR: aggregate functions are not allowed in a recursive query's recursive term +LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT count(*) F... + ^ +WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT sum(n) FROM x) + SELECT * FROM x; +ERROR: aggregate functions are not allowed in a recursive query's recursive term +LINE 1: WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT sum(n) FRO... + ^ +-- ORDER BY +WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x ORDER BY 1) + SELECT * FROM x; +ERROR: ORDER BY in a recursive query is not implemented +LINE 1: ...VE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x ORDER BY 1) + ^ +-- LIMIT/OFFSET +WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x LIMIT 10 OFFSET 1) + SELECT * FROM x; +ERROR: OFFSET in a recursive query is not implemented +LINE 1: ... AS (SELECT 1 UNION ALL SELECT n+1 FROM x LIMIT 10 OFFSET 1) + ^ +-- FOR UPDATE +WITH RECURSIVE x(n) AS (SELECT 1 UNION ALL SELECT n+1 FROM x FOR UPDATE) + SELECT * FROM x; +ERROR: FOR UPDATE/SHARE in a recursive query is not implemented +-- target list has a recursive query name +WITH RECURSIVE x(id) AS (values (1) + UNION ALL + SELECT (SELECT * FROM x) FROM x WHERE id < 5 +) SELECT * FROM x; +ERROR: recursive reference to query "x" must not appear within a subquery +LINE 3: SELECT (SELECT * FROM x) FROM x WHERE id < 5 + ^ +-- mutual recursive query (not implemented) +WITH RECURSIVE + x (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM y WHERE id < 5), + y (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM x WHERE id < 5) +SELECT * FROM x; +ERROR: mutual recursion between WITH items is not implemented +LINE 2: x (id) AS (SELECT 1 UNION ALL SELECT id+1 FROM y WHERE id ... + ^ +-- non-linear recursion is not allowed +WITH RECURSIVE foo(i) AS + (values (1) + UNION ALL + (SELECT i+1 FROM foo WHERE i < 10 + UNION ALL + SELECT i+1 FROM foo WHERE i < 5) +) SELECT * FROM foo; +ERROR: recursive reference to query "foo" must not appear more than once +LINE 6: SELECT i+1 FROM foo WHERE i < 5) + ^ +WITH RECURSIVE foo(i) AS + (values (1) + UNION ALL + SELECT * FROM + (SELECT i+1 FROM foo WHERE i < 10 + UNION ALL + SELECT i+1 FROM foo WHERE i < 5) AS t +) SELECT * FROM foo; +ERROR: recursive reference to query "foo" must not appear more than once +LINE 7: SELECT i+1 FROM foo WHERE i < 5) AS t + ^ +WITH RECURSIVE foo(i) AS + (values (1) + UNION ALL + (SELECT i+1 FROM foo WHERE i < 10 + EXCEPT + SELECT i+1 FROM foo WHERE i < 5) +) SELECT * FROM foo; +ERROR: recursive reference to query "foo" must not appear within EXCEPT +LINE 6: SELECT i+1 FROM foo WHERE i < 5) + ^ +WITH RECURSIVE foo(i) AS + (values (1) + UNION ALL + (SELECT i+1 FROM foo WHERE i < 10 + INTERSECT + SELECT i+1 FROM foo WHERE i < 5) +) SELECT * FROM foo; +ERROR: recursive reference to query "foo" must not appear more than once +LINE 6: SELECT i+1 FROM foo WHERE i < 5) + ^ +-- Wrong type induced from non-recursive term +WITH RECURSIVE foo(i) AS + (SELECT i FROM (VALUES(1),(2)) t(i) + UNION ALL + SELECT (i+1)::numeric(10,0) FROM foo WHERE i < 10) +SELECT * FROM foo; +ERROR: recursive query "foo" column 1 has type integer in non-recursive term but type numeric overall +LINE 2: (SELECT i FROM (VALUES(1),(2)) t(i) + ^ +HINT: Cast the output of the non-recursive term to the correct type. +-- rejects different typmod, too (should we allow this?) 
+WITH RECURSIVE foo(i) AS + (SELECT i::numeric(3,0) FROM (VALUES(1),(2)) t(i) + UNION ALL + SELECT (i+1)::numeric(10,0) FROM foo WHERE i < 10) +SELECT * FROM foo; +ERROR: recursive query "foo" column 1 has type numeric(3,0) in non-recursive term but type numeric overall +LINE 2: (SELECT i::numeric(3,0) FROM (VALUES(1),(2)) t(i) + ^ +HINT: Cast the output of the non-recursive term to the correct type. +-- disallow OLD/NEW reference in CTE +CREATE TEMPORARY TABLE x (n integer); +CREATE RULE r2 AS ON UPDATE TO x DO INSTEAD + WITH t AS (SELECT OLD.*) UPDATE y SET a = t.n FROM t; +ERROR: cannot refer to OLD within WITH query +-- +-- test for bug #4902 +-- +with cte(foo) as ( values(42) ) values((select foo from cte)); + column1 +--------- + 42 +(1 row) + +with cte(foo) as ( select 42 ) select * from ((select foo from cte)) q; + foo +----- + 42 +(1 row) + +-- test CTE referencing an outer-level variable (to see that changed-parameter +-- signaling still works properly after fixing this bug) +select ( with cte(foo) as ( values(f1) ) + select (select foo from cte) ) +from int4_tbl; + foo +------------- + 0 + 123456 + -123456 + 2147483647 + -2147483647 +(5 rows) + +select ( with cte(foo) as ( values(f1) ) + values((select foo from cte)) ) +from int4_tbl; + column1 +------------- + 0 + 123456 + -123456 + 2147483647 + -2147483647 +(5 rows) + +-- +-- test for nested-recursive-WITH bug +-- +WITH RECURSIVE t(j) AS ( + WITH RECURSIVE s(i) AS ( + VALUES (1) + UNION ALL + SELECT i+1 FROM s WHERE i < 10 + ) + SELECT i FROM s + UNION ALL + SELECT j+1 FROM t WHERE j < 10 +) +SELECT * FROM t; + j +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 5 + 6 + 7 + 8 + 9 + 10 + 6 + 7 + 8 + 9 + 10 + 7 + 8 + 9 + 10 + 8 + 9 + 10 + 9 + 10 + 10 +(55 rows) + +-- +-- test WITH attached to intermediate-level set operation +-- +WITH outermost(x) AS ( + SELECT 1 + UNION (WITH innermost as (SELECT 2) + SELECT * FROM innermost + UNION SELECT 3) +) +SELECT * FROM outermost ORDER BY 1; + x +--- + 1 + 2 + 3 +(3 rows) + +WITH outermost(x) AS ( + SELECT 1 + UNION (WITH innermost as (SELECT 2) + SELECT * FROM outermost -- fail + UNION SELECT * FROM innermost) +) +SELECT * FROM outermost ORDER BY 1; +ERROR: relation "outermost" does not exist +LINE 4: SELECT * FROM outermost -- fail + ^ +DETAIL: There is a WITH item named "outermost", but it cannot be referenced from this part of the query. +HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references. +WITH RECURSIVE outermost(x) AS ( + SELECT 1 + UNION (WITH innermost as (SELECT 2) + SELECT * FROM outermost + UNION SELECT * FROM innermost) +) +SELECT * FROM outermost ORDER BY 1; + x +--- + 1 + 2 +(2 rows) + +WITH RECURSIVE outermost(x) AS ( + WITH innermost as (SELECT 2 FROM outermost) -- fail + SELECT * FROM innermost + UNION SELECT * from outermost +) +SELECT * FROM outermost ORDER BY 1; +ERROR: recursive reference to query "outermost" must not appear within a subquery +LINE 2: WITH innermost as (SELECT 2 FROM outermost) -- fail + ^ +-- +-- This test will fail with the old implementation of PARAM_EXEC parameter +-- assignment, because the "q1" Var passed down to A's targetlist subselect +-- looks exactly like the "A.id" Var passed down to C's subselect, causing +-- the old code to give them the same runtime PARAM_EXEC slot. But the +-- lifespans of the two parameters overlap, thanks to B also reading A. 
+-- +with +A as ( select q2 as id, (select q1) as x from int8_tbl ), +B as ( select id, row_number() over (partition by id) as r from A ), +C as ( select A.id, array(select B.id from B where B.id = A.id) from A ) +select * from C; + id | array +-------------------+------------------------------------- + 456 | {456} + 4567890123456789 | {4567890123456789,4567890123456789} + 123 | {123} + 4567890123456789 | {4567890123456789,4567890123456789} + -4567890123456789 | {-4567890123456789} +(5 rows) + +-- +-- Test CTEs read in non-initialization orders +-- +WITH RECURSIVE + tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)), + iter (id_key, row_type, link) AS ( + SELECT 0, 'base', 17 + UNION ALL ( + WITH remaining(id_key, row_type, link, min) AS ( + SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER () + FROM tab INNER JOIN iter USING (link) + WHERE tab.id_key > iter.id_key + ), + first_remaining AS ( + SELECT id_key, row_type, link + FROM remaining + WHERE id_key=min + ), + effect AS ( + SELECT tab.id_key, 'new'::text, tab.link + FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key + WHERE e.row_type = 'false' + ) + SELECT * FROM first_remaining + UNION ALL SELECT * FROM effect + ) + ) +SELECT * FROM iter; + id_key | row_type | link +--------+----------+------ + 0 | base | 17 + 1 | true | 17 + 2 | true | 17 + 3 | true | 17 + 4 | true | 17 + 5 | true | 17 + 6 | true | 17 +(7 rows) + +WITH RECURSIVE + tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)), + iter (id_key, row_type, link) AS ( + SELECT 0, 'base', 17 + UNION ( + WITH remaining(id_key, row_type, link, min) AS ( + SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER () + FROM tab INNER JOIN iter USING (link) + WHERE tab.id_key > iter.id_key + ), + first_remaining AS ( + SELECT id_key, row_type, link + FROM remaining + WHERE id_key=min + ), + effect AS ( + SELECT tab.id_key, 'new'::text, tab.link + FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key + WHERE e.row_type = 'false' + ) + SELECT * FROM first_remaining + UNION ALL SELECT * FROM effect + ) + ) +SELECT * FROM iter; + id_key | row_type | link +--------+----------+------ + 0 | base | 17 + 1 | true | 17 + 2 | true | 17 + 3 | true | 17 + 4 | true | 17 + 5 | true | 17 + 6 | true | 17 +(7 rows) + +-- +-- Data-modifying statements in WITH +-- +-- INSERT ... RETURNING +WITH t AS ( + INSERT INTO y + VALUES + (11), + (12), + (13), + (14), + (15), + (16), + (17), + (18), + (19), + (20) + RETURNING * +) +SELECT * FROM t; + a +---- + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 +(10 rows) + +SELECT * FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 +(20 rows) + +-- UPDATE ... RETURNING +WITH t AS ( + UPDATE y + SET a=a+1 + RETURNING * +) +SELECT * FROM t; + a +---- + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 + 21 +(20 rows) + +SELECT * FROM y; + a +---- + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 + 21 +(20 rows) + +-- DELETE ... 
RETURNING +WITH t AS ( + DELETE FROM y + WHERE a <= 10 + RETURNING * +) +SELECT * FROM t; + a +---- + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(9 rows) + +SELECT * FROM y; + a +---- + 11 + 12 + 13 + 14 + 15 + 16 + 17 + 18 + 19 + 20 + 21 +(11 rows) + +-- forward reference +WITH RECURSIVE t AS ( + INSERT INTO y + SELECT a+5 FROM t2 WHERE a > 5 + RETURNING * +), t2 AS ( + UPDATE y SET a=a-11 RETURNING * +) +SELECT * FROM t +UNION ALL +SELECT * FROM t2; + a +---- + 11 + 12 + 13 + 14 + 15 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 +(16 rows) + +SELECT * FROM y; + a +---- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 11 + 7 + 12 + 8 + 13 + 9 + 14 + 10 + 15 +(16 rows) + +-- unconditional DO INSTEAD rule +CREATE RULE y_rule AS ON DELETE TO y DO INSTEAD + INSERT INTO y VALUES(42) RETURNING *; +WITH t AS ( + DELETE FROM y RETURNING * +) +SELECT * FROM t; + a +---- + 42 +(1 row) + +SELECT * FROM y; + a +---- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 11 + 7 + 12 + 8 + 13 + 9 + 14 + 10 + 15 + 42 +(17 rows) + +DROP RULE y_rule ON y; +-- check merging of outer CTE with CTE in a rule action +CREATE TEMP TABLE bug6051 AS + select i from generate_series(1,3) as t(i); +SELECT * FROM bug6051; + i +--- + 1 + 2 + 3 +(3 rows) + +WITH t1 AS ( DELETE FROM bug6051 RETURNING * ) +INSERT INTO bug6051 SELECT * FROM t1; +SELECT * FROM bug6051; + i +--- + 1 + 2 + 3 +(3 rows) + +CREATE TEMP TABLE bug6051_2 (i int); +CREATE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD + INSERT INTO bug6051_2 + VALUES(NEW.i); +WITH t1 AS ( DELETE FROM bug6051 RETURNING * ) +INSERT INTO bug6051 SELECT * FROM t1; +SELECT * FROM bug6051; + i +--- +(0 rows) + +SELECT * FROM bug6051_2; + i +--- + 1 + 2 + 3 +(3 rows) + +-- check INSERT ... SELECT rule actions are disallowed on commands +-- that have modifyingCTEs +CREATE OR REPLACE RULE bug6051_ins AS ON INSERT TO bug6051 DO INSTEAD + INSERT INTO bug6051_2 + SELECT NEW.i; +WITH t1 AS ( DELETE FROM bug6051 RETURNING * ) +INSERT INTO bug6051 SELECT * FROM t1; +ERROR: INSERT ... 
SELECT rule actions are not supported for queries having data-modifying statements in WITH +-- silly example to verify that hasModifyingCTE flag is propagated +CREATE TEMP TABLE bug6051_3 AS + SELECT a FROM generate_series(11,13) AS a; +CREATE RULE bug6051_3_ins AS ON INSERT TO bug6051_3 DO INSTEAD + SELECT i FROM bug6051_2; +BEGIN; SET LOCAL debug_parallel_query = on; +WITH t1 AS ( DELETE FROM bug6051_3 RETURNING * ) + INSERT INTO bug6051_3 SELECT * FROM t1; + i +--- + 1 + 2 + 3 + 1 + 2 + 3 + 1 + 2 + 3 +(9 rows) + +COMMIT; +SELECT * FROM bug6051_3; + a +--- +(0 rows) + +-- check case where CTE reference is removed due to optimization +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q1 FROM +( + WITH t_cte AS (SELECT * FROM int8_tbl t) + SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub + FROM int8_tbl i8 +) ss; + QUERY PLAN +-------------------------------------- + Subquery Scan on ss + Output: ss.q1 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, NULL::bigint +(4 rows) + +SELECT q1 FROM +( + WITH t_cte AS (SELECT * FROM int8_tbl t) + SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub + FROM int8_tbl i8 +) ss; + q1 +------------------ + 123 + 123 + 4567890123456789 + 4567890123456789 + 4567890123456789 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q1 FROM +( + WITH t_cte AS MATERIALIZED (SELECT * FROM int8_tbl t) + SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub + FROM int8_tbl i8 +) ss; + QUERY PLAN +--------------------------------------------- + Subquery Scan on ss + Output: ss.q1 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, NULL::bigint + CTE t_cte + -> Seq Scan on public.int8_tbl t + Output: t.q1, t.q2 +(7 rows) + +SELECT q1 FROM +( + WITH t_cte AS MATERIALIZED (SELECT * FROM int8_tbl t) + SELECT q1, (SELECT q2 FROM t_cte WHERE t_cte.q1 = i8.q1) AS t_sub + FROM int8_tbl i8 +) ss; + q1 +------------------ + 123 + 123 + 4567890123456789 + 4567890123456789 + 4567890123456789 +(5 rows) + +-- a truly recursive CTE in the same list +WITH RECURSIVE t(a) AS ( + SELECT 0 + UNION ALL + SELECT a+1 FROM t WHERE a+1 < 5 +), t2 as ( + INSERT INTO y + SELECT * FROM t RETURNING * +) +SELECT * FROM t2 JOIN y USING (a) ORDER BY a; + a +--- + 0 + 1 + 2 + 3 + 4 +(5 rows) + +SELECT * FROM y; + a +---- + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 11 + 7 + 12 + 8 + 13 + 9 + 14 + 10 + 15 + 42 + 0 + 1 + 2 + 3 + 4 +(22 rows) + +-- data-modifying WITH in a modifying statement +WITH t AS ( + DELETE FROM y + WHERE a <= 10 + RETURNING * +) +INSERT INTO y SELECT -a FROM t RETURNING *; + a +----- + 0 + -1 + -2 + -3 + -4 + -5 + -6 + -7 + -8 + -9 + -10 + 0 + -1 + -2 + -3 + -4 +(16 rows) + +SELECT * FROM y; + a +----- + 11 + 12 + 13 + 14 + 15 + 42 + 0 + -1 + -2 + -3 + -4 + -5 + -6 + -7 + -8 + -9 + -10 + 0 + -1 + -2 + -3 + -4 +(22 rows) + +-- check that WITH query is run to completion even if outer query isn't +WITH t AS ( + UPDATE y SET a = a * 100 RETURNING * +) +SELECT * FROM t LIMIT 10; + a +------ + 1100 + 1200 + 1300 + 1400 + 1500 + 4200 + 0 + -100 + -200 + -300 +(10 rows) + +SELECT * FROM y; + a +------- + 1100 + 1200 + 1300 + 1400 + 1500 + 4200 + 0 + -100 + -200 + -300 + -400 + -500 + -600 + -700 + -800 + -900 + -1000 + 0 + -100 + -200 + -300 + -400 +(22 rows) + +-- data-modifying WITH containing INSERT...ON CONFLICT DO UPDATE +CREATE TABLE withz AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; +ALTER TABLE withz ADD UNIQUE (k); +WITH t AS ( + INSERT INTO withz SELECT i, 'insert' + FROM generate_series(0, 16) i + ON CONFLICT (k) DO UPDATE SET v = 
withz.v || ', now update' + RETURNING * +) +SELECT * FROM t JOIN y ON t.k = y.a ORDER BY a, k; + k | v | a +---+--------+--- + 0 | insert | 0 + 0 | insert | 0 +(2 rows) + +-- Test EXCLUDED.* reference within CTE +WITH aa AS ( + INSERT INTO withz VALUES(1, 5) ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v + WHERE withz.k != EXCLUDED.k + RETURNING * +) +SELECT * FROM aa; + k | v +---+--- +(0 rows) + +-- New query/snapshot demonstrates side-effects of previous query. +SELECT * FROM withz ORDER BY k; + k | v +----+------------------ + 0 | insert + 1 | 1 v, now update + 2 | insert + 3 | insert + 4 | 4 v, now update + 5 | insert + 6 | insert + 7 | 7 v, now update + 8 | insert + 9 | insert + 10 | 10 v, now update + 11 | insert + 12 | insert + 13 | 13 v, now update + 14 | insert + 15 | insert + 16 | 16 v, now update +(17 rows) + +-- +-- Ensure subqueries within the update clause work, even if they +-- reference outside values +-- +WITH aa AS (SELECT 1 a, 2 b) +INSERT INTO withz VALUES(1, 'insert') +ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); +WITH aa AS (SELECT 1 a, 2 b) +INSERT INTO withz VALUES(1, 'insert') +ON CONFLICT (k) DO UPDATE SET v = ' update' WHERE withz.k = (SELECT a FROM aa); +WITH aa AS (SELECT 1 a, 2 b) +INSERT INTO withz VALUES(1, 'insert') +ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); +WITH aa AS (SELECT 'a' a, 'b' b UNION ALL SELECT 'a' a, 'b' b) +INSERT INTO withz VALUES(1, 'insert') +ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 'a' LIMIT 1); +WITH aa AS (SELECT 1 a, 2 b) +INSERT INTO withz VALUES(1, (SELECT b || ' insert' FROM aa WHERE a = 1 )) +ON CONFLICT (k) DO UPDATE SET v = (SELECT b || ' update' FROM aa WHERE a = 1 LIMIT 1); +-- Update a row more than once, in different parts of a wCTE. That is +-- an allowed, presumably very rare, edge case, but since it was +-- broken in the past, having a test seems worthwhile. 
+WITH simpletup AS ( + SELECT 2 k, 'Green' v), +upsert_cte AS ( + INSERT INTO withz VALUES(2, 'Blue') ON CONFLICT (k) DO + UPDATE SET (k, v) = (SELECT k, v FROM simpletup WHERE simpletup.k = withz.k) + RETURNING k, v) +INSERT INTO withz VALUES(2, 'Red') ON CONFLICT (k) DO +UPDATE SET (k, v) = (SELECT k, v FROM upsert_cte WHERE upsert_cte.k = withz.k) +RETURNING k, v; + k | v +---+--- +(0 rows) + +DROP TABLE withz; +-- WITH referenced by MERGE statement +CREATE TABLE m AS SELECT i AS k, (i || ' v')::text v FROM generate_series(1, 16, 3) i; +ALTER TABLE m ADD UNIQUE (k); +WITH RECURSIVE cte_basic AS (SELECT 1 a, 'cte_basic val' b) +MERGE INTO m USING (select 0 k, 'merge source SubPlan' v) o ON m.k=o.k +WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1) +WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); +ERROR: WITH RECURSIVE is not supported for MERGE statement +-- Basic: +WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b) +MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k +WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1) +WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); +-- Examine +SELECT * FROM m where k = 0; + k | v +---+---------------------- + 0 | merge source SubPlan +(1 row) + +-- See EXPLAIN output for same query: +EXPLAIN (VERBOSE, COSTS OFF) +WITH cte_basic AS MATERIALIZED (SELECT 1 a, 'cte_basic val' b) +MERGE INTO m USING (select 0 k, 'merge source SubPlan' v offset 0) o ON m.k=o.k +WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_basic WHERE cte_basic.a = m.k LIMIT 1) +WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); + QUERY PLAN +------------------------------------------------------------------- + Merge on public.m + CTE cte_basic + -> Result + Output: 1, 'cte_basic val'::text + -> Hash Right Join + Output: m.ctid, o.k, o.v, o.* + Hash Cond: (m.k = o.k) + -> Seq Scan on public.m + Output: m.ctid, m.k + -> Hash + Output: o.k, o.v, o.* + -> Subquery Scan on o + Output: o.k, o.v, o.* + -> Result + Output: 0, 'merge source SubPlan'::text + SubPlan 2 + -> Limit + Output: ((cte_basic.b || ' merge update'::text)) + -> CTE Scan on cte_basic + Output: (cte_basic.b || ' merge update'::text) + Filter: (cte_basic.a = m.k) +(21 rows) + +-- InitPlan +WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b) +MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k +WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1) +WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); +-- Examine +SELECT * FROM m where k = 1; + k | v +---+--------------------------- + 1 | cte_init val merge update +(1 row) + +-- See EXPLAIN output for same query: +EXPLAIN (VERBOSE, COSTS OFF) +WITH cte_init AS MATERIALIZED (SELECT 1 a, 'cte_init val' b) +MERGE INTO m USING (select 1 k, 'merge source InitPlan' v offset 0) o ON m.k=o.k +WHEN MATCHED THEN UPDATE SET v = (SELECT b || ' merge update' FROM cte_init WHERE a = 1 LIMIT 1) +WHEN NOT MATCHED THEN INSERT VALUES(o.k, o.v); + QUERY PLAN +-------------------------------------------------------------------- + Merge on public.m + CTE cte_init + -> Result + Output: 1, 'cte_init val'::text + InitPlan 2 (returns $1) + -> Limit + Output: ((cte_init.b || ' merge update'::text)) + -> CTE Scan on cte_init + Output: (cte_init.b || ' merge update'::text) + Filter: (cte_init.a = 1) + -> Hash Right Join + Output: m.ctid, o.k, o.v, o.* + Hash 
Cond: (m.k = o.k) + -> Seq Scan on public.m + Output: m.ctid, m.k + -> Hash + Output: o.k, o.v, o.* + -> Subquery Scan on o + Output: o.k, o.v, o.* + -> Result + Output: 1, 'merge source InitPlan'::text +(21 rows) + +-- MERGE source comes from CTE: +WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b) +MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a +WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15) +WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte)); +-- Examine +SELECT * FROM m where k = 15; + k | v +----+-------------------------------------------------------------- + 15 | merge_source_cte val(15,"merge_source_cte val") merge insert +(1 row) + +-- See EXPLAIN output for same query: +EXPLAIN (VERBOSE, COSTS OFF) +WITH merge_source_cte AS MATERIALIZED (SELECT 15 a, 'merge_source_cte val' b) +MERGE INTO m USING (select * from merge_source_cte) o ON m.k=o.a +WHEN MATCHED THEN UPDATE SET v = (SELECT b || merge_source_cte.*::text || ' merge update' FROM merge_source_cte WHERE a = 15) +WHEN NOT MATCHED THEN INSERT VALUES(o.a, o.b || (SELECT merge_source_cte.*::text || ' merge insert' FROM merge_source_cte)); + QUERY PLAN +----------------------------------------------------------------------------------------------------- + Merge on public.m + CTE merge_source_cte + -> Result + Output: 15, 'merge_source_cte val'::text + InitPlan 2 (returns $1) + -> CTE Scan on merge_source_cte merge_source_cte_1 + Output: ((merge_source_cte_1.b || (merge_source_cte_1.*)::text) || ' merge update'::text) + Filter: (merge_source_cte_1.a = 15) + InitPlan 3 (returns $2) + -> CTE Scan on merge_source_cte merge_source_cte_2 + Output: ((merge_source_cte_2.*)::text || ' merge insert'::text) + -> Hash Right Join + Output: m.ctid, merge_source_cte.a, merge_source_cte.b, merge_source_cte.* + Hash Cond: (m.k = merge_source_cte.a) + -> Seq Scan on public.m + Output: m.ctid, m.k + -> Hash + Output: merge_source_cte.a, merge_source_cte.b, merge_source_cte.* + -> CTE Scan on merge_source_cte + Output: merge_source_cte.a, merge_source_cte.b, merge_source_cte.* +(20 rows) + +DROP TABLE m; +-- check that run to completion happens in proper ordering +TRUNCATE TABLE y; +INSERT INTO y SELECT generate_series(1, 3); +CREATE TEMPORARY TABLE yy (a INTEGER); +WITH RECURSIVE t1 AS ( + INSERT INTO y SELECT * FROM y RETURNING * +), t2 AS ( + INSERT INTO yy SELECT * FROM t1 RETURNING * +) +SELECT 1; + ?column? +---------- + 1 +(1 row) + +SELECT * FROM y; + a +--- + 1 + 2 + 3 + 1 + 2 + 3 +(6 rows) + +SELECT * FROM yy; + a +--- + 1 + 2 + 3 +(3 rows) + +WITH RECURSIVE t1 AS ( + INSERT INTO yy SELECT * FROM t2 RETURNING * +), t2 AS ( + INSERT INTO y SELECT * FROM y RETURNING * +) +SELECT 1; + ?column? 
+---------- + 1 +(1 row) + +SELECT * FROM y; + a +--- + 1 + 2 + 3 + 1 + 2 + 3 + 1 + 2 + 3 + 1 + 2 + 3 +(12 rows) + +SELECT * FROM yy; + a +--- + 1 + 2 + 3 + 1 + 2 + 3 + 1 + 2 + 3 +(9 rows) + +-- triggers +TRUNCATE TABLE y; +INSERT INTO y SELECT generate_series(1, 10); +CREATE FUNCTION y_trigger() RETURNS trigger AS $$ +begin + raise notice 'y_trigger: a = %', new.a; + return new; +end; +$$ LANGUAGE plpgsql; +CREATE TRIGGER y_trig BEFORE INSERT ON y FOR EACH ROW + EXECUTE PROCEDURE y_trigger(); +WITH t AS ( + INSERT INTO y + VALUES + (21), + (22), + (23) + RETURNING * +) +SELECT * FROM t; +NOTICE: y_trigger: a = 21 +NOTICE: y_trigger: a = 22 +NOTICE: y_trigger: a = 23 + a +---- + 21 + 22 + 23 +(3 rows) + +SELECT * FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 21 + 22 + 23 +(13 rows) + +DROP TRIGGER y_trig ON y; +CREATE TRIGGER y_trig AFTER INSERT ON y FOR EACH ROW + EXECUTE PROCEDURE y_trigger(); +WITH t AS ( + INSERT INTO y + VALUES + (31), + (32), + (33) + RETURNING * +) +SELECT * FROM t LIMIT 1; +NOTICE: y_trigger: a = 31 +NOTICE: y_trigger: a = 32 +NOTICE: y_trigger: a = 33 + a +---- + 31 +(1 row) + +SELECT * FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 21 + 22 + 23 + 31 + 32 + 33 +(16 rows) + +DROP TRIGGER y_trig ON y; +CREATE OR REPLACE FUNCTION y_trigger() RETURNS trigger AS $$ +begin + raise notice 'y_trigger'; + return null; +end; +$$ LANGUAGE plpgsql; +CREATE TRIGGER y_trig AFTER INSERT ON y FOR EACH STATEMENT + EXECUTE PROCEDURE y_trigger(); +WITH t AS ( + INSERT INTO y + VALUES + (41), + (42), + (43) + RETURNING * +) +SELECT * FROM t; +NOTICE: y_trigger + a +---- + 41 + 42 + 43 +(3 rows) + +SELECT * FROM y; + a +---- + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 21 + 22 + 23 + 31 + 32 + 33 + 41 + 42 + 43 +(19 rows) + +DROP TRIGGER y_trig ON y; +DROP FUNCTION y_trigger(); +-- WITH attached to inherited UPDATE or DELETE +CREATE TEMP TABLE parent ( id int, val text ); +CREATE TEMP TABLE child1 ( ) INHERITS ( parent ); +CREATE TEMP TABLE child2 ( ) INHERITS ( parent ); +INSERT INTO parent VALUES ( 1, 'p1' ); +INSERT INTO child1 VALUES ( 11, 'c11' ),( 12, 'c12' ); +INSERT INTO child2 VALUES ( 23, 'c21' ),( 24, 'c22' ); +WITH rcte AS ( SELECT sum(id) AS totalid FROM parent ) +UPDATE parent SET id = id + totalid FROM rcte; +SELECT * FROM parent; + id | val +----+----- + 72 | p1 + 82 | c11 + 83 | c12 + 94 | c21 + 95 | c22 +(5 rows) + +WITH wcte AS ( INSERT INTO child1 VALUES ( 42, 'new' ) RETURNING id AS newid ) +UPDATE parent SET id = id + newid FROM wcte; +SELECT * FROM parent; + id | val +-----+----- + 114 | p1 + 42 | new + 124 | c11 + 125 | c12 + 136 | c21 + 137 | c22 +(6 rows) + +WITH rcte AS ( SELECT max(id) AS maxid FROM parent ) +DELETE FROM parent USING rcte WHERE id = maxid; +SELECT * FROM parent; + id | val +-----+----- + 114 | p1 + 42 | new + 124 | c11 + 125 | c12 + 136 | c21 +(5 rows) + +WITH wcte AS ( INSERT INTO child2 VALUES ( 42, 'new2' ) RETURNING id AS newid ) +DELETE FROM parent USING wcte WHERE id = newid; +SELECT * FROM parent; + id | val +-----+------ + 114 | p1 + 124 | c11 + 125 | c12 + 136 | c21 + 42 | new2 +(5 rows) + +-- check EXPLAIN VERBOSE for a wCTE with RETURNING +EXPLAIN (VERBOSE, COSTS OFF) +WITH wcte AS ( INSERT INTO int8_tbl VALUES ( 42, 47 ) RETURNING q2 ) +DELETE FROM a_star USING wcte WHERE aa = q2; + QUERY PLAN +--------------------------------------------------------------------------- + Delete on public.a_star + Delete on public.a_star a_star_1 + Delete on public.b_star a_star_2 + Delete on public.c_star a_star_3 + 
Delete on public.d_star a_star_4 + Delete on public.e_star a_star_5 + Delete on public.f_star a_star_6 + CTE wcte + -> Insert on public.int8_tbl + Output: int8_tbl.q2 + -> Result + Output: '42'::bigint, '47'::bigint + -> Hash Join + Output: wcte.*, a_star.tableoid, a_star.ctid + Hash Cond: (a_star.aa = wcte.q2) + -> Append + -> Seq Scan on public.a_star a_star_1 + Output: a_star_1.aa, a_star_1.tableoid, a_star_1.ctid + -> Seq Scan on public.b_star a_star_2 + Output: a_star_2.aa, a_star_2.tableoid, a_star_2.ctid + -> Seq Scan on public.c_star a_star_3 + Output: a_star_3.aa, a_star_3.tableoid, a_star_3.ctid + -> Seq Scan on public.d_star a_star_4 + Output: a_star_4.aa, a_star_4.tableoid, a_star_4.ctid + -> Seq Scan on public.e_star a_star_5 + Output: a_star_5.aa, a_star_5.tableoid, a_star_5.ctid + -> Seq Scan on public.f_star a_star_6 + Output: a_star_6.aa, a_star_6.tableoid, a_star_6.ctid + -> Hash + Output: wcte.*, wcte.q2 + -> CTE Scan on wcte + Output: wcte.*, wcte.q2 +(32 rows) + +-- error cases +-- data-modifying WITH tries to use its own output +WITH RECURSIVE t AS ( + INSERT INTO y + SELECT * FROM t +) +VALUES(FALSE); +ERROR: recursive query "t" must not contain data-modifying statements +LINE 1: WITH RECURSIVE t AS ( + ^ +-- no RETURNING in a referenced data-modifying WITH +WITH t AS ( + INSERT INTO y VALUES(0) +) +SELECT * FROM t; +ERROR: WITH query "t" does not have a RETURNING clause +LINE 4: SELECT * FROM t; + ^ +-- data-modifying WITH allowed only at the top level +SELECT * FROM ( + WITH t AS (UPDATE y SET a=a+1 RETURNING *) + SELECT * FROM t +) ss; +ERROR: WITH clause containing a data-modifying statement must be at the top level +LINE 2: WITH t AS (UPDATE y SET a=a+1 RETURNING *) + ^ +-- most variants of rules aren't allowed +CREATE RULE y_rule AS ON INSERT TO y WHERE a=0 DO INSTEAD DELETE FROM y; +WITH t AS ( + INSERT INTO y VALUES(0) +) +VALUES(FALSE); +ERROR: conditional DO INSTEAD rules are not supported for data-modifying statements in WITH +CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTHING; +WITH t AS ( + INSERT INTO y VALUES(0) +) +VALUES(FALSE); +ERROR: DO INSTEAD NOTHING rules are not supported for data-modifying statements in WITH +CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO INSTEAD NOTIFY foo; +WITH t AS ( + INSERT INTO y VALUES(0) +) +VALUES(FALSE); +ERROR: DO INSTEAD NOTIFY rules are not supported for data-modifying statements in WITH +CREATE OR REPLACE RULE y_rule AS ON INSERT TO y DO ALSO NOTIFY foo; +WITH t AS ( + INSERT INTO y VALUES(0) +) +VALUES(FALSE); +ERROR: DO ALSO rules are not supported for data-modifying statements in WITH +CREATE OR REPLACE RULE y_rule AS ON INSERT TO y + DO INSTEAD (NOTIFY foo; NOTIFY bar); +WITH t AS ( + INSERT INTO y VALUES(0) +) +VALUES(FALSE); +ERROR: multi-statement DO INSTEAD rules are not supported for data-modifying statements in WITH +DROP RULE y_rule ON y; +-- check that parser lookahead for WITH doesn't cause any odd behavior +create table foo (with baz); -- fail, WITH is a reserved word +ERROR: syntax error at or near "with" +LINE 1: create table foo (with baz); + ^ +create table foo (with ordinality); -- fail, WITH is a reserved word +ERROR: syntax error at or near "with" +LINE 1: create table foo (with ordinality); + ^ +with ordinality as (select 1 as x) select * from ordinality; + x +--- + 1 +(1 row) + +-- check sane response to attempt to modify CTE relation +WITH with_test AS (SELECT 42) INSERT INTO with_test VALUES (1); +ERROR: relation "with_test" does not exist +LINE 1: WITH with_test AS 
(SELECT 42) INSERT INTO with_test VALUES (... + ^ +-- check response to attempt to modify table with same name as a CTE (perhaps +-- surprisingly it works, because CTEs don't hide tables from data-modifying +-- statements) +create temp table with_test (i int); +with with_test as (select 42) insert into with_test select * from with_test; +select * from with_test; + i +---- + 42 +(1 row) + +drop table with_test; diff --git a/src/test/regress/expected/write_parallel.out b/src/test/regress/expected/write_parallel.out new file mode 100644 index 0000000..dc0c4ba --- /dev/null +++ b/src/test/regress/expected/write_parallel.out @@ -0,0 +1,80 @@ +-- +-- PARALLEL +-- +begin; +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; +-- +-- Test write operations that has an underlying query that is eligible +-- for parallel plans +-- +explain (costs off) create table parallel_write as + select length(stringu1) from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +create table parallel_write as + select length(stringu1) from tenk1 group by length(stringu1); +drop table parallel_write; +explain (costs off) select length(stringu1) into parallel_write + from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +select length(stringu1) into parallel_write + from tenk1 group by length(stringu1); +drop table parallel_write; +explain (costs off) create materialized view parallel_mat_view as + select length(stringu1) from tenk1 group by length(stringu1); + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +create materialized view parallel_mat_view as + select length(stringu1) from tenk1 group by length(stringu1); +create unique index on parallel_mat_view(length); +refresh materialized view parallel_mat_view; +refresh materialized view concurrently parallel_mat_view; +drop materialized view parallel_mat_view; +prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1); +explain (costs off) create table parallel_write as execute prep_stmt; + QUERY PLAN +--------------------------------------------------- + Finalize HashAggregate + Group Key: (length((stringu1)::text)) + -> Gather + Workers Planned: 4 + -> Partial HashAggregate + Group Key: length((stringu1)::text) + -> Parallel Seq Scan on tenk1 +(7 rows) + +create table parallel_write as execute prep_stmt; +drop table parallel_write; +rollback; diff --git a/src/test/regress/expected/xid.out b/src/test/regress/expected/xid.out new file mode 100644 index 0000000..835077e --- /dev/null +++ b/src/test/regress/expected/xid.out @@ -0,0 +1,530 @@ +-- xid and xid8 +-- values in range, in octal, decimal, hex +select '010'::xid, + '42'::xid, + '0xffffffff'::xid, + '-1'::xid, + '010'::xid8, + '42'::xid8, + 
'0xffffffffffffffff'::xid8, + '-1'::xid8; + xid | xid | xid | xid | xid8 | xid8 | xid8 | xid8 +-----+-----+------------+------------+------+------+----------------------+---------------------- + 8 | 42 | 4294967295 | 4294967295 | 8 | 42 | 18446744073709551615 | 18446744073709551615 +(1 row) + +-- garbage values +select ''::xid; +ERROR: invalid input syntax for type xid: "" +LINE 1: select ''::xid; + ^ +select 'asdf'::xid; +ERROR: invalid input syntax for type xid: "asdf" +LINE 1: select 'asdf'::xid; + ^ +select ''::xid8; +ERROR: invalid input syntax for type xid8: "" +LINE 1: select ''::xid8; + ^ +select 'asdf'::xid8; +ERROR: invalid input syntax for type xid8: "asdf" +LINE 1: select 'asdf'::xid8; + ^ +-- Also try it with non-error-throwing API +SELECT pg_input_is_valid('42', 'xid'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('asdf', 'xid'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('0xffffffffff', 'xid'); + message | detail | hint | sql_error_code +---------------------------------------------------+--------+------+---------------- + value "0xffffffffff" is out of range for type xid | | | 22003 +(1 row) + +SELECT pg_input_is_valid('42', 'xid8'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('asdf', 'xid8'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT * FROM pg_input_error_info('0xffffffffffffffffffff', 'xid8'); + message | detail | hint | sql_error_code +--------------------------------------------------------------+--------+------+---------------- + value "0xffffffffffffffffffff" is out of range for type xid8 | | | 22003 +(1 row) + +-- equality +select '1'::xid = '1'::xid; + ?column? +---------- + t +(1 row) + +select '1'::xid != '1'::xid; + ?column? +---------- + f +(1 row) + +select '1'::xid8 = '1'::xid8; + ?column? +---------- + t +(1 row) + +select '1'::xid8 != '1'::xid8; + ?column? +---------- + f +(1 row) + +-- conversion +select '1'::xid = '1'::xid8::xid; + ?column? +---------- + t +(1 row) + +select '1'::xid != '1'::xid8::xid; + ?column? +---------- + f +(1 row) + +-- we don't want relational operators for xid, due to use of modular arithmetic +select '1'::xid < '2'::xid; +ERROR: operator does not exist: xid < xid +LINE 1: select '1'::xid < '2'::xid; + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +select '1'::xid <= '2'::xid; +ERROR: operator does not exist: xid <= xid +LINE 1: select '1'::xid <= '2'::xid; + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +select '1'::xid > '2'::xid; +ERROR: operator does not exist: xid > xid +LINE 1: select '1'::xid > '2'::xid; + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +select '1'::xid >= '2'::xid; +ERROR: operator does not exist: xid >= xid +LINE 1: select '1'::xid >= '2'::xid; + ^ +HINT: No operator matches the given name and argument types. You might need to add explicit type casts. +-- we want them for xid8 though +select '1'::xid8 < '2'::xid8, '2'::xid8 < '2'::xid8, '2'::xid8 < '1'::xid8; + ?column? | ?column? | ?column? +----------+----------+---------- + t | f | f +(1 row) + +select '1'::xid8 <= '2'::xid8, '2'::xid8 <= '2'::xid8, '2'::xid8 <= '1'::xid8; + ?column? | ?column? | ?column? 
+----------+----------+---------- + t | t | f +(1 row) + +select '1'::xid8 > '2'::xid8, '2'::xid8 > '2'::xid8, '2'::xid8 > '1'::xid8; + ?column? | ?column? | ?column? +----------+----------+---------- + f | f | t +(1 row) + +select '1'::xid8 >= '2'::xid8, '2'::xid8 >= '2'::xid8, '2'::xid8 >= '1'::xid8; + ?column? | ?column? | ?column? +----------+----------+---------- + f | t | t +(1 row) + +-- we also have a 3way compare for btrees +select xid8cmp('1', '2'), xid8cmp('2', '2'), xid8cmp('2', '1'); + xid8cmp | xid8cmp | xid8cmp +---------+---------+--------- + -1 | 0 | 1 +(1 row) + +-- min() and max() for xid8 +create table xid8_t1 (x xid8); +insert into xid8_t1 values ('0'), ('010'), ('42'), ('0xffffffffffffffff'), ('-1'); +select min(x), max(x) from xid8_t1; + min | max +-----+---------------------- + 0 | 18446744073709551615 +(1 row) + +-- xid8 has btree and hash opclasses +create index on xid8_t1 using btree(x); +create index on xid8_t1 using hash(x); +drop table xid8_t1; +-- pg_snapshot data type and related functions +-- Note: another set of tests similar to this exists in txid.sql, for a limited +-- time (the relevant functions share C code) +-- i/o +select '12:13:'::pg_snapshot; + pg_snapshot +------------- + 12:13: +(1 row) + +select '12:18:14,16'::pg_snapshot; + pg_snapshot +------------- + 12:18:14,16 +(1 row) + +select '12:16:14,14'::pg_snapshot; + pg_snapshot +------------- + 12:16:14 +(1 row) + +-- errors +select '31:12:'::pg_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "31:12:" +LINE 1: select '31:12:'::pg_snapshot; + ^ +select '0:1:'::pg_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "0:1:" +LINE 1: select '0:1:'::pg_snapshot; + ^ +select '12:13:0'::pg_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "12:13:0" +LINE 1: select '12:13:0'::pg_snapshot; + ^ +select '12:16:14,13'::pg_snapshot; +ERROR: invalid input syntax for type pg_snapshot: "12:16:14,13" +LINE 1: select '12:16:14,13'::pg_snapshot; + ^ +-- also try it with non-error-throwing API +select pg_input_is_valid('12:13:', 'pg_snapshot'); + pg_input_is_valid +------------------- + t +(1 row) + +select pg_input_is_valid('31:12:', 'pg_snapshot'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('31:12:', 'pg_snapshot'); + message | detail | hint | sql_error_code +-----------------------------------------------------+--------+------+---------------- + invalid input syntax for type pg_snapshot: "31:12:" | | | 22P02 +(1 row) + +select pg_input_is_valid('12:16:14,13', 'pg_snapshot'); + pg_input_is_valid +------------------- + f +(1 row) + +select * from pg_input_error_info('12:16:14,13', 'pg_snapshot'); + message | detail | hint | sql_error_code +----------------------------------------------------------+--------+------+---------------- + invalid input syntax for type pg_snapshot: "12:16:14,13" | | | 22P02 +(1 row) + +create temp table snapshot_test ( + nr integer, + snap pg_snapshot +); +insert into snapshot_test values (1, '12:13:'); +insert into snapshot_test values (2, '12:20:13,15,18'); +insert into snapshot_test values (3, '100001:100009:100005,100007,100008'); +insert into snapshot_test values (4, '100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131'); +select snap from snapshot_test order by nr; + snap +------------------------------------------------------------------------------------------------------------------------------------- + 12:13: + 
12:20:13,15,18 + 100001:100009:100005,100007,100008 + 100:150:101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131 +(4 rows) + +select pg_snapshot_xmin(snap), + pg_snapshot_xmax(snap), + pg_snapshot_xip(snap) +from snapshot_test order by nr; + pg_snapshot_xmin | pg_snapshot_xmax | pg_snapshot_xip +------------------+------------------+----------------- + 12 | 20 | 13 + 12 | 20 | 15 + 12 | 20 | 18 + 100001 | 100009 | 100005 + 100001 | 100009 | 100007 + 100001 | 100009 | 100008 + 100 | 150 | 101 + 100 | 150 | 102 + 100 | 150 | 103 + 100 | 150 | 104 + 100 | 150 | 105 + 100 | 150 | 106 + 100 | 150 | 107 + 100 | 150 | 108 + 100 | 150 | 109 + 100 | 150 | 110 + 100 | 150 | 111 + 100 | 150 | 112 + 100 | 150 | 113 + 100 | 150 | 114 + 100 | 150 | 115 + 100 | 150 | 116 + 100 | 150 | 117 + 100 | 150 | 118 + 100 | 150 | 119 + 100 | 150 | 120 + 100 | 150 | 121 + 100 | 150 | 122 + 100 | 150 | 123 + 100 | 150 | 124 + 100 | 150 | 125 + 100 | 150 | 126 + 100 | 150 | 127 + 100 | 150 | 128 + 100 | 150 | 129 + 100 | 150 | 130 + 100 | 150 | 131 +(37 rows) + +select id, pg_visible_in_snapshot(id::text::xid8, snap) +from snapshot_test, generate_series(11, 21) id +where nr = 2; + id | pg_visible_in_snapshot +----+------------------------ + 11 | t + 12 | t + 13 | f + 14 | t + 15 | f + 16 | t + 17 | t + 18 | f + 19 | t + 20 | f + 21 | f +(11 rows) + +-- test bsearch +select id, pg_visible_in_snapshot(id::text::xid8, snap) +from snapshot_test, generate_series(90, 160) id +where nr = 4; + id | pg_visible_in_snapshot +-----+------------------------ + 90 | t + 91 | t + 92 | t + 93 | t + 94 | t + 95 | t + 96 | t + 97 | t + 98 | t + 99 | t + 100 | t + 101 | f + 102 | f + 103 | f + 104 | f + 105 | f + 106 | f + 107 | f + 108 | f + 109 | f + 110 | f + 111 | f + 112 | f + 113 | f + 114 | f + 115 | f + 116 | f + 117 | f + 118 | f + 119 | f + 120 | f + 121 | f + 122 | f + 123 | f + 124 | f + 125 | f + 126 | f + 127 | f + 128 | f + 129 | f + 130 | f + 131 | f + 132 | t + 133 | t + 134 | t + 135 | t + 136 | t + 137 | t + 138 | t + 139 | t + 140 | t + 141 | t + 142 | t + 143 | t + 144 | t + 145 | t + 146 | t + 147 | t + 148 | t + 149 | t + 150 | f + 151 | f + 152 | f + 153 | f + 154 | f + 155 | f + 156 | f + 157 | f + 158 | f + 159 | f + 160 | f +(71 rows) + +-- test current values also +select pg_current_xact_id() >= pg_snapshot_xmin(pg_current_snapshot()); + ?column? 
+---------- + t +(1 row) + +-- we can't assume current is always less than xmax, however +select pg_visible_in_snapshot(pg_current_xact_id(), pg_current_snapshot()); + pg_visible_in_snapshot +------------------------ + f +(1 row) + +-- test 64bitness +select pg_snapshot '1000100010001000:1000100010001100:1000100010001012,1000100010001013'; + pg_snapshot +--------------------------------------------------------------------- + 1000100010001000:1000100010001100:1000100010001012,1000100010001013 +(1 row) + +select pg_visible_in_snapshot('1000100010001012', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + pg_visible_in_snapshot +------------------------ + f +(1 row) + +select pg_visible_in_snapshot('1000100010001015', '1000100010001000:1000100010001100:1000100010001012,1000100010001013'); + pg_visible_in_snapshot +------------------------ + t +(1 row) + +-- test 64bit overflow +SELECT pg_snapshot '1:9223372036854775807:3'; + pg_snapshot +------------------------- + 1:9223372036854775807:3 +(1 row) + +SELECT pg_snapshot '1:9223372036854775808:3'; +ERROR: invalid input syntax for type pg_snapshot: "1:9223372036854775808:3" +LINE 1: SELECT pg_snapshot '1:9223372036854775808:3'; + ^ +-- test pg_current_xact_id_if_assigned +BEGIN; +SELECT pg_current_xact_id_if_assigned() IS NULL; + ?column? +---------- + t +(1 row) + +SELECT pg_current_xact_id() \gset +SELECT pg_current_xact_id_if_assigned() IS NOT DISTINCT FROM xid8 :'pg_current_xact_id'; + ?column? +---------- + t +(1 row) + +COMMIT; +-- test xid status functions +BEGIN; +SELECT pg_current_xact_id() AS committed \gset +COMMIT; +BEGIN; +SELECT pg_current_xact_id() AS rolledback \gset +ROLLBACK; +BEGIN; +SELECT pg_current_xact_id() AS inprogress \gset +SELECT pg_xact_status(:committed::text::xid8) AS committed; + committed +----------- + committed +(1 row) + +SELECT pg_xact_status(:rolledback::text::xid8) AS rolledback; + rolledback +------------ + aborted +(1 row) + +SELECT pg_xact_status(:inprogress::text::xid8) AS inprogress; + inprogress +------------- + in progress +(1 row) + +SELECT pg_xact_status('1'::xid8); -- BootstrapTransactionId is always committed + pg_xact_status +---------------- + committed +(1 row) + +SELECT pg_xact_status('2'::xid8); -- FrozenTransactionId is always committed + pg_xact_status +---------------- + committed +(1 row) + +SELECT pg_xact_status('3'::xid8); -- in regress testing FirstNormalTransactionId will always be behind oldestXmin + pg_xact_status +---------------- + +(1 row) + +COMMIT; +BEGIN; +CREATE FUNCTION test_future_xid_status(xid8) +RETURNS void +LANGUAGE plpgsql +AS +$$ +BEGIN + PERFORM pg_xact_status($1); + RAISE EXCEPTION 'didn''t ERROR at xid in the future as expected'; +EXCEPTION + WHEN invalid_parameter_value THEN + RAISE NOTICE 'Got expected error for xid in the future'; +END; +$$; +SELECT test_future_xid_status((:inprogress + 10000)::text::xid8); +NOTICE: Got expected error for xid in the future + test_future_xid_status +------------------------ + +(1 row) + +ROLLBACK; diff --git a/src/test/regress/expected/xml.out b/src/test/regress/expected/xml.out new file mode 100644 index 0000000..398345c --- /dev/null +++ b/src/test/regress/expected/xml.out @@ -0,0 +1,1787 @@ +CREATE TABLE xmltest ( + id int, + data xml +); +INSERT INTO xmltest VALUES (1, 'one'); +INSERT INTO xmltest VALUES (2, 'two'); +INSERT INTO xmltest VALUES (3, 'one + 2 | two +(2 rows) + +-- test non-throwing API, too +SELECT pg_input_is_valid('one', 'xml'); + pg_input_is_valid +------------------- + t +(1 row) + 
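A minimal sketch, not part of the expected output above, of how the non-throwing validation functions exercised here (pg_input_is_valid and pg_input_error_info, available since PostgreSQL 16) might be used outside the regression suite to screen text before casting it to xml; the staging table and its contents are illustrative assumptions only:

-- Sketch only: staging_docs is a hypothetical table, not part of the test data.
CREATE TEMP TABLE staging_docs (raw text);
INSERT INTO staging_docs VALUES ('<doc>ok</doc>'), ('<doc>broken');
-- keep only the rows that would cast cleanly
SELECT raw::xml FROM staging_docs WHERE pg_input_is_valid(raw, 'xml');
-- inspect why the remaining rows would fail, without raising an error
SELECT d.raw, e.message
FROM staging_docs d, pg_input_error_info(d.raw, 'xml') AS e
WHERE NOT pg_input_is_valid(d.raw, 'xml');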
+SELECT pg_input_is_valid('oneone', 'xml'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT message FROM pg_input_error_info('', 'xml'); + message +---------------------------------------------- + invalid XML content: invalid XML declaration +(1 row) + +SELECT xmlcomment('test'); + xmlcomment +------------- + +(1 row) + +SELECT xmlcomment('-test'); + xmlcomment +-------------- + +(1 row) + +SELECT xmlcomment('test-'); +ERROR: invalid XML comment +SELECT xmlcomment('--test'); +ERROR: invalid XML comment +SELECT xmlcomment('te st'); + xmlcomment +-------------- + +(1 row) + +SELECT xmlconcat(xmlcomment('hello'), + xmlelement(NAME qux, 'foo'), + xmlcomment('world')); + xmlconcat +---------------------------------------- + foo +(1 row) + +SELECT xmlconcat('hello', 'you'); + xmlconcat +----------- + helloyou +(1 row) + +SELECT xmlconcat(1, 2); +ERROR: argument of XMLCONCAT must be type xml, not type integer +LINE 1: SELECT xmlconcat(1, 2); + ^ +SELECT xmlconcat('bad', '', NULL, ''); + xmlconcat +-------------- + +(1 row) + +SELECT xmlconcat('', NULL, ''); + xmlconcat +----------------------------------- + +(1 row) + +SELECT xmlconcat(NULL); + xmlconcat +----------- + +(1 row) + +SELECT xmlconcat(NULL, NULL); + xmlconcat +----------- + +(1 row) + +SELECT xmlelement(name element, + xmlattributes (1 as one, 'deuce' as two), + 'content'); + xmlelement +------------------------------------------------ + content +(1 row) + +SELECT xmlelement(name element, + xmlattributes ('unnamed and wrong')); +ERROR: unnamed XML attribute value must be a column reference +LINE 2: xmlattributes ('unnamed and wrong')); + ^ +SELECT xmlelement(name element, xmlelement(name nested, 'stuff')); + xmlelement +------------------------------------------- + stuff +(1 row) + +SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp; + xmlelement +---------------------------------------------------------------------- + sharon251000 + sam302000 + bill201000 + jeff23600 + cim30400 + linda19100 +(6 rows) + +SELECT xmlelement(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a)); +ERROR: XML attribute name "a" appears more than once +LINE 1: ...ment(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a)); + ^ +SELECT xmlelement(name num, 37); + xmlelement +--------------- + 37 +(1 row) + +SELECT xmlelement(name foo, text 'bar'); + xmlelement +---------------- + bar +(1 row) + +SELECT xmlelement(name foo, xml 'bar'); + xmlelement +---------------- + bar +(1 row) + +SELECT xmlelement(name foo, text 'br'); + xmlelement +------------------------- + b<a/>r +(1 row) + +SELECT xmlelement(name foo, xml 'br'); + xmlelement +------------------- + br +(1 row) + +SELECT xmlelement(name foo, array[1, 2, 3]); + xmlelement +------------------------------------------------------------------------- + 123 +(1 row) + +SET xmlbinary TO base64; +SELECT xmlelement(name foo, bytea 'bar'); + xmlelement +----------------- + YmFy +(1 row) + +SET xmlbinary TO hex; +SELECT xmlelement(name foo, bytea 'bar'); + xmlelement +------------------- + 626172 +(1 row) + +SELECT xmlelement(name foo, xmlattributes(true as bar)); + xmlelement +------------------- + +(1 row) + +SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar)); + xmlelement +---------------------------------- + +(1 row) + +SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar)); +ERROR: timestamp out of range +DETAIL: XML does not support infinite timestamp values. 
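As a rough illustration of the constructors tested above (the element and attribute names below are arbitrary examples, not taken from the suite), xmlelement and xmlattributes are typically combined to turn computed row values into markup:

-- Illustrative only: build one <item> element per generated row.
SELECT xmlelement(name item,
                  xmlattributes(i AS id, (i * 10) AS score),
                  'value ' || i) AS item_xml
FROM generate_series(1, 2) AS i;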
+SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'br' as funnier)); + xmlelement +------------------------------------------------------------ + +(1 row) + +SELECT xmlparse(content ''); + xmlparse +---------- + +(1 row) + +SELECT xmlparse(content ' '); + xmlparse +---------- + +(1 row) + +SELECT xmlparse(content 'abc'); + xmlparse +---------- + abc +(1 row) + +SELECT xmlparse(content 'x'); + xmlparse +-------------- + x +(1 row) + +SELECT xmlparse(content '&'); +ERROR: invalid XML content +DETAIL: line 1: xmlParseEntityRef: no name +& + ^ +line 1: chunk is not well balanced +& + ^ +SELECT xmlparse(content '&idontexist;'); +ERROR: invalid XML content +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: chunk is not well balanced +&idontexist; + ^ +SELECT xmlparse(content ''); + xmlparse +--------------------------- + +(1 row) + +SELECT xmlparse(content ''); + xmlparse +-------------------------------- + +(1 row) + +SELECT xmlparse(content '&idontexist;'); +ERROR: invalid XML content +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced +&idontexist; + ^ +line 1: chunk is not well balanced +&idontexist; + ^ +SELECT xmlparse(content ''); + xmlparse +--------------------- + +(1 row) + +SELECT xmlparse(document ' '); +ERROR: invalid XML document +DETAIL: line 1: Start tag expected, '<' not found + + ^ +SELECT xmlparse(document 'abc'); +ERROR: invalid XML document +DETAIL: line 1: Start tag expected, '<' not found +abc +^ +SELECT xmlparse(document 'x'); + xmlparse +-------------- + x +(1 row) + +SELECT xmlparse(document '&'); +ERROR: invalid XML document +DETAIL: line 1: xmlParseEntityRef: no name +& + ^ +line 1: Opening and ending tag mismatch: invalidentity line 1 and abc +& + ^ +SELECT xmlparse(document '&idontexist;'); +ERROR: invalid XML document +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: Opening and ending tag mismatch: undefinedentity line 1 and abc +&idontexist; + ^ +SELECT xmlparse(document ''); + xmlparse +--------------------------- + +(1 row) + +SELECT xmlparse(document ''); + xmlparse +-------------------------------- + +(1 row) + +SELECT xmlparse(document '&idontexist;'); +ERROR: invalid XML document +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced +&idontexist; + ^ +SELECT xmlparse(document ''); + xmlparse +--------------------- + +(1 row) + +SELECT xmlpi(name foo); + xmlpi +--------- + +(1 row) + +SELECT xmlpi(name xml); +ERROR: invalid XML processing instruction +DETAIL: XML processing instruction target name cannot be "xml". +SELECT xmlpi(name xmlstuff); + xmlpi +-------------- + +(1 row) + +SELECT xmlpi(name foo, 'bar'); + xmlpi +------------- + +(1 row) + +SELECT xmlpi(name foo, 'in?>valid'); +ERROR: invalid XML processing instruction +DETAIL: XML processing instruction cannot contain "?>". +SELECT xmlpi(name foo, null); + xmlpi +------- + +(1 row) + +SELECT xmlpi(name xml, null); +ERROR: invalid XML processing instruction +DETAIL: XML processing instruction target name cannot be "xml". 
+SELECT xmlpi(name xmlstuff, null); + xmlpi +------- + +(1 row) + +SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"'); + xmlpi +------------------------------------------------------- + +(1 row) + +SELECT xmlpi(name foo, ' bar'); + xmlpi +------------- + +(1 row) + +SELECT xmlroot(xml '', version no value, standalone no value); + xmlroot +--------- + +(1 row) + +SELECT xmlroot(xml '', version '2.0'); + xmlroot +----------------------------- + +(1 row) + +SELECT xmlroot(xml '', version no value, standalone yes); + xmlroot +---------------------------------------------- + +(1 row) + +SELECT xmlroot(xml '', version no value, standalone yes); + xmlroot +---------------------------------------------- + +(1 row) + +SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no); + xmlroot +--------------------------------------------- + +(1 row) + +SELECT xmlroot('', version no value, standalone no); + xmlroot +--------------------------------------------- + +(1 row) + +SELECT xmlroot('', version no value, standalone no value); + xmlroot +--------- + +(1 row) + +SELECT xmlroot('', version no value); + xmlroot +---------------------------------------------- + +(1 row) + +SELECT xmlroot ( + xmlelement ( + name gazonk, + xmlattributes ( + 'val' AS name, + 1 + 1 AS num + ), + xmlelement ( + NAME qux, + 'foo' + ) + ), + version '1.0', + standalone yes +); + xmlroot +------------------------------------------------------------------------------------------ + foo +(1 row) + +SELECT xmlserialize(content data as character varying(20)) FROM xmltest; + xmlserialize +-------------------- + one + two +(2 rows) + +SELECT xmlserialize(content 'good' as char(10)); + xmlserialize +-------------- + good +(1 row) + +SELECT xmlserialize(document 'bad' as text); +ERROR: not an XML document +-- indent +SELECT xmlserialize(DOCUMENT '42' AS text INDENT); + xmlserialize +------------------------- + + + + + 42+ + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '42' AS text INDENT); + xmlserialize +------------------------- + + + + + 42+ + + + +(1 row) + +-- no indent +SELECT xmlserialize(DOCUMENT '42' AS text NO INDENT); + xmlserialize +------------------------------------------- + 42 +(1 row) + +SELECT xmlserialize(CONTENT '42' AS text NO INDENT); + xmlserialize +------------------------------------------- + 42 +(1 row) + +-- indent non singly-rooted xml +SELECT xmlserialize(DOCUMENT '7342' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT '7342' AS text INDENT); + xmlserialize +----------------------- + 73 + + + + 42+ + +(1 row) + +-- indent non singly-rooted xml with mixed contents +SELECT xmlserialize(DOCUMENT 'text node73text node42' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT 'text node73text node42' AS text INDENT); + xmlserialize +------------------------ + text node + + 73text node+ + + + 42 + + +(1 row) + +-- indent singly-rooted xml with mixed contents +SELECT xmlserialize(DOCUMENT '42text node73' AS text INDENT); + xmlserialize +--------------------------------------------- + + + + + 42 + + text node73+ + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '42text node73' AS text INDENT); + xmlserialize +--------------------------------------------- + + + + + 42 + + text node73+ + + + +(1 row) + +-- indent empty string +SELECT xmlserialize(DOCUMENT '' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT '' AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- whitespaces +SELECT 
xmlserialize(DOCUMENT ' ' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT ' ' AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- indent null +SELECT xmlserialize(DOCUMENT NULL AS text INDENT); + xmlserialize +-------------- + +(1 row) + +SELECT xmlserialize(CONTENT NULL AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- indent with XML declaration +SELECT xmlserialize(DOCUMENT '73' AS text INDENT); + xmlserialize +---------------------------------------- + + + + + + + 73 + + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '73' AS text INDENT); + xmlserialize +------------------- + + + + + 73+ + + + +(1 row) + +-- indent containing DOCTYPE declaration +SELECT xmlserialize(DOCUMENT '' AS text INDENT); + xmlserialize +-------------- + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '' AS text INDENT); + xmlserialize +-------------- + + + + + +(1 row) + +-- indent xml with empty element +SELECT xmlserialize(DOCUMENT '' AS text INDENT); + xmlserialize +-------------- + + + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '' AS text INDENT); + xmlserialize +-------------- + + + + + +(1 row) + +-- 'no indent' = not using 'no indent' +SELECT xmlserialize(DOCUMENT '42' AS text) = xmlserialize(DOCUMENT '42' AS text NO INDENT); + ?column? +---------- + t +(1 row) + +SELECT xmlserialize(CONTENT '42' AS text) = xmlserialize(CONTENT '42' AS text NO INDENT); + ?column? +---------- + t +(1 row) + +SELECT xml 'bar' IS DOCUMENT; + ?column? +---------- + t +(1 row) + +SELECT xml 'barfoo' IS DOCUMENT; + ?column? +---------- + f +(1 row) + +SELECT xml '' IS NOT DOCUMENT; + ?column? +---------- + f +(1 row) + +SELECT xml 'abc' IS NOT DOCUMENT; + ?column? +---------- + t +(1 row) + +SELECT '<>' IS NOT DOCUMENT; +ERROR: invalid XML content +LINE 1: SELECT '<>' IS NOT DOCUMENT; + ^ +DETAIL: line 1: StartTag: invalid element name +<> + ^ +SELECT xmlagg(data) FROM xmltest; + xmlagg +-------------------------------------- + onetwo +(1 row) + +SELECT xmlagg(data) FROM xmltest WHERE id > 10; + xmlagg +-------- + +(1 row) + +SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp; + xmlelement +-------------------------------------------------------------------------------------------------------------------------------- + sharonsambilljeffcimlinda +(1 row) + +-- Check mapping SQL identifier to XML name +SELECT xmlpi(name ":::_xml_abc135.%-&_"); + xmlpi +------------------------------------------------- + +(1 row) + +SELECT xmlpi(name "123"); + xmlpi +--------------- + +(1 row) + +PREPARE foo (xml) AS SELECT xmlconcat('', $1); +SET XML OPTION DOCUMENT; +EXECUTE foo (''); + xmlconcat +-------------- + +(1 row) + +EXECUTE foo ('bad'); +ERROR: invalid XML document +LINE 1: EXECUTE foo ('bad'); + ^ +DETAIL: line 1: Start tag expected, '<' not found +bad +^ +SELECT xml ''; +ERROR: invalid XML document +LINE 1: SELECT xml ''; + ^ +DETAIL: line 1: Extra content at the end of the document + + ^ +SET XML OPTION CONTENT; +EXECUTE foo (''); + xmlconcat +-------------- + +(1 row) + +EXECUTE foo ('good'); + xmlconcat +------------ + good +(1 row) + +SELECT xml ' '; + xml +-------------------------------------------------------------------- + +(1 row) + +SELECT xml ' '; + xml +------------------------------ + +(1 row) + +SELECT xml ''; + xml +------------------ + +(1 row) + +SELECT xml ' oops '; +ERROR: invalid XML content +LINE 1: SELECT xml ' oops '; + ^ +DETAIL: line 1: StartTag: invalid element name + oops + ^ +SELECT xml ' '; +ERROR: invalid XML content 
+LINE 1: SELECT xml ' '; + ^ +DETAIL: line 1: StartTag: invalid element name + + ^ +SELECT xml ''; +ERROR: invalid XML content +LINE 1: SELECT xml ''; + ^ +DETAIL: line 1: Extra content at the end of the document + + ^ +-- Test backwards parsing +CREATE VIEW xmlview1 AS SELECT xmlcomment('test'); +CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you'); +CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&'); +CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp; +CREATE VIEW xmlview5 AS SELECT xmlparse(content 'x'); +CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar'); +CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version no value, standalone yes); +CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10)); +CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text); +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'xmlview%' ORDER BY 1; + table_name | view_definition +------------+------------------------------------------------------------------------------------------------------------ + xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment; + xmlview2 | SELECT XMLCONCAT('hello'::xml, 'you'::xml) AS "xmlconcat"; + xmlview3 | SELECT XMLELEMENT(NAME element, XMLATTRIBUTES(1 AS ":one:", 'deuce' AS two), 'content&') AS "xmlelement"; + xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(name AS name, age AS age, salary AS pay)) AS "xmlelement" + + | FROM emp; + xmlview5 | SELECT XMLPARSE(CONTENT 'x'::text STRIP WHITESPACE) AS "xmlparse"; + xmlview6 | SELECT XMLPI(NAME foo, 'bar'::text) AS "xmlpi"; + xmlview7 | SELECT XMLROOT(''::xml, VERSION NO VALUE, STANDALONE YES) AS "xmlroot"; + xmlview8 | SELECT (XMLSERIALIZE(CONTENT 'good'::xml AS character(10)))::character(10) AS "xmlserialize"; + xmlview9 | SELECT XMLSERIALIZE(CONTENT 'good'::xml AS text) AS "xmlserialize"; +(9 rows) + +-- Text XPath expressions evaluation +SELECT xpath('/value', data) FROM xmltest; + xpath +---------------------- + {one} + {two} +(2 rows) + +SELECT xpath(NULL, NULL) IS NULL FROM xmltest; + ?column? 
+---------- + t + t +(2 rows) + +SELECT xpath('', ''); +ERROR: empty XPath expression +CONTEXT: SQL function "xpath" statement 1 +SELECT xpath('//text()', 'number one'); + xpath +---------------- + {"number one"} +(1 row) + +SELECT xpath('//loc:piece/@id', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); + xpath +------- + {1,2} +(1 row) + +SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); + xpath +------------------------------------------------------------------------------------------------------------------------------------------------ + {"number one",""} +(1 row) + +SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); + xpath +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + {"number one",""} +(1 row) + +SELECT xpath('//b', 'one two three etc'); + xpath +------------------------- + {two,etc} +(1 row) + +SELECT xpath('//text()', '<'); + xpath +-------- + {<} +(1 row) + +SELECT xpath('//@value', ''); + xpath +-------- + {<} +(1 row) + +SELECT xpath('''<>''', ''); + xpath +--------------------------- + {<<invalid>>} +(1 row) + +SELECT xpath('count(//*)', ''); + xpath +------- + {3} +(1 row) + +SELECT xpath('count(//*)=0', ''); + xpath +--------- + {false} +(1 row) + +SELECT xpath('count(//*)=3', ''); + xpath +-------- + {true} +(1 row) + +SELECT xpath('name(/*)', ''); + xpath +-------- + {root} +(1 row) + +SELECT xpath('/nosuchtag', ''); + xpath +------- + {} +(1 row) + +SELECT xpath('root', ''); + xpath +----------- + {} +(1 row) + +-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. 
+ IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; +-- Test xmlexists and xpath_exists +SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); + xmlexists +----------- + f +(1 row) + +SELECT xmlexists('//town[text() = ''Cwmbran'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); + xmlexists +----------- + t +(1 row) + +SELECT xmlexists('count(/nosuchtag)' PASSING BY REF ''); + xmlexists +----------- + t +(1 row) + +SELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml); + xpath_exists +-------------- + f +(1 row) + +SELECT xpath_exists('//town[text() = ''Cwmbran'']','Bidford-on-AvonCwmbranBristol'::xml); + xpath_exists +-------------- + t +(1 row) + +SELECT xpath_exists('count(/nosuchtag)', ''::xml); + xpath_exists +-------------- + t +(1 row) + +INSERT INTO xmltest VALUES (4, 'BudvarfreeCarlinglots'::xml); +INSERT INTO xmltest VALUES (5, 'MolsonfreeCarlinglots'::xml); +INSERT INTO xmltest VALUES (6, 'BudvarfreeCarlinglots'::xml); +INSERT INTO xmltest VALUES (7, 'MolsonfreeCarlinglots'::xml); +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING BY REF data BY REF); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers' PASSING BY REF data); + count +------- + 2 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers/name[text() = ''Molson'']' PASSING BY REF data); + count +------- + 1 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beer',data); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers',data); + count +------- + 2 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers/name[text() = ''Molson'']',data); + count +------- + 1 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beer',data,ARRAY[ARRAY['myns','http://myns.com']]); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers',data,ARRAY[ARRAY['myns','http://myns.com']]); + count +------- + 2 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers/myns:name[text() = ''Molson'']',data,ARRAY[ARRAY['myns','http://myns.com']]); + count +------- + 1 +(1 row) + +CREATE TABLE query ( expr TEXT ); +INSERT INTO query VALUES ('/menu/beers/cost[text() = ''lots'']'); +SELECT COUNT(id) FROM xmltest, query WHERE xmlexists(expr PASSING BY REF data); + count +------- + 2 +(1 row) + +-- Test xml_is_well_formed and variants +SELECT xml_is_well_formed_document('bar'); + xml_is_well_formed_document +----------------------------- + t +(1 row) 
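+
+-- Illustrative sketch (not part of the captured regression output): the
+-- two-dimensional array argument of xpath_exists()/xpath() binds XPath
+-- prefixes to namespace URIs, as in the myns-qualified counts above.
+SELECT xpath_exists('/myns:menu/myns:beers', data,
+                    ARRAY[ARRAY['myns', 'http://myns.com']])
+  FROM xmltest;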
+ +SELECT xml_is_well_formed_document('abc'); + xml_is_well_formed_document +----------------------------- + f +(1 row) + +SELECT xml_is_well_formed_content('bar'); + xml_is_well_formed_content +---------------------------- + t +(1 row) + +SELECT xml_is_well_formed_content('abc'); + xml_is_well_formed_content +---------------------------- + t +(1 row) + +SET xmloption TO DOCUMENT; +SELECT xml_is_well_formed('abc'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('<>'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('bar'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('barbaz'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('number one'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('bar'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('bar'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('&'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('&idontexist;'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('&idontexist;'); + xml_is_well_formed +-------------------- + f +(1 row) + +SET xmloption TO CONTENT; +SELECT xml_is_well_formed('abc'); + xml_is_well_formed +-------------------- + t +(1 row) + +-- Since xpath() deals with namespaces, it's a bit stricter about +-- what's well-formed and what's not. If we don't obey these rules +-- (i.e. ignore namespace-related errors from libxml), xpath() +-- fails in subtle ways. The following would for example produce +-- the xml value +-- +-- which is invalid because '<' may not appear un-escaped in +-- attribute values. +-- Since different libxml versions emit slightly different +-- error messages, we suppress the DETAIL in this test. +\set VERBOSITY terse +SELECT xpath('/*', ''); +ERROR: could not parse XML document +\set VERBOSITY default +-- Again, the XML isn't well-formed for namespace purposes +SELECT xpath('/*', ''); +ERROR: could not parse XML document +DETAIL: line 1: Namespace prefix nosuchprefix on tag is not defined + + ^ +CONTEXT: SQL function "xpath" statement 1 +-- XPath deprecates relative namespaces, but they're not supposed to +-- throw an error, only a warning. +SELECT xpath('/*', ''); +WARNING: line 1: xmlns: URI relative is not absolute + + ^ + xpath +-------------------------------------- + {""} +(1 row) + +-- External entity references should not leak filesystem information. +SELECT XMLPARSE(DOCUMENT ']>&c;'); + xmlparse +----------------------------------------------------------------- + ]>&c; +(1 row) + +SELECT XMLPARSE(DOCUMENT ']>&c;'); + xmlparse +----------------------------------------------------------------------- + ]>&c; +(1 row) + +-- This might or might not load the requested DTD, but it mustn't throw error. 
+SELECT XMLPARSE(DOCUMENT ' '); + xmlparse +------------------------------------------------------------------------------------------------------------------------------------------------------ +   +(1 row) + +-- XMLPATH tests +CREATE TABLE xmldata(data xml); +INSERT INTO xmldata VALUES(' + + AU + Australia + 3 + + + CN + China + 3 + + + HK + HongKong + 3 + + + IN + India + 3 + + + JP + Japan + 3Sinzo Abe + + + SG + Singapore + 3791 + +'); +-- XMLTABLE with columns +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified +(6 rows) + +CREATE VIEW xmltableview1 AS SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +SELECT * FROM xmltableview1; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified +(6 rows) + +\sv xmltableview1 +CREATE OR REPLACE VIEW public.xmltableview1 AS + SELECT "xmltable".id, + "xmltable"._id, + "xmltable".country_name, + "xmltable".country_id, + "xmltable".region_id, + "xmltable".size, + "xmltable".unit, + "xmltable".premier_name + FROM ( SELECT xmldata.data + FROM xmldata) x, + LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; + QUERY PLAN +----------------------------------------- + Nested Loop + -> Seq Scan on xmldata + -> Table Function Scan on "xmltable" +(3 rows) + +EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +(7 rows) + +-- errors +SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2); +ERROR: XMLTABLE function has 1 columns available but 2 columns specified +-- XMLNAMESPACES tests +SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz), + '/zz:rows/zz:row' + PASSING '10' + COLUMNS a int PATH 'zz:a'); + a +---- + 10 +(1 row) + +CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz), + '/zz:rows/zz:row' + PASSING '10' + COLUMNS a int PATH 'zz:a'); +SELECT * FROM xmltableview2; + a +---- + 10 +(1 row) + +SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'), + '/rows/row' + PASSING '10' + COLUMNS a int PATH 'a'); +ERROR: DEFAULT namespace is not supported +SELECT * FROM XMLTABLE('.' 
+ PASSING '' + COLUMNS a text PATH 'foo/namespace::node()'); + a +-------------------------------------- + http://www.w3.org/XML/1998/namespace +(1 row) + +-- used in prepare statements +PREPARE pp AS +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +EXECUTE pp; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified +(6 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int); + COUNTRY_NAME | REGION_ID +--------------+----------- + India | 3 + Japan | 3 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int); + id | COUNTRY_NAME | REGION_ID +----+--------------+----------- + 1 | India | 3 + 2 | Japan | 3 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int); + id | COUNTRY_NAME | REGION_ID +----+--------------+----------- + 4 | India | 3 + 5 | Japan | 3 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id'); + id +---- + 4 + 5 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY); + id +---- + 1 + 2 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.'); + id | COUNTRY_NAME | REGION_ID | rawdata +----+--------------+-----------+------------------------------------------------------------------ + 4 | India | 3 | + + | | | IN + + | | | India + + | | | 3 + + | | | + 5 | Japan | 3 | + + | | | JP + + | | | Japan + + | | | 3Sinzo Abe+ + | | | +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*'); + id | COUNTRY_NAME | REGION_ID | rawdata +----+--------------+-----------+----------------------------------------------------------------------------------------------------------------------------- + 4 | India | 3 | INIndia3 + 5 | Japan | 3 | JPJapan3Sinzo Abe +(2 rows) + +SELECT * FROM xmltable('/root' passing 'a1aa2a bbbbxxxcccc' COLUMNS element text); + element +---------------------- + a1aa2a bbbbxxxcccc +(1 row) + +SELECT * FROM xmltable('/root' passing 'a1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail +ERROR: 
more than one value returned by column XPath expression +-- CDATA test +select * from xmltable('d/r' passing ' &"<>!foo]]>2' columns c text); + c +------------------------- + &"<>!foo + 2 +(2 rows) + +-- XML builtin entities +SELECT * FROM xmltable('/x/a' PASSING ''"&<>' COLUMNS ent text); + ent +----- + ' + " + & + < + > +(5 rows) + +SELECT * FROM xmltable('/x/a' PASSING ''"&<>' COLUMNS ent xml); + ent +------------------ + ' + " + & + < + > +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +(7 rows) + +-- test qual +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; + COUNTRY_NAME | REGION_ID +--------------+----------- + Japan | 3 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID" + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID" + Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer) + Filter: ("xmltable"."COUNTRY_NAME" = 'Japan'::text) +(8 rows) + +-- should to work with more data +INSERT INTO xmldata VALUES(' + + CZ + Czech Republic + 2Milos Zeman + + + DE + Germany + 2 + + + FR + 
France + 2 + +'); +INSERT INTO xmldata VALUES(' + + EG + Egypt + 1 + + + SD + Sudan + 1 + +'); +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+----------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified + 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman + 11 | 2 | Germany | DE | 2 | | | not specified + 12 | 3 | France | FR | 2 | | | not specified + 20 | 1 | Egypt | EG | 1 | | | not specified + 21 | 2 | Sudan | SD | 1 | | | not specified +(11 rows) + +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') + WHERE region_id = 2; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+----------------+------------+-----------+------+------+--------------- + 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman + 11 | 2 | Germany | DE | 2 | | | not specified + 12 | 3 | France | FR | 2 | | | not specified +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') + WHERE region_id = 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH 
('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + Filter: ("xmltable".region_id = 2) +(8 rows) + +-- should fail, NULL value +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE' NOT NULL, + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +ERROR: null is not allowed in column "size" +-- if all is ok, then result is empty +-- one line xml test +WITH + x AS (SELECT proname, proowner, procost::numeric, pronargs, + array_to_string(proargnames,',') as proargnames, + case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes + FROM pg_proc WHERE proname = 'f_leak'), + y AS (SELECT xmlelement(name proc, + xmlforest(proname, proowner, + procost, pronargs, + proargnames, proargtypes)) as proc + FROM x), + z AS (SELECT xmltable.* + FROM y, + LATERAL xmltable('/proc' PASSING proc + COLUMNS proname name, + proowner oid, + procost float, + pronargs int, + proargnames text, + proargtypes text)) + SELECT * FROM z + EXCEPT SELECT * FROM x; + proname | proowner | procost | pronargs | proargnames | proargtypes +---------+----------+---------+----------+-------------+------------- +(0 rows) + +-- multi line xml test, result should be empty too +WITH + x AS (SELECT proname, proowner, procost::numeric, pronargs, + array_to_string(proargnames,',') as proargnames, + case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes + FROM pg_proc), + y AS (SELECT xmlelement(name data, + xmlagg(xmlelement(name proc, + xmlforest(proname, proowner, procost, + pronargs, proargnames, proargtypes)))) as doc + FROM x), + z AS (SELECT xmltable.* + FROM y, + LATERAL xmltable('/data/proc' PASSING doc + COLUMNS proname name, + proowner oid, + procost float, + pronargs int, + proargnames text, + proargtypes text)) + SELECT * FROM z + EXCEPT SELECT * FROM x; + proname | proowner | procost | pronargs | proargnames | proargtypes +---------+----------+---------+----------+-------------+------------- +(0 rows) + +CREATE TABLE xmltest2(x xml, _path text); +INSERT INTO xmltest2 VALUES('1', 'A'); +INSERT INTO xmltest2 VALUES('2', 'B'); +INSERT INTO xmltest2 VALUES('3', 'C'); +INSERT INTO xmltest2 VALUES('2', 'D'); +SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c'); + a +--- + 1 + 2 + 3 + 2 +(4 rows) + +SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.'); + a +--- + 1 + 2 + 3 + 2 +(4 rows) + +SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54); + a +---- + 11 + 12 + 13 + 14 +(4 rows) + +-- XPath result can be boolean or number too +SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. 
= "a"', e integer PATH 'string-length(.)'); + a | b | c | d | e +----------+---+----+---+--- + a | a | hi | t | 1 +(1 row) + +\x +SELECT * FROM XMLTABLE('*' PASSING 'pre&deeppost' COLUMNS x xml PATH '/e/n2', y xml PATH '/'); +-[ RECORD 1 ]----------------------------------------------------------- +x | &deep +y | pre&deeppost+ + | + +\x +SELECT * FROM XMLTABLE('.' PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '""', b xml PATH '""'); + a | b +--------+-------------- + | <foo/> +(1 row) + diff --git a/src/test/regress/expected/xml_1.out b/src/test/regress/expected/xml_1.out new file mode 100644 index 0000000..63b7794 --- /dev/null +++ b/src/test/regress/expected/xml_1.out @@ -0,0 +1,1404 @@ +CREATE TABLE xmltest ( + id int, + data xml +); +INSERT INTO xmltest VALUES (1, 'one'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest VALUES (1, 'one'); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmltest VALUES (2, 'two'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest VALUES (2, 'two'); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmltest VALUES (3, 'one', 'xml'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT pg_input_is_valid('oneone', 'xml'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT message FROM pg_input_error_info('', 'xml'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlcomment('test'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlcomment('-test'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlcomment('test-'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlcomment('--test'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlcomment('te st'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlconcat(xmlcomment('hello'), + xmlelement(NAME qux, 'foo'), + xmlcomment('world')); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlconcat('hello', 'you'); +ERROR: unsupported XML feature +LINE 1: SELECT xmlconcat('hello', 'you'); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlconcat(1, 2); +ERROR: argument of XMLCONCAT must be type xml, not type integer +LINE 1: SELECT xmlconcat(1, 2); + ^ +SELECT xmlconcat('bad', '', NULL, ''); +ERROR: unsupported XML feature +LINE 1: SELECT xmlconcat('', NULL, '', NULL, ''); +ERROR: unsupported XML feature +LINE 1: SELECT xmlconcat('', NULL, 'r'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlelement(name foo, xml 'br'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlelement(name foo, array[1, 2, 3]); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. 
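+-- Illustrative sketch (not part of the expected output): on a build without
+-- libxml each of these calls raises SQLSTATE 0A000 (feature_not_supported),
+-- which PL/pgSQL callers can trap in the same way as the round-trip DO
+-- block elsewhere in this file:
+DO $$
+BEGIN
+  PERFORM xmlcomment('probe');
+EXCEPTION
+  WHEN feature_not_supported THEN
+    RAISE LOG 'skip: %', SQLERRM;
+END
+$$;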
+SET xmlbinary TO base64; +SELECT xmlelement(name foo, bytea 'bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SET xmlbinary TO hex; +SELECT xmlelement(name foo, bytea 'bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlelement(name foo, xmlattributes(true as bar)); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar)); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar)); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'br' as funnier)); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content ' '); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content 'abc'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content 'x'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content '&'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content '&idontexist;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content '&idontexist;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(content ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document ' '); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document 'abc'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document 'x'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document '&'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document '&idontexist;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. 
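+-- Illustrative sketch (not part of the expected output): XMLPARSE(DOCUMENT ...)
+-- requires a single root element, while XMLPARSE(CONTENT ...) also accepts
+-- fragments and bare text, matching the xml_is_well_formed_document/_content
+-- results in xml.out. On a libxml-enabled build the first statement below
+-- succeeds and the second fails.
+SELECT xmlparse(content 'abc');
+SELECT xmlparse(document 'abc');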
+SELECT xmlparse(document ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document '&idontexist;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlparse(document ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name foo); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name xml); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name xmlstuff); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name foo, 'bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name foo, 'in?>valid'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name foo, null); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name xml, null); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name xmlstuff, null); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name foo, ' bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot(xml '', version no value, standalone no value); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot(xml '', version no value, standalone no... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot(xml '', version '2.0'); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot(xml '', version '2.0'); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot(xml '', version no value, standalone yes); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot(xml '', version no value, standalone ye... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot(xml '', version no value, standalone yes); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot(xml '', version no... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot(xmlroot(xml '', version '1.0'), version... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot('', version no value, standalone no); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot('... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot('', version no value, standalone no value); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot('... + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
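+-- Illustrative sketch (not part of the expected output): xmlroot() replaces
+-- the version and standalone properties in a value's XML declaration; on a
+-- libxml-enabled build the call below returns the content prefixed with an
+-- XML declaration carrying standalone="yes".
+SELECT xmlroot(xmlparse(content '<content/>'), version '1.0', standalone yes);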
+SELECT xmlroot('', version no value); +ERROR: unsupported XML feature +LINE 1: SELECT xmlroot('... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlroot ( + xmlelement ( + name gazonk, + xmlattributes ( + 'val' AS name, + 1 + 1 AS num + ), + xmlelement ( + NAME qux, + 'foo' + ) + ), + version '1.0', + standalone yes +); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(content data as character varying(20)) FROM xmltest; + xmlserialize +-------------- +(0 rows) + +SELECT xmlserialize(content 'good' as char(10)); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(content 'good' as char(10)); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(document 'bad' as text); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(document 'bad' as text); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- indent +SELECT xmlserialize(DOCUMENT '42' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '42' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- no indent +SELECT xmlserialize(DOCUMENT '42' AS text NO INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '42' AS text NO INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- indent non singly-rooted xml +SELECT xmlserialize(DOCUMENT '7342' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '734... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '7342' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '734... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- indent non singly-rooted xml with mixed contents +SELECT xmlserialize(DOCUMENT 'text node73text node42' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT 'text node73text nod... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT 'text node73text node42' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT 'text node73text nod... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- indent singly-rooted xml with mixed contents +SELECT xmlserialize(DOCUMENT '42text node73' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '42text node73' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
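+-- Illustrative sketch (not part of the expected output): the INDENT option
+-- of XMLSERIALIZE, new in PostgreSQL 16, pretty-prints the value with
+-- newlines and indentation, while NO INDENT (the default) leaves it as is.
+-- On a libxml-enabled build:
+SELECT xmlserialize(DOCUMENT xmlparse(document '<a><b>42</b></a>') AS text INDENT);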
+-- indent empty string +SELECT xmlserialize(DOCUMENT '' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '' AS text INDENT); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '' AS text INDENT); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- whitespaces +SELECT xmlserialize(DOCUMENT ' ' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT ' ' AS text INDENT); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT ' ' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT ' ' AS text INDENT); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- indent null +SELECT xmlserialize(DOCUMENT NULL AS text INDENT); + xmlserialize +-------------- + +(1 row) + +SELECT xmlserialize(CONTENT NULL AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- indent with XML declaration +SELECT xmlserialize(DOCUMENT '73' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '73' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '' AS text INDE... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '' AS text INDE... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- indent xml with empty element +SELECT xmlserialize(DOCUMENT '' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '' AS tex... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '' AS text INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '' AS tex... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- 'no indent' = not using 'no indent' +SELECT xmlserialize(DOCUMENT '42' AS text) = xmlserialize(DOCUMENT '42' AS text NO INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(DOCUMENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlserialize(CONTENT '42' AS text) = xmlserialize(CONTENT '42' AS text NO INDENT); +ERROR: unsupported XML feature +LINE 1: SELECT xmlserialize(CONTENT '42<... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml 'bar' IS DOCUMENT; +ERROR: unsupported XML feature +LINE 1: SELECT xml 'bar' IS DOCUMENT; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml 'barfoo' IS DOCUMENT; +ERROR: unsupported XML feature +LINE 1: SELECT xml 'barfoo' IS DOCUMENT; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml '' IS NOT DOCUMENT; +ERROR: unsupported XML feature +LINE 1: SELECT xml '' IS NOT DOCUMENT; + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
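+-- Illustrative sketch (not part of the expected output): IS DOCUMENT and
+-- IS NOT DOCUMENT test whether an xml value has exactly one root element;
+-- on a libxml-enabled build the first predicate below is true and the
+-- second false.
+SELECT xmlparse(content '<a/>') IS DOCUMENT,
+       xmlparse(content '<a/><b/>') IS DOCUMENT;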
+SELECT xml 'abc' IS NOT DOCUMENT; +ERROR: unsupported XML feature +LINE 1: SELECT xml 'abc' IS NOT DOCUMENT; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT '<>' IS NOT DOCUMENT; +ERROR: unsupported XML feature +LINE 1: SELECT '<>' IS NOT DOCUMENT; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlagg(data) FROM xmltest; + xmlagg +-------- + +(1 row) + +SELECT xmlagg(data) FROM xmltest WHERE id > 10; + xmlagg +-------- + +(1 row) + +SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp; +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +-- Check mapping SQL identifier to XML name +SELECT xmlpi(name ":::_xml_abc135.%-&_"); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmlpi(name "123"); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +PREPARE foo (xml) AS SELECT xmlconcat('', $1); +ERROR: unsupported XML feature +LINE 1: PREPARE foo (xml) AS SELECT xmlconcat('', $1); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SET XML OPTION DOCUMENT; +EXECUTE foo (''); +ERROR: prepared statement "foo" does not exist +EXECUTE foo ('bad'); +ERROR: prepared statement "foo" does not exist +SELECT xml ''; +ERROR: unsupported XML feature +LINE 1: SELECT xml ''; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SET XML OPTION CONTENT; +EXECUTE foo (''); +ERROR: prepared statement "foo" does not exist +EXECUTE foo ('good'); +ERROR: prepared statement "foo" does not exist +SELECT xml ' '; +ERROR: unsupported XML feature +LINE 1: SELECT xml ' '; +ERROR: unsupported XML feature +LINE 1: SELECT xml ' '; +ERROR: unsupported XML feature +LINE 1: SELECT xml ''; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml ' oops '; +ERROR: unsupported XML feature +LINE 1: SELECT xml ' oops '; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml ' '; +ERROR: unsupported XML feature +LINE 1: SELECT xml ' '; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml ''; +ERROR: unsupported XML feature +LINE 1: SELECT xml ''; + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- Test backwards parsing +CREATE VIEW xmlview1 AS SELECT xmlcomment('test'); +CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you'); +ERROR: unsupported XML feature +LINE 1: CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you'); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp; +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +CREATE VIEW xmlview5 AS SELECT xmlparse(content 'x'); +CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. 
+CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version no value, standalone yes); +ERROR: unsupported XML feature +LINE 1: CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10)); +ERROR: unsupported XML feature +LINE 1: ...EATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as ... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text); +ERROR: unsupported XML feature +LINE 1: ...EATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as ... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'xmlview%' ORDER BY 1; + table_name | view_definition +------------+-------------------------------------------------------------------------------- + xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment; + xmlview5 | SELECT XMLPARSE(CONTENT 'x'::text STRIP WHITESPACE) AS "xmlparse"; +(2 rows) + +-- Text XPath expressions evaluation +SELECT xpath('/value', data) FROM xmltest; + xpath +------- +(0 rows) + +SELECT xpath(NULL, NULL) IS NULL FROM xmltest; + ?column? +---------- +(0 rows) + +SELECT xpath('', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('//text()', 'number one'); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('//text()', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('//loc:piece/@id', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('//loc:piece', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('//@value', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('''<>''', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('''<>''', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('count(//*)', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('count(//*)', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('count(//*)=0', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('count(//*)=0', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('count(//*)=3', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('count(//*)=3', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('name(/*)', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('name(/*)', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('/nosuchtag', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('/nosuchtag', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath('root', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('root', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
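+-- Illustrative sketch (not part of the expected output): on a libxml-enabled
+-- build xpath() returns an array of xml values (the {...} results in
+-- xml.out), so a single match is typically extracted by subscripting:
+SELECT (xpath('/menu/beers/name/text()', data))[1]::text FROM xmltest;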
+-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. + IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; +-- Test xmlexists and xpath_exists +SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); +ERROR: unsupported XML feature +LINE 1: ...sts('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); +ERROR: unsupported XML feature +LINE 1: ...sts('//town[text() = ''Cwmbran'']' PASSING BY REF ''); +ERROR: unsupported XML feature +LINE 1: ...LECT xmlexists('count(/nosuchtag)' PASSING BY REF '')... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml); +ERROR: unsupported XML feature +LINE 1: ...ELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml); +ERROR: unsupported XML feature +LINE 1: ...ELECT xpath_exists('//town[text() = ''Cwmbran'']',''::xml); +ERROR: unsupported XML feature +LINE 1: SELECT xpath_exists('count(/nosuchtag)', ''::xml); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmltest VALUES (4, 'BudvarfreeCarlinglots'::xml); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest VALUES (4, 'BudvarMolsonfreeCarlinglots'::xml); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest VALUES (5, 'MolsonBudvarfreeCarlinglots'::xml); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest VALUES (6, 'MolsonfreeCarlinglots'::xml); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest VALUES (7, 'number one'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml_is_well_formed('bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml_is_well_formed('bar'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml_is_well_formed('&'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml_is_well_formed('&idontexist;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. 
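+-- Note on the round-trip DO block above (sketch, not part of the expected
+-- output): '\xc2b0' is the two-byte UTF-8 encoding of U+00B0, the degree
+-- sign, i.e. one representative non-ASCII character.
+SELECT convert_from('\xc2b0', 'UTF8') AS degree_symbol;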
+SELECT xml_is_well_formed(''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml_is_well_formed(''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xml_is_well_formed('&idontexist;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SET xmloption TO CONTENT; +SELECT xml_is_well_formed('abc'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +-- Since xpath() deals with namespaces, it's a bit stricter about +-- what's well-formed and what's not. If we don't obey these rules +-- (i.e. ignore namespace-related errors from libxml), xpath() +-- fails in subtle ways. The following would for example produce +-- the xml value +-- +-- which is invalid because '<' may not appear un-escaped in +-- attribute values. +-- Since different libxml versions emit slightly different +-- error messages, we suppress the DETAIL in this test. +\set VERBOSITY terse +SELECT xpath('/*', ''); +ERROR: unsupported XML feature at character 20 +\set VERBOSITY default +-- Again, the XML isn't well-formed for namespace purposes +SELECT xpath('/*', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('/*', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- XPath deprecates relative namespaces, but they're not supposed to +-- throw an error, only a warning. +SELECT xpath('/*', ''); +ERROR: unsupported XML feature +LINE 1: SELECT xpath('/*', ''); + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- External entity references should not leak filesystem information. +SELECT XMLPARSE(DOCUMENT ']>&c;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT XMLPARSE(DOCUMENT ']>&c;'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +-- This might or might not load the requested DTD, but it mustn't throw error. +SELECT XMLPARSE(DOCUMENT ' '); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +-- XMLPATH tests +CREATE TABLE xmldata(data xml); +INSERT INTO xmldata VALUES(' + + AU + Australia + 3 + + + CN + China + 3 + + + HK + HongKong + 3 + + + IN + India + 3 + + + JP + Japan + 3Sinzo Abe + + + SG + Singapore + 3791 + +'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmldata VALUES(' + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
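+-- Illustrative sketch (assumption; hypothetical SYSTEM identifier): as the
+-- external-entity tests in xml.out show, on a libxml-enabled build entity
+-- references such as &c; are kept unexpanded rather than resolved against
+-- the filesystem.
+SELECT XMLPARSE(DOCUMENT
+  '<!DOCTYPE foo [<!ENTITY c SYSTEM "/nonexistent">]><foo>&c;</foo>');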
+-- XMLTABLE with columns +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+-------------- +(0 rows) + +CREATE VIEW xmltableview1 AS SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +SELECT * FROM xmltableview1; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+-------------- +(0 rows) + +\sv xmltableview1 +CREATE OR REPLACE VIEW public.xmltableview1 AS + SELECT "xmltable".id, + "xmltable"._id, + "xmltable".country_name, + "xmltable".country_id, + "xmltable".region_id, + "xmltable".size, + "xmltable".unit, + "xmltable".premier_name + FROM ( SELECT xmldata.data + FROM xmldata) x, + LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; + QUERY PLAN +----------------------------------------- + Nested Loop + -> Seq Scan on xmldata + -> Table Function Scan on "xmltable" +(3 rows) + +EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH 
('PREMIER_NAME'::text)) +(7 rows) + +-- errors +SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2); +ERROR: XMLTABLE function has 1 columns available but 2 columns specified +-- XMLNAMESPACES tests +SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz), + '/zz:rows/zz:row' + PASSING '10' + COLUMNS a int PATH 'zz:a'); +ERROR: unsupported XML feature +LINE 3: PASSING '10' + COLUMNS a int PATH 'zz:a'); +ERROR: unsupported XML feature +LINE 3: PASSING '10' + COLUMNS a int PATH 'a'); +ERROR: unsupported XML feature +LINE 3: PASSING '' + COLUMNS a text PATH 'foo/namespace::node()'); +ERROR: unsupported XML feature +LINE 2: PASSING '' + ^ +DETAIL: This functionality requires the server to be built with libxml support. +-- used in prepare statements +PREPARE pp AS +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +EXECUTE pp; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+-------------- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int); + COUNTRY_NAME | REGION_ID +--------------+----------- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int); + id | COUNTRY_NAME | REGION_ID +----+--------------+----------- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int); + id | COUNTRY_NAME | REGION_ID +----+--------------+----------- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id'); + id +---- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY); + id +---- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.'); + id | COUNTRY_NAME | REGION_ID | rawdata +----+--------------+-----------+--------- +(0 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*'); + id | COUNTRY_NAME | REGION_ID | rawdata +----+--------------+-----------+--------- +(0 rows) + +SELECT * FROM xmltable('/root' passing 'a1aa2a bbbbxxxcccc' COLUMNS element text); +ERROR: unsupported XML feature +LINE 1: SELECT * FROM xmltable('/root' passing 'a1aa1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail +ERROR: unsupported XML feature +LINE 1: SELECT * FROM xmltable('/root' passing 'a1a &"<>!foo]]>2' columns c text); +ERROR: unsupported XML feature +LINE 1: select * from xmltable('d/r' passing ''"&<>' 
COLUMNS ent text); +ERROR: unsupported XML feature +LINE 1: SELECT * FROM xmltable('/x/a' PASSING '''"&<>' COLUMNS ent xml); +ERROR: unsupported XML feature +LINE 1: SELECT * FROM xmltable('/x/a' PASSING '' Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +(7 rows) + +-- test qual +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; + COUNTRY_NAME | REGION_ID +--------------+----------- +(0 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID" + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID" + Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer) + Filter: ("xmltable"."COUNTRY_NAME" = 'Japan'::text) +(8 rows) + +-- should to work with more data +INSERT INTO xmldata VALUES(' + + CZ + Czech Republic + 2Milos Zeman + + + DE + Germany + 2 + + + FR + France + 2 + +'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmldata VALUES(' + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmldata VALUES(' + + EG + Egypt + 1 + + + SD + Sudan + 1 + +'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmldata VALUES(' + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
+SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+-------------- +(0 rows) + +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') + WHERE region_id = 2; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+-------------- +(0 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') + WHERE region_id = 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + Filter: ("xmltable".region_id = 2) +(8 rows) + +-- should fail, NULL value +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE' NOT NULL, + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name 
+----+-----+--------------+------------+-----------+------+------+-------------- +(0 rows) + +-- if all is ok, then result is empty +-- one line xml test +WITH + x AS (SELECT proname, proowner, procost::numeric, pronargs, + array_to_string(proargnames,',') as proargnames, + case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes + FROM pg_proc WHERE proname = 'f_leak'), + y AS (SELECT xmlelement(name proc, + xmlforest(proname, proowner, + procost, pronargs, + proargnames, proargtypes)) as proc + FROM x), + z AS (SELECT xmltable.* + FROM y, + LATERAL xmltable('/proc' PASSING proc + COLUMNS proname name, + proowner oid, + procost float, + pronargs int, + proargnames text, + proargtypes text)) + SELECT * FROM z + EXCEPT SELECT * FROM x; +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +-- multi line xml test, result should be empty too +WITH + x AS (SELECT proname, proowner, procost::numeric, pronargs, + array_to_string(proargnames,',') as proargnames, + case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes + FROM pg_proc), + y AS (SELECT xmlelement(name data, + xmlagg(xmlelement(name proc, + xmlforest(proname, proowner, procost, + pronargs, proargnames, proargtypes)))) as doc + FROM x), + z AS (SELECT xmltable.* + FROM y, + LATERAL xmltable('/data/proc' PASSING doc + COLUMNS proname name, + proowner oid, + procost float, + pronargs int, + proargnames text, + proargtypes text)) + SELECT * FROM z + EXCEPT SELECT * FROM x; +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +CREATE TABLE xmltest2(x xml, _path text); +INSERT INTO xmltest2 VALUES('1', 'A'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest2 VALUES('1', 'A')... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmltest2 VALUES('2', 'B'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest2 VALUES('2', 'B')... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmltest2 VALUES('3', 'C'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest2 VALUES('3', 'C')... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +INSERT INTO xmltest2 VALUES('2', 'D'); +ERROR: unsupported XML feature +LINE 1: INSERT INTO xmltest2 VALUES('2', 'D')... + ^ +DETAIL: This functionality requires the server to be built with libxml support. +SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c'); + a +--- +(0 rows) + +SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.'); + a +--- +(0 rows) + +SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54); + a +--- +(0 rows) + +-- XPath result can be boolean or number too +SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)'); +ERROR: unsupported XML feature +LINE 1: SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml ... + ^ +DETAIL: This functionality requires the server to be built with libxml support. 
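The xmltest2 queries above build the XMLTABLE row expression at run time by concatenating the _path column into the XPath string. A minimal sketch of the same pattern against an inline document (hypothetical literal values; on the libxml-less build this file reflects it would instead fail with the same "unsupported XML feature" error):

SELECT a
  FROM (VALUES ('b')) AS t(p),
       LATERAL xmltable(('/d/r/' || t.p || 'c')   -- row path built from a column value
                        PASSING '<d><r><ac>1</ac><bc>2</bc></r></d>'::xml
                        COLUMNS a int PATH '.');   -- yields 2, the content of <bc>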
+\x +SELECT * FROM XMLTABLE('*' PASSING 'pre&deeppost' COLUMNS x xml PATH '/e/n2', y xml PATH '/'); +ERROR: unsupported XML feature +LINE 1: SELECT * FROM XMLTABLE('*' PASSING 'pre"', b xml PATH '""'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. diff --git a/src/test/regress/expected/xml_2.out b/src/test/regress/expected/xml_2.out new file mode 100644 index 0000000..43c2558 --- /dev/null +++ b/src/test/regress/expected/xml_2.out @@ -0,0 +1,1767 @@ +CREATE TABLE xmltest ( + id int, + data xml +); +INSERT INTO xmltest VALUES (1, 'one'); +INSERT INTO xmltest VALUES (2, 'two'); +INSERT INTO xmltest VALUES (3, 'one + 2 | two +(2 rows) + +-- test non-throwing API, too +SELECT pg_input_is_valid('one', 'xml'); + pg_input_is_valid +------------------- + t +(1 row) + +SELECT pg_input_is_valid('oneone', 'xml'); + pg_input_is_valid +------------------- + f +(1 row) + +SELECT message FROM pg_input_error_info('', 'xml'); + message +---------------------------------------------- + invalid XML content: invalid XML declaration +(1 row) + +SELECT xmlcomment('test'); + xmlcomment +------------- + +(1 row) + +SELECT xmlcomment('-test'); + xmlcomment +-------------- + +(1 row) + +SELECT xmlcomment('test-'); +ERROR: invalid XML comment +SELECT xmlcomment('--test'); +ERROR: invalid XML comment +SELECT xmlcomment('te st'); + xmlcomment +-------------- + +(1 row) + +SELECT xmlconcat(xmlcomment('hello'), + xmlelement(NAME qux, 'foo'), + xmlcomment('world')); + xmlconcat +---------------------------------------- + foo +(1 row) + +SELECT xmlconcat('hello', 'you'); + xmlconcat +----------- + helloyou +(1 row) + +SELECT xmlconcat(1, 2); +ERROR: argument of XMLCONCAT must be type xml, not type integer +LINE 1: SELECT xmlconcat(1, 2); + ^ +SELECT xmlconcat('bad', '', NULL, ''); + xmlconcat +-------------- + +(1 row) + +SELECT xmlconcat('', NULL, ''); + xmlconcat +----------------------------------- + +(1 row) + +SELECT xmlconcat(NULL); + xmlconcat +----------- + +(1 row) + +SELECT xmlconcat(NULL, NULL); + xmlconcat +----------- + +(1 row) + +SELECT xmlelement(name element, + xmlattributes (1 as one, 'deuce' as two), + 'content'); + xmlelement +------------------------------------------------ + content +(1 row) + +SELECT xmlelement(name element, + xmlattributes ('unnamed and wrong')); +ERROR: unnamed XML attribute value must be a column reference +LINE 2: xmlattributes ('unnamed and wrong')); + ^ +SELECT xmlelement(name element, xmlelement(name nested, 'stuff')); + xmlelement +------------------------------------------- + stuff +(1 row) + +SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp; + xmlelement +---------------------------------------------------------------------- + sharon251000 + sam302000 + bill201000 + jeff23600 + cim30400 + linda19100 +(6 rows) + +SELECT xmlelement(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a)); +ERROR: XML attribute name "a" appears more than once +LINE 1: ...ment(name duplicate, xmlattributes(1 as a, 2 as b, 3 as a)); + ^ +SELECT xmlelement(name num, 37); + xmlelement +--------------- + 37 +(1 row) + +SELECT xmlelement(name foo, text 'bar'); + xmlelement +---------------- + bar +(1 row) + +SELECT xmlelement(name foo, xml 'bar'); + xmlelement +---------------- + bar +(1 row) + +SELECT xmlelement(name foo, text 'br'); + xmlelement +------------------------- + b<a/>r +(1 row) + +SELECT xmlelement(name foo, xml 'br'); + xmlelement +------------------- + br +(1 row) + +SELECT xmlelement(name 
foo, array[1, 2, 3]); + xmlelement +------------------------------------------------------------------------- + 123 +(1 row) + +SET xmlbinary TO base64; +SELECT xmlelement(name foo, bytea 'bar'); + xmlelement +----------------- + YmFy +(1 row) + +SET xmlbinary TO hex; +SELECT xmlelement(name foo, bytea 'bar'); + xmlelement +------------------- + 626172 +(1 row) + +SELECT xmlelement(name foo, xmlattributes(true as bar)); + xmlelement +------------------- + +(1 row) + +SELECT xmlelement(name foo, xmlattributes('2009-04-09 00:24:37'::timestamp as bar)); + xmlelement +---------------------------------- + +(1 row) + +SELECT xmlelement(name foo, xmlattributes('infinity'::timestamp as bar)); +ERROR: timestamp out of range +DETAIL: XML does not support infinite timestamp values. +SELECT xmlelement(name foo, xmlattributes('<>&"''' as funny, xml 'br' as funnier)); + xmlelement +------------------------------------------------------------ + +(1 row) + +SELECT xmlparse(content ''); + xmlparse +---------- + +(1 row) + +SELECT xmlparse(content ' '); + xmlparse +---------- + +(1 row) + +SELECT xmlparse(content 'abc'); + xmlparse +---------- + abc +(1 row) + +SELECT xmlparse(content 'x'); + xmlparse +-------------- + x +(1 row) + +SELECT xmlparse(content '&'); +ERROR: invalid XML content +DETAIL: line 1: xmlParseEntityRef: no name +& + ^ +line 1: chunk is not well balanced +SELECT xmlparse(content '&idontexist;'); +ERROR: invalid XML content +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: chunk is not well balanced +SELECT xmlparse(content ''); + xmlparse +--------------------------- + +(1 row) + +SELECT xmlparse(content ''); + xmlparse +-------------------------------- + +(1 row) + +SELECT xmlparse(content '&idontexist;'); +ERROR: invalid XML content +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced +line 1: chunk is not well balanced +SELECT xmlparse(content ''); + xmlparse +--------------------- + +(1 row) + +SELECT xmlparse(document ' '); +ERROR: invalid XML document +DETAIL: line 1: Start tag expected, '<' not found +SELECT xmlparse(document 'abc'); +ERROR: invalid XML document +DETAIL: line 1: Start tag expected, '<' not found +abc +^ +SELECT xmlparse(document 'x'); + xmlparse +-------------- + x +(1 row) + +SELECT xmlparse(document '&'); +ERROR: invalid XML document +DETAIL: line 1: xmlParseEntityRef: no name +& + ^ +line 1: Opening and ending tag mismatch: invalidentity line 1 and abc +SELECT xmlparse(document '&idontexist;'); +ERROR: invalid XML document +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: Opening and ending tag mismatch: undefinedentity line 1 and abc +SELECT xmlparse(document ''); + xmlparse +--------------------------- + +(1 row) + +SELECT xmlparse(document ''); + xmlparse +-------------------------------- + +(1 row) + +SELECT xmlparse(document '&idontexist;'); +ERROR: invalid XML document +DETAIL: line 1: Entity 'idontexist' not defined +&idontexist; + ^ +line 1: Opening and ending tag mismatch: twoerrors line 1 and unbalanced +SELECT xmlparse(document ''); + xmlparse +--------------------- + +(1 row) + +SELECT xmlpi(name foo); + xmlpi +--------- + +(1 row) + +SELECT xmlpi(name xml); +ERROR: invalid XML processing instruction +DETAIL: XML processing instruction target name cannot be "xml". 
+SELECT xmlpi(name xmlstuff); + xmlpi +-------------- + +(1 row) + +SELECT xmlpi(name foo, 'bar'); + xmlpi +------------- + +(1 row) + +SELECT xmlpi(name foo, 'in?>valid'); +ERROR: invalid XML processing instruction +DETAIL: XML processing instruction cannot contain "?>". +SELECT xmlpi(name foo, null); + xmlpi +------- + +(1 row) + +SELECT xmlpi(name xml, null); +ERROR: invalid XML processing instruction +DETAIL: XML processing instruction target name cannot be "xml". +SELECT xmlpi(name xmlstuff, null); + xmlpi +------- + +(1 row) + +SELECT xmlpi(name "xml-stylesheet", 'href="mystyle.css" type="text/css"'); + xmlpi +------------------------------------------------------- + +(1 row) + +SELECT xmlpi(name foo, ' bar'); + xmlpi +------------- + +(1 row) + +SELECT xmlroot(xml '', version no value, standalone no value); + xmlroot +--------- + +(1 row) + +SELECT xmlroot(xml '', version '2.0'); + xmlroot +----------------------------- + +(1 row) + +SELECT xmlroot(xml '', version no value, standalone yes); + xmlroot +---------------------------------------------- + +(1 row) + +SELECT xmlroot(xml '', version no value, standalone yes); + xmlroot +---------------------------------------------- + +(1 row) + +SELECT xmlroot(xmlroot(xml '', version '1.0'), version '1.1', standalone no); + xmlroot +--------------------------------------------- + +(1 row) + +SELECT xmlroot('', version no value, standalone no); + xmlroot +--------------------------------------------- + +(1 row) + +SELECT xmlroot('', version no value, standalone no value); + xmlroot +--------- + +(1 row) + +SELECT xmlroot('', version no value); + xmlroot +---------------------------------------------- + +(1 row) + +SELECT xmlroot ( + xmlelement ( + name gazonk, + xmlattributes ( + 'val' AS name, + 1 + 1 AS num + ), + xmlelement ( + NAME qux, + 'foo' + ) + ), + version '1.0', + standalone yes +); + xmlroot +------------------------------------------------------------------------------------------ + foo +(1 row) + +SELECT xmlserialize(content data as character varying(20)) FROM xmltest; + xmlserialize +-------------------- + one + two +(2 rows) + +SELECT xmlserialize(content 'good' as char(10)); + xmlserialize +-------------- + good +(1 row) + +SELECT xmlserialize(document 'bad' as text); +ERROR: not an XML document +-- indent +SELECT xmlserialize(DOCUMENT '42' AS text INDENT); + xmlserialize +------------------------- + + + + + 42+ + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '42' AS text INDENT); + xmlserialize +------------------------- + + + + + 42+ + + + +(1 row) + +-- no indent +SELECT xmlserialize(DOCUMENT '42' AS text NO INDENT); + xmlserialize +------------------------------------------- + 42 +(1 row) + +SELECT xmlserialize(CONTENT '42' AS text NO INDENT); + xmlserialize +------------------------------------------- + 42 +(1 row) + +-- indent non singly-rooted xml +SELECT xmlserialize(DOCUMENT '7342' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT '7342' AS text INDENT); + xmlserialize +----------------------- + 73 + + + + 42+ + +(1 row) + +-- indent non singly-rooted xml with mixed contents +SELECT xmlserialize(DOCUMENT 'text node73text node42' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT 'text node73text node42' AS text INDENT); + xmlserialize +------------------------ + text node + + 73text node+ + + + 42 + + +(1 row) + +-- indent singly-rooted xml with mixed contents +SELECT xmlserialize(DOCUMENT '42text node73' AS text INDENT); + xmlserialize 
+--------------------------------------------- + + + + + 42 + + text node73+ + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '42text node73' AS text INDENT); + xmlserialize +--------------------------------------------- + + + + + 42 + + text node73+ + + + +(1 row) + +-- indent empty string +SELECT xmlserialize(DOCUMENT '' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT '' AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- whitespaces +SELECT xmlserialize(DOCUMENT ' ' AS text INDENT); +ERROR: not an XML document +SELECT xmlserialize(CONTENT ' ' AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- indent null +SELECT xmlserialize(DOCUMENT NULL AS text INDENT); + xmlserialize +-------------- + +(1 row) + +SELECT xmlserialize(CONTENT NULL AS text INDENT); + xmlserialize +-------------- + +(1 row) + +-- indent with XML declaration +SELECT xmlserialize(DOCUMENT '73' AS text INDENT); + xmlserialize +---------------------------------------- + + + + + + + 73 + + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '73' AS text INDENT); + xmlserialize +------------------- + + + + + 73+ + + + +(1 row) + +-- indent containing DOCTYPE declaration +SELECT xmlserialize(DOCUMENT '' AS text INDENT); + xmlserialize +-------------- + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '' AS text INDENT); + xmlserialize +-------------- + + + + + +(1 row) + +-- indent xml with empty element +SELECT xmlserialize(DOCUMENT '' AS text INDENT); + xmlserialize +-------------- + + + + + + + +(1 row) + +SELECT xmlserialize(CONTENT '' AS text INDENT); + xmlserialize +-------------- + + + + + +(1 row) + +-- 'no indent' = not using 'no indent' +SELECT xmlserialize(DOCUMENT '42' AS text) = xmlserialize(DOCUMENT '42' AS text NO INDENT); + ?column? +---------- + t +(1 row) + +SELECT xmlserialize(CONTENT '42' AS text) = xmlserialize(CONTENT '42' AS text NO INDENT); + ?column? +---------- + t +(1 row) + +SELECT xml 'bar' IS DOCUMENT; + ?column? +---------- + t +(1 row) + +SELECT xml 'barfoo' IS DOCUMENT; + ?column? +---------- + f +(1 row) + +SELECT xml '' IS NOT DOCUMENT; + ?column? +---------- + f +(1 row) + +SELECT xml 'abc' IS NOT DOCUMENT; + ?column? 
+---------- + t +(1 row) + +SELECT '<>' IS NOT DOCUMENT; +ERROR: invalid XML content +LINE 1: SELECT '<>' IS NOT DOCUMENT; + ^ +DETAIL: line 1: StartTag: invalid element name +<> + ^ +SELECT xmlagg(data) FROM xmltest; + xmlagg +-------------------------------------- + onetwo +(1 row) + +SELECT xmlagg(data) FROM xmltest WHERE id > 10; + xmlagg +-------- + +(1 row) + +SELECT xmlelement(name employees, xmlagg(xmlelement(name name, name))) FROM emp; + xmlelement +-------------------------------------------------------------------------------------------------------------------------------- + sharonsambilljeffcimlinda +(1 row) + +-- Check mapping SQL identifier to XML name +SELECT xmlpi(name ":::_xml_abc135.%-&_"); + xmlpi +------------------------------------------------- + +(1 row) + +SELECT xmlpi(name "123"); + xmlpi +--------------- + +(1 row) + +PREPARE foo (xml) AS SELECT xmlconcat('', $1); +SET XML OPTION DOCUMENT; +EXECUTE foo (''); + xmlconcat +-------------- + +(1 row) + +EXECUTE foo ('bad'); +ERROR: invalid XML document +LINE 1: EXECUTE foo ('bad'); + ^ +DETAIL: line 1: Start tag expected, '<' not found +bad +^ +SELECT xml ''; +ERROR: invalid XML document +LINE 1: SELECT xml ''; + ^ +DETAIL: line 1: Extra content at the end of the document + + ^ +SET XML OPTION CONTENT; +EXECUTE foo (''); + xmlconcat +-------------- + +(1 row) + +EXECUTE foo ('good'); + xmlconcat +------------ + good +(1 row) + +SELECT xml ' '; + xml +-------------------------------------------------------------------- + +(1 row) + +SELECT xml ' '; + xml +------------------------------ + +(1 row) + +SELECT xml ''; + xml +------------------ + +(1 row) + +SELECT xml ' oops '; +ERROR: invalid XML content +LINE 1: SELECT xml ' oops '; + ^ +DETAIL: line 1: StartTag: invalid element name + oops + ^ +SELECT xml ' '; +ERROR: invalid XML content +LINE 1: SELECT xml ' '; + ^ +DETAIL: line 1: StartTag: invalid element name + + ^ +SELECT xml ''; +ERROR: invalid XML content +LINE 1: SELECT xml ''; + ^ +DETAIL: line 1: Extra content at the end of the document + + ^ +-- Test backwards parsing +CREATE VIEW xmlview1 AS SELECT xmlcomment('test'); +CREATE VIEW xmlview2 AS SELECT xmlconcat('hello', 'you'); +CREATE VIEW xmlview3 AS SELECT xmlelement(name element, xmlattributes (1 as ":one:", 'deuce' as two), 'content&'); +CREATE VIEW xmlview4 AS SELECT xmlelement(name employee, xmlforest(name, age, salary as pay)) FROM emp; +CREATE VIEW xmlview5 AS SELECT xmlparse(content 'x'); +CREATE VIEW xmlview6 AS SELECT xmlpi(name foo, 'bar'); +CREATE VIEW xmlview7 AS SELECT xmlroot(xml '', version no value, standalone yes); +CREATE VIEW xmlview8 AS SELECT xmlserialize(content 'good' as char(10)); +CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text); +SELECT table_name, view_definition FROM information_schema.views + WHERE table_name LIKE 'xmlview%' ORDER BY 1; + table_name | view_definition +------------+------------------------------------------------------------------------------------------------------------ + xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment; + xmlview2 | SELECT XMLCONCAT('hello'::xml, 'you'::xml) AS "xmlconcat"; + xmlview3 | SELECT XMLELEMENT(NAME element, XMLATTRIBUTES(1 AS ":one:", 'deuce' AS two), 'content&') AS "xmlelement"; + xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(name AS name, age AS age, salary AS pay)) AS "xmlelement" + + | FROM emp; + xmlview5 | SELECT XMLPARSE(CONTENT 'x'::text STRIP WHITESPACE) AS "xmlparse"; + xmlview6 | SELECT XMLPI(NAME foo, 'bar'::text) AS "xmlpi"; + 
xmlview7 | SELECT XMLROOT(''::xml, VERSION NO VALUE, STANDALONE YES) AS "xmlroot"; + xmlview8 | SELECT (XMLSERIALIZE(CONTENT 'good'::xml AS character(10)))::character(10) AS "xmlserialize"; + xmlview9 | SELECT XMLSERIALIZE(CONTENT 'good'::xml AS text) AS "xmlserialize"; +(9 rows) + +-- Text XPath expressions evaluation +SELECT xpath('/value', data) FROM xmltest; + xpath +---------------------- + {one} + {two} +(2 rows) + +SELECT xpath(NULL, NULL) IS NULL FROM xmltest; + ?column? +---------- + t + t +(2 rows) + +SELECT xpath('', ''); +ERROR: empty XPath expression +CONTEXT: SQL function "xpath" statement 1 +SELECT xpath('//text()', 'number one'); + xpath +---------------- + {"number one"} +(1 row) + +SELECT xpath('//loc:piece/@id', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); + xpath +------- + {1,2} +(1 row) + +SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); + xpath +------------------------------------------------------------------------------------------------------------------------------------------------ + {"number one",""} +(1 row) + +SELECT xpath('//loc:piece', 'number one', ARRAY[ARRAY['loc', 'http://127.0.0.1']]); + xpath +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + {"number one",""} +(1 row) + +SELECT xpath('//b', 'one two three etc'); + xpath +------------------------- + {two,etc} +(1 row) + +SELECT xpath('//text()', '<'); + xpath +-------- + {<} +(1 row) + +SELECT xpath('//@value', ''); + xpath +-------- + {<} +(1 row) + +SELECT xpath('''<>''', ''); + xpath +--------------------------- + {<<invalid>>} +(1 row) + +SELECT xpath('count(//*)', ''); + xpath +------- + {3} +(1 row) + +SELECT xpath('count(//*)=0', ''); + xpath +--------- + {false} +(1 row) + +SELECT xpath('count(//*)=3', ''); + xpath +-------- + {true} +(1 row) + +SELECT xpath('name(/*)', ''); + xpath +-------- + {root} +(1 row) + +SELECT xpath('/nosuchtag', ''); + xpath +------- + {} +(1 row) + +SELECT xpath('root', ''); + xpath +----------- + {} +(1 row) + +-- Round-trip non-ASCII data through xpath(). +DO $$ +DECLARE + xml_declaration text := ''; + degree_symbol text; + res xml[]; +BEGIN + -- Per the documentation, except when the server encoding is UTF8, xpath() + -- may not work on non-ASCII data. The untranslatable_character and + -- undefined_function traps below, currently dead code, will become relevant + -- if we remove this limitation. 
+ IF current_setting('server_encoding') <> 'UTF8' THEN + RAISE LOG 'skip: encoding % unsupported for xpath', + current_setting('server_encoding'); + RETURN; + END IF; + + degree_symbol := convert_from('\xc2b0', 'UTF8'); + res := xpath('text()', (xml_declaration || + '' || degree_symbol || '')::xml); + IF degree_symbol <> res[1]::text THEN + RAISE 'expected % (%), got % (%)', + degree_symbol, convert_to(degree_symbol, 'UTF8'), + res[1], convert_to(res[1]::text, 'UTF8'); + END IF; +EXCEPTION + -- character with byte sequence 0xc2 0xb0 in encoding "UTF8" has no equivalent in encoding "LATIN8" + WHEN untranslatable_character + -- default conversion function for encoding "UTF8" to "MULE_INTERNAL" does not exist + OR undefined_function + -- unsupported XML feature + OR feature_not_supported THEN + RAISE LOG 'skip: %', SQLERRM; +END +$$; +-- Test xmlexists and xpath_exists +SELECT xmlexists('//town[text() = ''Toronto'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); + xmlexists +----------- + f +(1 row) + +SELECT xmlexists('//town[text() = ''Cwmbran'']' PASSING BY REF 'Bidford-on-AvonCwmbranBristol'); + xmlexists +----------- + t +(1 row) + +SELECT xmlexists('count(/nosuchtag)' PASSING BY REF ''); + xmlexists +----------- + t +(1 row) + +SELECT xpath_exists('//town[text() = ''Toronto'']','Bidford-on-AvonCwmbranBristol'::xml); + xpath_exists +-------------- + f +(1 row) + +SELECT xpath_exists('//town[text() = ''Cwmbran'']','Bidford-on-AvonCwmbranBristol'::xml); + xpath_exists +-------------- + t +(1 row) + +SELECT xpath_exists('count(/nosuchtag)', ''::xml); + xpath_exists +-------------- + t +(1 row) + +INSERT INTO xmltest VALUES (4, 'BudvarfreeCarlinglots'::xml); +INSERT INTO xmltest VALUES (5, 'MolsonfreeCarlinglots'::xml); +INSERT INTO xmltest VALUES (6, 'BudvarfreeCarlinglots'::xml); +INSERT INTO xmltest VALUES (7, 'MolsonfreeCarlinglots'::xml); +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING data); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beer' PASSING BY REF data BY REF); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers' PASSING BY REF data); + count +------- + 2 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xmlexists('/menu/beers/name[text() = ''Molson'']' PASSING BY REF data); + count +------- + 1 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beer',data); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers',data); + count +------- + 2 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/menu/beers/name[text() = ''Molson'']',data); + count +------- + 1 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beer',data,ARRAY[ARRAY['myns','http://myns.com']]); + count +------- + 0 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers',data,ARRAY[ARRAY['myns','http://myns.com']]); + count +------- + 2 +(1 row) + +SELECT COUNT(id) FROM xmltest WHERE xpath_exists('/myns:menu/myns:beers/myns:name[text() = ''Molson'']',data,ARRAY[ARRAY['myns','http://myns.com']]); + count +------- + 1 +(1 row) + +CREATE TABLE query ( expr TEXT ); +INSERT INTO query VALUES ('/menu/beers/cost[text() = ''lots'']'); +SELECT COUNT(id) FROM xmltest, query WHERE xmlexists(expr PASSING BY REF data); + count +------- + 2 +(1 row) + +-- Test xml_is_well_formed and variants +SELECT xml_is_well_formed_document('bar'); + xml_is_well_formed_document +----------------------------- + t +(1 row) 
+ +SELECT xml_is_well_formed_document('abc'); + xml_is_well_formed_document +----------------------------- + f +(1 row) + +SELECT xml_is_well_formed_content('bar'); + xml_is_well_formed_content +---------------------------- + t +(1 row) + +SELECT xml_is_well_formed_content('abc'); + xml_is_well_formed_content +---------------------------- + t +(1 row) + +SET xmloption TO DOCUMENT; +SELECT xml_is_well_formed('abc'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('<>'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('bar'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('barbaz'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('number one'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('bar'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('bar'); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('&'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed('&idontexist;'); + xml_is_well_formed +-------------------- + f +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed(''); + xml_is_well_formed +-------------------- + t +(1 row) + +SELECT xml_is_well_formed('&idontexist;'); + xml_is_well_formed +-------------------- + f +(1 row) + +SET xmloption TO CONTENT; +SELECT xml_is_well_formed('abc'); + xml_is_well_formed +-------------------- + t +(1 row) + +-- Since xpath() deals with namespaces, it's a bit stricter about +-- what's well-formed and what's not. If we don't obey these rules +-- (i.e. ignore namespace-related errors from libxml), xpath() +-- fails in subtle ways. The following would for example produce +-- the xml value +-- +-- which is invalid because '<' may not appear un-escaped in +-- attribute values. +-- Since different libxml versions emit slightly different +-- error messages, we suppress the DETAIL in this test. +\set VERBOSITY terse +SELECT xpath('/*', ''); +ERROR: could not parse XML document +\set VERBOSITY default +-- Again, the XML isn't well-formed for namespace purposes +SELECT xpath('/*', ''); +ERROR: could not parse XML document +DETAIL: line 1: Namespace prefix nosuchprefix on tag is not defined + + ^ +CONTEXT: SQL function "xpath" statement 1 +-- XPath deprecates relative namespaces, but they're not supposed to +-- throw an error, only a warning. +SELECT xpath('/*', ''); +WARNING: line 1: xmlns: URI relative is not absolute + + ^ + xpath +-------------------------------------- + {""} +(1 row) + +-- External entity references should not leak filesystem information. +SELECT XMLPARSE(DOCUMENT ']>&c;'); + xmlparse +----------------------------------------------------------------- + ]>&c; +(1 row) + +SELECT XMLPARSE(DOCUMENT ']>&c;'); + xmlparse +----------------------------------------------------------------------- + ]>&c; +(1 row) + +-- This might or might not load the requested DTD, but it mustn't throw error. 
+SELECT XMLPARSE(DOCUMENT ' '); + xmlparse +------------------------------------------------------------------------------------------------------------------------------------------------------ +   +(1 row) + +-- XMLPATH tests +CREATE TABLE xmldata(data xml); +INSERT INTO xmldata VALUES(' + + AU + Australia + 3 + + + CN + China + 3 + + + HK + HongKong + 3 + + + IN + India + 3 + + + JP + Japan + 3Sinzo Abe + + + SG + Singapore + 3791 + +'); +-- XMLTABLE with columns +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified +(6 rows) + +CREATE VIEW xmltableview1 AS SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME/text()' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +SELECT * FROM xmltableview1; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified +(6 rows) + +\sv xmltableview1 +CREATE OR REPLACE VIEW public.xmltableview1 AS + SELECT "xmltable".id, + "xmltable"._id, + "xmltable".country_name, + "xmltable".country_id, + "xmltable".region_id, + "xmltable".size, + "xmltable".unit, + "xmltable".premier_name + FROM ( SELECT xmldata.data + FROM xmldata) x, + LATERAL XMLTABLE(('/ROWS/ROW'::text) PASSING (x.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; + QUERY PLAN +----------------------------------------- + Nested Loop + -> Seq Scan on xmldata + -> Table Function Scan on "xmltable" +(3 rows) + +EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; + QUERY PLAN 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME/text()'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +(7 rows) + +-- errors +SELECT * FROM XMLTABLE (ROW () PASSING null COLUMNS v1 timestamp) AS f (v1, v2); +ERROR: XMLTABLE function has 1 columns available but 2 columns specified +-- XMLNAMESPACES tests +SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz), + '/zz:rows/zz:row' + PASSING '10' + COLUMNS a int PATH 'zz:a'); + a +---- + 10 +(1 row) + +CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' AS zz), + '/zz:rows/zz:row' + PASSING '10' + COLUMNS a int PATH 'zz:a'); +SELECT * FROM xmltableview2; + a +---- + 10 +(1 row) + +SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'), + '/rows/row' + PASSING '10' + COLUMNS a int PATH 'a'); +ERROR: DEFAULT namespace is not supported +SELECT * FROM XMLTABLE('.' 
+ PASSING '' + COLUMNS a text PATH 'foo/namespace::node()'); + a +-------------------------------------- + http://www.w3.org/XML/1998/namespace +(1 row) + +-- used in prepare statements +PREPARE pp AS +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +EXECUTE pp; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+--------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified +(6 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int); + COUNTRY_NAME | REGION_ID +--------------+----------- + India | 3 + Japan | 3 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY, "COUNTRY_NAME" text, "REGION_ID" int); + id | COUNTRY_NAME | REGION_ID +----+--------------+----------- + 1 | India | 3 + 2 | Japan | 3 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int); + id | COUNTRY_NAME | REGION_ID +----+--------------+----------- + 4 | India | 3 + 5 | Japan | 3 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id'); + id +---- + 4 + 5 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id FOR ORDINALITY); + id +---- + 1 + 2 +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH '.'); + id | COUNTRY_NAME | REGION_ID | rawdata +----+--------------+-----------+------------------------------------------------------------------ + 4 | India | 3 | + + | | | IN + + | | | India + + | | | 3 + + | | | + 5 | Japan | 3 | + + | | | JP + + | | | Japan + + | | | 3Sinzo Abe+ + | | | +(2 rows) + +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS id int PATH '@id', "COUNTRY_NAME" text, "REGION_ID" int, rawdata xml PATH './*'); + id | COUNTRY_NAME | REGION_ID | rawdata +----+--------------+-----------+----------------------------------------------------------------------------------------------------------------------------- + 4 | India | 3 | INIndia3 + 5 | Japan | 3 | JPJapan3Sinzo Abe +(2 rows) + +SELECT * FROM xmltable('/root' passing 'a1aa2a bbbbxxxcccc' COLUMNS element text); + element +---------------------- + a1aa2a bbbbxxxcccc +(1 row) + +SELECT * FROM xmltable('/root' passing 'a1aa2a bbbbxxxcccc' COLUMNS element text PATH 'element/text()'); -- should fail +ERROR: 
more than one value returned by column XPath expression +-- CDATA test +select * from xmltable('d/r' passing ' &"<>!foo]]>2' columns c text); + c +------------------------- + &"<>!foo + 2 +(2 rows) + +-- XML builtin entities +SELECT * FROM xmltable('/x/a' PASSING ''"&<>' COLUMNS ent text); + ent +----- + ' + " + & + < + > +(5 rows) + +SELECT * FROM xmltable('/x/a' PASSING ''"&<>' COLUMNS ent xml); + ent +------------------ + ' + " + & + < + > +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH ('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) +(7 rows) + +-- test qual +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; + COUNTRY_NAME | REGION_ID +--------------+----------- + Japan | 3 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* FROM xmldata, LATERAL xmltable('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]' PASSING data COLUMNS "COUNTRY_NAME" text, "REGION_ID" int) WHERE "COUNTRY_NAME" = 'Japan'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID" + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable"."COUNTRY_NAME", "xmltable"."REGION_ID" + Table Function Call: XMLTABLE(('/ROWS/ROW[COUNTRY_NAME="Japan" or COUNTRY_NAME="India"]'::text) PASSING (xmldata.data) COLUMNS "COUNTRY_NAME" text, "REGION_ID" integer) + Filter: ("xmltable"."COUNTRY_NAME" = 'Japan'::text) +(8 rows) + +-- should to work with more data +INSERT INTO xmldata VALUES(' + + CZ + Czech Republic + 2Milos Zeman + + + DE + Germany + 2 + + + FR + 
France + 2 + +'); +INSERT INTO xmldata VALUES(' + + EG + Egypt + 1 + + + SD + Sudan + 1 + +'); +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+----------------+------------+-----------+------+------+--------------- + 1 | 1 | Australia | AU | 3 | | | not specified + 2 | 2 | China | CN | 3 | | | not specified + 3 | 3 | HongKong | HK | 3 | | | not specified + 4 | 4 | India | IN | 3 | | | not specified + 5 | 5 | Japan | JP | 3 | | | Sinzo Abe + 6 | 6 | Singapore | SG | 3 | 791 | km | not specified + 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman + 11 | 2 | Germany | DE | 2 | | | not specified + 12 | 3 | France | FR | 2 | | | not specified + 20 | 1 | Egypt | EG | 1 | | | not specified + 21 | 2 | Sudan | SD | 1 | | | not specified +(11 rows) + +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') + WHERE region_id = 2; + id | _id | country_name | country_id | region_id | size | unit | premier_name +----+-----+----------------+------------+-----------+------+------+--------------- + 10 | 1 | Czech Republic | CZ | 2 | | | Milos Zeman + 11 | 2 | Germany | DE | 2 | | | not specified + 12 | 3 | France | FR | 2 | | | not specified +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE', + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified') + WHERE region_id = 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Nested Loop + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + -> Seq Scan on public.xmldata + Output: xmldata.data + -> Table Function Scan on "xmltable" + Output: "xmltable".id, "xmltable"._id, "xmltable".country_name, "xmltable".country_id, "xmltable".region_id, "xmltable".size, "xmltable".unit, "xmltable".premier_name + Table Function Call: XMLTABLE(('/ROWS/ROW'::text) PASSING (xmldata.data) COLUMNS id integer PATH ('@id'::text), _id FOR ORDINALITY, country_name text PATH ('COUNTRY_NAME'::text) NOT NULL, country_id text PATH ('COUNTRY_ID'::text), region_id integer PATH 
('REGION_ID'::text), size double precision PATH ('SIZE'::text), unit text PATH ('SIZE/@unit'::text), premier_name text DEFAULT ('not specified'::text) PATH ('PREMIER_NAME'::text)) + Filter: ("xmltable".region_id = 2) +(8 rows) + +-- should fail, NULL value +SELECT xmltable.* + FROM (SELECT data FROM xmldata) x, + LATERAL XMLTABLE('/ROWS/ROW' + PASSING data + COLUMNS id int PATH '@id', + _id FOR ORDINALITY, + country_name text PATH 'COUNTRY_NAME' NOT NULL, + country_id text PATH 'COUNTRY_ID', + region_id int PATH 'REGION_ID', + size float PATH 'SIZE' NOT NULL, + unit text PATH 'SIZE/@unit', + premier_name text PATH 'PREMIER_NAME' DEFAULT 'not specified'); +ERROR: null is not allowed in column "size" +-- if all is ok, then result is empty +-- one line xml test +WITH + x AS (SELECT proname, proowner, procost::numeric, pronargs, + array_to_string(proargnames,',') as proargnames, + case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes + FROM pg_proc WHERE proname = 'f_leak'), + y AS (SELECT xmlelement(name proc, + xmlforest(proname, proowner, + procost, pronargs, + proargnames, proargtypes)) as proc + FROM x), + z AS (SELECT xmltable.* + FROM y, + LATERAL xmltable('/proc' PASSING proc + COLUMNS proname name, + proowner oid, + procost float, + pronargs int, + proargnames text, + proargtypes text)) + SELECT * FROM z + EXCEPT SELECT * FROM x; + proname | proowner | procost | pronargs | proargnames | proargtypes +---------+----------+---------+----------+-------------+------------- +(0 rows) + +-- multi line xml test, result should be empty too +WITH + x AS (SELECT proname, proowner, procost::numeric, pronargs, + array_to_string(proargnames,',') as proargnames, + case when proargtypes <> '' then array_to_string(proargtypes::oid[],',') end as proargtypes + FROM pg_proc), + y AS (SELECT xmlelement(name data, + xmlagg(xmlelement(name proc, + xmlforest(proname, proowner, procost, + pronargs, proargnames, proargtypes)))) as doc + FROM x), + z AS (SELECT xmltable.* + FROM y, + LATERAL xmltable('/data/proc' PASSING doc + COLUMNS proname name, + proowner oid, + procost float, + pronargs int, + proargnames text, + proargtypes text)) + SELECT * FROM z + EXCEPT SELECT * FROM x; + proname | proowner | procost | pronargs | proargnames | proargtypes +---------+----------+---------+----------+-------------+------------- +(0 rows) + +CREATE TABLE xmltest2(x xml, _path text); +INSERT INTO xmltest2 VALUES('1', 'A'); +INSERT INTO xmltest2 VALUES('2', 'B'); +INSERT INTO xmltest2 VALUES('3', 'C'); +INSERT INTO xmltest2 VALUES('2', 'D'); +SELECT xmltable.* FROM xmltest2, LATERAL xmltable('/d/r' PASSING x COLUMNS a int PATH '' || lower(_path) || 'c'); + a +--- + 1 + 2 + 3 + 2 +(4 rows) + +SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH '.'); + a +--- + 1 + 2 + 3 + 2 +(4 rows) + +SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c') PASSING x COLUMNS a int PATH 'x' DEFAULT ascii(_path) - 54); + a +---- + 11 + 12 + 13 + 14 +(4 rows) + +-- XPath result can be boolean or number too +SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. 
= "a"', e integer PATH 'string-length(.)'); + a | b | c | d | e +----------+---+----+---+--- + a | a | hi | t | 1 +(1 row) + +\x +SELECT * FROM XMLTABLE('*' PASSING 'pre&deeppost' COLUMNS x xml PATH '/e/n2', y xml PATH '/'); +-[ RECORD 1 ]----------------------------------------------------------- +x | &deep +y | pre&deeppost+ + | + +\x +SELECT * FROM XMLTABLE('.' PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '""', b xml PATH '""'); + a | b +--------+-------------- + | <foo/> +(1 row) + diff --git a/src/test/regress/expected/xmlmap.out b/src/test/regress/expected/xmlmap.out new file mode 100644 index 0000000..ccc5460 --- /dev/null +++ b/src/test/regress/expected/xmlmap.out @@ -0,0 +1,1305 @@ +CREATE SCHEMA testxmlschema; +CREATE TABLE testxmlschema.test1 (a int, b text); +INSERT INTO testxmlschema.test1 VALUES (1, 'one'), (2, 'two'), (-1, null); +CREATE DOMAIN testxmldomain AS varchar; +CREATE TABLE testxmlschema.test2 (z int, y varchar(500), x char(6), + w numeric(9,2), v smallint, u bigint, t real, + s time, stz timetz, r timestamp, rtz timestamptz, q date, + p xml, o testxmldomain, n bool, m bytea, aaa text); +ALTER TABLE testxmlschema.test2 DROP COLUMN aaa; +INSERT INTO testxmlschema.test2 VALUES (55, 'abc', 'def', + 98.6, 2, 999, 0, + '21:07', '21:11 +05', '2009-06-08 21:07:30', '2009-06-08 21:07:30 -07', '2009-06-08', + NULL, 'ABC', true, 'XYZ'); +SELECT table_to_xml('testxmlschema.test1', false, false, ''); + table_to_xml +--------------------------------------------------------------- + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + +(1 row) + +SELECT table_to_xml('testxmlschema.test1', true, false, 'foo'); + table_to_xml +--------------------------------------------------------------------------- + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + + + +(1 row) + +SELECT table_to_xml('testxmlschema.test1', false, true, ''); + table_to_xml +--------------------------------------------------------------- + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + +(1 row) + +SELECT table_to_xml('testxmlschema.test1', true, true, ''); + table_to_xml +--------------------------------------------------------------- + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + +(1 row) + +SELECT table_to_xml('testxmlschema.test2', false, false, ''); + table_to_xml +--------------------------------------------------------------- + + + + + + + 55 + + abc + + def + + 98.60 + + 2 + + 999 + + 0 + + 21:07:00 + + 21:11:00+05 + + 2009-06-08T21:07:30 + + 2009-06-08T21:07:30-07:00 + + 2009-06-08 + + ABC + + true + + WFla + + + + + + + + +(1 row) + +SELECT table_to_xmlschema('testxmlschema.test1', false, false, ''); + table_to_xmlschema +----------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT table_to_xmlschema('testxmlschema.test1', true, false, ''); + table_to_xmlschema +----------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT table_to_xmlschema('testxmlschema.test1', false, true, 'foo'); + table_to_xmlschema +---------------------------------------------------------------------------------------------- + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT table_to_xmlschema('testxmlschema.test1', true, true, ''); + table_to_xmlschema +------------------------------------------------------------------------------------------------ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT table_to_xmlschema('testxmlschema.test2', false, false, ''); + table_to_xmlschema +---------------------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, false, ''); + table_to_xml_and_xmlschema +----------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + +(1 row) + +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, false, ''); + table_to_xml_and_xmlschema +----------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + + + +(1 row) + +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, true, ''); + table_to_xml_and_xmlschema +---------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + +(1 row) + +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, true, 'foo'); + table_to_xml_and_xmlschema +------------------------------------------------------------------------------------------------ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + +(1 row) + +SELECT query_to_xml('SELECT * FROM testxmlschema.test1', false, false, ''); + query_to_xml +--------------------------------------------------------------- + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + +
+ + +(1 row) + +SELECT query_to_xmlschema('SELECT * FROM testxmlschema.test1', false, false, ''); + query_to_xmlschema +---------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT query_to_xml_and_xmlschema('SELECT * FROM testxmlschema.test1', true, true, ''); + query_to_xml_and_xmlschema +------------------------------------------------------------------------------------------------ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + +(1 row) + +DECLARE xc CURSOR WITH HOLD FOR SELECT * FROM testxmlschema.test1 ORDER BY 1, 2; +SELECT cursor_to_xml('xc'::refcursor, 5, false, true, ''); + cursor_to_xml +------------------------------------------------------------- + + + -1 + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + +(1 row) + +SELECT cursor_to_xmlschema('xc'::refcursor, false, true, ''); + cursor_to_xmlschema +---------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +MOVE BACKWARD ALL IN xc; +SELECT cursor_to_xml('xc'::refcursor, 5, true, false, ''); + cursor_to_xml +--------------------------------------------------------------- + + + + + + + -1 + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + +
+ + +(1 row) + +SELECT cursor_to_xmlschema('xc'::refcursor, true, false, ''); + cursor_to_xmlschema +------------------------------------------------------------------------------------------------ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT schema_to_xml('testxmlschema', false, true, ''); + schema_to_xml +----------------------------------------------------------------------- + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + + + 55 + + abc + + def + + 98.60 + + 2 + + 999 + + 0 + + 21:07:00 + + 21:11:00+05 + + 2009-06-08T21:07:30 + + 2009-06-08T21:07:30-07:00 + + 2009-06-08 + + ABC + + true + + WFla + + + + + + + + + + +(1 row) + +SELECT schema_to_xml('testxmlschema', true, false, ''); + schema_to_xml +----------------------------------------------------------------------- + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + + + + + + + + + + + 55 + + abc + + def + + 98.60 + + 2 + + 999 + + 0 + + 21:07:00 + + 21:11:00+05 + + 2009-06-08T21:07:30 + + 2009-06-08T21:07:30-07:00 + + 2009-06-08 + +

+ + ABC + + true + + WFla + + + + + + + + + + + + +(1 row) + +SELECT schema_to_xmlschema('testxmlschema', false, true, ''); + schema_to_xmlschema +---------------------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT schema_to_xmlschema('testxmlschema', true, false, ''); + schema_to_xmlschema +---------------------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +(1 row) + +SELECT schema_to_xml_and_xmlschema('testxmlschema', true, true, 'foo'); + schema_to_xml_and_xmlschema +---------------------------------------------------------------------------------------------------------------------------- + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + one + + + + + + + + 2 + + two + + + + + + + + -1 + + + + + + + + + + + + 55 + + abc + + def + + 98.60 + + 2 + + 999 + + 0 + + 21:07:00 + + 21:11:00+05 + + 2009-06-08T21:07:30 + + 2009-06-08T21:07:30-07:00 + + 2009-06-08 + +

+ + ABC + + true + + WFla + + + + + + + + + + +(1 row) + +-- test that domains are transformed like their base types +CREATE DOMAIN testboolxmldomain AS bool; +CREATE DOMAIN testdatexmldomain AS date; +CREATE TABLE testxmlschema.test3 + AS SELECT true c1, + true::testboolxmldomain c2, + '2013-02-21'::date c3, + '2013-02-21'::testdatexmldomain c4; +SELECT xmlforest(c1, c2, c3, c4) FROM testxmlschema.test3; + xmlforest +------------------------------------------------------------------ + truetrue2013-02-212013-02-21 +(1 row) + +SELECT table_to_xml('testxmlschema.test3', true, true, ''); + table_to_xml +--------------------------------------------------------------- + + + true + + true + + 2013-02-21 + + 2013-02-21 + + + + + + +(1 row) + diff --git a/src/test/regress/expected/xmlmap_1.out b/src/test/regress/expected/xmlmap_1.out new file mode 100644 index 0000000..05c5d3e --- /dev/null +++ b/src/test/regress/expected/xmlmap_1.out @@ -0,0 +1,107 @@ +CREATE SCHEMA testxmlschema; +CREATE TABLE testxmlschema.test1 (a int, b text); +INSERT INTO testxmlschema.test1 VALUES (1, 'one'), (2, 'two'), (-1, null); +CREATE DOMAIN testxmldomain AS varchar; +CREATE TABLE testxmlschema.test2 (z int, y varchar(500), x char(6), + w numeric(9,2), v smallint, u bigint, t real, + s time, stz timetz, r timestamp, rtz timestamptz, q date, + p xml, o testxmldomain, n bool, m bytea, aaa text); +ALTER TABLE testxmlschema.test2 DROP COLUMN aaa; +INSERT INTO testxmlschema.test2 VALUES (55, 'abc', 'def', + 98.6, 2, 999, 0, + '21:07', '21:11 +05', '2009-06-08 21:07:30', '2009-06-08 21:07:30 -07', '2009-06-08', + NULL, 'ABC', true, 'XYZ'); +SELECT table_to_xml('testxmlschema.test1', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml('testxmlschema.test1', true, false, 'foo'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml('testxmlschema.test1', false, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml('testxmlschema.test1', true, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml('testxmlschema.test2', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xmlschema('testxmlschema.test1', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xmlschema('testxmlschema.test1', true, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xmlschema('testxmlschema.test1', false, true, 'foo'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xmlschema('testxmlschema.test1', true, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xmlschema('testxmlschema.test2', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. 
+SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', false, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT table_to_xml_and_xmlschema('testxmlschema.test1', true, true, 'foo'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT query_to_xml('SELECT * FROM testxmlschema.test1', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT query_to_xmlschema('SELECT * FROM testxmlschema.test1', false, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT query_to_xml_and_xmlschema('SELECT * FROM testxmlschema.test1', true, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +DECLARE xc CURSOR WITH HOLD FOR SELECT * FROM testxmlschema.test1 ORDER BY 1, 2; +SELECT cursor_to_xml('xc'::refcursor, 5, false, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT cursor_to_xmlschema('xc'::refcursor, false, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +MOVE BACKWARD ALL IN xc; +SELECT cursor_to_xml('xc'::refcursor, 5, true, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT cursor_to_xmlschema('xc'::refcursor, true, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT schema_to_xml('testxmlschema', false, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT schema_to_xml('testxmlschema', true, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT schema_to_xmlschema('testxmlschema', false, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT schema_to_xmlschema('testxmlschema', true, false, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +SELECT schema_to_xml_and_xmlschema('testxmlschema', true, true, 'foo'); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. +-- test that domains are transformed like their base types +CREATE DOMAIN testboolxmldomain AS bool; +CREATE DOMAIN testdatexmldomain AS date; +CREATE TABLE testxmlschema.test3 + AS SELECT true c1, + true::testboolxmldomain c2, + '2013-02-21'::date c3, + '2013-02-21'::testdatexmldomain c4; +SELECT xmlforest(c1, c2, c3, c4) FROM testxmlschema.test3; +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. 
+SELECT table_to_xml('testxmlschema.test3', true, true, ''); +ERROR: unsupported XML feature +DETAIL: This functionality requires the server to be built with libxml support. -- cgit v1.2.3
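The XMLTABLE tests in the xml.out hunk above all follow the same shape: a row-generating XPath expression, a PASSING clause that supplies the XML document, and a COLUMNS list mapping XPath results onto SQL types, optionally with FOR ORDINALITY, NOT NULL, and DEFAULT clauses. A minimal self-contained sketch of that shape follows; the table and element names are illustrative only (not taken from the regression suite), and it assumes a server built with libxml support:

-- illustrative sketch, not part of the regression tests
CREATE TABLE docs (data xml);
INSERT INTO docs VALUES (
  '<ROWS><ROW id="1"><NAME>example</NAME></ROW><ROW id="2"/></ROWS>');
SELECT t.*
  FROM docs,
       XMLTABLE('/ROWS/ROW' PASSING data
                COLUMNS id   int  PATH '@id',
                        ord  FOR ORDINALITY,
                        name text PATH 'NAME' DEFAULT 'not specified') AS t;
-- expected on a libxml-enabled build: (1, 1, 'example') and (2, 2, 'not specified');
-- the DEFAULT fires because the second ROW has no NAME child, mirroring how
-- premier_name defaults to 'not specified' in the tests above.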